From 974ee925df1962f559d6cb43318ee301e330e8f2 Mon Sep 17 00:00:00 2001 From: HemangChothani <50404902+HemangChothani@users.noreply.github.com> Date: Wed, 19 Feb 2020 05:09:44 +0530 Subject: [PATCH 01/14] feat(spanner): exporting transaction._rolled_back as transaction.rolled_back (#16) Co-authored-by: larkee <31196561+larkee@users.noreply.github.com> --- google/cloud/spanner_v1/pool.py | 2 +- google/cloud/spanner_v1/session.py | 2 +- google/cloud/spanner_v1/transaction.py | 8 ++++---- tests/unit/test_pool.py | 2 +- tests/unit/test_session.py | 6 +++--- tests/unit/test_transaction.py | 16 ++++++++-------- 6 files changed, 18 insertions(+), 18 deletions(-) diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py index ce7a196b6b..1b23575faa 100644 --- a/google/cloud/spanner_v1/pool.py +++ b/google/cloud/spanner_v1/pool.py @@ -503,7 +503,7 @@ def put(self, session): raise queue.Full txn = session._transaction - if txn is None or txn.committed or txn._rolled_back: + if txn is None or txn.committed or txn.rolled_back: session.transaction() self._pending_sessions.put(session) else: diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index f8e7e88d97..863053d4ef 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -255,7 +255,7 @@ def transaction(self): raise ValueError("Session has not been created.") if self._transaction is not None: - self._transaction._rolled_back = True + self._transaction.rolled_back = True del self._transaction txn = self._transaction = Transaction(self) diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index 29a2e5f786..55e2837df4 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -36,7 +36,7 @@ class Transaction(_SnapshotBase, _BatchBase): committed = None """Timestamp at which the transaction was successfully committed.""" - _rolled_back = False + rolled_back = 
False _multi_use = True _execute_sql_count = 0 @@ -58,7 +58,7 @@ def _check_state(self): if self.committed is not None: raise ValueError("Transaction is already committed") - if self._rolled_back: + if self.rolled_back: raise ValueError("Transaction is already rolled back") def _make_txn_selector(self): @@ -85,7 +85,7 @@ def begin(self): if self.committed is not None: raise ValueError("Transaction already committed") - if self._rolled_back: + if self.rolled_back: raise ValueError("Transaction is already rolled back") database = self._session._database @@ -105,7 +105,7 @@ def rollback(self): api = database.spanner_api metadata = _metadata_with_prefix(database.name) api.rollback(self._session.name, self._transaction_id, metadata=metadata) - self._rolled_back = True + self.rolled_back = True del self._session._transaction def commit(self): diff --git a/tests/unit/test_pool.py b/tests/unit/test_pool.py index 2d4a9d8822..b6786a7f0e 100644 --- a/tests/unit/test_pool.py +++ b/tests/unit/test_pool.py @@ -837,7 +837,7 @@ def _make_transaction(*args, **kw): txn = mock.create_autospec(Transaction)(*args, **kw) txn.committed = None - txn._rolled_back = False + txn.rolled_back = False return txn diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 98d98deaba..1eff634af0 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -463,7 +463,7 @@ def test_transaction_w_existing_txn(self): another = session.transaction() # invalidates existing txn self.assertIs(session._transaction, another) - self.assertTrue(existing._rolled_back) + self.assertTrue(existing.rolled_back) def test_run_in_transaction_callback_raises_non_gax_error(self): from google.cloud.spanner_v1.proto.transaction_pb2 import ( @@ -506,7 +506,7 @@ def unit_of_work(txn, *args, **kw): txn, args, kw = called_with[0] self.assertIsInstance(txn, Transaction) self.assertIsNone(txn.committed) - self.assertTrue(txn._rolled_back) + self.assertTrue(txn.rolled_back) self.assertEqual(args, 
()) self.assertEqual(kw, {}) @@ -561,7 +561,7 @@ def unit_of_work(txn, *args, **kw): txn, args, kw = called_with[0] self.assertIsInstance(txn, Transaction) self.assertIsNone(txn.committed) - self.assertFalse(txn._rolled_back) + self.assertFalse(txn.rolled_back) self.assertEqual(args, ()) self.assertEqual(kw, {}) diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index cceff89fca..9ef13c2ab6 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -76,7 +76,7 @@ def test_ctor_defaults(self): self.assertIs(transaction._session, session) self.assertIsNone(transaction._transaction_id) self.assertIsNone(transaction.committed) - self.assertFalse(transaction._rolled_back) + self.assertFalse(transaction.rolled_back) self.assertTrue(transaction._multi_use) self.assertEqual(transaction._execute_sql_count, 0) @@ -98,7 +98,7 @@ def test__check_state_already_rolled_back(self): session = _Session() transaction = self._make_one(session) transaction._transaction_id = self.TRANSACTION_ID - transaction._rolled_back = True + transaction.rolled_back = True with self.assertRaises(ValueError): transaction._check_state() @@ -125,7 +125,7 @@ def test_begin_already_begun(self): def test_begin_already_rolled_back(self): session = _Session() transaction = self._make_one(session) - transaction._rolled_back = True + transaction.rolled_back = True with self.assertRaises(ValueError): transaction.begin() @@ -187,7 +187,7 @@ def test_rollback_already_rolled_back(self): session = _Session() transaction = self._make_one(session) transaction._transaction_id = self.TRANSACTION_ID - transaction._rolled_back = True + transaction.rolled_back = True with self.assertRaises(ValueError): transaction.rollback() @@ -203,7 +203,7 @@ def test_rollback_w_other_error(self): with self.assertRaises(RuntimeError): transaction.rollback() - self.assertFalse(transaction._rolled_back) + self.assertFalse(transaction.rolled_back) def test_rollback_ok(self): from 
google.protobuf.empty_pb2 import Empty @@ -218,7 +218,7 @@ def test_rollback_ok(self): transaction.rollback() - self.assertTrue(transaction._rolled_back) + self.assertTrue(transaction.rolled_back) self.assertIsNone(session._transaction) session_id, txn_id, metadata = api._rolled_back @@ -244,7 +244,7 @@ def test_commit_already_rolled_back(self): session = _Session() transaction = self._make_one(session) transaction._transaction_id = self.TRANSACTION_ID - transaction._rolled_back = True + transaction.rolled_back = True with self.assertRaises(ValueError): transaction.commit() @@ -546,7 +546,7 @@ def test_context_mgr_failure(self): raise Exception("bail out") self.assertEqual(transaction.committed, None) - self.assertTrue(transaction._rolled_back) + self.assertTrue(transaction.rolled_back) self.assertEqual(len(transaction._mutations), 1) self.assertEqual(api._committed, None) From 250f19e06b88b3d60709cbca4caad6882435c723 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Tue, 18 Feb 2020 15:59:50 -0800 Subject: [PATCH 02/14] [CHANGE ME] Re-generated to pick up changes in the API or client library generator. (#9) Co-authored-by: larkee <31196561+larkee@users.noreply.github.com> --- google/cloud/spanner_v1/proto/query_plan_pb2.py | 4 ++-- synth.metadata | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/google/cloud/spanner_v1/proto/query_plan_pb2.py b/google/cloud/spanner_v1/proto/query_plan_pb2.py index bc715b4549..4602cd6b15 100644 --- a/google/cloud/spanner_v1/proto/query_plan_pb2.py +++ b/google/cloud/spanner_v1/proto/query_plan_pb2.py @@ -511,8 +511,8 @@ ), DESCRIPTOR=_PLANNODE_SHORTREPRESENTATION, __module__="google.cloud.spanner_v1.proto.query_plan_pb2", - __doc__="""Condensed representation of a node and its subtree. Only present for - ``SCALAR`` [PlanNode(s)][google.spanner.v1.PlanNode]. + __doc__="""Condensed representation of a node and its subtree. Only + present for ``SCALAR`` [PlanNode(s)][google.spanner.v1.PlanNode]. 
Attributes: diff --git a/synth.metadata b/synth.metadata index 19a28d2922..6e39ad3e0a 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,5 +1,5 @@ { - "updateTime": "2020-01-31T21:10:03.527484Z", + "updateTime": "2020-02-01T13:21:36.175336Z", "sources": [ { "generator": { @@ -12,9 +12,9 @@ "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "2717b8a1c762b26911b45ecc2e4ee01d98401b28", - "internalRef": "292555664", - "log": "2717b8a1c762b26911b45ecc2e4ee01d98401b28\nFix dataproc artman client library generation.\n\nPiperOrigin-RevId: 292555664\n\n7ac66d9be8a7d7de4f13566d8663978c9ee9dcd7\nAdd Dataproc Autoscaling API to V1.\n\nPiperOrigin-RevId: 292450564\n\n5d932b2c1be3a6ef487d094e3cf5c0673d0241dd\n- Improve documentation\n- Add a client_id field to StreamingPullRequest\n\nPiperOrigin-RevId: 292434036\n\neaff9fa8edec3e914995ce832b087039c5417ea7\nmonitoring: v3 publish annotations and client retry config\n\nPiperOrigin-RevId: 292425288\n\n70958bab8c5353870d31a23fb2c40305b050d3fe\nBigQuery Storage Read API v1 clients.\n\nPiperOrigin-RevId: 292407644\n\n7a15e7fe78ff4b6d5c9606a3264559e5bde341d1\nUpdate backend proto for Google Cloud Endpoints\n\nPiperOrigin-RevId: 292391607\n\n3ca2c014e24eb5111c8e7248b1e1eb833977c83d\nbazel: Add --flaky_test_attempts=3 argument to prevent CI failures caused by flaky tests\n\nPiperOrigin-RevId: 292382559\n\n9933347c1f677e81e19a844c2ef95bfceaf694fe\nbazel:Integrate latest protoc-java-resource-names-plugin changes (fix for PyYAML dependency in bazel rules)\n\nPiperOrigin-RevId: 292376626\n\nb835ab9d2f62c88561392aa26074c0b849fb0bd3\nasset: v1p2beta1 add client config annotations\n\n* remove unintentionally exposed RPCs\n* remove messages relevant to removed RPCs\n\nPiperOrigin-RevId: 292369593\n\n" + "sha": "b5cbe4a4ba64ab19e6627573ff52057a1657773d", + "internalRef": "292647187", + "log": "b5cbe4a4ba64ab19e6627573ff52057a1657773d\nSecurityCenter v1p1beta1: move file-level option on top to 
workaround protobuf.js bug.\n\nPiperOrigin-RevId: 292647187\n\nb224b317bf20c6a4fbc5030b4a969c3147f27ad3\nAdds API definitions for bigqueryreservation v1beta1.\n\nPiperOrigin-RevId: 292634722\n\nc1468702f9b17e20dd59007c0804a089b83197d2\nSynchronize new proto/yaml changes.\n\nPiperOrigin-RevId: 292626173\n\nffdfa4f55ab2f0afc11d0eb68f125ccbd5e404bd\nvision: v1p3beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292605599\n\n78f61482cd028fc1d9892aa5d89d768666a954cd\nvision: v1p1beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292605125\n\n60bb5a294a604fd1778c7ec87b265d13a7106171\nvision: v1p2beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292604980\n\n3bcf7aa79d45eb9ec29ab9036e9359ea325a7fc3\nvision: v1p4beta1 publish annotations and retry config\n\nPiperOrigin-RevId: 292604656\n\n" } }, { From b315593bd3e473d96cc3033f5bbf0da7487e38eb Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Wed, 19 Feb 2020 14:02:26 +1300 Subject: [PATCH 03/14] feat(spanner): add emulator support (#14) * emulator support implementation * facilitate running system test against an emulator * add tests * formatting * remove brittle error string checks * add skips for tests when emulator support is used * fix lint errors --- google/cloud/spanner_v1/client.py | 75 ++++++++++++++++++++++---- google/cloud/spanner_v1/database.py | 16 +++++- google/cloud/spanner_v1/instance.py | 2 + noxfile.py | 10 ++-- tests/system/test_system.py | 25 ++++----- tests/unit/test_client.py | 81 ++++++++++++++++++++++++++++- tests/unit/test_database.py | 26 ++++++++- 7 files changed, 202 insertions(+), 33 deletions(-) diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index 264731178e..c7b331adc0 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -23,11 +23,21 @@ * a :class:`~google.cloud.spanner_v1.instance.Instance` owns a 
:class:`~google.cloud.spanner_v1.database.Database` """ +import grpc +import os import warnings from google.api_core.gapic_v1 import client_info import google.api_core.client_options +from google.cloud.spanner_admin_instance_v1.gapic.transports import ( + instance_admin_grpc_transport, +) + +from google.cloud.spanner_admin_database_v1.gapic.transports import ( + database_admin_grpc_transport, +) + # pylint: disable=line-too-long from google.cloud.spanner_admin_database_v1.gapic.database_admin_client import ( # noqa DatabaseAdminClient, @@ -45,6 +55,12 @@ from google.cloud.spanner_v1.instance import Instance _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) +EMULATOR_ENV_VAR = "SPANNER_EMULATOR_HOST" +_EMULATOR_HOST_HTTP_SCHEME = ( + "%s contains a http scheme. When used with a scheme it may cause gRPC's " + "DNS resolver to endlessly attempt to resolve. %s is intended to be used " + "without a scheme: ex %s=localhost:8080." +) % ((EMULATOR_ENV_VAR,) * 3) SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" _USER_AGENT_DEPRECATED = ( "The 'user_agent' argument to 'Client' is deprecated / unused. " @@ -52,6 +68,10 @@ ) +def _get_spanner_emulator_host(): + return os.getenv(EMULATOR_ENV_VAR) + + class InstanceConfig(object): """Named configurations for Spanner instances. @@ -156,6 +176,12 @@ def __init__( warnings.warn(_USER_AGENT_DEPRECATED, DeprecationWarning, stacklevel=2) self.user_agent = user_agent + if _get_spanner_emulator_host() is not None and ( + "http://" in _get_spanner_emulator_host() + or "https://" in _get_spanner_emulator_host() + ): + warnings.warn(_EMULATOR_HOST_HTTP_SCHEME) + @property def credentials(self): """Getter for client's credentials. 
@@ -189,22 +215,42 @@ def project_name(self): def instance_admin_api(self): """Helper for session-related API calls.""" if self._instance_admin_api is None: - self._instance_admin_api = InstanceAdminClient( - credentials=self.credentials, - client_info=self._client_info, - client_options=self._client_options, - ) + if _get_spanner_emulator_host() is not None: + transport = instance_admin_grpc_transport.InstanceAdminGrpcTransport( + channel=grpc.insecure_channel(_get_spanner_emulator_host()) + ) + self._instance_admin_api = InstanceAdminClient( + client_info=self._client_info, + client_options=self._client_options, + transport=transport, + ) + else: + self._instance_admin_api = InstanceAdminClient( + credentials=self.credentials, + client_info=self._client_info, + client_options=self._client_options, + ) return self._instance_admin_api @property def database_admin_api(self): """Helper for session-related API calls.""" if self._database_admin_api is None: - self._database_admin_api = DatabaseAdminClient( - credentials=self.credentials, - client_info=self._client_info, - client_options=self._client_options, - ) + if _get_spanner_emulator_host() is not None: + transport = database_admin_grpc_transport.DatabaseAdminGrpcTransport( + channel=grpc.insecure_channel(_get_spanner_emulator_host()) + ) + self._database_admin_api = DatabaseAdminClient( + client_info=self._client_info, + client_options=self._client_options, + transport=transport, + ) + else: + self._database_admin_api = DatabaseAdminClient( + credentials=self.credentials, + client_info=self._client_info, + client_options=self._client_options, + ) return self._database_admin_api def copy(self): @@ -288,7 +334,14 @@ def instance( :rtype: :class:`~google.cloud.spanner_v1.instance.Instance` :returns: an instance owned by this client. 
""" - return Instance(instance_id, self, configuration_name, node_count, display_name) + return Instance( + instance_id, + self, + configuration_name, + node_count, + display_name, + _get_spanner_emulator_host(), + ) def list_instances(self, filter_="", page_size=None, page_token=None): """List instances for the client's project. diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 49abe919d5..f5ea3e46dd 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -16,6 +16,7 @@ import copy import functools +import grpc import os import re import threading @@ -33,6 +34,7 @@ from google.cloud.spanner_v1._helpers import _metadata_with_prefix from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient +from google.cloud.spanner_v1.gapic.transports import spanner_grpc_transport from google.cloud.spanner_v1.keyset import KeySet from google.cloud.spanner_v1.pool import BurstyPool from google.cloud.spanner_v1.pool import SessionCheckout @@ -190,11 +192,21 @@ def ddl_statements(self): def spanner_api(self): """Helper for session-related API calls.""" if self._spanner_api is None: + client_info = self._instance._client._client_info + client_options = self._instance._client._client_options + if self._instance.emulator_host is not None: + transport = spanner_grpc_transport.SpannerGrpcTransport( + channel=grpc.insecure_channel(self._instance.emulator_host) + ) + self._spanner_api = SpannerClient( + client_info=client_info, + client_options=client_options, + transport=transport, + ) + return self._spanner_api credentials = self._instance._client.credentials if isinstance(credentials, google.auth.credentials.Scoped): credentials = credentials.with_scopes((SPANNER_DATA_SCOPE,)) - client_info = self._instance._client._client_info - client_options = self._instance._client._client_options if ( 
os.getenv("GOOGLE_CLOUD_SPANNER_ENABLE_RESOURCE_BASED_ROUTING") == "true" diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index 83a600bd10..05e596622c 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -76,12 +76,14 @@ def __init__( configuration_name=None, node_count=DEFAULT_NODE_COUNT, display_name=None, + emulator_host=None, ): self.instance_id = instance_id self._client = client self.configuration_name = configuration_name self.node_count = node_count self.display_name = display_name or instance_id + self.emulator_host = emulator_host def _update_from_pb(self, instance_pb): """Refresh self from the server-provided protobuf. diff --git a/noxfile.py b/noxfile.py index 200b68e04c..22f328c4af 100644 --- a/noxfile.py +++ b/noxfile.py @@ -94,9 +94,13 @@ def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") system_test_folder_path = os.path.join("tests", "system") - # Sanity check: Only run tests if the environment variable is set. - if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", ""): - session.skip("Credentials must be set via environment variable") + # Sanity check: Only run tests if either credentials or emulator host is set. 
+ if not os.environ.get("GOOGLE_APPLICATION_CREDENTIALS", "") and not os.environ.get( + "SPANNER_EMULATOR_HOST", "" + ): + session.skip( + "Credentials or emulator host must be set via environment variable" + ) system_test_exists = os.path.exists(system_test_path) system_test_folder_exists = os.path.exists(system_test_folder_path) diff --git a/tests/system/test_system.py b/tests/system/test_system.py index ae688029b4..a8d349e677 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -56,6 +56,7 @@ CREATE_INSTANCE = os.getenv("GOOGLE_CLOUD_TESTS_CREATE_SPANNER_INSTANCE") is not None +USE_EMULATOR = os.getenv("SPANNER_EMULATOR_HOST") is not None USE_RESOURCE_ROUTING = ( os.getenv("GOOGLE_CLOUD_SPANNER_ENABLE_RESOURCE_BASED_ROUTING") == "true" ) @@ -105,10 +106,10 @@ def setUpModule(): EXISTING_INSTANCES[:] = instances if CREATE_INSTANCE: - - # Defend against back-end returning configs for regions we aren't - # actually allowed to use. - configs = [config for config in configs if "-us-" in config.name] + if not USE_EMULATOR: + # Defend against back-end returning configs for regions we aren't + # actually allowed to use. 
+ configs = [config for config in configs if "-us-" in config.name] if not configs: raise ValueError("List instance configs failed in module set up.") @@ -185,6 +186,7 @@ def test_create_instance(self): self.assertEqual(instance, instance_alt) self.assertEqual(instance.display_name, instance_alt.display_name) + @unittest.skipIf(USE_EMULATOR, "Skipping updating instance") def test_update_instance(self): OLD_DISPLAY_NAME = Config.INSTANCE.display_name NEW_DISPLAY_NAME = "Foo Bar Baz" @@ -382,12 +384,9 @@ def test_table_not_found(self): temp_db_id, ddl_statements=[create_table, index] ) self.to_delete.append(temp_db) - with self.assertRaises(exceptions.NotFound) as exc_info: + with self.assertRaises(exceptions.NotFound): temp_db.create() - expected = "Table not found: {0}".format(incorrect_table) - self.assertEqual(exc_info.exception.args, (expected,)) - @pytest.mark.skip( reason=( "update_dataset_ddl() has a flaky timeout" @@ -993,6 +992,7 @@ def test_transaction_batch_update_wo_statements(self): with self.assertRaises(InvalidArgument): transaction.batch_update([]) + @unittest.skipIf(USE_EMULATOR, "Skipping partitioned DML") def test_execute_partitioned_dml(self): # [START spanner_test_dml_partioned_dml_update] retry = RetryInstanceState(_has_all_ddl) @@ -1625,6 +1625,7 @@ def test_read_with_range_keys_and_index_open_open(self): expected = [data[keyrow]] + data[start + 1 : end] self.assertEqual(rows, expected) + @unittest.skipIf(USE_EMULATOR, "Skipping partitioned reads") def test_partition_read_w_index(self): row_count = 10 columns = self.COLUMNS[1], self.COLUMNS[2] @@ -1724,16 +1725,11 @@ def test_invalid_type(self): batch.insert(table, columns, valid_input) invalid_input = ((0, ""),) - with self.assertRaises(exceptions.FailedPrecondition) as exc_info: + with self.assertRaises(exceptions.FailedPrecondition): with self._db.batch() as batch: batch.delete(table, self.ALL) batch.insert(table, columns, invalid_input) - error_msg = ( - "Invalid value for column value in 
table " "counters: Expected INT64." - ) - self.assertIn(error_msg, str(exc_info.exception)) - def test_execute_sql_select_1(self): self._db.snapshot(multi_use=True) @@ -2111,6 +2107,7 @@ def test_execute_sql_returning_transfinite_floats(self): # NaNs cannot be searched for by equality. self.assertTrue(math.isnan(float_array[2])) + @unittest.skipIf(USE_EMULATOR, "Skipping partitioned queries") def test_partition_query(self): row_count = 40 sql = "SELECT * FROM {}".format(self.TABLE) diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 35e63bfd68..2e04537e02 100644 --- a/tests/unit/test_client.py +++ b/tests/unit/test_client.py @@ -98,6 +98,17 @@ def _constructor_test_helper( expected_client_options.api_endpoint, ) + @mock.patch("google.cloud.spanner_v1.client.os.getenv") + @mock.patch("warnings.warn") + def test_constructor_emulator_host_warning(self, mock_warn, mock_os): + from google.cloud.spanner_v1 import client as MUT + + expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) + creds = _make_credentials() + mock_os.return_value = "http://emulator.host.com" + self._constructor_test_helper(expected_scopes, creds) + mock_warn.assert_called_once_with(MUT._EMULATOR_HOST_HTTP_SCHEME) + def test_constructor_default_scopes(self): from google.cloud.spanner_v1 import client as MUT @@ -164,7 +175,8 @@ def test_constructor_custom_client_options_dict(self): expected_scopes, creds, client_options={"api_endpoint": "endpoint"} ) - def test_instance_admin_api(self): + @mock.patch("google.cloud.spanner_v1.client.os.getenv") + def test_instance_admin_api(self, mock_getenv): from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE credentials = _make_credentials() @@ -178,6 +190,7 @@ def test_instance_admin_api(self): ) expected_scopes = (SPANNER_ADMIN_SCOPE,) + mock_getenv.return_value = None inst_module = "google.cloud.spanner_v1.client.InstanceAdminClient" with mock.patch(inst_module) as instance_admin_client: api = client.instance_admin_api @@ -196,7 +209,39 @@ 
def test_instance_admin_api(self): credentials.with_scopes.assert_called_once_with(expected_scopes) - def test_database_admin_api(self): + @mock.patch("google.cloud.spanner_v1.client.os.getenv") + def test_instance_admin_api_emulator(self, mock_getenv): + credentials = _make_credentials() + client_info = mock.Mock() + client_options = mock.Mock() + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + client_info=client_info, + client_options=client_options, + ) + + mock_getenv.return_value = "true" + inst_module = "google.cloud.spanner_v1.client.InstanceAdminClient" + with mock.patch(inst_module) as instance_admin_client: + api = client.instance_admin_api + + self.assertIs(api, instance_admin_client.return_value) + + # API instance is cached + again = client.instance_admin_api + self.assertIs(again, api) + + self.assertEqual(len(instance_admin_client.call_args_list), 1) + called_args, called_kw = instance_admin_client.call_args + self.assertEqual(called_args, ()) + self.assertEqual(called_kw["client_info"], client_info) + self.assertEqual(called_kw["client_options"], client_options) + self.assertIn("transport", called_kw) + self.assertNotIn("credentials", called_kw) + + @mock.patch("google.cloud.spanner_v1.client.os.getenv") + def test_database_admin_api(self, mock_getenv): from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE credentials = _make_credentials() @@ -210,6 +255,7 @@ def test_database_admin_api(self): ) expected_scopes = (SPANNER_ADMIN_SCOPE,) + mock_getenv.return_value = None db_module = "google.cloud.spanner_v1.client.DatabaseAdminClient" with mock.patch(db_module) as database_admin_client: api = client.database_admin_api @@ -228,6 +274,37 @@ def test_database_admin_api(self): credentials.with_scopes.assert_called_once_with(expected_scopes) + @mock.patch("google.cloud.spanner_v1.client.os.getenv") + def test_database_admin_api_emulator(self, mock_getenv): + credentials = _make_credentials() + client_info = 
mock.Mock() + client_options = mock.Mock() + client = self._make_one( + project=self.PROJECT, + credentials=credentials, + client_info=client_info, + client_options=client_options, + ) + + mock_getenv.return_value = "true" + db_module = "google.cloud.spanner_v1.client.DatabaseAdminClient" + with mock.patch(db_module) as database_admin_client: + api = client.database_admin_api + + self.assertIs(api, database_admin_client.return_value) + + # API instance is cached + again = client.database_admin_api + self.assertIs(again, api) + + self.assertEqual(len(database_admin_client.call_args_list), 1) + called_args, called_kw = database_admin_client.call_args + self.assertEqual(called_args, ()) + self.assertEqual(called_kw["client_info"], client_info) + self.assertEqual(called_kw["client_options"], client_options) + self.assertIn("transport", called_kw) + self.assertNotIn("credentials", called_kw) + def test_copy(self): credentials = _make_credentials() # Make sure it "already" is scoped. diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 0f4071d868..7bf14de751 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -535,6 +535,27 @@ def test_spanner_api_resource_routing_error(self): client.instance_admin_api.get_instance.assert_called_once() + def test_spanner_api_w_emulator_host(self): + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client, emulator_host="host") + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + + patch = mock.patch("google.cloud.spanner_v1.database.SpannerClient") + with patch as spanner_client: + api = database.spanner_api + + self.assertIs(api, spanner_client.return_value) + + # API instance is cached + again = database.spanner_api + self.assertIs(again, api) + + self.assertEqual(len(spanner_client.call_args_list), 1) + called_args, called_kw = spanner_client.call_args + self.assertEqual(called_args, ()) + self.assertIsNotNone(called_kw["transport"]) 
+ def test___eq__(self): instance = _Instance(self.INSTANCE_NAME) pool1, pool2 = _Pool(), _Pool() @@ -1765,13 +1786,16 @@ def __init__(self, project=TestDatabase.PROJECT_ID): self.project_name = "projects/" + self.project self._endpoint_cache = {} self.instance_admin_api = _make_instance_api() + self._client_info = mock.Mock() + self._client_options = mock.Mock() class _Instance(object): - def __init__(self, name, client=None): + def __init__(self, name, client=None, emulator_host=None): self.name = name self.instance_id = name.rsplit("/", 1)[1] self._client = client + self.emulator_host = emulator_host class _Database(object): From 997a03477b07ec39c718480d9bfe729404bf5748 Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Wed, 19 Feb 2020 16:06:14 +1300 Subject: [PATCH 04/14] fix: remove erroneous timeouts for batch_create_session calls (#18) * fix: remove erroneous timeouts for batch_create_session calls in session pools * blacken --- google/cloud/spanner_v1/pool.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/google/cloud/spanner_v1/pool.py b/google/cloud/spanner_v1/pool.py index 1b23575faa..cf3413ceb1 100644 --- a/google/cloud/spanner_v1/pool.py +++ b/google/cloud/spanner_v1/pool.py @@ -171,10 +171,7 @@ def bind(self, database): while not self._sessions.full(): resp = api.batch_create_sessions( - database.name, - self.size - self._sessions.qsize(), - timeout=self.default_timeout, - metadata=metadata, + database.name, self.size - self._sessions.qsize(), metadata=metadata ) for session_pb in resp.session: session = self._new_session() @@ -365,10 +362,7 @@ def bind(self, database): while created_session_count < self.size: resp = api.batch_create_sessions( - database.name, - self.size - created_session_count, - timeout=self.default_timeout, - metadata=metadata, + database.name, self.size - created_session_count, metadata=metadata ) for session_pb in resp.session: session = self._new_session() From 
a4c170c04f0c5daa57eb0f657bd1c77243a2e77a Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Wed, 19 Feb 2020 17:27:48 +1300 Subject: [PATCH 05/14] refactor: remove unnecessary import (#15) --- tests/system/test_system.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tests/system/test_system.py b/tests/system/test_system.py index a8d349e677..926cbb4b82 100644 --- a/tests/system/test_system.py +++ b/tests/system/test_system.py @@ -23,7 +23,6 @@ import unittest import uuid -import pytest import grpc from google.rpc import code_pb2 @@ -387,8 +386,8 @@ def test_table_not_found(self): with self.assertRaises(exceptions.NotFound): temp_db.create() - @pytest.mark.skip( - reason=( + @unittest.skip( + ( "update_dataset_ddl() has a flaky timeout" "https://github.com/GoogleCloudPlatform/google-cloud-python/issues/" "5629" From d0f505c7b476416864f7c84100692b108c7db7fc Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Thu, 20 Feb 2020 19:04:15 +1300 Subject: [PATCH 06/14] chore!: remove Python 2.7 from testing (#22) * chore: remove Python 2.7 from testing * update README * remove Python 2 from PyPi page --- README.rst | 2 +- noxfile.py | 4 ++-- setup.py | 4 +--- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/README.rst b/README.rst index d18dbcfbc6..650a2d42d8 100644 --- a/README.rst +++ b/README.rst @@ -60,7 +60,7 @@ Python >= 3.5 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support will be removed on January 1, 2020. +Python == 2.7. Python 2.7 support has been removed as of January 1, 2020. 
Mac/Linux diff --git a/noxfile.py b/noxfile.py index 22f328c4af..c0de8948fb 100644 --- a/noxfile.py +++ b/noxfile.py @@ -83,13 +83,13 @@ def default(session): ) -@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=["3.5", "3.6", "3.7", "3.8"]) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python=["2.7", "3.7"]) +@nox.session(python="3.7") def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") diff --git a/setup.py b/setup.py index cc86f650ea..8f459ca216 100644 --- a/setup.py +++ b/setup.py @@ -70,8 +70,6 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", @@ -84,7 +82,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", + python_requires=">=3.5", include_package_data=True, zip_safe=False, ) From 5e7a106455963067f491754126f575aeb0b2fff9 Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Fri, 21 Feb 2020 11:30:47 +1300 Subject: [PATCH 07/14] Revert "chore!: remove Python 2.7 from testing (#22)" (#24) This reverts commit d0f505c7b476416864f7c84100692b108c7db7fc. --- README.rst | 2 +- noxfile.py | 4 ++-- setup.py | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/README.rst b/README.rst index 650a2d42d8..d18dbcfbc6 100644 --- a/README.rst +++ b/README.rst @@ -60,7 +60,7 @@ Python >= 3.5 Deprecated Python Versions ^^^^^^^^^^^^^^^^^^^^^^^^^^ -Python == 2.7. Python 2.7 support has been removed as of January 1, 2020. +Python == 2.7. Python 2.7 support will be removed on January 1, 2020. 
Mac/Linux diff --git a/noxfile.py b/noxfile.py index c0de8948fb..22f328c4af 100644 --- a/noxfile.py +++ b/noxfile.py @@ -83,13 +83,13 @@ def default(session): ) -@nox.session(python=["3.5", "3.6", "3.7", "3.8"]) +@nox.session(python=["2.7", "3.5", "3.6", "3.7", "3.8"]) def unit(session): """Run the unit test suite.""" default(session) -@nox.session(python="3.7") +@nox.session(python=["2.7", "3.7"]) def system(session): """Run the system test suite.""" system_test_path = os.path.join("tests", "system.py") diff --git a/setup.py b/setup.py index 8f459ca216..cc86f650ea 100644 --- a/setup.py +++ b/setup.py @@ -70,6 +70,8 @@ "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Programming Language :: Python", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", @@ -82,7 +84,7 @@ namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, - python_requires=">=3.5", + python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*", include_package_data=True, zip_safe=False, ) From 2331aa453630611fbe4d4215c9410f781d199785 Mon Sep 17 00:00:00 2001 From: Gurov Ilya Date: Mon, 9 Mar 2020 03:32:58 +0300 Subject: [PATCH 08/14] docs: correct URLs for old issues (#29) * docs: correct URLs for old issues * Update CHANGELOG.md Revert unrelated formatter change --- CHANGELOG.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7785f5672c..f708046b40 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,14 +10,14 @@ ### Features * **spanner:** add deprecation warnings; add field_mask to get_instance; add endpoint_uris to Instance proto; update timeouts; make mutations optional for commits (via synth) ([62edbe1](https://www.github.com/googleapis/python-spanner/commit/62edbe12a0c5a74eacb8d87ca265a19e6d27f890)) -* **spanner:** add 
resource based routing implementation ([#10183](https://www.github.com/googleapis/python-spanner/issues/10183)) ([e072d5d](https://www.github.com/googleapis/python-spanner/commit/e072d5dd04d58fff7f62ce19ce42e906dfd11012)) -* **spanner:** un-deprecate resource name helper functions, add 3.8 tests (via synth) ([#10062](https://www.github.com/googleapis/python-spanner/issues/10062)) ([dbb79b0](https://www.github.com/googleapis/python-spanner/commit/dbb79b0d8b0c79f6ed1772f28e4eedb9d986b108)) +* **spanner:** add resource based routing implementation ([#10183](https://www.github.com/googleapis/google-cloud-python/issues/10183)) ([e072d5d](https://www.github.com/googleapis/python-spanner/commit/e072d5dd04d58fff7f62ce19ce42e906dfd11012)) +* **spanner:** un-deprecate resource name helper functions, add 3.8 tests (via synth) ([#10062](https://www.github.com/googleapis/google-cloud-python/issues/10062)) ([dbb79b0](https://www.github.com/googleapis/python-spanner/commit/dbb79b0d8b0c79f6ed1772f28e4eedb9d986b108)) ### Bug Fixes -* be permssive about merging an empty struct ([#10079](https://www.github.com/googleapis/python-spanner/issues/10079)) ([cfae63d](https://www.github.com/googleapis/python-spanner/commit/cfae63d5a8b8332f8875307283da6075a544c838)) -* **spanner:** fix imports for doc samples ([#10283](https://www.github.com/googleapis/python-spanner/issues/10283)) ([55a21d9](https://www.github.com/googleapis/python-spanner/commit/55a21d97d0c863cbbbb2d973b6faa4aeba8e38bb)) +* be permssive about merging an empty struct ([#10079](https://www.github.com/googleapis/google-cloud-python/issues/10079)) ([cfae63d](https://www.github.com/googleapis/python-spanner/commit/cfae63d5a8b8332f8875307283da6075a544c838)) +* **spanner:** fix imports for doc samples ([#10283](https://www.github.com/googleapis/google-cloud-python/issues/10283)) ([55a21d9](https://www.github.com/googleapis/python-spanner/commit/55a21d97d0c863cbbbb2d973b6faa4aeba8e38bb)) ## 1.13.0 From 
23916c511a54be7e7304affc55680d86f150a6d3 Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 12 Mar 2020 00:35:31 -0700 Subject: [PATCH 09/14] [CHANGE ME] Re-generated to pick up changes in the API or client library generator. (#27) Co-authored-by: larkee <31196561+larkee@users.noreply.github.com> --- .../proto/spanner_database_admin.proto | 85 ++--- .../proto/spanner_database_admin_pb2_grpc.py | 7 +- .../gapic/instance_admin_client.py | 19 +- .../proto/spanner_instance_admin.proto | 26 +- .../proto/spanner_instance_admin_pb2.py | 37 +- .../cloud/spanner_v1/gapic/spanner_client.py | 20 +- .../transports/spanner_grpc_transport.py | 2 +- google/cloud/spanner_v1/proto/keys.proto | 3 +- google/cloud/spanner_v1/proto/mutation.proto | 12 +- google/cloud/spanner_v1/proto/mutation_pb2.py | 14 +- .../cloud/spanner_v1/proto/query_plan.proto | 3 +- .../cloud/spanner_v1/proto/result_set.proto | 3 +- google/cloud/spanner_v1/proto/spanner.proto | 317 +++++++++--------- google/cloud/spanner_v1/proto/spanner_pb2.py | 170 +++++++--- .../spanner_v1/proto/spanner_pb2_grpc.py | 68 ++-- .../cloud/spanner_v1/proto/transaction.proto | 3 +- google/cloud/spanner_v1/proto/type.proto | 3 +- synth.metadata | 16 +- 18 files changed, 434 insertions(+), 374 deletions(-) diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto index ea5200b4cb..5ee127d1ef 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
// See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -33,10 +32,6 @@ option java_multiple_files = true; option java_outer_classname = "SpannerDatabaseAdminProto"; option java_package = "com.google.spanner.admin.database.v1"; option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; - -// The Instance resource is defined in `google.spanner.admin.instance.v1`. -// Because this is a separate, independent API (technically), we redefine -// the resource name pattern here. option (google.api.resource_definition) = { type: "spanner.googleapis.com/Instance" pattern: "projects/{project}/instances/{instance}" @@ -66,11 +61,10 @@ service DatabaseAdmin { // have a name of the format `/operations/` and // can be used to track preparation of the database. The // [metadata][google.longrunning.Operation.metadata] field type is - // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. - // The [response][google.longrunning.Operation.response] field type is + // [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + // [response][google.longrunning.Operation.response] field type is // [Database][google.spanner.admin.database.v1.Database], if successful. - rpc CreateDatabase(CreateDatabaseRequest) - returns (google.longrunning.Operation) { + rpc CreateDatabase(CreateDatabaseRequest) returns (google.longrunning.Operation) { option (google.api.http) = { post: "/v1/{parent=projects/*/instances/*}/databases" body: "*" @@ -96,10 +90,8 @@ service DatabaseAdmin { // the format `/operations/` and can be used to // track execution of the schema change(s). The // [metadata][google.longrunning.Operation.metadata] field type is - // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. - // The operation has no response. 
- rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) - returns (google.longrunning.Operation) { + // [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. + rpc UpdateDatabaseDdl(UpdateDatabaseDdlRequest) returns (google.longrunning.Operation) { option (google.api.http) = { patch: "/v1/{database=projects/*/instances/*/databases/*}/ddl" body: "*" @@ -134,8 +126,7 @@ service DatabaseAdmin { // // Authorization requires `spanner.databases.setIamPolicy` // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. - rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" body: "*" @@ -153,8 +144,7 @@ service DatabaseAdmin { // // Authorization requires `spanner.databases.getIamPolicy` permission on // [resource][google.iam.v1.GetIamPolicyRequest.resource]. - rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) - returns (google.iam.v1.Policy) { + rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" body: "*" @@ -172,8 +162,7 @@ service DatabaseAdmin { // result in a NOT_FOUND error if the user has // `spanner.databases.list` permission on the containing Cloud // Spanner instance. Otherwise returns an empty set of permissions. 
- rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) - returns (google.iam.v1.TestIamPermissionsResponse) { + rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" body: "*" @@ -217,8 +206,7 @@ message Database { State state = 2; } -// The request for -// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +// The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. message ListDatabasesRequest { // Required. The instance whose databases should be listed. // Values are of the form `projects//instances/`. @@ -234,26 +222,23 @@ message ListDatabasesRequest { int32 page_size = 3; // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] - // from a previous - // [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. + // [next_page_token][google.spanner.admin.database.v1.ListDatabasesResponse.next_page_token] from a + // previous [ListDatabasesResponse][google.spanner.admin.database.v1.ListDatabasesResponse]. string page_token = 4; } -// The response for -// [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. +// The response for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. message ListDatabasesResponse { // Databases that matched the request. repeated Database databases = 1; // `next_page_token` can be sent in a subsequent - // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] - // call to fetch more of the matching databases. + // [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases] call to fetch more + // of the matching databases. 
string next_page_token = 2; } -// The request for -// [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. +// The request for [CreateDatabase][google.spanner.admin.database.v1.DatabaseAdmin.CreateDatabase]. message CreateDatabaseRequest { // Required. The name of the instance that will serve the new database. // Values are of the form `projects//instances/`. @@ -283,12 +268,11 @@ message CreateDatabaseRequest { message CreateDatabaseMetadata { // The database being created. string database = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; } -// The request for -// [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. +// The request for [GetDatabase][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabase]. message GetDatabaseRequest { // Required. The name of the requested database. Values are of the form // `projects//instances//databases/`. @@ -314,8 +298,8 @@ message GetDatabaseRequest { // Each batch of statements is assigned a name which can be used with // the [Operations][google.longrunning.Operations] API to monitor // progress. See the -// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] -// field for more details. +// [operation_id][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.operation_id] field for more +// details. message UpdateDatabaseDdlRequest { // Required. The database to update. 
string database = 1 [ @@ -335,20 +319,18 @@ message UpdateDatabaseDdlRequest { // // Specifying an explicit operation ID simplifies determining // whether the statements were executed in the event that the - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] - // call is replayed, or the return value is otherwise lost: the - // [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] - // and `operation_id` fields can be combined to form the + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] call is replayed, + // or the return value is otherwise lost: the [database][google.spanner.admin.database.v1.UpdateDatabaseDdlRequest.database] and + // `operation_id` fields can be combined to form the // [name][google.longrunning.Operation.name] of the resulting - // [longrunning.Operation][google.longrunning.Operation]: - // `/operations/`. + // [longrunning.Operation][google.longrunning.Operation]: `/operations/`. // // `operation_id` should be unique within the database, and must be // a valid identifier: `[a-z][a-z0-9_]*`. Note that // automatically-generated operation IDs always begin with an // underscore. If the named operation already exists, - // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] - // returns `ALREADY_EXISTS`. + // [UpdateDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.UpdateDatabaseDdl] returns + // `ALREADY_EXISTS`. string operation_id = 3; } @@ -357,8 +339,8 @@ message UpdateDatabaseDdlRequest { message UpdateDatabaseDdlMetadata { // The database being modified. string database = 1 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/Database" - }]; + type: "spanner.googleapis.com/Database" + }]; // For an update this list contains all the statements. For an // individual statement, this list contains only that statement. 
@@ -370,8 +352,7 @@ message UpdateDatabaseDdlMetadata { repeated google.protobuf.Timestamp commit_timestamps = 3; } -// The request for -// [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. +// The request for [DropDatabase][google.spanner.admin.database.v1.DatabaseAdmin.DropDatabase]. message DropDatabaseRequest { // Required. The database to be dropped. string database = 1 [ @@ -382,8 +363,7 @@ message DropDatabaseRequest { ]; } -// The request for -// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +// The request for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. message GetDatabaseDdlRequest { // Required. The database whose schema we wish to get. string database = 1 [ @@ -394,8 +374,7 @@ message GetDatabaseDdlRequest { ]; } -// The response for -// [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. +// The response for [GetDatabaseDdl][google.spanner.admin.database.v1.DatabaseAdmin.GetDatabaseDdl]. message GetDatabaseDdlResponse { // A list of formatted DDL statements defining the schema of the database // specified in the request. diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py index 7ea7ddb6fa..2491691e6b 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py @@ -94,8 +94,8 @@ def CreateDatabase(self, request, context): have a name of the format `/operations/` and can be used to track preparation of the database. The [metadata][google.longrunning.Operation.metadata] field type is - [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. 
- The [response][google.longrunning.Operation.response] field type is + [CreateDatabaseMetadata][google.spanner.admin.database.v1.CreateDatabaseMetadata]. The + [response][google.longrunning.Operation.response] field type is [Database][google.spanner.admin.database.v1.Database], if successful. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) @@ -116,8 +116,7 @@ def UpdateDatabaseDdl(self, request, context): the format `/operations/` and can be used to track execution of the schema change(s). The [metadata][google.longrunning.Operation.metadata] field type is - [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. - The operation has no response. + [UpdateDatabaseDdlMetadata][google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata]. The operation has no response. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") diff --git a/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py b/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py index c7c4912f2a..130a069bf5 100644 --- a/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py +++ b/google/cloud/spanner_admin_instance_v1/gapic/instance_admin_client.py @@ -562,8 +562,8 @@ def get_instance( Args: name (str): Required. The name of the requested instance. Values are of the form ``projects//instances/``. - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): If field\_mask is present, specifies the subset of [][google.spanner.admin.instance.v1.Instance] fields - that should be returned. If absent, all [][google.spanner.admin.instance.v1.Instance] fields are + field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): If field\_mask is present, specifies the subset of ``Instance`` fields + that should be returned. If absent, all ``Instance`` fields are returned. 
If a dict is provided, it must be of the same form as the protobuf @@ -821,18 +821,15 @@ def update_instance( Args: instance (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.Instance]): Required. The instance to update, which must always include the instance - name. Otherwise, only fields mentioned in - [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask] - need be included. + name. Otherwise, only fields mentioned in ``field_mask`` need be + included. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_instance_v1.types.Instance` - field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in - [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] - should be updated. The field mask must always be specified; this - prevents any future fields in - [][google.spanner.admin.instance.v1.Instance] from being erased - accidentally by clients that do not know about them. + field_mask (Union[dict, ~google.cloud.spanner_admin_instance_v1.types.FieldMask]): Required. A mask specifying which fields in ``Instance`` should be + updated. The field mask must always be specified; this prevents any + future fields in ``Instance`` from being erased accidentally by clients + that do not know about them. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_admin_instance_v1.types.FieldMask` diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto index a437874133..6a068baca2 100644 --- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto +++ b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. 
+// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -353,8 +352,8 @@ message Instance { // also [InstanceConfig][google.spanner.admin.instance.v1.InstanceConfig] and // [ListInstanceConfigs][google.spanner.admin.instance.v1.InstanceAdmin.ListInstanceConfigs]. string config = 2 [(google.api.resource_reference) = { - type: "spanner.googleapis.com/InstanceConfig" - }]; + type: "spanner.googleapis.com/InstanceConfig" + }]; // Required. The descriptive name for this instance as it appears in UIs. // Must be unique per project and between 4 and 30 characters in length. @@ -398,14 +397,7 @@ message Instance { // allow "_" in a future release. map labels = 7; - // Output only. The endpoint URIs based on the instance config. - // For example, instances located in a specific cloud region (or multi region) - // such as nam3, would have a nam3 specific endpoint URI. - // This URI is to be used implictly by SDK clients, with fallback to default - // URI. These endpoints are intended to optimize the network routing between - // the client and the instance's serving resources. - // If multiple endpoints are present, client may establish connections using - // any of the given URIs. + // Deprecated. This field is not populated. repeated string endpoint_uris = 8; } @@ -466,9 +458,9 @@ message GetInstanceRequest { } ]; - // If field_mask is present, specifies the subset of [][Instance] fields that + // If field_mask is present, specifies the subset of [Instance][google.spanner.admin.instance.v1.Instance] fields that // should be returned. - // If absent, all [][Instance] fields are returned. 
+ // If absent, all [Instance][google.spanner.admin.instance.v1.Instance] fields are returned. google.protobuf.FieldMask field_mask = 2; } @@ -549,12 +541,12 @@ message ListInstancesResponse { // The request for [UpdateInstance][google.spanner.admin.instance.v1.InstanceAdmin.UpdateInstance]. message UpdateInstanceRequest { // Required. The instance to update, which must always include the instance - // name. Otherwise, only fields mentioned in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. + // name. Otherwise, only fields mentioned in [field_mask][google.spanner.admin.instance.v1.UpdateInstanceRequest.field_mask] need be included. Instance instance = 1 [(google.api.field_behavior) = REQUIRED]; - // Required. A mask specifying which fields in [][google.spanner.admin.instance.v1.UpdateInstanceRequest.instance] should be updated. + // Required. A mask specifying which fields in [Instance][google.spanner.admin.instance.v1.Instance] should be updated. // The field mask must always be specified; this prevents any future fields in - // [][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know + // [Instance][google.spanner.admin.instance.v1.Instance] from being erased accidentally by clients that do not know // about them. google.protobuf.FieldMask field_mask = 2 [(google.api.field_behavior) = REQUIRED]; } diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py index 356c47f1a0..d55c007030 100644 --- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py +++ b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py @@ -1340,8 +1340,8 @@ ), DESCRIPTOR=_INSTANCE, __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""An isolated set of Cloud Spanner resources on which - databases can be hosted. 
+ __doc__="""An isolated set of Cloud Spanner resources on which databases can be + hosted. Attributes: @@ -1398,15 +1398,7 @@ as the string: name + "*" + value would prove problematic if we were to allow "*" in a future release. endpoint_uris: - Output only. The endpoint URIs based on the instance config. - For example, instances located in a specific cloud region (or - multi region) such as nam3, would have a nam3 specific - endpoint URI. This URI is to be used implictly by SDK clients, - with fallback to default URI. These endpoints are intended to - optimize the network routing between the client and the - instance's serving resources. If multiple endpoints are - present, client may establish connections using any of the - given URIs. + Deprecated. This field is not populated. """, # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.Instance) ), @@ -1506,8 +1498,10 @@ the form ``projects//instances/``. field_mask: If field\_mask is present, specifies the subset of - [][Instance] fields that should be returned. If absent, all - [][Instance] fields are returned. + [Instance][google.spanner.admin.instance.v1.Instance] fields + that should be returned. If absent, all + [Instance][google.spanner.admin.instance.v1.Instance] fields + are returned. """, # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.GetInstanceRequest) ), @@ -1620,16 +1614,17 @@ Attributes: instance: Required. The instance to update, which must always include - the instance name. Otherwise, only fields mentioned in [][goog - le.spanner.admin.instance.v1.UpdateInstanceRequest.field\_mask - ] need be included. + the instance name. Otherwise, only fields mentioned in [field\ + _mask][google.spanner.admin.instance.v1.UpdateInstanceRequest. + field\_mask] need be included. field_mask: - Required. A mask specifying which fields in [][google.spanner. - admin.instance.v1.UpdateInstanceRequest.instance] should be - updated. 
The field mask must always be specified; this + Required. A mask specifying which fields in + [Instance][google.spanner.admin.instance.v1.Instance] should + be updated. The field mask must always be specified; this prevents any future fields in - [][google.spanner.admin.instance.v1.Instance] from being - erased accidentally by clients that do not know about them. + [Instance][google.spanner.admin.instance.v1.Instance] from + being erased accidentally by clients that do not know about + them. """, # @@protoc_insertion_point(class_scope:google.spanner.admin.instance.v1.UpdateInstanceRequest) ), diff --git a/google/cloud/spanner_v1/gapic/spanner_client.py b/google/cloud/spanner_v1/gapic/spanner_client.py index cf6aafd6b6..20b6522780 100644 --- a/google/cloud/spanner_v1/gapic/spanner_client.py +++ b/google/cloud/spanner_v1/gapic/spanner_client.py @@ -237,7 +237,7 @@ def create_session( Active sessions use additional server resources, so it is a good idea to delete idle and unneeded sessions. Aside from explicit deletes, Cloud - Spanner can delete sessions for which no operations are sent for more + Spanner may delete sessions for which no operations are sent for more than an hour. If a session is deleted, requests to it return ``NOT_FOUND``. @@ -659,6 +659,7 @@ def execute_sql( query_mode=None, partition_token=None, seqno=None, + query_options=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -752,6 +753,10 @@ def execute_sql( handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. + query_options (Union[dict, ~google.cloud.spanner_v1.types.QueryOptions]): Query optimizer configuration to use for the given query. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.QueryOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. 
If ``None`` is specified, requests will be retried using a default configuration. @@ -792,6 +797,7 @@ def execute_sql( query_mode=query_mode, partition_token=partition_token, seqno=seqno, + query_options=query_options, ) if metadata is None: metadata = [] @@ -821,6 +827,7 @@ def execute_streaming_sql( query_mode=None, partition_token=None, seqno=None, + query_options=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, metadata=None, @@ -909,6 +916,10 @@ def execute_streaming_sql( handled requests will yield the same response as the first execution. Required for DML statements. Ignored for queries. + query_options (Union[dict, ~google.cloud.spanner_v1.types.QueryOptions]): Query optimizer configuration to use for the given query. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.QueryOptions` retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will be retried using a default configuration. @@ -949,6 +960,7 @@ def execute_streaming_sql( query_mode=query_mode, partition_token=partition_token, seqno=seqno, + query_options=query_options, ) if metadata is None: metadata = [] @@ -1028,9 +1040,9 @@ def execute_batch_dml( If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.spanner_v1.types.Statement` - seqno (long): Required. A per-transaction sequence number used to identify this request. - This field makes each request idempotent such that if the request is - received multiple times, at most one will succeed. + seqno (long): Required. A per-transaction sequence number used to identify this request. This field + makes each request idempotent such that if the request is received multiple + times, at most one will succeed. The sequence number must be monotonically increasing within the transaction. 
If a request arrives for the first time with an out-of-order diff --git a/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py b/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py index 47cedd3cc8..3d43f5088e 100644 --- a/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py +++ b/google/cloud/spanner_v1/gapic/transports/spanner_grpc_transport.py @@ -133,7 +133,7 @@ def create_session(self): Active sessions use additional server resources, so it is a good idea to delete idle and unneeded sessions. Aside from explicit deletes, Cloud - Spanner can delete sessions for which no operations are sent for more + Spanner may delete sessions for which no operations are sent for more than an hour. If a session is deleted, requests to it return ``NOT_FOUND``. diff --git a/google/cloud/spanner_v1/proto/keys.proto b/google/cloud/spanner_v1/proto/keys.proto index de5307aaaf..d129255c45 100644 --- a/google/cloud/spanner_v1/proto/keys.proto +++ b/google/cloud/spanner_v1/proto/keys.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/spanner_v1/proto/mutation.proto b/google/cloud/spanner_v1/proto/mutation.proto index 7df99c0ee6..2c675830f0 100644 --- a/google/cloud/spanner_v1/proto/mutation.proto +++ b/google/cloud/spanner_v1/proto/mutation.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -61,7 +60,10 @@ message Mutation { // Required. The table whose rows will be deleted. string table = 1; - // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. + // Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. The + // primary keys must be specified in the order in which they appear in the + // `PRIMARY KEY()` clause of the table's equivalent DDL statement (the DDL + // statement used to create the table). // Delete is idempotent. The transaction will succeed even if some or all // rows do not exist. KeySet key_set = 2; @@ -80,6 +82,10 @@ message Mutation { // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, then // its column values are overwritten with the ones provided. Any // column values not explicitly written are preserved. + // + // When using [insert_or_update][google.spanner.v1.Mutation.insert_or_update], just as when using [insert][google.spanner.v1.Mutation.insert], all `NOT + // NULL` columns in the table must be given a value. This holds true + // even when the row already exists and will therefore actually be updated. Write insert_or_update = 3; // Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, it is diff --git a/google/cloud/spanner_v1/proto/mutation_pb2.py b/google/cloud/spanner_v1/proto/mutation_pb2.py index db5a781f69..b6ad0429b8 100644 --- a/google/cloud/spanner_v1/proto/mutation_pb2.py +++ b/google/cloud/spanner_v1/proto/mutation_pb2.py @@ -383,8 +383,11 @@ key_set: Required. The primary keys of the rows within [table][google.spanner.v1.Mutation.Delete.table] to delete. - Delete is idempotent. 
The transaction will succeed even if - some or all rows do not exist. + The primary keys must be specified in the order in which they + appear in the ``PRIMARY KEY()`` clause of the table's + equivalent DDL statement (the DDL statement used to create the + table). Delete is idempotent. The transaction will succeed + even if some or all rows do not exist. """, # @@protoc_insertion_point(class_scope:google.spanner.v1.Mutation.Delete) ), @@ -409,7 +412,12 @@ Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, then its column values are overwritten with the ones provided. Any column values not - explicitly written are preserved. + explicitly written are preserved. When using [insert\_or\_upd + ate][google.spanner.v1.Mutation.insert\_or\_update], just as + when using [insert][google.spanner.v1.Mutation.insert], all + ``NOT NULL`` columns in the table must be given a value. This + holds true even when the row already exists and will therefore + actually be updated. replace: Like [insert][google.spanner.v1.Mutation.insert], except that if the row already exists, it is deleted, and the column diff --git a/google/cloud/spanner_v1/proto/query_plan.proto b/google/cloud/spanner_v1/proto/query_plan.proto index 2d6be2e2bd..6ad13a77b0 100644 --- a/google/cloud/spanner_v1/proto/query_plan.proto +++ b/google/cloud/spanner_v1/proto/query_plan.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
-// syntax = "proto3"; diff --git a/google/cloud/spanner_v1/proto/result_set.proto b/google/cloud/spanner_v1/proto/result_set.proto index a4b785283c..e24a35aaf8 100644 --- a/google/cloud/spanner_v1/proto/result_set.proto +++ b/google/cloud/spanner_v1/proto/result_set.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; diff --git a/google/cloud/spanner_v1/proto/spanner.proto b/google/cloud/spanner_v1/proto/spanner.proto index 2ff4c8db89..0c7da37c72 100644 --- a/google/cloud/spanner_v1/proto/spanner.proto +++ b/google/cloud/spanner_v1/proto/spanner.proto @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// syntax = "proto3"; @@ -37,10 +36,6 @@ option java_multiple_files = true; option java_outer_classname = "SpannerProto"; option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; - -// The Database resource is defined in `google.spanner.admin.database.v1`. -// Because this is a separate, independent API (technically), we redefine -// the resource name pattern here. 
option (google.api.resource_definition) = { type: "spanner.googleapis.com/Database" pattern: "projects/{project}/instances/{instance}/databases/{database}" @@ -69,7 +64,7 @@ service Spanner { // // Active sessions use additional server resources, so it is a good idea to // delete idle and unneeded sessions. - // Aside from explicit deletes, Cloud Spanner can delete sessions for which no + // Aside from explicit deletes, Cloud Spanner may delete sessions for which no // operations are sent for more than an hour. If a session is deleted, // requests to it return `NOT_FOUND`. // @@ -87,8 +82,7 @@ service Spanner { // // This API can be used to initialize a session cache on the clients. // See https://goo.gl/TgSFN2 for best practices on session cache management. - rpc BatchCreateSessions(BatchCreateSessionsRequest) - returns (BatchCreateSessionsResponse) { + rpc BatchCreateSessions(BatchCreateSessionsRequest) returns (BatchCreateSessionsResponse) { option (google.api.http) = { post: "/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate" body: "*" @@ -131,12 +125,10 @@ service Spanner { // // Operations inside read-write transactions might return `ABORTED`. If // this occurs, the application should restart the transaction from - // the beginning. See [Transaction][google.spanner.v1.Transaction] for more - // details. + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. // // Larger result sets can be fetched in streaming fashion by calling - // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] - // instead. + // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] instead. 
rpc ExecuteSql(ExecuteSqlRequest) returns (ResultSet) { option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql" @@ -144,11 +136,11 @@ service Spanner { }; } - // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the - // result set as a stream. Unlike - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there is no limit on - // the size of the returned result set. However, no individual row in the - // result set can exceed 100 MiB, and no column value can exceed 10 MiB. + // Like [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], except returns the result + // set as a stream. Unlike [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql], there + // is no limit on the size of the returned result set. However, no + // individual row in the result set can exceed 100 MiB, and no + // column value can exceed 10 MiB. rpc ExecuteStreamingSql(ExecuteSqlRequest) returns (stream PartialResultSet) { option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql" @@ -161,15 +153,13 @@ service Spanner { // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. // // Statements are executed in sequential order. A request can succeed even if - // a statement fails. The - // [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] - // field in the response provides information about the statement that failed. - // Clients must inspect this field to determine whether an error occurred. + // a statement fails. The [ExecuteBatchDmlResponse.status][google.spanner.v1.ExecuteBatchDmlResponse.status] field in the + // response provides information about the statement that failed. Clients must + // inspect this field to determine whether an error occurred. // // Execution stops after the first failed statement; the remaining statements // are not executed. 
- rpc ExecuteBatchDml(ExecuteBatchDmlRequest) - returns (ExecuteBatchDmlResponse) { + rpc ExecuteBatchDml(ExecuteBatchDmlRequest) returns (ExecuteBatchDmlResponse) { option (google.api.http) = { post: "/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml" body: "*" @@ -178,15 +168,14 @@ service Spanner { // Reads rows from the database using key lookups and scans, as a // simple key/value style alternative to - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be - // used to return a result set larger than 10 MiB; if the read matches more + // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql]. This method cannot be used to + // return a result set larger than 10 MiB; if the read matches more // data than that, the read fails with a `FAILED_PRECONDITION` // error. // // Reads inside read-write transactions might return `ABORTED`. If // this occurs, the application should restart the transaction from - // the beginning. See [Transaction][google.spanner.v1.Transaction] for more - // details. + // the beginning. See [Transaction][google.spanner.v1.Transaction] for more details. // // Larger result sets can be yielded in streaming fashion by calling // [StreamingRead][google.spanner.v1.Spanner.StreamingRead] instead. @@ -197,9 +186,9 @@ service Spanner { }; } - // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set - // as a stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no - // limit on the size of the returned result set. However, no individual row in + // Like [Read][google.spanner.v1.Spanner.Read], except returns the result set as a + // stream. Unlike [Read][google.spanner.v1.Spanner.Read], there is no limit on the + // size of the returned result set. However, no individual row in // the result set can exceed 100 MiB, and no column value can exceed // 10 MiB. 
rpc StreamingRead(ReadRequest) returns (stream PartialResultSet) { @@ -210,8 +199,7 @@ service Spanner { } // Begins a new transaction. This step can often be skipped: - // [Read][google.spanner.v1.Spanner.Read], - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and + // [Read][google.spanner.v1.Spanner.Read], [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and // [Commit][google.spanner.v1.Spanner.Commit] can begin a new transaction as a // side-effect. rpc BeginTransaction(BeginTransactionRequest) returns (Transaction) { @@ -236,15 +224,13 @@ service Spanner { body: "*" }; option (google.api.method_signature) = "session,transaction_id,mutations"; - option (google.api.method_signature) = - "session,single_use_transaction,mutations"; + option (google.api.method_signature) = "session,single_use_transaction,mutations"; } // Rolls back a transaction, releasing any locks it holds. It is a good // idea to call this for any transaction that includes one or more - // [Read][google.spanner.v1.Spanner.Read] or - // [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and ultimately - // decides not to commit. + // [Read][google.spanner.v1.Spanner.Read] or [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] requests and + // ultimately decides not to commit. // // `Rollback` returns `OK` if it successfully aborts the transaction, the // transaction was already aborted, or the transaction is not @@ -259,11 +245,10 @@ service Spanner { // Creates a set of partition tokens that can be used to execute a query // operation in parallel. Each of the returned partition tokens can be used - // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to - // specify a subset of the query result to read. The same session and - // read-only transaction must be used by the PartitionQueryRequest used to - // create the partition tokens and the ExecuteSqlRequests that use the - // partition tokens. 
+ // by [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] to specify a subset + // of the query result to read. The same session and read-only transaction + // must be used by the PartitionQueryRequest used to create the + // partition tokens and the ExecuteSqlRequests that use the partition tokens. // // Partition tokens become invalid when the session used to create them // is deleted, is idle for too long, begins a new transaction, or becomes too @@ -278,13 +263,12 @@ service Spanner { // Creates a set of partition tokens that can be used to execute a read // operation in parallel. Each of the returned partition tokens can be used - // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a - // subset of the read result to read. The same session and read-only - // transaction must be used by the PartitionReadRequest used to create the - // partition tokens and the ReadRequests that use the partition tokens. There - // are no ordering guarantees on rows returned among the returned partition - // tokens, or even within each individual StreamingRead call issued with a - // partition_token. + // by [StreamingRead][google.spanner.v1.Spanner.StreamingRead] to specify a subset of the read + // result to read. The same session and read-only transaction must be used by + // the PartitionReadRequest used to create the partition tokens and the + // ReadRequests that use the partition tokens. There are no ordering + // guarantees on rows returned among the returned partition tokens, or even + // within each individual StreamingRead call issued with a partition_token. // // Partition tokens become invalid when the session used to create them // is deleted, is idle for too long, begins a new transaction, or becomes too @@ -312,8 +296,7 @@ message CreateSessionRequest { Session session = 2; } -// The request for -// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. 
+// The request for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. message BatchCreateSessionsRequest { // Required. The database in which the new sessions are created. string database = 1 [ @@ -330,13 +313,11 @@ message BatchCreateSessionsRequest { // The API may return fewer than the requested number of sessions. If a // specific number of sessions are desired, the client can make additional // calls to BatchCreateSessions (adjusting - // [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] - // as necessary). + // [session_count][google.spanner.v1.BatchCreateSessionsRequest.session_count] as necessary). int32 session_count = 3 [(google.api.field_behavior) = REQUIRED]; } -// The response for -// [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. +// The response for [BatchCreateSessions][google.spanner.v1.Spanner.BatchCreateSessions]. message BatchCreateSessionsResponse { // The freshly created sessions. repeated Session session = 1; @@ -377,7 +358,9 @@ message GetSessionRequest { // Required. The name of the session to retrieve. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; } @@ -396,8 +379,7 @@ message ListSessionsRequest { int32 page_size = 2; // If non-empty, `page_token` should contain a - // [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] - // from a previous + // [next_page_token][google.spanner.v1.ListSessionsResponse.next_page_token] from a previous // [ListSessionsResponse][google.spanner.v1.ListSessionsResponse]. string page_token = 3; @@ -420,8 +402,8 @@ message ListSessionsResponse { repeated Session sessions = 1; // `next_page_token` can be sent in a subsequent - // [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more - // of the matching sessions. 
+ // [ListSessions][google.spanner.v1.Spanner.ListSessions] call to fetch more of the matching + // sessions. string next_page_token = 2; } @@ -430,13 +412,36 @@ message DeleteSessionRequest { // Required. The name of the session to delete. string name = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; } // The request for [ExecuteSql][google.spanner.v1.Spanner.ExecuteSql] and // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql]. message ExecuteSqlRequest { + // Query optimizer configuration. + message QueryOptions { + // An option to control the selection of optimizer version. + // + // This parameter allows individual queries to pick different query + // optimizer versions. + // + // Specifying "latest" as a value instructs Cloud Spanner to use the + // latest supported query optimizer version. If not specified, Cloud Spanner + // uses optimizer version set at the database level options. Any other + // positive integer (from the list of supported optimizer versions) + // overrides the default optimizer version for query execution. + // The list of supported optimizer versions can be queried from + // SPANNER_SYS.SUPPORTED_OPTIMIZER_VERSIONS. Executing a SQL statement + // with an invalid optimizer version will fail with a syntax error + // (`INVALID_ARGUMENT`) status. + // + // The `optimizer_version` statement hint has precedence over this setting. + string optimizer_version = 1; + } + // Mode in which the statement must be processed. enum QueryMode { // The default mode. Only the statement results are returned. @@ -454,7 +459,9 @@ message ExecuteSqlRequest { // Required. The session in which the SQL query should be performed. 
string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // The transaction to use. @@ -488,8 +495,7 @@ message ExecuteSqlRequest { // It is not always possible for Cloud Spanner to infer the right SQL type // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in - // [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. + // of type `STRING` both appear in [params][google.spanner.v1.ExecuteSqlRequest.params] as JSON strings. // // In these cases, `param_types` can be used to specify the exact // SQL type for some or all of the SQL statement parameters. See the @@ -499,18 +505,15 @@ message ExecuteSqlRequest { // If this request is resuming a previously interrupted SQL statement // execution, `resume_token` should be copied from the last - // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the - // interruption. Doing this enables the new SQL statement execution to resume - // where the last one left off. The rest of the request parameters must - // exactly match the request that yielded this token. + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this + // enables the new SQL statement execution to resume where the last one left + // off. The rest of the request parameters must exactly match the + // request that yielded this token. bytes resume_token = 6; // Used to control the amount of debugging information returned in - // [ResultSetStats][google.spanner.v1.ResultSetStats]. If - // [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is - // set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only - // be set to - // [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. 
+ // [ResultSetStats][google.spanner.v1.ResultSetStats]. If [partition_token][google.spanner.v1.ExecuteSqlRequest.partition_token] is set, [query_mode][google.spanner.v1.ExecuteSqlRequest.query_mode] can only + // be set to [QueryMode.NORMAL][google.spanner.v1.ExecuteSqlRequest.QueryMode.NORMAL]. QueryMode query_mode = 7; // If present, results will be restricted to the specified partition @@ -530,6 +533,9 @@ message ExecuteSqlRequest { // // Required for DML statements. Ignored for queries. int64 seqno = 9; + + // Query optimizer configuration to use for the given query. + QueryOptions query_options = 10; } // The request for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. @@ -555,9 +561,7 @@ message ExecuteBatchDmlRequest { // It is not always possible for Cloud Spanner to infer the right SQL type // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in - // [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as - // JSON strings. + // of type `STRING` both appear in [params][google.spanner.v1.ExecuteBatchDmlRequest.Statement.params] as JSON strings. // // In these cases, `param_types` can be used to specify the exact // SQL type for some or all of the SQL statement parameters. See the @@ -569,7 +573,9 @@ message ExecuteBatchDmlRequest { // Required. The session in which the DML statements should be performed. string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // Required. The transaction to use. Must be a read-write transaction. @@ -579,17 +585,17 @@ message ExecuteBatchDmlRequest { // transaction. TransactionSelector transaction = 2 [(google.api.field_behavior) = REQUIRED]; - // Required. The list of statements to execute in this batch. 
Statements are - // executed serially, such that the effects of statement `i` are visible to - // statement `i+1`. Each statement must be a DML statement. Execution stops at - // the first failed statement; the remaining statements are not executed. + // Required. The list of statements to execute in this batch. Statements are executed + // serially, such that the effects of statement `i` are visible to statement + // `i+1`. Each statement must be a DML statement. Execution stops at the + // first failed statement; the remaining statements are not executed. // // Callers must provide at least one statement. repeated Statement statements = 3 [(google.api.field_behavior) = REQUIRED]; - // Required. A per-transaction sequence number used to identify this request. - // This field makes each request idempotent such that if the request is - // received multiple times, at most one will succeed. + // Required. A per-transaction sequence number used to identify this request. This field + // makes each request idempotent such that if the request is received multiple + // times, at most one will succeed. // // The sequence number must be monotonically increasing within the // transaction. If a request arrives for the first time with an out-of-order @@ -598,47 +604,38 @@ message ExecuteBatchDmlRequest { int64 seqno = 4 [(google.api.field_behavior) = REQUIRED]; } -// The response for -// [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. Contains a list -// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML -// statement that has successfully executed, in the same order as the statements -// in the request. If a statement fails, the status in the response body -// identifies the cause of the failure. +// The response for [ExecuteBatchDml][google.spanner.v1.Spanner.ExecuteBatchDml]. 
Contains a list +// of [ResultSet][google.spanner.v1.ResultSet] messages, one for each DML statement that has successfully +// executed, in the same order as the statements in the request. If a statement +// fails, the status in the response body identifies the cause of the failure. // // To check for DML statements that failed, use the following approach: // -// 1. Check the status in the response message. The -// [google.rpc.Code][google.rpc.Code] enum +// 1. Check the status in the response message. The [google.rpc.Code][google.rpc.Code] enum // value `OK` indicates that all statements were executed successfully. // 2. If the status was not `OK`, check the number of result sets in the -// response. If the response contains `N` -// [ResultSet][google.spanner.v1.ResultSet] messages, then statement `N+1` in -// the request failed. +// response. If the response contains `N` [ResultSet][google.spanner.v1.ResultSet] messages, then +// statement `N+1` in the request failed. // // Example 1: // // * Request: 5 DML statements, all executed successfully. -// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the -// status `OK`. +// * Response: 5 [ResultSet][google.spanner.v1.ResultSet] messages, with the status `OK`. // // Example 2: // // * Request: 5 DML statements. The third statement has a syntax error. -// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax -// error (`INVALID_ARGUMENT`) -// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages -// indicates that the third statement failed, and the fourth and fifth -// statements were not executed. +// * Response: 2 [ResultSet][google.spanner.v1.ResultSet] messages, and a syntax error (`INVALID_ARGUMENT`) +// status. The number of [ResultSet][google.spanner.v1.ResultSet] messages indicates that the third +// statement failed, and the fourth and fifth statements were not executed. 
message ExecuteBatchDmlResponse { - // One [ResultSet][google.spanner.v1.ResultSet] for each statement in the - // request that ran successfully, in the same order as the statements in the - // request. Each [ResultSet][google.spanner.v1.ResultSet] does not contain any - // rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each - // [ResultSet][google.spanner.v1.ResultSet] contain the number of rows - // modified by the statement. - // - // Only the first [ResultSet][google.spanner.v1.ResultSet] in the response - // contains valid [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. + // One [ResultSet][google.spanner.v1.ResultSet] for each statement in the request that ran successfully, + // in the same order as the statements in the request. Each [ResultSet][google.spanner.v1.ResultSet] does + // not contain any rows. The [ResultSetStats][google.spanner.v1.ResultSetStats] in each [ResultSet][google.spanner.v1.ResultSet] contain + // the number of rows modified by the statement. + // + // Only the first [ResultSet][google.spanner.v1.ResultSet] in the response contains valid + // [ResultSetMetadata][google.spanner.v1.ResultSetMetadata]. repeated ResultSet result_sets = 1; // If all DML statements are executed successfully, the status is `OK`. @@ -673,23 +670,24 @@ message PartitionQueryRequest { // Required. The session used to create the partitions. string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // Read only snapshot transactions are supported, read/write and single use // transactions are not. TransactionSelector transaction = 2; - // Required. The query request to generate partitions for. The request will - // fail if the query is not root partitionable. The query plan of a root + // Required. The query request to generate partitions for. 
The request will fail if + // the query is not root partitionable. The query plan of a root // partitionable query has a single distributed union operator. A distributed // union operator conceptually divides one or more tables into multiple // splits, remotely evaluates a subquery independently on each split, and // then unions all results. // // This must not contain DML commands, such as INSERT, UPDATE, or - // DELETE. Use - // [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a + // DELETE. Use [ExecuteStreamingSql][google.spanner.v1.Spanner.ExecuteStreamingSql] with a // PartitionedDml transaction for large, partition-friendly DML operations. string sql = 3 [(google.api.field_behavior) = REQUIRED]; @@ -709,8 +707,7 @@ message PartitionQueryRequest { // It is not always possible for Cloud Spanner to infer the right SQL type // from a JSON value. For example, values of type `BYTES` and values - // of type `STRING` both appear in - // [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. + // of type `STRING` both appear in [params][google.spanner.v1.PartitionQueryRequest.params] as JSON strings. // // In these cases, `param_types` can be used to specify the exact // SQL type for some or all of the SQL query parameters. See the @@ -727,7 +724,9 @@ message PartitionReadRequest { // Required. The session used to create the partitions. string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // Read only snapshot transactions are supported, read/write and single use @@ -737,24 +736,18 @@ message PartitionReadRequest { // Required. The name of the table in the database to be read. string table = 3 [(google.api.field_behavior) = REQUIRED]; - // If non-empty, the name of an index on - // [table][google.spanner.v1.PartitionReadRequest.table]. 
This index is used - // instead of the table primary key when interpreting - // [key_set][google.spanner.v1.PartitionReadRequest.key_set] and sorting - // result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] - // for further information. + // If non-empty, the name of an index on [table][google.spanner.v1.PartitionReadRequest.table]. This index is + // used instead of the table primary key when interpreting [key_set][google.spanner.v1.PartitionReadRequest.key_set] + // and sorting result rows. See [key_set][google.spanner.v1.PartitionReadRequest.key_set] for further information. string index = 4; - // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be - // returned for each row matching this request. + // The columns of [table][google.spanner.v1.PartitionReadRequest.table] to be returned for each row matching + // this request. repeated string columns = 5; // Required. `key_set` identifies the rows to be yielded. `key_set` names the - // primary keys of the rows in - // [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless - // [index][google.spanner.v1.PartitionReadRequest.index] is present. If - // [index][google.spanner.v1.PartitionReadRequest.index] is present, then - // [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names + // primary keys of the rows in [table][google.spanner.v1.PartitionReadRequest.table] to be yielded, unless [index][google.spanner.v1.PartitionReadRequest.index] + // is present. If [index][google.spanner.v1.PartitionReadRequest.index] is present, then [key_set][google.spanner.v1.PartitionReadRequest.key_set] instead names // index keys in [index][google.spanner.v1.PartitionReadRequest.index]. // // It is not an error for the `key_set` to name rows that do not @@ -790,7 +783,9 @@ message ReadRequest { // Required. The session in which the read should be performed. 
string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // The transaction to use. If none is provided, the default is a @@ -800,31 +795,24 @@ message ReadRequest { // Required. The name of the table in the database to be read. string table = 3 [(google.api.field_behavior) = REQUIRED]; - // If non-empty, the name of an index on - // [table][google.spanner.v1.ReadRequest.table]. This index is used instead of - // the table primary key when interpreting - // [key_set][google.spanner.v1.ReadRequest.key_set] and sorting result rows. - // See [key_set][google.spanner.v1.ReadRequest.key_set] for further - // information. + // If non-empty, the name of an index on [table][google.spanner.v1.ReadRequest.table]. This index is + // used instead of the table primary key when interpreting [key_set][google.spanner.v1.ReadRequest.key_set] + // and sorting result rows. See [key_set][google.spanner.v1.ReadRequest.key_set] for further information. string index = 4; - // Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be - // returned for each row matching this request. + // Required. The columns of [table][google.spanner.v1.ReadRequest.table] to be returned for each row matching + // this request. repeated string columns = 5 [(google.api.field_behavior) = REQUIRED]; // Required. `key_set` identifies the rows to be yielded. `key_set` names the - // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to - // be yielded, unless [index][google.spanner.v1.ReadRequest.index] is present. - // If [index][google.spanner.v1.ReadRequest.index] is present, then - // [key_set][google.spanner.v1.ReadRequest.key_set] instead names index keys - // in [index][google.spanner.v1.ReadRequest.index]. 
- // - // If the [partition_token][google.spanner.v1.ReadRequest.partition_token] - // field is empty, rows are yielded in table primary key order (if - // [index][google.spanner.v1.ReadRequest.index] is empty) or index key order - // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the - // [partition_token][google.spanner.v1.ReadRequest.partition_token] field is - // not empty, rows will be yielded in an unspecified order. + // primary keys of the rows in [table][google.spanner.v1.ReadRequest.table] to be yielded, unless [index][google.spanner.v1.ReadRequest.index] + // is present. If [index][google.spanner.v1.ReadRequest.index] is present, then [key_set][google.spanner.v1.ReadRequest.key_set] instead names + // index keys in [index][google.spanner.v1.ReadRequest.index]. + // + // If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is empty, rows are yielded + // in table primary key order (if [index][google.spanner.v1.ReadRequest.index] is empty) or index key order + // (if [index][google.spanner.v1.ReadRequest.index] is non-empty). If the [partition_token][google.spanner.v1.ReadRequest.partition_token] field is not + // empty, rows will be yielded in an unspecified order. // // It is not an error for the `key_set` to name rows that do not // exist in the database. Read yields nothing for nonexistent rows. @@ -837,9 +825,9 @@ message ReadRequest { // If this request is resuming a previously interrupted read, // `resume_token` should be copied from the last - // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the - // interruption. Doing this enables the new read to resume where the last read - // left off. The rest of the request parameters must exactly match the request + // [PartialResultSet][google.spanner.v1.PartialResultSet] yielded before the interruption. Doing this + // enables the new read to resume where the last read left off. 
The + // rest of the request parameters must exactly match the request // that yielded this token. bytes resume_token = 9; @@ -850,13 +838,14 @@ message ReadRequest { bytes partition_token = 10; } -// The request for -// [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. +// The request for [BeginTransaction][google.spanner.v1.Spanner.BeginTransaction]. message BeginTransactionRequest { // Required. The session in which the transaction runs. string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // Required. Options for the new transaction. @@ -868,7 +857,9 @@ message CommitRequest { // Required. The session in which the transaction to be committed is running. string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // Required. The transaction in which to commit. @@ -905,7 +896,9 @@ message RollbackRequest { // Required. The session in which the transaction to roll back is running. string session = 1 [ (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { type: "spanner.googleapis.com/Session" } + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Session" + } ]; // Required. The transaction to roll back. 
diff --git a/google/cloud/spanner_v1/proto/spanner_pb2.py b/google/cloud/spanner_v1/proto/spanner_pb2.py index 3415264909..ab1ff4e42e 100644 --- a/google/cloud/spanner_v1/proto/spanner_pb2.py +++ b/google/cloud/spanner_v1/proto/spanner_pb2.py @@ -48,7 +48,7 @@ "\n\025com.google.spanner.v1B\014SpannerProtoP\001Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cloud\\Spanner\\V1\352A_\n\037spanner.googleapis.com/Database\022\n\x11partition_options\x18\x06 \x01(\x0b\x32#.google.spanner.v1.PartitionOptions\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01"\xb1\x02\n\x14PartitionReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x05 \x03(\t\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12>\n\x11partition_options\x18\t \x01(\x0b\x32#.google.spanner.v1.PartitionOptions"$\n\tPartition\x12\x17\n\x0fpartition_token\x18\x01 \x01(\x0c"z\n\x11PartitionResponse\x12\x30\n\npartitions\x18\x01 \x03(\x0b\x32\x1c.google.spanner.v1.Partition\x12\x33\n\x0btransaction\x18\x02 \x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xab\x02\n\x0bReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x14\n\x07\x63olumns\x18\x05 \x03(\tB\x03\xe0\x41\x02\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12\r\n\x05limit\x18\x08 \x01(\x03\x12\x14\n\x0cresume_token\x18\t 
\x01(\x0c\x12\x17\n\x0fpartition_token\x18\n \x01(\x0c"\x8f\x01\n\x17\x42\x65ginTransactionRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x07options\x18\x02 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsB\x03\xe0\x41\x02"\xea\x01\n\rCommitRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x18\n\x0etransaction_id\x18\x02 \x01(\x0cH\x00\x12G\n\x16single_use_transaction\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12.\n\tmutations\x18\x04 \x03(\x0b\x32\x1b.google.spanner.v1.MutationB\r\n\x0btransaction"F\n\x0e\x43ommitResponse\x12\x34\n\x10\x63ommit_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"g\n\x0fRollbackRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x1b\n\x0etransaction_id\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x32\xc0\x16\n\x07Spanner\x12\xa6\x01\n\rCreateSession\x12\'.google.spanner.v1.CreateSessionRequest\x1a\x1a.google.spanner.v1.Session"P\x82\xd3\xe4\x93\x02?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\x01*\xda\x41\x08\x64\x61tabase\x12\xe0\x01\n\x13\x42\x61tchCreateSessions\x12-.google.spanner.v1.BatchCreateSessionsRequest\x1a..google.spanner.v1.BatchCreateSessionsResponse"j\x82\xd3\xe4\x93\x02K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\x01*\xda\x41\x16\x64\x61tabase,session_count\x12\x97\x01\n\nGetSession\x12$.google.spanner.v1.GetSessionRequest\x1a\x1a.google.spanner.v1.Session"G\x82\xd3\xe4\x93\x02:\x12\x38/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xae\x01\n\x0cListSessions\x12&.google.spanner.v1.ListSessionsRequest\x1a\'.google.spanner.v1.ListSessionsResponse"M\x82\xd3\xe4\x93\x02<\x12:/v1/{database=projects/*/instances/*/databases/*}/sessions\xda\x41\x08\x64\x61tabase\x12\x99\x01\n\rDeleteSession\x12\'.google.spanner.v1.DeleteSessionRequest\x1a\x16.goog
le.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xa3\x01\n\nExecuteSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a\x1c.google.spanner.v1.ResultSet"Q\x82\xd3\xe4\x93\x02K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\x01*\x12\xbe\x01\n\x13\x45xecuteStreamingSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a#.google.spanner.v1.PartialResultSet"Z\x82\xd3\xe4\x93\x02T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\x01*0\x01\x12\xc0\x01\n\x0f\x45xecuteBatchDml\x12).google.spanner.v1.ExecuteBatchDmlRequest\x1a*.google.spanner.v1.ExecuteBatchDmlResponse"V\x82\xd3\xe4\x93\x02P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\x01*\x12\x91\x01\n\x04Read\x12\x1e.google.spanner.v1.ReadRequest\x1a\x1c.google.spanner.v1.ResultSet"K\x82\xd3\xe4\x93\x02\x45"@/v1/{session=projects/*/instances/*/databases/*/sessions/*}:read:\x01*\x12\xac\x01\n\rStreamingRead\x12\x1e.google.spanner.v1.ReadRequest\x1a#.google.spanner.v1.PartialResultSet"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\x01*0\x01\x12\xc9\x01\n\x10\x42\x65ginTransaction\x12*.google.spanner.v1.BeginTransactionRequest\x1a\x1e.google.spanner.v1.Transaction"i\x82\xd3\xe4\x93\x02Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\x01*\xda\x41\x0fsession,options\x12\xeb\x01\n\x06\x43ommit\x12 .google.spanner.v1.CommitRequest\x1a!.google.spanner.v1.CommitResponse"\x9b\x01\x82\xd3\xe4\x93\x02G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\x01*\xda\x41 
session,transaction_id,mutations\xda\x41(session,single_use_transaction,mutations\x12\xb0\x01\n\x08Rollback\x12".google.spanner.v1.RollbackRequest\x1a\x16.google.protobuf.Empty"h\x82\xd3\xe4\x93\x02I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\x01*\xda\x41\x16session,transaction_id\x12\xb7\x01\n\x0ePartitionQuery\x12(.google.spanner.v1.PartitionQueryRequest\x1a$.google.spanner.v1.PartitionResponse"U\x82\xd3\xe4\x93\x02O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\x01*\x12\xb4\x01\n\rPartitionRead\x12\'.google.spanner.v1.PartitionReadRequest\x1a$.google.spanner.v1.PartitionResponse"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\x01*\x1aw\xca\x41\x16spanner.googleapis.com\xd2\x41[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.dataB\xf7\x01\n\x15\x63om.google.spanner.v1B\x0cSpannerProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x41_\n\x1fspanner.googleapis.com/Database\x12\n\x11partition_options\x18\x06 \x01(\x0b\x32#.google.spanner.v1.PartitionOptions\x1aJ\n\x0fParamTypesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.google.spanner.v1.Type:\x02\x38\x01"\xb1\x02\n\x14PartitionReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x0f\n\x07\x63olumns\x18\x05 \x03(\t\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12>\n\x11partition_options\x18\t \x01(\x0b\x32#.google.spanner.v1.PartitionOptions"$\n\tPartition\x12\x17\n\x0fpartition_token\x18\x01 \x01(\x0c"z\n\x11PartitionResponse\x12\x30\n\npartitions\x18\x01 
\x03(\x0b\x32\x1c.google.spanner.v1.Partition\x12\x33\n\x0btransaction\x18\x02 \x01(\x0b\x32\x1e.google.spanner.v1.Transaction"\xab\x02\n\x0bReadRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x0btransaction\x18\x02 \x01(\x0b\x32&.google.spanner.v1.TransactionSelector\x12\x12\n\x05table\x18\x03 \x01(\tB\x03\xe0\x41\x02\x12\r\n\x05index\x18\x04 \x01(\t\x12\x14\n\x07\x63olumns\x18\x05 \x03(\tB\x03\xe0\x41\x02\x12/\n\x07key_set\x18\x06 \x01(\x0b\x32\x19.google.spanner.v1.KeySetB\x03\xe0\x41\x02\x12\r\n\x05limit\x18\x08 \x01(\x03\x12\x14\n\x0cresume_token\x18\t \x01(\x0c\x12\x17\n\x0fpartition_token\x18\n \x01(\x0c"\x8f\x01\n\x17\x42\x65ginTransactionRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12;\n\x07options\x18\x02 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsB\x03\xe0\x41\x02"\xea\x01\n\rCommitRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x18\n\x0etransaction_id\x18\x02 \x01(\x0cH\x00\x12G\n\x16single_use_transaction\x18\x03 \x01(\x0b\x32%.google.spanner.v1.TransactionOptionsH\x00\x12.\n\tmutations\x18\x04 \x03(\x0b\x32\x1b.google.spanner.v1.MutationB\r\n\x0btransaction"F\n\x0e\x43ommitResponse\x12\x34\n\x10\x63ommit_timestamp\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp"g\n\x0fRollbackRequest\x12\x37\n\x07session\x18\x01 \x01(\tB&\xe0\x41\x02\xfa\x41 \n\x1espanner.googleapis.com/Session\x12\x1b\n\x0etransaction_id\x18\x02 
\x01(\x0c\x42\x03\xe0\x41\x02\x32\xc0\x16\n\x07Spanner\x12\xa6\x01\n\rCreateSession\x12\'.google.spanner.v1.CreateSessionRequest\x1a\x1a.google.spanner.v1.Session"P\x82\xd3\xe4\x93\x02?":/v1/{database=projects/*/instances/*/databases/*}/sessions:\x01*\xda\x41\x08\x64\x61tabase\x12\xe0\x01\n\x13\x42\x61tchCreateSessions\x12-.google.spanner.v1.BatchCreateSessionsRequest\x1a..google.spanner.v1.BatchCreateSessionsResponse"j\x82\xd3\xe4\x93\x02K"F/v1/{database=projects/*/instances/*/databases/*}/sessions:batchCreate:\x01*\xda\x41\x16\x64\x61tabase,session_count\x12\x97\x01\n\nGetSession\x12$.google.spanner.v1.GetSessionRequest\x1a\x1a.google.spanner.v1.Session"G\x82\xd3\xe4\x93\x02:\x12\x38/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xae\x01\n\x0cListSessions\x12&.google.spanner.v1.ListSessionsRequest\x1a\'.google.spanner.v1.ListSessionsResponse"M\x82\xd3\xe4\x93\x02<\x12:/v1/{database=projects/*/instances/*/databases/*}/sessions\xda\x41\x08\x64\x61tabase\x12\x99\x01\n\rDeleteSession\x12\'.google.spanner.v1.DeleteSessionRequest\x1a\x16.google.protobuf.Empty"G\x82\xd3\xe4\x93\x02:*8/v1/{name=projects/*/instances/*/databases/*/sessions/*}\xda\x41\x04name\x12\xa3\x01\n\nExecuteSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a\x1c.google.spanner.v1.ResultSet"Q\x82\xd3\xe4\x93\x02K"F/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeSql:\x01*\x12\xbe\x01\n\x13\x45xecuteStreamingSql\x12$.google.spanner.v1.ExecuteSqlRequest\x1a#.google.spanner.v1.PartialResultSet"Z\x82\xd3\xe4\x93\x02T"O/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeStreamingSql:\x01*0\x01\x12\xc0\x01\n\x0f\x45xecuteBatchDml\x12).google.spanner.v1.ExecuteBatchDmlRequest\x1a*.google.spanner.v1.ExecuteBatchDmlResponse"V\x82\xd3\xe4\x93\x02P"K/v1/{session=projects/*/instances/*/databases/*/sessions/*}:executeBatchDml:\x01*\x12\x91\x01\n\x04Read\x12\x1e.google.spanner.v1.ReadRequest\x1a\x1c.google.spanner.v1.ResultSet"K\x82\xd3\xe4\x93\x02\x45"@/v1/
{session=projects/*/instances/*/databases/*/sessions/*}:read:\x01*\x12\xac\x01\n\rStreamingRead\x12\x1e.google.spanner.v1.ReadRequest\x1a#.google.spanner.v1.PartialResultSet"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:streamingRead:\x01*0\x01\x12\xc9\x01\n\x10\x42\x65ginTransaction\x12*.google.spanner.v1.BeginTransactionRequest\x1a\x1e.google.spanner.v1.Transaction"i\x82\xd3\xe4\x93\x02Q"L/v1/{session=projects/*/instances/*/databases/*/sessions/*}:beginTransaction:\x01*\xda\x41\x0fsession,options\x12\xeb\x01\n\x06\x43ommit\x12 .google.spanner.v1.CommitRequest\x1a!.google.spanner.v1.CommitResponse"\x9b\x01\x82\xd3\xe4\x93\x02G"B/v1/{session=projects/*/instances/*/databases/*/sessions/*}:commit:\x01*\xda\x41 session,transaction_id,mutations\xda\x41(session,single_use_transaction,mutations\x12\xb0\x01\n\x08Rollback\x12".google.spanner.v1.RollbackRequest\x1a\x16.google.protobuf.Empty"h\x82\xd3\xe4\x93\x02I"D/v1/{session=projects/*/instances/*/databases/*/sessions/*}:rollback:\x01*\xda\x41\x16session,transaction_id\x12\xb7\x01\n\x0ePartitionQuery\x12(.google.spanner.v1.PartitionQueryRequest\x1a$.google.spanner.v1.PartitionResponse"U\x82\xd3\xe4\x93\x02O"J/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionQuery:\x01*\x12\xb4\x01\n\rPartitionRead\x12\'.google.spanner.v1.PartitionReadRequest\x1a$.google.spanner.v1.PartitionResponse"T\x82\xd3\xe4\x93\x02N"I/v1/{session=projects/*/instances/*/databases/*/sessions/*}:partitionRead:\x01*\x1aw\xca\x41\x16spanner.googleapis.com\xd2\x41[https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.dataB\xf7\x01\n\x15\x63om.google.spanner.v1B\x0cSpannerProtoP\x01Z8google.golang.org/genproto/googleapis/spanner/v1;spanner\xaa\x02\x17Google.Cloud.Spanner.V1\xca\x02\x17Google\\Cloud\\Spanner\\V1\xea\x41_\n\x1fspanner.googleapis.com/Database\x12=0.4.0 (see commit 
https://github.com/googleapis/protoc-docs-plugin/commit/979f03ede6678c487337f3d7e88bae58df5207af) is incompatible with protobuf 3.9.1.\n\nPiperOrigin-RevId: 296986742\n\n1e47e676cddbbd8d93f19ba0665af15b5532417e\nFix: Restore a method signature for UpdateCluster\n\nPiperOrigin-RevId: 296901854\n\n7f910bcc4fc4704947ccfd3ceed015d16b9e00c2\nUpdate Dataproc v1beta2 client.\n\nPiperOrigin-RevId: 296451205\n\nde287524405a3dce124d301634731584fc0432d7\nFix: Reinstate method signatures that had been missed off some RPCs\nFix: Correct resource types for two fields\n\nPiperOrigin-RevId: 296435091\n\ne5bc9566ae057fb4c92f8b7e047f1c8958235b53\nDeprecate the endpoint_uris field, as it is unused.\n\nPiperOrigin-RevId: 296357191\n\n8c12e2b4dca94e12bff9f538bdac29524ff7ef7a\nUpdate Dataproc v1 client.\n\nPiperOrigin-RevId: 296336662\n\n17567c4a1ef0a9b50faa87024d66f8acbb561089\nRemoving erroneous comment, a la https://github.com/googleapis/java-speech/pull/103\n\nPiperOrigin-RevId: 296332968\n\n3eaaaf8626ce5b0c0bc7eee05e143beffa373b01\nAdd BUILD.bazel for v1 secretmanager.googleapis.com\n\nPiperOrigin-RevId: 296274723\n\ne76149c3d992337f85eeb45643106aacae7ede82\nMove securitycenter v1 to use generate from annotations.\n\nPiperOrigin-RevId: 296266862\n\n203740c78ac69ee07c3bf6be7408048751f618f8\nAdd StackdriverLoggingConfig field to Cloud Tasks v2 API.\n\nPiperOrigin-RevId: 296256388\n\ne4117d5e9ed8bbca28da4a60a94947ca51cb2083\nCreate a Bazel BUILD file for the google.actions.type export.\n\nPiperOrigin-RevId: 296212567\n\na9639a0a9854fd6e1be08bba1ac3897f4f16cb2f\nAdd secretmanager.googleapis.com v1 protos\n\nPiperOrigin-RevId: 295983266\n\nce4f4c21d9dd2bfab18873a80449b9d9851efde8\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295861722\n\ncb61d6c2d070b589980c779b68ffca617f789116\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295855449\n\nab2685d8d3a0e191dc8aef83df36773c07cb3d06\nfix: Dataproc v1 - 
AutoscalingPolicy annotation\n\nThis adds the second resource name pattern to the\nAutoscalingPolicy resource.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 295738415\n\n8a1020bf6828f6e3c84c3014f2c51cb62b739140\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295286165\n\n5cfa105206e77670369e4b2225597386aba32985\nAdd service control related proto build rule.\n\nPiperOrigin-RevId: 295262088\n\nee4dddf805072004ab19ac94df2ce669046eec26\nmonitoring v3: Add prefix \"https://cloud.google.com/\" into the link for global access\ncl 295167522, get ride of synth.py hacks\n\nPiperOrigin-RevId: 295238095\n\nd9835e922ea79eed8497db270d2f9f85099a519c\nUpdate some minor docs changes about user event proto\n\nPiperOrigin-RevId: 295185610\n\n5f311e416e69c170243de722023b22f3df89ec1c\nfix: use correct PHP package name in gapic configuration\n\nPiperOrigin-RevId: 295161330\n\n6cdd74dcdb071694da6a6b5a206e3a320b62dd11\npubsub: v1 add client config annotations and retry config\n\nPiperOrigin-RevId: 295158776\n\n5169f46d9f792e2934d9fa25c36d0515b4fd0024\nAdded cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295026522\n\n56b55aa8818cd0a532a7d779f6ef337ba809ccbd\nFix: Resource annotations for CreateTimeSeriesRequest and ListTimeSeriesRequest should refer to valid resources. TimeSeries is not a named resource.\n\nPiperOrigin-RevId: 294931650\n\n0646bc775203077226c2c34d3e4d50cc4ec53660\nRemove unnecessary languages from bigquery-related artman configuration files.\n\nPiperOrigin-RevId: 294809380\n\n8b78aa04382e3d4147112ad6d344666771bb1909\nUpdate backend.proto for schemes and protocol\n\nPiperOrigin-RevId: 294788800\n\n80b8f8b3de2359831295e24e5238641a38d8488f\nAdds artman config files for bigquerystorage endpoints v1beta2, v1alpha2, v1\n\nPiperOrigin-RevId: 294763931\n\n2c17ac33b226194041155bb5340c3f34733f1b3a\nAdd parameter to sample generated for UpdateInstance. 
Related to https://github.com/googleapis/python-redis/issues/4\n\nPiperOrigin-RevId: 294734008\n\nd5e8a8953f2acdfe96fb15e85eb2f33739623957\nMove bigquery datatransfer to gapic v2.\n\nPiperOrigin-RevId: 294703703\n\nefd36705972cfcd7d00ab4c6dfa1135bafacd4ae\nfix: Add two annotations that we missed.\n\nPiperOrigin-RevId: 294664231\n\n8a36b928873ff9c05b43859b9d4ea14cd205df57\nFix: Define the \"bigquery.googleapis.com/Table\" resource in the BigQuery Storage API (v1beta2).\n\nPiperOrigin-RevId: 294459768\n\nc7a3caa2c40c49f034a3c11079dd90eb24987047\nFix: Define the \"bigquery.googleapis.com/Table\" resource in the BigQuery Storage API (v1).\n\nPiperOrigin-RevId: 294456889\n\n5006247aa157e59118833658084345ee59af7c09\nFix: Make deprecated fields optional\nFix: Deprecate SetLoggingServiceRequest.zone in line with the comments\nFeature: Add resource name method signatures where appropriate\n\nPiperOrigin-RevId: 294383128\n\neabba40dac05c5cbe0fca3a35761b17e372036c4\nFix: C# and PHP package/namespace capitalization for BigQuery Storage v1.\n\nPiperOrigin-RevId: 294382444\n\nf8d9a858a7a55eba8009a23aa3f5cc5fe5e88dde\nfix: artman configuration file for bigtable-admin\n\nPiperOrigin-RevId: 294322616\n\n0f29555d1cfcf96add5c0b16b089235afbe9b1a9\nAPI definition for (not-yet-launched) GCS gRPC.\n\nPiperOrigin-RevId: 294321472\n\nfcc86bee0e84dc11e9abbff8d7c3529c0626f390\nfix: Bigtable Admin v2\n\nChange LRO metadata from PartialUpdateInstanceMetadata\nto UpdateInstanceMetadata. 
(Otherwise, it will not build.)\n\nPiperOrigin-RevId: 294264582\n\n6d9361eae2ebb3f42d8c7ce5baf4bab966fee7c0\nrefactor: Add annotations to Bigtable Admin v2.\n\nPiperOrigin-RevId: 294243406\n\nad7616f3fc8e123451c8b3a7987bc91cea9e6913\nFix: Resource type in CreateLogMetricRequest should use logging.googleapis.com.\nFix: ListLogEntries should have a method signature for convenience of calling it.\n\nPiperOrigin-RevId: 294222165\n\n63796fcbb08712676069e20a3e455c9f7aa21026\nFix: Remove extraneous resource definition for cloudkms.googleapis.com/CryptoKey.\n\nPiperOrigin-RevId: 294176658\n\ne7d8a694f4559201e6913f6610069cb08b39274e\nDepend on the latest gapic-generator and resource names plugin.\n\nThis fixes the very old an very annoying bug: https://github.com/googleapis/gapic-generator/pull/3087\n\nPiperOrigin-RevId: 293903652\n\n806b2854a966d55374ee26bb0cef4e30eda17b58\nfix: correct capitalization of Ruby namespaces in SecurityCenter V1p1beta1\n\nPiperOrigin-RevId: 293903613\n\n1b83c92462b14d67a7644e2980f723112472e03a\nPublish annotations and grpc service config for Logging API.\n\nPiperOrigin-RevId: 293893514\n\ne46f761cd6ec15a9e3d5ed4ff321a4bcba8e8585\nGenerate the Bazel build file for recommendengine public api\n\nPiperOrigin-RevId: 293710856\n\n68477017c4173c98addac0373950c6aa9d7b375f\nMake `language_code` optional for UpdateIntentRequest and BatchUpdateIntentsRequest.\n\nThe comments and proto annotations describe this parameter as optional.\n\nPiperOrigin-RevId: 293703548\n\n16f823f578bca4e845a19b88bb9bc5870ea71ab2\nAdd BUILD.bazel files for managedidentities API\n\nPiperOrigin-RevId: 293698246\n\n2f53fd8178c9a9de4ad10fae8dd17a7ba36133f2\nAdd v1p1beta1 config file\n\nPiperOrigin-RevId: 293696729\n\n052b274138fce2be80f97b6dcb83ab343c7c8812\nAdd source field for user event and add field behavior annotations\n\nPiperOrigin-RevId: 293693115\n\n1e89732b2d69151b1b3418fff3d4cc0434f0dded\ndatacatalog: v1beta1 add three new RPCs to gapic v1beta1 
config\n\nPiperOrigin-RevId: 293692823\n\n9c8bd09bbdc7c4160a44f1fbab279b73cd7a2337\nchange the name of AccessApproval service to AccessApprovalAdmin\n\nPiperOrigin-RevId: 293690934\n\n2e23b8fbc45f5d9e200572ca662fe1271bcd6760\nAdd ListEntryGroups method, add http bindings to support entry group tagging, and update some comments.\n\nPiperOrigin-RevId: 293666452\n\n0275e38a4ca03a13d3f47a9613aac8c8b0d3f1f2\nAdd proto_package field to managedidentities API. It is needed for APIs that still depend on artman generation.\n\nPiperOrigin-RevId: 293643323\n\n4cdfe8278cb6f308106580d70648001c9146e759\nRegenerating public protos for Data Catalog to add new Custom Type Entry feature.\n\nPiperOrigin-RevId: 293614782\n\n45d2a569ab526a1fad3720f95eefb1c7330eaada\nEnable client generation for v1 ManagedIdentities API.\n\nPiperOrigin-RevId: 293515675\n\n2c17086b77e6f3bcf04a1f65758dfb0c3da1568f\nAdd the Actions on Google common types (//google/actions/type/*).\n\nPiperOrigin-RevId: 293478245\n\n781aadb932e64a12fb6ead7cd842698d99588433\nDialogflow weekly v2/v2beta1 library update:\n- Documentation updates\nImportant updates are also posted at\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 293443396\n\ne2602608c9138c2fca24162720e67f9307c30b95\nDialogflow weekly v2/v2beta1 library update:\n- Documentation updates\nImportant updates are also posted at\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 293442964\n\nc8aef82028d06b7992278fa9294c18570dc86c3d\nAdd cc_proto_library and cc_grpc_library targets for Bigtable protos.\n\nAlso fix indentation of cc_grpc_library targets in Spanner and IAM protos.\n\nPiperOrigin-RevId: 293440538\n\ne2faab04f4cb7f9755072330866689b1943a16e9\ncloudtasks: v2 replace non-standard retry params in gapic config v2\n\nPiperOrigin-RevId: 293424055\n\ndfb4097ea628a8470292c6590a4313aee0c675bd\nerrorreporting: v1beta1 add legacy artman config for php\n\nPiperOrigin-RevId: 
293423790\n\nb18aed55b45bfe5b62476292c72759e6c3e573c6\nasset: v1p1beta1 updated comment for `page_size` limit.\n\nPiperOrigin-RevId: 293421386\n\nc9ef36b7956d9859a2fc86ad35fcaa16958ab44f\nbazel: Refactor CI build scripts\n\nPiperOrigin-RevId: 293387911\n\na8ed9d921fdddc61d8467bfd7c1668f0ad90435c\nfix: set Ruby module name for OrgPolicy\n\nPiperOrigin-RevId: 293257997\n\n6c7d28509bd8315de8af0889688ee20099594269\nredis: v1beta1 add UpgradeInstance and connect_mode field to Instance\n\nPiperOrigin-RevId: 293242878\n\nae0abed4fcb4c21f5cb67a82349a049524c4ef68\nredis: v1 add connect_mode field to Instance\n\nPiperOrigin-RevId: 293241914\n\n3f7a0d29b28ee9365771da2b66edf7fa2b4e9c56\nAdds service config definition for bigqueryreservation v1beta1\n\nPiperOrigin-RevId: 293234418\n\n0c88168d5ed6fe353a8cf8cbdc6bf084f6bb66a5\naddition of BUILD & configuration for accessapproval v1\n\nPiperOrigin-RevId: 293219198\n\n39bedc2e30f4778ce81193f6ba1fec56107bcfc4\naccessapproval: v1 publish protos\n\nPiperOrigin-RevId: 293167048\n\n69d9945330a5721cd679f17331a78850e2618226\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080182\n\nf6a1a6b417f39694275ca286110bc3c1ca4db0dc\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080178\n\n29d40b78e3dc1579b0b209463fbcb76e5767f72a\nExpose managedidentities/v1beta1/ API for client library usage.\n\nPiperOrigin-RevId: 292979741\n\na22129a1fb6e18056d576dfb7717aef74b63734a\nExpose managedidentities/v1/ API for client library usage.\n\nPiperOrigin-RevId: 292968186\n\n" } }, { "template": { - "name": "python_split_library", + "name": "python_library", "origin": "synthtool.gcp", - "version": "2019.10.17" + "version": "2020.2.4" } } ], From 514792151c2fe4fc7a6cf4ad0dd141c9090a634b Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Fri, 13 Mar 2020 10:46:44 +1100 Subject: [PATCH 10/14] feat: implement query options versioning support (#30) * feat: implement query options 
versioning support * refactor _merge_query_options to use MergeFrom protobuf function * address comments * fix assignment Co-authored-by: larkee --- google/cloud/spanner_v1/_helpers.py | 39 ++++++++++++++ google/cloud/spanner_v1/client.py | 25 ++++++++- google/cloud/spanner_v1/database.py | 39 ++++++++++++-- google/cloud/spanner_v1/session.py | 14 ++++- google/cloud/spanner_v1/snapshot.py | 16 ++++++ google/cloud/spanner_v1/transaction.py | 22 ++++++-- tests/unit/test__helpers.py | 55 ++++++++++++++++++++ tests/unit/test_client.py | 72 ++++++++++++++++++++------ tests/unit/test_database.py | 47 ++++++++++++++--- tests/unit/test_session.py | 10 +++- tests/unit/test_snapshot.py | 44 ++++++++++++++-- tests/unit/test_transaction.py | 43 +++++++++++++-- 12 files changed, 385 insertions(+), 41 deletions(-) diff --git a/google/cloud/spanner_v1/_helpers.py b/google/cloud/spanner_v1/_helpers.py index 3b7fd586c9..91e8c8d29c 100644 --- a/google/cloud/spanner_v1/_helpers.py +++ b/google/cloud/spanner_v1/_helpers.py @@ -26,6 +26,7 @@ from google.cloud._helpers import _date_from_iso8601_date from google.cloud._helpers import _datetime_to_rfc3339 from google.cloud.spanner_v1.proto import type_pb2 +from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest def _try_to_coerce_bytes(bytestring): @@ -47,6 +48,44 @@ def _try_to_coerce_bytes(bytestring): ) +def _merge_query_options(base, merge): + """Merge higher precedence QueryOptions with current QueryOptions. + + :type base: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` or None + :param base: The current QueryOptions that is intended for use. + + :type merge: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` or None + :param merge: + The QueryOptions that have a higher priority than base. These options + should overwrite the fields in base. 
+ + :rtype: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or None + :returns: + QueryOptions object formed by merging the two given QueryOptions. + If the resultant object only has empty fields, returns None. + """ + combined = base or ExecuteSqlRequest.QueryOptions() + if type(combined) == dict: + combined = ExecuteSqlRequest.QueryOptions( + optimizer_version=combined.get("optimizer_version", "") + ) + merge = merge or ExecuteSqlRequest.QueryOptions() + if type(merge) == dict: + merge = ExecuteSqlRequest.QueryOptions( + optimizer_version=merge.get("optimizer_version", "") + ) + combined.MergeFrom(merge) + if not combined.optimizer_version: + return None + return combined + + # pylint: disable=too-many-return-statements,too-many-branches def _make_value_pb(value): """Helper for :func:`_make_list_value_pbs`. diff --git a/google/cloud/spanner_v1/client.py b/google/cloud/spanner_v1/client.py index c7b331adc0..01b3ddfabf 100644 --- a/google/cloud/spanner_v1/client.py +++ b/google/cloud/spanner_v1/client.py @@ -50,9 +50,10 @@ from google.cloud.client import ClientWithProject from google.cloud.spanner_v1 import __version__ -from google.cloud.spanner_v1._helpers import _metadata_with_prefix +from google.cloud.spanner_v1._helpers import _merge_query_options, _metadata_with_prefix from google.cloud.spanner_v1.instance import DEFAULT_NODE_COUNT from google.cloud.spanner_v1.instance import Instance +from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest _CLIENT_INFO = client_info.ClientInfo(client_library_version=__version__) EMULATOR_ENV_VAR = "SPANNER_EMULATOR_HOST" @@ -62,6 +63,7 @@ "without a scheme: ex %s=localhost:8080." ) % ((EMULATOR_ENV_VAR,) * 3) SPANNER_ADMIN_SCOPE = "https://www.googleapis.com/auth/spanner.admin" +OPTIMIZER_VERSION_ENV_VAR = "SPANNER_OPTIMIZER_VERSION" _USER_AGENT_DEPRECATED = ( "The 'user_agent' argument to 'Client' is deprecated / unused. " "Please pass an appropriate 'client_info' instead." 
@@ -72,6 +74,10 @@ def _get_spanner_emulator_host(): return os.getenv(EMULATOR_ENV_VAR) +def _get_spanner_optimizer_version(): + return os.getenv(OPTIMIZER_VERSION_ENV_VAR, "") + + class InstanceConfig(object): """Named configurations for Spanner instances. @@ -132,11 +138,20 @@ class Client(ClientWithProject): :param user_agent: (Deprecated) The user agent to be used with API request. Not used. + :type client_options: :class:`~google.api_core.client_options.ClientOptions` or :class:`dict` :param client_options: (Optional) Client options used to set user options on the client. API Endpoint should be set through client_options. + :type query_options: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` + :param query_options: + (Optional) Query optimizer configuration to use for the given query. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.QueryOptions` + :raises: :class:`ValueError ` if both ``read_only`` and ``admin`` are :data:`True` """ @@ -157,6 +172,7 @@ def __init__( client_info=_CLIENT_INFO, user_agent=None, client_options=None, + query_options=None, ): # NOTE: This API has no use for the _http argument, but sending it # will have no impact since the _http() @property only lazily @@ -172,6 +188,13 @@ def __init__( else: self._client_options = client_options + env_query_options = ExecuteSqlRequest.QueryOptions( + optimizer_version=_get_spanner_optimizer_version() + ) + + # Environment flag config has higher precedence than application config. 
+ self._query_options = _merge_query_options(query_options, env_query_options) + if user_agent is not None: warnings.warn(_USER_AGENT_DEPRECATED, DeprecationWarning, stacklevel=2) self.user_agent = user_agent diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index f5ea3e46dd..9ee046e094 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -30,8 +30,11 @@ import six # pylint: disable=ungrouped-imports -from google.cloud.spanner_v1._helpers import _make_value_pb -from google.cloud.spanner_v1._helpers import _metadata_with_prefix +from google.cloud.spanner_v1._helpers import ( + _make_value_pb, + _merge_query_options, + _metadata_with_prefix, +) from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient from google.cloud.spanner_v1.gapic.transports import spanner_grpc_transport @@ -350,7 +353,9 @@ def drop(self): metadata = _metadata_with_prefix(self.name) api.drop_database(self.name, metadata=metadata) - def execute_partitioned_dml(self, dml, params=None, param_types=None): + def execute_partitioned_dml( + self, dml, params=None, param_types=None, query_options=None + ): """Execute a partitionable DML statement. :type dml: str @@ -365,9 +370,20 @@ def execute_partitioned_dml(self, dml, params=None, param_types=None): (Optional) maps explicit types for one or more param values; required if parameters are passed. + :type query_options: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` + :param query_options: + (Optional) Query optimizer configuration to use for the given query. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.QueryOptions` + :rtype: int :returns: Count of rows affected by the DML statement. 
""" + query_options = _merge_query_options( + self._instance._client._query_options, query_options + ) if params is not None: if param_types is None: raise ValueError("Specify 'param_types' when passing 'params'.") @@ -398,6 +414,7 @@ def execute_partitioned_dml(self, dml, params=None, param_types=None): transaction=txn_selector, params=params_pb, param_types=param_types, + query_options=query_options, metadata=metadata, ) @@ -748,6 +765,7 @@ def generate_query_batches( param_types=None, partition_size_bytes=None, max_partitions=None, + query_options=None, ): """Start a partitioned query operation. @@ -783,6 +801,14 @@ def generate_query_batches( service uses this as a hint, the actual number of partitions may differ. + :type query_options: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` + :param query_options: + (Optional) Query optimizer configuration to use for the given query. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.QueryOptions` + :rtype: iterable of dict :returns: mappings of information used peform actual partitioned reads via @@ -801,6 +827,13 @@ def generate_query_batches( query_info["params"] = params query_info["param_types"] = param_types + # Query-level options have higher precedence than client-level and + # environment-level options + default_query_options = self._database._instance._client._query_options + query_info["query_options"] = _merge_query_options( + default_query_options, query_options + ) + for partition in partitions: yield {"partition": partition, "query": query_info} diff --git a/google/cloud/spanner_v1/session.py b/google/cloud/spanner_v1/session.py index 863053d4ef..fc6bb028b7 100644 --- a/google/cloud/spanner_v1/session.py +++ b/google/cloud/spanner_v1/session.py @@ -202,6 +202,7 @@ def execute_sql( params=None, param_types=None, query_mode=None, + query_options=None, retry=google.api_core.gapic_v1.method.DEFAULT, 
timeout=google.api_core.gapic_v1.method.DEFAULT, ): @@ -225,11 +226,22 @@ def execute_sql( :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 + :type query_options: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` + :param query_options: (Optional) Options that are provided for query plan stability. + :rtype: :class:`~google.cloud.spanner_v1.streamed.StreamedResultSet` :returns: a result set instance which can be used to consume rows. """ return self.snapshot().execute_sql( - sql, params, param_types, query_mode, retry=retry, timeout=timeout + sql, + params, + param_types, + query_mode, + query_options=query_options, + retry=retry, + timeout=timeout, ) def batch(self): diff --git a/google/cloud/spanner_v1/snapshot.py b/google/cloud/spanner_v1/snapshot.py index ec7008fb75..56b3b6a813 100644 --- a/google/cloud/spanner_v1/snapshot.py +++ b/google/cloud/spanner_v1/snapshot.py @@ -23,6 +23,7 @@ from google.api_core.exceptions import ServiceUnavailable import google.api_core.gapic_v1.method from google.cloud._helpers import _datetime_to_pb_timestamp +from google.cloud.spanner_v1._helpers import _merge_query_options from google.cloud._helpers import _timedelta_to_duration_pb from google.cloud.spanner_v1._helpers import _make_value_pb from google.cloud.spanner_v1._helpers import _metadata_with_prefix @@ -157,6 +158,7 @@ def execute_sql( params=None, param_types=None, query_mode=None, + query_options=None, partition=None, retry=google.api_core.gapic_v1.method.DEFAULT, timeout=google.api_core.gapic_v1.method.DEFAULT, @@ -180,6 +182,14 @@ def execute_sql( :param query_mode: Mode governing return of results / query plan. 
See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 + :type query_options: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` + :param query_options: + (Optional) Query optimizer configuration to use for the given query. + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_v1.types.QueryOptions` + :type partition: bytes :param partition: (Optional) one of the partition tokens returned from :meth:`partition_query`. @@ -211,6 +221,11 @@ def execute_sql( transaction = self._make_txn_selector() api = database.spanner_api + # Query-level options have higher precedence than client-level and + # environment-level options + default_query_options = database._instance._client._query_options + query_options = _merge_query_options(default_query_options, query_options) + restart = functools.partial( api.execute_streaming_sql, self._session.name, @@ -221,6 +236,7 @@ def execute_sql( query_mode=query_mode, partition_token=partition, seqno=self._execute_sql_count, + query_options=query_options, metadata=metadata, retry=retry, timeout=timeout, diff --git a/google/cloud/spanner_v1/transaction.py b/google/cloud/spanner_v1/transaction.py index 55e2837df4..5a161fd8a6 100644 --- a/google/cloud/spanner_v1/transaction.py +++ b/google/cloud/spanner_v1/transaction.py @@ -17,8 +17,11 @@ from google.protobuf.struct_pb2 import Struct from google.cloud._helpers import _pb_timestamp_to_datetime -from google.cloud.spanner_v1._helpers import _make_value_pb -from google.cloud.spanner_v1._helpers import _metadata_with_prefix +from google.cloud.spanner_v1._helpers import ( + _make_value_pb, + _merge_query_options, + _metadata_with_prefix, +) from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionOptions from google.cloud.spanner_v1.snapshot import 
_SnapshotBase @@ -162,7 +165,9 @@ def _make_params_pb(params, param_types): return None - def execute_update(self, dml, params=None, param_types=None, query_mode=None): + def execute_update( + self, dml, params=None, param_types=None, query_mode=None, query_options=None + ): """Perform an ``ExecuteSql`` API request with DML. :type dml: str @@ -182,6 +187,11 @@ def execute_update(self, dml, params=None, param_types=None, query_mode=None): :param query_mode: Mode governing return of results / query plan. See https://cloud.google.com/spanner/reference/rpc/google.spanner.v1#google.spanner.v1.ExecuteSqlRequest.QueryMode1 + :type query_options: + :class:`google.cloud.spanner_v1.proto.ExecuteSqlRequest.QueryOptions` + or :class:`dict` + :param query_options: (Optional) Options that are provided for query plan stability. + :rtype: int :returns: Count of rows affected by the DML statement. """ @@ -191,6 +201,11 @@ def execute_update(self, dml, params=None, param_types=None, query_mode=None): transaction = self._make_txn_selector() api = database.spanner_api + # Query-level options have higher precedence than client-level and + # environment-level options + default_query_options = database._instance._client._query_options + query_options = _merge_query_options(default_query_options, query_options) + response = api.execute_sql( self._session.name, dml, @@ -198,6 +213,7 @@ def execute_update(self, dml, params=None, param_types=None, query_mode=None): params=params_pb, param_types=param_types, query_mode=query_mode, + query_options=query_options, seqno=self._execute_sql_count, metadata=metadata, ) diff --git a/tests/unit/test__helpers.py b/tests/unit/test__helpers.py index 86ce78727b..b2f2c7d5e7 100644 --- a/tests/unit/test__helpers.py +++ b/tests/unit/test__helpers.py @@ -16,6 +16,61 @@ import unittest +class Test_merge_query_options(unittest.TestCase): + def _callFUT(self, *args, **kw): + from google.cloud.spanner_v1._helpers import _merge_query_options + + return 
_merge_query_options(*args, **kw) + + def test_base_none_and_merge_none(self): + base = merge = None + result = self._callFUT(base, merge) + self.assertIsNone(result) + + def test_base_dict_and_merge_none(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + base = {"optimizer_version": "2"} + merge = None + expected = ExecuteSqlRequest.QueryOptions(optimizer_version="2") + result = self._callFUT(base, merge) + self.assertEqual(result, expected) + + def test_base_empty_and_merge_empty(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + base = ExecuteSqlRequest.QueryOptions() + merge = ExecuteSqlRequest.QueryOptions() + result = self._callFUT(base, merge) + self.assertIsNone(result) + + def test_base_none_merge_object(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + base = None + merge = ExecuteSqlRequest.QueryOptions(optimizer_version="3") + result = self._callFUT(base, merge) + self.assertEqual(result, merge) + + def test_base_none_merge_dict(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + base = None + merge = {"optimizer_version": "3"} + expected = ExecuteSqlRequest.QueryOptions(optimizer_version="3") + result = self._callFUT(base, merge) + self.assertEqual(result, expected) + + def test_base_object_merge_dict(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + base = ExecuteSqlRequest.QueryOptions(optimizer_version="1") + merge = {"optimizer_version": "3"} + expected = ExecuteSqlRequest.QueryOptions(optimizer_version="3") + result = self._callFUT(base, merge) + self.assertEqual(result, expected) + + class Test_make_value_pb(unittest.TestCase): def _callFUT(self, *args, **kw): from google.cloud.spanner_v1._helpers import _make_value_pb diff --git a/tests/unit/test_client.py b/tests/unit/test_client.py index 2e04537e02..8308ed6e92 100644 --- a/tests/unit/test_client.py +++ 
b/tests/unit/test_client.py @@ -56,6 +56,8 @@ def _constructor_test_helper( client_info=None, user_agent=None, client_options=None, + query_options=None, + expected_query_options=None, ): import google.api_core.client_options from google.cloud.spanner_v1 import client as MUT @@ -76,7 +78,11 @@ def _constructor_test_helper( expected_client_options = client_options client = self._make_one( - project=self.PROJECT, credentials=creds, user_agent=user_agent, **kwargs + project=self.PROJECT, + credentials=creds, + user_agent=user_agent, + query_options=query_options, + **kwargs ) expected_creds = expected_creds or creds.with_scopes.return_value @@ -97,15 +103,17 @@ def _constructor_test_helper( client._client_options.api_endpoint, expected_client_options.api_endpoint, ) + if expected_query_options is not None: + self.assertEqual(client._query_options, expected_query_options) - @mock.patch("google.cloud.spanner_v1.client.os.getenv") + @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") @mock.patch("warnings.warn") - def test_constructor_emulator_host_warning(self, mock_warn, mock_os): + def test_constructor_emulator_host_warning(self, mock_warn, mock_em): from google.cloud.spanner_v1 import client as MUT expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) creds = _make_credentials() - mock_os.return_value = "http://emulator.host.com" + mock_em.return_value = "http://emulator.host.com" self._constructor_test_helper(expected_scopes, creds) mock_warn.assert_called_once_with(MUT._EMULATOR_HOST_HTTP_SCHEME) @@ -175,8 +183,40 @@ def test_constructor_custom_client_options_dict(self): expected_scopes, creds, client_options={"api_endpoint": "endpoint"} ) - @mock.patch("google.cloud.spanner_v1.client.os.getenv") - def test_instance_admin_api(self, mock_getenv): + def test_constructor_custom_query_options_client_config(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import client as MUT + + expected_scopes 
= (MUT.SPANNER_ADMIN_SCOPE,) + creds = _make_credentials() + self._constructor_test_helper( + expected_scopes, + creds, + query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="1"), + expected_query_options=ExecuteSqlRequest.QueryOptions( + optimizer_version="1" + ), + ) + + @mock.patch("google.cloud.spanner_v1.client._get_spanner_optimizer_version") + def test_constructor_custom_query_options_env_config(self, mock_ver): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + from google.cloud.spanner_v1 import client as MUT + + expected_scopes = (MUT.SPANNER_ADMIN_SCOPE,) + creds = _make_credentials() + mock_ver.return_value = "2" + self._constructor_test_helper( + expected_scopes, + creds, + query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="1"), + expected_query_options=ExecuteSqlRequest.QueryOptions( + optimizer_version="2" + ), + ) + + @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") + def test_instance_admin_api(self, mock_em): from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE credentials = _make_credentials() @@ -190,7 +230,7 @@ def test_instance_admin_api(self, mock_getenv): ) expected_scopes = (SPANNER_ADMIN_SCOPE,) - mock_getenv.return_value = None + mock_em.return_value = None inst_module = "google.cloud.spanner_v1.client.InstanceAdminClient" with mock.patch(inst_module) as instance_admin_client: api = client.instance_admin_api @@ -209,8 +249,8 @@ def test_instance_admin_api(self, mock_getenv): credentials.with_scopes.assert_called_once_with(expected_scopes) - @mock.patch("google.cloud.spanner_v1.client.os.getenv") - def test_instance_admin_api_emulator(self, mock_getenv): + @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") + def test_instance_admin_api_emulator(self, mock_em): credentials = _make_credentials() client_info = mock.Mock() client_options = mock.Mock() @@ -221,7 +261,7 @@ def test_instance_admin_api_emulator(self, mock_getenv): 
client_options=client_options, ) - mock_getenv.return_value = "true" + mock_em.return_value = "true" inst_module = "google.cloud.spanner_v1.client.InstanceAdminClient" with mock.patch(inst_module) as instance_admin_client: api = client.instance_admin_api @@ -240,8 +280,8 @@ def test_instance_admin_api_emulator(self, mock_getenv): self.assertIn("transport", called_kw) self.assertNotIn("credentials", called_kw) - @mock.patch("google.cloud.spanner_v1.client.os.getenv") - def test_database_admin_api(self, mock_getenv): + @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") + def test_database_admin_api(self, mock_em): from google.cloud.spanner_v1.client import SPANNER_ADMIN_SCOPE credentials = _make_credentials() @@ -255,7 +295,7 @@ def test_database_admin_api(self, mock_getenv): ) expected_scopes = (SPANNER_ADMIN_SCOPE,) - mock_getenv.return_value = None + mock_em.return_value = None db_module = "google.cloud.spanner_v1.client.DatabaseAdminClient" with mock.patch(db_module) as database_admin_client: api = client.database_admin_api @@ -274,8 +314,8 @@ def test_database_admin_api(self, mock_getenv): credentials.with_scopes.assert_called_once_with(expected_scopes) - @mock.patch("google.cloud.spanner_v1.client.os.getenv") - def test_database_admin_api_emulator(self, mock_getenv): + @mock.patch("google.cloud.spanner_v1.client._get_spanner_emulator_host") + def test_database_admin_api_emulator(self, mock_em): credentials = _make_credentials() client_info = mock.Mock() client_options = mock.Mock() @@ -286,7 +326,7 @@ def test_database_admin_api_emulator(self, mock_getenv): client_options=client_options, ) - mock_getenv.return_value = "true" + mock_em.return_value = "host:port" db_module = "google.cloud.spanner_v1.client.DatabaseAdminClient" with mock.patch(db_module) as database_admin_client: api = client.database_admin_api diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 7bf14de751..2d7e2e1888 100644 --- 
a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -924,7 +924,9 @@ def test_drop_success(self): metadata=[("google-cloud-resource-prefix", database.name)], ) - def _execute_partitioned_dml_helper(self, dml, params=None, param_types=None): + def _execute_partitioned_dml_helper( + self, dml, params=None, param_types=None, query_options=None + ): from google.protobuf.struct_pb2 import Struct from google.cloud.spanner_v1.proto.result_set_pb2 import ( PartialResultSet, @@ -935,7 +937,10 @@ def _execute_partitioned_dml_helper(self, dml, params=None, param_types=None): TransactionSelector, TransactionOptions, ) - from google.cloud.spanner_v1._helpers import _make_value_pb + from google.cloud.spanner_v1._helpers import ( + _make_value_pb, + _merge_query_options, + ) transaction_pb = TransactionPB(id=self.TRANSACTION_ID) @@ -953,7 +958,9 @@ def _execute_partitioned_dml_helper(self, dml, params=None, param_types=None): api.begin_transaction.return_value = transaction_pb api.execute_streaming_sql.return_value = iterator - row_count = database.execute_partitioned_dml(dml, params, param_types) + row_count = database.execute_partitioned_dml( + dml, params, param_types, query_options + ) self.assertEqual(row_count, 2) @@ -975,6 +982,11 @@ def _execute_partitioned_dml_helper(self, dml, params=None, param_types=None): expected_params = None expected_transaction = TransactionSelector(id=self.TRANSACTION_ID) + expected_query_options = client._query_options + if query_options: + expected_query_options = _merge_query_options( + expected_query_options, query_options + ) api.execute_streaming_sql.assert_called_once_with( self.SESSION_NAME, @@ -982,6 +994,7 @@ def _execute_partitioned_dml_helper(self, dml, params=None, param_types=None): transaction=expected_transaction, params=expected_params, param_types=param_types, + query_options=expected_query_options, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -997,6 +1010,14 @@ def 
test_execute_partitioned_dml_w_params_and_param_types(self): dml=DML_W_PARAM, params=PARAMS, param_types=PARAM_TYPES ) + def test_execute_partitioned_dml_w_query_options(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + self._execute_partitioned_dml_helper( + dml=DML_W_PARAM, + query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"), + ) + def test_session_factory_defaults(self): from google.cloud.spanner_v1.session import Session @@ -1615,7 +1636,9 @@ def test_process_read_batch(self): def test_generate_query_batches_w_max_partitions(self): sql = "SELECT COUNT(*) FROM table_name" max_partitions = len(self.TOKENS) - database = self._make_database() + client = _Client(self.PROJECT_ID) + instance = _Instance(self.INSTANCE_NAME, client=client) + database = _Database(self.DATABASE_NAME, instance=instance) batch_txn = self._make_one(database) snapshot = batch_txn._snapshot = self._make_snapshot() snapshot.partition_query.return_value = self.TOKENS @@ -1624,7 +1647,7 @@ def test_generate_query_batches_w_max_partitions(self): batch_txn.generate_query_batches(sql, max_partitions=max_partitions) ) - expected_query = {"sql": sql} + expected_query = {"sql": sql, "query_options": client._query_options} self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): self.assertEqual(batch["partition"], token) @@ -1645,7 +1668,9 @@ def test_generate_query_batches_w_params_w_partition_size_bytes(self): params = {"max_age": 30} param_types = {"max_age": "INT64"} size = 1 << 20 - database = self._make_database() + client = _Client(self.PROJECT_ID) + instance = _Instance(self.INSTANCE_NAME, client=client) + database = _Database(self.DATABASE_NAME, instance=instance) batch_txn = self._make_one(database) snapshot = batch_txn._snapshot = self._make_snapshot() snapshot.partition_query.return_value = self.TOKENS @@ -1656,7 +1681,12 @@ def test_generate_query_batches_w_params_w_partition_size_bytes(self): ) ) - 
expected_query = {"sql": sql, "params": params, "param_types": param_types} + expected_query = { + "sql": sql, + "params": params, + "param_types": param_types, + "query_options": client._query_options, + } self.assertEqual(len(batches), len(self.TOKENS)) for batch, token in zip(batches, self.TOKENS): self.assertEqual(batch["partition"], token) @@ -1782,12 +1812,15 @@ def _make_instance_api(): class _Client(object): def __init__(self, project=TestDatabase.PROJECT_ID): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + self.project = project self.project_name = "projects/" + self.project self._endpoint_cache = {} self.instance_admin_api = _make_instance_api() self._client_info = mock.Mock() self._client_options = mock.Mock() + self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") class _Instance(object): diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 1eff634af0..e2bf18c723 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -362,6 +362,7 @@ def test_execute_sql_defaults(self): None, None, None, + query_options=None, timeout=google.api_core.gapic_v1.method.DEFAULT, retry=google.api_core.gapic_v1.method.DEFAULT, ) @@ -386,7 +387,13 @@ def test_execute_sql_non_default_retry(self): self.assertIs(found, snapshot().execute_sql.return_value) snapshot().execute_sql.assert_called_once_with( - SQL, params, param_types, "PLAN", timeout=None, retry=None + SQL, + params, + param_types, + "PLAN", + query_options=None, + timeout=None, + retry=None, ) def test_execute_sql_explicit(self): @@ -411,6 +418,7 @@ def test_execute_sql_explicit(self): params, param_types, "PLAN", + query_options=None, timeout=google.api_core.gapic_v1.method.DEFAULT, retry=google.api_core.gapic_v1.method.DEFAULT, ) diff --git a/tests/unit/test_snapshot.py b/tests/unit/test_snapshot.py index 883ab73258..e29b19d5f1 100644 --- a/tests/unit/test_snapshot.py +++ b/tests/unit/test_snapshot.py @@ -326,6 +326,7 @@ def 
_execute_sql_helper( count=0, partition=None, sql_count=0, + query_options=None, timeout=google.api_core.gapic_v1.method.DEFAULT, retry=google.api_core.gapic_v1.method.DEFAULT, ): @@ -341,7 +342,10 @@ def _execute_sql_helper( ) from google.cloud.spanner_v1.proto.type_pb2 import Type, StructType from google.cloud.spanner_v1.proto.type_pb2 import STRING, INT64 - from google.cloud.spanner_v1._helpers import _make_value_pb + from google.cloud.spanner_v1._helpers import ( + _make_value_pb, + _merge_query_options, + ) VALUES = [[u"bharney", u"rhubbyl", 31], [u"phred", u"phlyntstone", 32]] VALUE_PBS = [[_make_value_pb(item) for item in row] for row in VALUES] @@ -378,6 +382,7 @@ def _execute_sql_helper( PARAMS, PARAM_TYPES, query_mode=MODE, + query_options=query_options, partition=partition, retry=retry, timeout=timeout, @@ -410,6 +415,12 @@ def _execute_sql_helper( fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()} ) + expected_query_options = database._instance._client._query_options + if query_options: + expected_query_options = _merge_query_options( + expected_query_options, query_options + ) + api.execute_streaming_sql.assert_called_once_with( self.SESSION_NAME, SQL_QUERY_WITH_PARAM, @@ -417,6 +428,7 @@ def _execute_sql_helper( params=expected_params, param_types=PARAM_TYPES, query_mode=MODE, + query_options=expected_query_options, partition_token=partition, seqno=sql_count, metadata=[("google-cloud-resource-prefix", database.name)], @@ -452,6 +464,14 @@ def test_execute_sql_w_retry(self): def test_execute_sql_w_timeout(self): self._execute_sql_helper(multi_use=False, timeout=None) + def test_execute_sql_w_query_options(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + self._execute_sql_helper( + multi_use=False, + query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3"), + ) + def _partition_read_helper( self, multi_use, w_txn, size=None, max_partitions=None, index=None ): @@ -971,16 +991,30 @@ def 
test_begin_ok_exact_strong(self): ) +class _Client(object): + def __init__(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") + + +class _Instance(object): + def __init__(self): + self._client = _Client() + + +class _Database(object): + def __init__(self): + self.name = "testing" + self._instance = _Instance() + + class _Session(object): def __init__(self, database=None, name=TestSnapshot.SESSION_NAME): self._database = database self.name = name -class _Database(object): - name = "testing" - - class _MockIterator(object): def __init__(self, *values, **kw): self._iter_values = iter(values) diff --git a/tests/unit/test_transaction.py b/tests/unit/test_transaction.py index 9ef13c2ab6..dcb6cb95d3 100644 --- a/tests/unit/test_transaction.py +++ b/tests/unit/test_transaction.py @@ -350,14 +350,17 @@ def test_execute_update_w_params_wo_param_types(self): with self.assertRaises(ValueError): transaction.execute_update(DML_QUERY_WITH_PARAM, PARAMS) - def _execute_update_helper(self, count=0): + def _execute_update_helper(self, count=0, query_options=None): from google.protobuf.struct_pb2 import Struct from google.cloud.spanner_v1.proto.result_set_pb2 import ( ResultSet, ResultSetStats, ) from google.cloud.spanner_v1.proto.transaction_pb2 import TransactionSelector - from google.cloud.spanner_v1._helpers import _make_value_pb + from google.cloud.spanner_v1._helpers import ( + _make_value_pb, + _merge_query_options, + ) MODE = 2 # PROFILE stats_pb = ResultSetStats(row_count_exact=1) @@ -370,7 +373,11 @@ def _execute_update_helper(self, count=0): transaction._execute_sql_count = count row_count = transaction.execute_update( - DML_QUERY_WITH_PARAM, PARAMS, PARAM_TYPES, query_mode=MODE + DML_QUERY_WITH_PARAM, + PARAMS, + PARAM_TYPES, + query_mode=MODE, + query_options=query_options, ) self.assertEqual(row_count, 1) @@ -380,6 +387,12 @@ def _execute_update_helper(self, 
count=0): fields={key: _make_value_pb(value) for (key, value) in PARAMS.items()} ) + expected_query_options = database._instance._client._query_options + if query_options: + expected_query_options = _merge_query_options( + expected_query_options, query_options + ) + api.execute_sql.assert_called_once_with( self.SESSION_NAME, DML_QUERY_WITH_PARAM, @@ -387,6 +400,7 @@ def _execute_update_helper(self, count=0): params=expected_params, param_types=PARAM_TYPES, query_mode=MODE, + query_options=expected_query_options, seqno=count, metadata=[("google-cloud-resource-prefix", database.name)], ) @@ -399,6 +413,13 @@ def test_execute_update_new_transaction(self): def test_execute_update_w_count(self): self._execute_update_helper(count=1) + def test_execute_update_w_query_options(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + self._execute_update_helper( + query_options=ExecuteSqlRequest.QueryOptions(optimizer_version="3") + ) + def test_batch_update_other_error(self): database = _Database() database.spanner_api = self._make_spanner_api() @@ -557,8 +578,22 @@ def test_context_mgr_failure(self): self.assertEqual(metadata, [("google-cloud-resource-prefix", database.name)]) +class _Client(object): + def __init__(self): + from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest + + self._query_options = ExecuteSqlRequest.QueryOptions(optimizer_version="1") + + +class _Instance(object): + def __init__(self): + self._client = _Client() + + class _Database(object): - name = "testing" + def __init__(self): + self.name = "testing" + self._instance = _Instance() class _Session(object): From a48c4b1c669e62232244315e9f509fc309388c2c Mon Sep 17 00:00:00 2001 From: Yoshi Automation Bot Date: Thu, 12 Mar 2020 17:30:38 -0700 Subject: [PATCH 11/14] chore: add backups (via synth) Co-authored-by: larkee <31196561+larkee@users.noreply.github.com> --- .../gapic/database_admin_client.py | 941 ++++++++++- .../gapic/database_admin_client_config.py 
| 40 + .../spanner_admin_database_v1/gapic/enums.py | 38 + .../database_admin_grpc_transport.py | 166 +- .../proto/backup.proto | 363 +++++ .../proto/backup_pb2.py | 1379 +++++++++++++++++ .../proto/backup_pb2_grpc.py | 2 + .../proto/common.proto | 43 + .../proto/common_pb2.py | 151 ++ .../proto/common_pb2_grpc.py | 2 + .../proto/spanner_database_admin.proto | 364 ++++- .../proto/spanner_database_admin_pb2.py | 1174 ++++++++++++-- .../proto/spanner_database_admin_pb2_grpc.py | 210 ++- .../proto/spanner_instance_admin_pb2.py | 4 +- synth.metadata | 12 +- .../gapic/v1/test_database_admin_client_v1.py | 382 +++++ 16 files changed, 5126 insertions(+), 145 deletions(-) create mode 100644 google/cloud/spanner_admin_database_v1/proto/backup.proto create mode 100644 google/cloud/spanner_admin_database_v1/proto/backup_pb2.py create mode 100644 google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py create mode 100644 google/cloud/spanner_admin_database_v1/proto/common.proto create mode 100644 google/cloud/spanner_admin_database_v1/proto/common_pb2.py create mode 100644 google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py diff --git a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py b/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py index cbb2c084cd..38f16638bd 100644 --- a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py +++ b/google/cloud/spanner_admin_database_v1/gapic/database_admin_client.py @@ -31,6 +31,7 @@ import google.api_core.operations_v1 import google.api_core.page_iterator import google.api_core.path_template +import google.api_core.protobuf_helpers import grpc from google.cloud.spanner_admin_database_v1.gapic import database_admin_client_config @@ -38,6 +39,7 @@ from google.cloud.spanner_admin_database_v1.gapic.transports import ( database_admin_grpc_transport, ) +from google.cloud.spanner_admin_database_v1.proto import backup_pb2 from google.cloud.spanner_admin_database_v1.proto 
import spanner_database_admin_pb2 from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2_grpc from google.iam.v1 import iam_policy_pb2 @@ -45,6 +47,7 @@ from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 _GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-spanner").version @@ -56,7 +59,8 @@ class DatabaseAdminClient(object): The Cloud Spanner Database Admin API can be used to create, drop, and list databases. It also enables updating the schema of pre-existing - databases. + databases. It can be also used to create, delete and list backups for a + database and to restore from an existing backup. """ SERVICE_ADDRESS = "spanner.googleapis.com:443" @@ -86,6 +90,16 @@ def from_service_account_file(cls, filename, *args, **kwargs): from_service_account_json = from_service_account_file + @classmethod + def backup_path(cls, project, instance, backup): + """Return a fully-qualified backup string.""" + return google.api_core.path_template.expand( + "projects/{project}/instances/{instance}/backups/{backup}", + project=project, + instance=instance, + backup=backup, + ) + @classmethod def database_path(cls, project, instance, database): """Return a fully-qualified database string.""" @@ -264,7 +278,7 @@ def create_database( ``[a-z][a-z0-9_\-]*[a-z0-9]`` and be between 2 and 30 characters in length. If the database ID is a reserved word or if it contains a hyphen, the database ID must be enclosed in backticks (`````). - extra_statements (list[str]): An optional list of DDL statements to run inside the newly created + extra_statements (list[str]): Optional. A list of DDL statements to run inside the newly created database. Statements can create tables, indexes, etc. These statements execute atomically with the creation of the database: if there is an error in any statement, the database is not created. 
@@ -516,7 +530,8 @@ def drop_database( metadata=None, ): """ - Drops (aka deletes) a Cloud Spanner database. + Drops (aka deletes) a Cloud Spanner database. Completed backups for the + database will be retained according to their ``expire_time``. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -654,11 +669,12 @@ def set_iam_policy( metadata=None, ): """ - Sets the access control policy on a database resource. Replaces any - existing policy. + Sets the access control policy on a database or backup resource. + Replaces any existing policy. Authorization requires ``spanner.databases.setIamPolicy`` permission on - ``resource``. + ``resource``. For backups, authorization requires + ``spanner.backups.setIamPolicy`` permission on ``resource``. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -740,11 +756,13 @@ def get_iam_policy( metadata=None, ): """ - Gets the access control policy for a database resource. Returns an empty - policy if a database exists but does not have a policy set. + Gets the access control policy for a database or backup resource. + Returns an empty policy if a database or backup exists but does not have + a policy set. Authorization requires ``spanner.databases.getIamPolicy`` permission on - ``resource``. + ``resource``. For backups, authorization requires + ``spanner.backups.getIamPolicy`` permission on ``resource``. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -823,13 +841,15 @@ def test_iam_permissions( metadata=None, ): """ - Returns permissions that the caller has on the specified database - resource. + Returns permissions that the caller has on the specified database or + backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT\_FOUND error if the user has ``spanner.databases.list`` permission on the containing Cloud Spanner instance. Otherwise returns - an empty set of permissions. + an empty set of permissions. 
Calling this method on a backup that does + not exist will result in a NOT\_FOUND error if the user has + ``spanner.backups.list`` permission on the containing instance. Example: >>> from google.cloud import spanner_admin_database_v1 @@ -901,6 +921,903 @@ def test_iam_permissions( request, retry=retry, timeout=timeout, metadata=metadata ) + def create_backup( + self, + parent, + backup_id, + backup, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Starts creating a new Cloud Spanner Backup. The returned backup + ``long-running operation`` will have a name of the format + ``projects//instances//backups//operations/`` + and can be used to track creation of the backup. The ``metadata`` field + type is ``CreateBackupMetadata``. The ``response`` field type is + ``Backup``, if successful. Cancelling the returned operation will stop + the creation and delete the backup. There can be only one pending backup + creation per database. Backup creation of different databases can run + concurrently. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `backup_id`: + >>> backup_id = '' + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> response = client.create_backup(parent, backup_id, backup) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. The name of the instance in which the backup will be created. + This must be the same instance that contains the database the backup + will be created from. 
The backup will be stored in the location(s) + specified in the instance configuration of this instance. Values are of + the form ``projects//instances/``. + backup_id (str): Required. The id of the backup to be created. The ``backup_id`` appended + to ``parent`` forms the full backup name of the form + ``projects//instances//backups/``. + backup (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Backup]): Required. The backup to create. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_admin_database_v1.types.Backup` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.spanner_admin_database_v1.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "create_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "create_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.create_backup, + default_retry=self._method_configs["CreateBackup"].retry, + default_timeout=self._method_configs["CreateBackup"].timeout, + client_info=self._client_info, + ) + + request = backup_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["create_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + backup_pb2.Backup, + metadata_type=backup_pb2.CreateBackupMetadata, + ) + + def get_backup( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Gets metadata on a pending or completed ``Backup``. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[BACKUP]') + >>> + >>> response = client.get_backup(name) + + Args: + name (str): Required. Name of the backup. Values are of the form + ``projects//instances//backups/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. 
+ metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.spanner_admin_database_v1.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "get_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "get_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.get_backup, + default_retry=self._method_configs["GetBackup"].retry, + default_timeout=self._method_configs["GetBackup"].timeout, + client_info=self._client_info, + ) + + request = backup_pb2.GetBackupRequest(name=name) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["get_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def update_backup( + self, + backup, + update_mask, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Updates a pending or completed ``Backup``. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> # TODO: Initialize `backup`: + >>> backup = {} + >>> + >>> # TODO: Initialize `update_mask`: + >>> update_mask = {} + >>> + >>> response = client.update_backup(backup, update_mask) + + Args: + backup (Union[dict, ~google.cloud.spanner_admin_database_v1.types.Backup]): Required. The backup to update. 
``backup.name``, and the fields to be + updated as specified by ``update_mask`` are required. Other fields are + ignored. Update is only supported for the following fields: + + - ``backup.expire_time``. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_admin_database_v1.types.Backup` + update_mask (Union[dict, ~google.cloud.spanner_admin_database_v1.types.FieldMask]): Required. A mask specifying which fields (e.g. ``expire_time``) in the + Backup resource should be updated. This mask is relative to the Backup + resource, not to the request message. The field mask must always be + specified; this prevents any future fields from being erased + accidentally by clients that do not know about them. + + If a dict is provided, it must be of the same form as the protobuf + message :class:`~google.cloud.spanner_admin_database_v1.types.FieldMask` + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.spanner_admin_database_v1.types.Backup` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "update_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "update_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.update_backup, + default_retry=self._method_configs["UpdateBackup"].retry, + default_timeout=self._method_configs["UpdateBackup"].timeout, + client_info=self._client_info, + ) + + request = backup_pb2.UpdateBackupRequest(backup=backup, update_mask=update_mask) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("backup.name", backup.name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + return self._inner_api_calls["update_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def delete_backup( + self, + name, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Deletes a pending or completed ``Backup``. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> name = client.backup_path('[PROJECT]', '[INSTANCE]', '[BACKUP]') + >>> + >>> client.delete_backup(name) + + Args: + name (str): Required. Name of the backup to delete. Values are of the form + ``projects//instances//backups/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. 
+ google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "delete_backup" not in self._inner_api_calls: + self._inner_api_calls[ + "delete_backup" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.delete_backup, + default_retry=self._method_configs["DeleteBackup"].retry, + default_timeout=self._method_configs["DeleteBackup"].timeout, + client_info=self._client_info, + ) + + request = backup_pb2.DeleteBackupRequest(name=name) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("name", name)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + self._inner_api_calls["delete_backup"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + + def list_backups( + self, + parent, + filter_=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists completed and pending backups. Backups returned are ordered by + ``create_time`` in descending order, starting from the most recent + ``create_time``. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # Iterate over all results + >>> for element in client.list_backups(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_backups(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The instance to list backups from. 
Values are of the form + ``projects//instances/``. + filter_ (str): An expression that filters the list of returned backups. + + A filter expression consists of a field name, a comparison operator, and + a value for filtering. The value must be a string, a number, or a + boolean. The comparison operator must be one of: ``<``, ``>``, ``<=``, + ``>=``, ``!=``, ``=``, or ``:``. Colon ``:`` is the contains operator. + Filter rules are not case sensitive. + + The following fields in the ``Backup`` are eligible for filtering: + + - ``name`` + - ``database`` + - ``state`` + - ``create_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + You can combine multiple expressions by enclosing each expression in + parentheses. By default, expressions are combined with AND logic, but + you can specify AND, OR, and NOT logic explicitly. + + Here are a few examples: + + - ``name:Howl`` - The backup's name contains the string "howl". + - ``database:prod`` - The database's name contains the string "prod". + - ``state:CREATING`` - The backup is pending creation. + - ``state:READY`` - The backup is fully created and ready for use. + - ``(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")`` - The + backup name contains the string "howl" and ``create_time`` of the + backup is before 2018-03-28T14:50:00Z. + - ``expire_time < \"2018-03-28T14:50:00Z\"`` - The backup + ``expire_time`` is before 2018-03-28T14:50:00Z. + - ``size_bytes > 10000000000`` - The backup's size is greater than 10GB + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. 
If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Backup` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "list_backups" not in self._inner_api_calls: + self._inner_api_calls[ + "list_backups" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_backups, + default_retry=self._method_configs["ListBackups"].retry, + default_timeout=self._method_configs["ListBackups"].timeout, + client_info=self._client_info, + ) + + request = backup_pb2.ListBackupsRequest( + parent=parent, filter=filter_, page_size=page_size + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_backups"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="backups", + request_token_field="page_token", + 
response_token_field="next_page_token", + ) + return iterator + + def restore_database( + self, + parent, + database_id, + backup=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Create a new database by restoring from a completed backup. The new + database must be in the same project and in an instance with the same + instance configuration as the instance containing the backup. The + returned database ``long-running operation`` has a name of the format + ``projects//instances//databases//operations/``, + and can be used to track the progress of the operation, and to cancel + it. The ``metadata`` field type is ``RestoreDatabaseMetadata``. The + ``response`` type is ``Database``, if successful. Cancelling the + returned operation will stop the restore and delete the database. There + can be only one database being restored into an instance at a time. Once + the restore operation completes, a new restore operation can be + initiated, without waiting for the optimize operation associated with + the first restore to complete. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # TODO: Initialize `database_id`: + >>> database_id = '' + >>> + >>> response = client.restore_database(parent, database_id) + >>> + >>> def callback(operation_future): + ... # Handle result. + ... result = operation_future.result() + >>> + >>> response.add_done_callback(callback) + >>> + >>> # Handle metadata. + >>> metadata = response.metadata() + + Args: + parent (str): Required. The name of the instance in which to create the restored + database. This instance must be in the same project and have the same + instance configuration as the instance containing the source backup. + Values are of the form ``projects//instances/``. 
+ database_id (str): Required. The id of the database to create and restore to. This database + must not already exist. The ``database_id`` appended to ``parent`` forms + the full database name of the form + ``projects//instances//databases/``. + backup (str): Name of the backup from which to restore. Values are of the form + ``projects//instances//backups/``. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.cloud.spanner_admin_database_v1.types._OperationFuture` instance. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. + if "restore_database" not in self._inner_api_calls: + self._inner_api_calls[ + "restore_database" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.restore_database, + default_retry=self._method_configs["RestoreDatabase"].retry, + default_timeout=self._method_configs["RestoreDatabase"].timeout, + client_info=self._client_info, + ) + + # Sanity check: We have some fields which are mutually exclusive; + # raise ValueError if more than one is sent. 
+ google.api_core.protobuf_helpers.check_oneof(backup=backup) + + request = spanner_database_admin_pb2.RestoreDatabaseRequest( + parent=parent, database_id=database_id, backup=backup + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + operation = self._inner_api_calls["restore_database"]( + request, retry=retry, timeout=timeout, metadata=metadata + ) + return google.api_core.operation.from_gapic( + operation, + self.transport._operations_client, + spanner_database_admin_pb2.Database, + metadata_type=spanner_database_admin_pb2.RestoreDatabaseMetadata, + ) + + def list_database_operations( + self, + parent, + filter_=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists database ``longrunning-operations``. A database operation has a + name of the form + ``projects//instances//databases//operations/``. + The long-running operation ``metadata`` field type ``metadata.type_url`` + describes the type of the metadata. Operations returned include those + that have completed/failed/canceled within the last 7 days, and pending + operations. + + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # Iterate over all results + >>> for element in client.list_database_operations(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_database_operations(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. 
The instance of the database operations. Values are of the + form ``projects//instances/``. + filter_ (str): An expression that filters the list of returned operations. + + A filter expression consists of a field name, a comparison operator, and + a value for filtering. The value must be a string, a number, or a + boolean. The comparison operator must be one of: ``<``, ``>``, ``<=``, + ``>=``, ``!=``, ``=``, or ``:``. Colon ``:`` is the contains operator. + Filter rules are not case sensitive. + + The following fields in the ``Operation`` are eligible for filtering: + + - ``name`` - The name of the long-running operation + - ``done`` - False if the operation is in progress, else true. + - ``metadata.@type`` - the type of metadata. For example, the type + string for ``RestoreDatabaseMetadata`` is + ``type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata``. + - ``metadata.`` - any field in metadata.value. + - ``error`` - Error associated with the long-running operation. + - ``response.@type`` - the type of response. + - ``response.`` - any field in response.value. + + You can combine multiple expressions by enclosing each expression in + parentheses. By default, expressions are combined with AND logic. + However, you can specify AND, OR, and NOT logic explicitly. + + Here are a few examples: + + - ``done:true`` - The operation is complete. + - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND`` + ``(metadata.source_type:BACKUP) AND`` + ``(metadata.backup_info.backup:backup_howl) AND`` + ``(metadata.name:restored_howl) AND`` + ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`` + ``(error:*)`` - Return operations where: + + - The operation's metadata type is ``RestoreDatabaseMetadata``. + - The database is restored from a backup. + - The backup name contains "backup\_howl". + - The restored database's name contains "restored\_howl". 
+ - The operation started before 2018-03-28T14:50:00Z. + - The operation resulted in an error. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Operation` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "list_database_operations" not in self._inner_api_calls: + self._inner_api_calls[ + "list_database_operations" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_database_operations, + default_retry=self._method_configs["ListDatabaseOperations"].retry, + default_timeout=self._method_configs["ListDatabaseOperations"].timeout, + client_info=self._client_info, + ) + + request = spanner_database_admin_pb2.ListDatabaseOperationsRequest( + parent=parent, filter=filter_, page_size=page_size + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_database_operations"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="operations", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + + def list_backup_operations( + self, + parent, + filter_=None, + page_size=None, + retry=google.api_core.gapic_v1.method.DEFAULT, + timeout=google.api_core.gapic_v1.method.DEFAULT, + metadata=None, + ): + """ + Lists the backup ``long-running operations`` in the given instance. A + backup operation has a name of the form + ``projects//instances//backups//operations/``. + The long-running operation ``metadata`` field type ``metadata.type_url`` + describes the type of the metadata. Operations returned include those + that have completed/failed/canceled within the last 7 days, and pending + operations. Operations returned are ordered by + ``operation.metadata.value.progress.start_time`` in descending order + starting from the most recently started operation. 
+ + Example: + >>> from google.cloud import spanner_admin_database_v1 + >>> + >>> client = spanner_admin_database_v1.DatabaseAdminClient() + >>> + >>> parent = client.instance_path('[PROJECT]', '[INSTANCE]') + >>> + >>> # Iterate over all results + >>> for element in client.list_backup_operations(parent): + ... # process element + ... pass + >>> + >>> + >>> # Alternatively: + >>> + >>> # Iterate over results one page at a time + >>> for page in client.list_backup_operations(parent).pages: + ... for element in page: + ... # process element + ... pass + + Args: + parent (str): Required. The instance of the backup operations. Values are of the form + ``projects//instances/``. + filter_ (str): An expression that filters the list of returned backup operations. + + A filter expression consists of a field name, a comparison operator, and + a value for filtering. The value must be a string, a number, or a + boolean. The comparison operator must be one of: ``<``, ``>``, ``<=``, + ``>=``, ``!=``, ``=``, or ``:``. Colon ``:`` is the contains operator. + Filter rules are not case sensitive. + + The following fields in the ``operation`` are eligible for filtering: + + - ``name`` - The name of the long-running operation + - ``done`` - False if the operation is in progress, else true. + - ``metadata.@type`` - the type of metadata. For example, the type + string for ``CreateBackupMetadata`` is + ``type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata``. + - ``metadata.`` - any field in metadata.value. + - ``error`` - Error associated with the long-running operation. + - ``response.@type`` - the type of response. + - ``response.`` - any field in response.value. + + You can combine multiple expressions by enclosing each expression in + parentheses. By default, expressions are combined with AND logic, but + you can specify AND, OR, and NOT logic explicitly. + + Here are a few examples: + + - ``done:true`` - The operation is complete. 
+ - ``metadata.database:prod`` - The database the backup was taken from + has a name containing the string "prod". + - ``(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND`` + ``(metadata.name:howl) AND`` + ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`` + ``(error:*)`` - Returns operations where: + + - The operation's metadata type is ``CreateBackupMetadata``. + - The backup name contains the string "howl". + - The operation started before 2018-03-28T14:50:00Z. + - The operation resulted in an error. + page_size (int): The maximum number of resources contained in the + underlying API response. If page streaming is performed per- + resource, this parameter does not affect the return value. If page + streaming is performed per-page, this determines the maximum number + of resources in a page. + retry (Optional[google.api_core.retry.Retry]): A retry object used + to retry requests. If ``None`` is specified, requests will + be retried using a default configuration. + timeout (Optional[float]): The amount of time, in seconds, to wait + for the request to complete. Note that if ``retry`` is + specified, the timeout applies to each individual attempt. + metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata + that is provided to the method. + + Returns: + A :class:`~google.api_core.page_iterator.PageIterator` instance. + An iterable of :class:`~google.cloud.spanner_admin_database_v1.types.Operation` instances. + You can also iterate over the pages of the response + using its `pages` property. + + Raises: + google.api_core.exceptions.GoogleAPICallError: If the request + failed for any reason. + google.api_core.exceptions.RetryError: If the request failed due + to a retryable error and retry attempts failed. + ValueError: If the parameters are invalid. + """ + # Wrap the transport method to add retry and timeout logic. 
+ if "list_backup_operations" not in self._inner_api_calls: + self._inner_api_calls[ + "list_backup_operations" + ] = google.api_core.gapic_v1.method.wrap_method( + self.transport.list_backup_operations, + default_retry=self._method_configs["ListBackupOperations"].retry, + default_timeout=self._method_configs["ListBackupOperations"].timeout, + client_info=self._client_info, + ) + + request = backup_pb2.ListBackupOperationsRequest( + parent=parent, filter=filter_, page_size=page_size + ) + if metadata is None: + metadata = [] + metadata = list(metadata) + try: + routing_header = [("parent", parent)] + except AttributeError: + pass + else: + routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata( + routing_header + ) + metadata.append(routing_metadata) + + iterator = google.api_core.page_iterator.GRPCIterator( + client=None, + method=functools.partial( + self._inner_api_calls["list_backup_operations"], + retry=retry, + timeout=timeout, + metadata=metadata, + ), + request=request, + items_field="operations", + request_token_field="page_token", + response_token_field="next_page_token", + ) + return iterator + def list_databases( self, parent, diff --git a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py b/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py index 90c9f796e2..d6f830eeee 100644 --- a/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py +++ b/google/cloud/spanner_admin_database_v1/gapic/database_admin_client_config.py @@ -57,6 +57,46 @@ "retry_codes_name": "non_idempotent", "retry_params_name": "default", }, + "CreateBackup": { + "timeout_millis": 30000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "GetBackup": { + "timeout_millis": 30000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "UpdateBackup": { + "timeout_millis": 30000, + "retry_codes_name": "non_idempotent", + "retry_params_name": 
"default", + }, + "DeleteBackup": { + "timeout_millis": 30000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "ListBackups": { + "timeout_millis": 30000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "RestoreDatabase": { + "timeout_millis": 30000, + "retry_codes_name": "non_idempotent", + "retry_params_name": "default", + }, + "ListDatabaseOperations": { + "timeout_millis": 30000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, + "ListBackupOperations": { + "timeout_millis": 30000, + "retry_codes_name": "idempotent", + "retry_params_name": "default", + }, "ListDatabases": { "timeout_millis": 60000, "retry_codes_name": "idempotent", diff --git a/google/cloud/spanner_admin_database_v1/gapic/enums.py b/google/cloud/spanner_admin_database_v1/gapic/enums.py index aa1a519027..d972ddfc57 100644 --- a/google/cloud/spanner_admin_database_v1/gapic/enums.py +++ b/google/cloud/spanner_admin_database_v1/gapic/enums.py @@ -19,6 +19,36 @@ import enum +class RestoreSourceType(enum.IntEnum): + """ + Indicates the type of the restore source. + + Attributes: + TYPE_UNSPECIFIED (int): No restore associated. + BACKUP (int): A backup was used as the source of the restore. + """ + + TYPE_UNSPECIFIED = 0 + BACKUP = 1 + + +class Backup(object): + class State(enum.IntEnum): + """ + Indicates the current state of the backup. + + Attributes: + STATE_UNSPECIFIED (int): Not specified. + CREATING (int): The pending backup is still being created. Operations on the backup may + fail with ``FAILED_PRECONDITION`` in this state. + READY (int): The backup is complete and ready for use. + """ + + STATE_UNSPECIFIED = 0 + CREATING = 1 + READY = 2 + + class Database(object): class State(enum.IntEnum): """ @@ -29,8 +59,16 @@ class State(enum.IntEnum): CREATING (int): The database is still being created. Operations on the database may fail with ``FAILED_PRECONDITION`` in this state. 
 READY (int): The database is fully created and ready for use. + READY_OPTIMIZING (int): The database is fully created and ready for use, but is still being + optimized for performance and cannot handle full load. + + In this state, the database still references the backup it was restored + from, preventing the backup from being deleted. When optimizations are + complete, the full performance of the database will be restored, and the + database will transition to ``READY`` state. """ STATE_UNSPECIFIED = 0 CREATING = 1 READY = 2 + READY_OPTIMIZING = 3 diff --git a/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py b/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py index cd56873704..2fb41caab2 100644 --- a/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py +++ b/google/cloud/spanner_admin_database_v1/gapic/transports/database_admin_grpc_transport.py @@ -173,7 +173,8 @@ def update_database_ddl(self): def drop_database(self): """Return the gRPC stub for :meth:`DatabaseAdminClient.drop_database`. - Drops (aka deletes) a Cloud Spanner database. + Drops (aka deletes) a Cloud Spanner database. Completed backups for the + database will be retained according to their ``expire_time``. Returns: Callable: A callable which accepts the appropriate @@ -201,11 +202,12 @@ def get_database_ddl(self): def set_iam_policy(self): """Return the gRPC stub for :meth:`DatabaseAdminClient.set_iam_policy`. - Sets the access control policy on a database resource. Replaces any - existing policy. + Sets the access control policy on a database or backup resource. + Replaces any existing policy. Authorization requires ``spanner.databases.setIamPolicy`` permission on - ``resource``. + ``resource``. For backups, authorization requires + ``spanner.backups.setIamPolicy`` permission on ``resource``. 
Returns: Callable: A callable which accepts the appropriate @@ -218,11 +220,13 @@ def set_iam_policy(self): def get_iam_policy(self): """Return the gRPC stub for :meth:`DatabaseAdminClient.get_iam_policy`. - Gets the access control policy for a database resource. Returns an empty - policy if a database exists but does not have a policy set. + Gets the access control policy for a database or backup resource. + Returns an empty policy if a database or backup exists but does not have + a policy set. Authorization requires ``spanner.databases.getIamPolicy`` permission on - ``resource``. + ``resource``. For backups, authorization requires + ``spanner.backups.getIamPolicy`` permission on ``resource``. Returns: Callable: A callable which accepts the appropriate @@ -235,13 +239,15 @@ def get_iam_policy(self): def test_iam_permissions(self): """Return the gRPC stub for :meth:`DatabaseAdminClient.test_iam_permissions`. - Returns permissions that the caller has on the specified database - resource. + Returns permissions that the caller has on the specified database or + backup resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT\_FOUND error if the user has ``spanner.databases.list`` permission on the containing Cloud Spanner instance. Otherwise returns - an empty set of permissions. + an empty set of permissions. Calling this method on a backup that does + not exist will result in a NOT\_FOUND error if the user has + ``spanner.backups.list`` permission on the containing instance. Returns: Callable: A callable which accepts the appropriate @@ -250,6 +256,146 @@ def test_iam_permissions(self): """ return self._stubs["database_admin_stub"].TestIamPermissions + @property + def create_backup(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.create_backup`. + + Starts creating a new Cloud Spanner Backup. 
The returned backup + ``long-running operation`` will have a name of the format + ``projects//instances//backups//operations/`` + and can be used to track creation of the backup. The ``metadata`` field + type is ``CreateBackupMetadata``. The ``response`` field type is + ``Backup``, if successful. Cancelling the returned operation will stop + the creation and delete the backup. There can be only one pending backup + creation per database. Backup creation of different databases can run + concurrently. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].CreateBackup + + @property + def get_backup(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.get_backup`. + + Gets metadata on a pending or completed ``Backup``. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].GetBackup + + @property + def update_backup(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.update_backup`. + + Updates a pending or completed ``Backup``. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].UpdateBackup + + @property + def delete_backup(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.delete_backup`. + + Deletes a pending or completed ``Backup``. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].DeleteBackup + + @property + def list_backups(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.list_backups`. + + Lists completed and pending backups. 
Backups returned are ordered by + ``create_time`` in descending order, starting from the most recent + ``create_time``. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].ListBackups + + @property + def restore_database(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.restore_database`. + + Create a new database by restoring from a completed backup. The new + database must be in the same project and in an instance with the same + instance configuration as the instance containing the backup. The + returned database ``long-running operation`` has a name of the format + ``projects//instances//databases//operations/``, + and can be used to track the progress of the operation, and to cancel + it. The ``metadata`` field type is ``RestoreDatabaseMetadata``. The + ``response`` type is ``Database``, if successful. Cancelling the + returned operation will stop the restore and delete the database. There + can be only one database being restored into an instance at a time. Once + the restore operation completes, a new restore operation can be + initiated, without waiting for the optimize operation associated with + the first restore to complete. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].RestoreDatabase + + @property + def list_database_operations(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.list_database_operations`. + + Lists database ``longrunning-operations``. A database operation has a + name of the form + ``projects//instances//databases//operations/``. + The long-running operation ``metadata`` field type ``metadata.type_url`` + describes the type of the metadata. 
Operations returned include those + that have completed/failed/canceled within the last 7 days, and pending + operations. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].ListDatabaseOperations + + @property + def list_backup_operations(self): + """Return the gRPC stub for :meth:`DatabaseAdminClient.list_backup_operations`. + + Lists the backup ``long-running operations`` in the given instance. A + backup operation has a name of the form + ``projects//instances//backups//operations/``. + The long-running operation ``metadata`` field type ``metadata.type_url`` + describes the type of the metadata. Operations returned include those + that have completed/failed/canceled within the last 7 days, and pending + operations. Operations returned are ordered by + ``operation.metadata.value.progress.start_time`` in descending order + starting from the most recently started operation. + + Returns: + Callable: A callable which accepts the appropriate + deserialized request object and returns a + deserialized response object. + """ + return self._stubs["database_admin_stub"].ListBackupOperations + @property def list_databases(self): """Return the gRPC stub for :meth:`DatabaseAdminClient.list_databases`. diff --git a/google/cloud/spanner_admin_database_v1/proto/backup.proto b/google/cloud/spanner_admin_database_v1/proto/backup.proto new file mode 100644 index 0000000000..d9b6fd74cd --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/proto/backup.proto @@ -0,0 +1,363 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/field_mask.proto"; +import "google/protobuf/timestamp.proto"; +import "google/spanner/admin/database/v1/common.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database"; +option java_multiple_files = true; +option java_outer_classname = "BackupProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; + +// A backup of a Cloud Spanner database. +message Backup { + option (google.api.resource) = { + type: "spanner.googleapis.com/Backup" + pattern: "projects/{project}/instances/{instance}/backups/{backup}" + }; + + // Indicates the current state of the backup. + enum State { + // Not specified. + STATE_UNSPECIFIED = 0; + + // The pending backup is still being created. Operations on the + // backup may fail with `FAILED_PRECONDITION` in this state. + CREATING = 1; + + // The backup is complete and ready for use. + READY = 2; + } + + // Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. + // Name of the database from which this backup was + // created. This needs to be in the same instance as the backup. 
+ // Values are of the form + // `projects//instances//databases/`. + string database = 2; + + // Required for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] + // operation. The expiration time of the backup, with microseconds + // granularity that must be at least 6 hours and at most 366 days + // from the time the CreateBackup request is processed. Once the `expire_time` + // has passed, the backup is eligible to be automatically deleted by Cloud + // Spanner to free the resources used by the backup. + google.protobuf.Timestamp expire_time = 3; + + // Output only for the [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. + // Required for the [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup] operation. + // + // A globally unique identifier for the backup which cannot be + // changed. Values are of the form + // `projects//instances//backups/[a-z][a-z0-9_\-]*[a-z0-9]` + // The final segment of the name must be between 2 and 60 characters + // in length. + // + // The backup is stored in the location(s) specified in the instance + // configuration of the instance containing the backup, identified + // by the prefix of the backup name of the form + // `projects//instances/`. + string name = 1; + + // Output only. The backup will contain an externally consistent + // copy of the database at the timestamp specified by + // `create_time`. `create_time` is approximately the time the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] request is received. + google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Size of the backup in bytes. + int64 size_bytes = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The current state of the backup. + State state = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
The names of the restored databases that reference the backup. + // The database names are of + // the form `projects//instances//databases/`. + // Referencing databases may exist in different instances. The existence of + // any referencing database prevents the backup from being deleted. When a + // restored database from the backup enters the `READY` state, the reference + // to the backup is removed. + repeated string referencing_databases = 7 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// The request for [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. +message CreateBackupRequest { + // Required. The name of the instance in which the backup will be + // created. This must be the same instance that contains the database the + // backup will be created from. The backup will be stored in the + // location(s) specified in the instance configuration of this + // instance. Values are of the form + // `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The id of the backup to be created. The `backup_id` appended to + // `parent` forms the full backup name of the form + // `projects//instances//backups/`. + string backup_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The backup to create. + Backup backup = 3 [(google.api.field_behavior) = REQUIRED]; +} + +// Metadata type for the operation returned by +// [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. +message CreateBackupMetadata { + // The name of the backup being created. + string name = 1; + + // The name of the database the backup is created from. + string database = 2; + + // The progress of the + // [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup] operation. + OperationProgress progress = 3; + + // The time at which cancellation of this operation was received. 
+ // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + // starts asynchronous cancellation on a long-running operation. The server + // makes a best effort to cancel the operation, but success is not guaranteed. + // Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`. + google.protobuf.Timestamp cancel_time = 4; +} + +// The request for [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. +message UpdateBackupRequest { + // Required. The backup to update. `backup.name`, and the fields to be updated + // as specified by `update_mask` are required. Other fields are ignored. + // Update is only supported for the following fields: + // * `backup.expire_time`. + Backup backup = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. A mask specifying which fields (e.g. `expire_time`) in the + // Backup resource should be updated. This mask is relative to the Backup + // resource, not to the request message. The field mask must always be + // specified; this prevents any future fields from being erased accidentally + // by clients that do not know about them. + google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// The request for [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. +message GetBackupRequest { + // Required. Name of the backup. + // Values are of the form + // `projects//instances//backups/`. 
+ string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Backup" + } + ]; +} + +// The request for [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. +message DeleteBackupRequest { + // Required. Name of the backup to delete. + // Values are of the form + // `projects//instances//backups/`. + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Backup" + } + ]; +} + +// The request for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +message ListBackupsRequest { + // Required. The instance to list backups from. Values are of the + // form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // An expression that filters the list of returned backups. + // + // A filter expression consists of a field name, a comparison operator, and a + // value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the [Backup][google.spanner.admin.database.v1.Backup] are eligible for filtering: + // + // * `name` + // * `database` + // * `state` + // * `create_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `expire_time` (and values are of the format YYYY-MM-DDTHH:MM:SSZ) + // * `size_bytes` + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic, but + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `name:Howl` - The backup's name contains the string "howl". 
+ // * `database:prod` + // - The database's name contains the string "prod". + // * `state:CREATING` - The backup is pending creation. + // * `state:READY` - The backup is fully created and ready for use. + // * `(name:howl) AND (create_time < \"2018-03-28T14:50:00Z\")` + // - The backup name contains the string "howl" and `create_time` + // of the backup is before 2018-03-28T14:50:00Z. + // * `expire_time < \"2018-03-28T14:50:00Z\"` + // - The backup `expire_time` is before 2018-03-28T14:50:00Z. + // * `size_bytes > 10000000000` - The backup's size is greater than 10GB + string filter = 2; + + // Number of backups to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListBackupsResponse.next_page_token] from a + // previous [ListBackupsResponse][google.spanner.admin.database.v1.ListBackupsResponse] to the same `parent` and with the same + // `filter`. + string page_token = 4; +} + +// The response for [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. +message ListBackupsResponse { + // The list of matching backups. Backups returned are ordered by `create_time` + // in descending order, starting from the most recent `create_time`. + repeated Backup backups = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] call to fetch more + // of the matching backups. + string next_page_token = 2; +} + +// The request for +// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. +message ListBackupOperationsRequest { + // Required. The instance of the backup operations. Values are of + // the form `projects//instances/`. 
+ string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // An expression that filters the list of returned backup operations. + // + // A filter expression consists of a field name, a + // comparison operator, and a value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. + // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the [operation][google.longrunning.Operation] + // are eligible for filtering: + // + // * `name` - The name of the long-running operation + // * `done` - False if the operation is in progress, else true. + // * `metadata.@type` - the type of metadata. For example, the type string + // for [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata] is + // `type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata`. + // * `metadata.` - any field in metadata.value. + // * `error` - Error associated with the long-running operation. + // * `response.@type` - the type of response. + // * `response.` - any field in response.value. + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic, but + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `done:true` - The operation is complete. + // * `metadata.database:prod` - The database the backup was taken from has + // a name containing the string "prod". + // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.CreateBackupMetadata) AND`
+ // `(metadata.name:howl) AND`
+ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`
+ // `(error:*)` - Returns operations where: + // * The operation's metadata type is [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. + // * The backup name contains the string "howl". + // * The operation started before 2018-03-28T14:50:00Z. + // * The operation resulted in an error. + string filter = 2; + + // Number of operations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token] + // from a previous [ListBackupOperationsResponse][google.spanner.admin.database.v1.ListBackupOperationsResponse] to the + // same `parent` and with the same `filter`. + string page_token = 4; +} + +// The response for +// [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. +message ListBackupOperationsResponse { + // The list of matching backup [long-running + // operations][google.longrunning.Operation]. Each operation's name will be + // prefixed by the backup's name and the operation's + // [metadata][google.longrunning.Operation.metadata] will be of type + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. Operations returned include those that are + // pending or have completed/failed/canceled within the last 7 days. + // Operations returned are ordered by + // `operation.metadata.value.progress.start_time` in descending order starting + // from the most recently started operation. + repeated google.longrunning.Operation operations = 1; + + // `next_page_token` can be sent in a subsequent + // [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations] + // call to fetch more of the matching metadata. + string next_page_token = 2; +} + +// Information about a backup. +message BackupInfo { + // Name of the backup. 
+ string backup = 1; + + // The backup contains an externally consistent copy of `source_database` at + // the timestamp specified by `create_time`. + google.protobuf.Timestamp create_time = 2; + + // Name of the database the backup was created from. + string source_database = 3; +} diff --git a/google/cloud/spanner_admin_database_v1/proto/backup_pb2.py b/google/cloud/spanner_admin_database_v1/proto/backup_pb2.py new file mode 100644 index 0000000000..edc596bd94 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/proto/backup_pb2.py @@ -0,0 +1,1379 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/spanner/admin/database_v1/proto/backup.proto + +import sys + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.longrunning import ( + operations_pb2 as google_dot_longrunning_dot_operations__pb2, +) +from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.cloud.spanner_admin_database_v1.proto import ( + common_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2, +) +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/spanner/admin/database_v1/proto/backup.proto", + package="google.spanner.admin.database.v1", + syntax="proto3", + 
serialized_options=_b( + "\n$com.google.spanner.admin.database.v1B\013BackupProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1" + ), + serialized_pb=_b( + '\n9google/cloud/spanner/admin/database_v1/proto/backup.proto\x12 google.spanner.admin.database.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a#google/longrunning/operations.proto\x1a google/protobuf/field_mask.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x39google/cloud/spanner/admin/database_v1/proto/common.proto\x1a\x1cgoogle/api/annotations.proto"\xa7\x03\n\x06\x42\x61\x63kup\x12\x10\n\x08\x64\x61tabase\x18\x02 \x01(\t\x12/\n\x0b\x65xpire_time\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x34\n\x0b\x63reate_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampB\x03\xe0\x41\x03\x12\x17\n\nsize_bytes\x18\x05 \x01(\x03\x42\x03\xe0\x41\x03\x12\x42\n\x05state\x18\x06 \x01(\x0e\x32..google.spanner.admin.database.v1.Backup.StateB\x03\xe0\x41\x03\x12"\n\x15referencing_databases\x18\x07 \x03(\tB\x03\xe0\x41\x03"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:\\\xea\x41Y\n\x1dspanner.googleapis.com/Backup\x12\x38projects/{project}/instances/{instance}/backups/{backup}"\xa5\x01\n\x13\x43reateBackupRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12\x16\n\tbackup_id\x18\x02 \x01(\tB\x03\xe0\x41\x02\x12=\n\x06\x62\x61\x63kup\x18\x03 \x01(\x0b\x32(.google.spanner.admin.database.v1.BackupB\x03\xe0\x41\x02"\xae\x01\n\x14\x43reateBackupMetadata\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x10\n\x08\x64\x61tabase\x18\x02 \x01(\t\x12\x45\n\x08progress\x18\x03 \x01(\x0b\x32\x33.google.spanner.admin.database.v1.OperationProgress\x12/\n\x0b\x63\x61ncel_time\x18\x04 
\x01(\x0b\x32\x1a.google.protobuf.Timestamp"\x8a\x01\n\x13UpdateBackupRequest\x12=\n\x06\x62\x61\x63kup\x18\x01 \x01(\x0b\x32(.google.spanner.admin.database.v1.BackupB\x03\xe0\x41\x02\x12\x34\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMaskB\x03\xe0\x41\x02"G\n\x10GetBackupRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1dspanner.googleapis.com/Backup"J\n\x13\x44\x65leteBackupRequest\x12\x33\n\x04name\x18\x01 \x01(\tB%\xe0\x41\x02\xfa\x41\x1f\n\x1dspanner.googleapis.com/Backup"\x84\x01\n\x12ListBackupsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"i\n\x13ListBackupsResponse\x12\x39\n\x07\x62\x61\x63kups\x18\x01 \x03(\x0b\x32(.google.spanner.admin.database.v1.Backup\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"\x8d\x01\n\x1bListBackupOperationsRequest\x12\x37\n\x06parent\x18\x01 \x01(\tB\'\xe0\x41\x02\xfa\x41!\n\x1fspanner.googleapis.com/Instance\x12\x0e\n\x06\x66ilter\x18\x02 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t"j\n\x1cListBackupOperationsResponse\x12\x31\n\noperations\x18\x01 \x03(\x0b\x32\x1d.google.longrunning.Operation\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t"f\n\nBackupInfo\x12\x0e\n\x06\x62\x61\x63kup\x18\x01 \x01(\t\x12/\n\x0b\x63reate_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x17\n\x0fsource_database\x18\x03 \x01(\tB\xd1\x01\n$com.google.spanner.admin.database.v1B\x0b\x42\x61\x63kupProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\xaa\x02&Google.Cloud.Spanner.Admin.Database.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Database\\V1b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_api_dot_resource__pb2.DESCRIPTOR, + google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, + 
google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + ], +) + + +_BACKUP_STATE = _descriptor.EnumDescriptor( + name="State", + full_name="google.spanner.admin.database.v1.Backup.State", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="STATE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="CREATING", index=1, number=1, serialized_options=None, type=None + ), + _descriptor.EnumValueDescriptor( + name="READY", index=2, number=2, serialized_options=None, type=None + ), + ], + containing_type=None, + serialized_options=None, + serialized_start=623, + serialized_end=678, +) +_sym_db.RegisterEnumDescriptor(_BACKUP_STATE) + + +_BACKUP = _descriptor.Descriptor( + name="Backup", + full_name="google.spanner.admin.database.v1.Backup", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="database", + full_name="google.spanner.admin.database.v1.Backup.database", + index=0, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="expire_time", + full_name="google.spanner.admin.database.v1.Backup.expire_time", + index=1, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="name", + 
full_name="google.spanner.admin.database.v1.Backup.name", + index=2, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="create_time", + full_name="google.spanner.admin.database.v1.Backup.create_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="size_bytes", + full_name="google.spanner.admin.database.v1.Backup.size_bytes", + index=4, + number=5, + type=3, + cpp_type=2, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="state", + full_name="google.spanner.admin.database.v1.Backup.state", + index=5, + number=6, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="referencing_databases", + full_name="google.spanner.admin.database.v1.Backup.referencing_databases", + index=6, + number=7, + type=9, + cpp_type=9, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + 
enum_types=[_BACKUP_STATE], + serialized_options=_b( + "\352AY\n\035spanner.googleapis.com/Backup\0228projects/{project}/instances/{instance}/backups/{backup}" + ), + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=349, + serialized_end=772, +) + + +_CREATEBACKUPREQUEST = _descriptor.Descriptor( + name="CreateBackupRequest", + full_name="google.spanner.admin.database.v1.CreateBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.spanner.admin.database.v1.CreateBackupRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b( + "\340A\002\372A!\n\037spanner.googleapis.com/Instance" + ), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="backup_id", + full_name="google.spanner.admin.database.v1.CreateBackupRequest.backup_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\002"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.spanner.admin.database.v1.CreateBackupRequest.backup", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\002"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=775, + 
serialized_end=940, +) + + +_CREATEBACKUPMETADATA = _descriptor.Descriptor( + name="CreateBackupMetadata", + full_name="google.spanner.admin.database.v1.CreateBackupMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.spanner.admin.database.v1.CreateBackupMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="database", + full_name="google.spanner.admin.database.v1.CreateBackupMetadata.database", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.spanner.admin.database.v1.CreateBackupMetadata.progress", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cancel_time", + full_name="google.spanner.admin.database.v1.CreateBackupMetadata.cancel_time", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + 
extension_ranges=[], + oneofs=[], + serialized_start=943, + serialized_end=1117, +) + + +_UPDATEBACKUPREQUEST = _descriptor.Descriptor( + name="UpdateBackupRequest", + full_name="google.spanner.admin.database.v1.UpdateBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="backup", + full_name="google.spanner.admin.database.v1.UpdateBackupRequest.backup", + index=0, + number=1, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\002"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="update_mask", + full_name="google.spanner.admin.database.v1.UpdateBackupRequest.update_mask", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\002"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1120, + serialized_end=1258, +) + + +_GETBACKUPREQUEST = _descriptor.Descriptor( + name="GetBackupRequest", + full_name="google.spanner.admin.database.v1.GetBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.spanner.admin.database.v1.GetBackupRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b( + "\340A\002\372A\037\n\035spanner.googleapis.com/Backup" + ), + 
file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1260, + serialized_end=1331, +) + + +_DELETEBACKUPREQUEST = _descriptor.Descriptor( + name="DeleteBackupRequest", + full_name="google.spanner.admin.database.v1.DeleteBackupRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.spanner.admin.database.v1.DeleteBackupRequest.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b( + "\340A\002\372A\037\n\035spanner.googleapis.com/Backup" + ), + file=DESCRIPTOR, + ) + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1333, + serialized_end=1407, +) + + +_LISTBACKUPSREQUEST = _descriptor.Descriptor( + name="ListBackupsRequest", + full_name="google.spanner.admin.database.v1.ListBackupsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.spanner.admin.database.v1.ListBackupsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b( + "\340A\002\372A!\n\037spanner.googleapis.com/Instance" + ), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.spanner.admin.database.v1.ListBackupsRequest.filter", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, 
+ has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.spanner.admin.database.v1.ListBackupsRequest.page_size", + index=2, + number=3, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.spanner.admin.database.v1.ListBackupsRequest.page_token", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1410, + serialized_end=1542, +) + + +_LISTBACKUPSRESPONSE = _descriptor.Descriptor( + name="ListBackupsResponse", + full_name="google.spanner.admin.database.v1.ListBackupsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="backups", + full_name="google.spanner.admin.database.v1.ListBackupsResponse.backups", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + 
full_name="google.spanner.admin.database.v1.ListBackupsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1544, + serialized_end=1649, +) + + +_LISTBACKUPOPERATIONSREQUEST = _descriptor.Descriptor( + name="ListBackupOperationsRequest", + full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b( + "\340A\002\372A!\n\037spanner.googleapis.com/Instance" + ), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.filter", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.page_size", + index=2, + number=3, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + 
containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.spanner.admin.database.v1.ListBackupOperationsRequest.page_token", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1652, + serialized_end=1793, +) + + +_LISTBACKUPOPERATIONSRESPONSE = _descriptor.Descriptor( + name="ListBackupOperationsResponse", + full_name="google.spanner.admin.database.v1.ListBackupOperationsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="operations", + full_name="google.spanner.admin.database.v1.ListBackupOperationsResponse.operations", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.spanner.admin.database.v1.ListBackupOperationsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + 
oneofs=[], + serialized_start=1795, + serialized_end=1901, +) + + +_BACKUPINFO = _descriptor.Descriptor( + name="BackupInfo", + full_name="google.spanner.admin.database.v1.BackupInfo", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="backup", + full_name="google.spanner.admin.database.v1.BackupInfo.backup", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="create_time", + full_name="google.spanner.admin.database.v1.BackupInfo.create_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="source_database", + full_name="google.spanner.admin.database.v1.BackupInfo.source_database", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=1903, + serialized_end=2005, +) + +_BACKUP.fields_by_name[ + "expire_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name[ + "create_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_BACKUP.fields_by_name["state"].enum_type = _BACKUP_STATE +_BACKUP_STATE.containing_type = _BACKUP 
+_CREATEBACKUPREQUEST.fields_by_name["backup"].message_type = _BACKUP +_CREATEBACKUPMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +_CREATEBACKUPMETADATA.fields_by_name[ + "cancel_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_UPDATEBACKUPREQUEST.fields_by_name["backup"].message_type = _BACKUP +_UPDATEBACKUPREQUEST.fields_by_name[ + "update_mask" +].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK +_LISTBACKUPSRESPONSE.fields_by_name["backups"].message_type = _BACKUP +_LISTBACKUPOPERATIONSRESPONSE.fields_by_name[ + "operations" +].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION +_BACKUPINFO.fields_by_name[ + "create_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name["Backup"] = _BACKUP +DESCRIPTOR.message_types_by_name["CreateBackupRequest"] = _CREATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["CreateBackupMetadata"] = _CREATEBACKUPMETADATA +DESCRIPTOR.message_types_by_name["UpdateBackupRequest"] = _UPDATEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["GetBackupRequest"] = _GETBACKUPREQUEST +DESCRIPTOR.message_types_by_name["DeleteBackupRequest"] = _DELETEBACKUPREQUEST +DESCRIPTOR.message_types_by_name["ListBackupsRequest"] = _LISTBACKUPSREQUEST +DESCRIPTOR.message_types_by_name["ListBackupsResponse"] = _LISTBACKUPSRESPONSE +DESCRIPTOR.message_types_by_name[ + "ListBackupOperationsRequest" +] = _LISTBACKUPOPERATIONSREQUEST +DESCRIPTOR.message_types_by_name[ + "ListBackupOperationsResponse" +] = _LISTBACKUPOPERATIONSRESPONSE +DESCRIPTOR.message_types_by_name["BackupInfo"] = _BACKUPINFO +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Backup = _reflection.GeneratedProtocolMessageType( + "Backup", + (_message.Message,), + dict( + DESCRIPTOR=_BACKUP, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + 
__doc__="""A backup of a Cloud Spanner database. + + + Attributes: + database: + Required for the [CreateBackup][google.spanner.admin.database. + v1.DatabaseAdmin.CreateBackup] operation. Name of the database + from which this backup was created. This needs to be in the + same instance as the backup. Values are of the form ``projects + //instances//databases/``. + expire_time: + Required for the [CreateBackup][google.spanner.admin.database. + v1.DatabaseAdmin.CreateBackup] operation. The expiration time + of the backup, with microseconds granularity that must be at + least 6 hours and at most 366 days from the time the + CreateBackup request is processed. Once the ``expire_time`` + has passed, the backup is eligible to be automatically deleted + by Cloud Spanner to free the resources used by the backup. + name: + Output only for the [CreateBackup][google.spanner.admin.databa + se.v1.DatabaseAdmin.CreateBackup] operation. Required for the + [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin. + UpdateBackup] operation. A globally unique identifier for the + backup which cannot be changed. Values are of the form ``proje + cts//instances//backups/[a-z][a-z0-9_\-]*[a + -z0-9]`` The final segment of the name must be between 2 and + 60 characters in length. The backup is stored in the + location(s) specified in the instance configuration of the + instance containing the backup, identified by the prefix of + the backup name of the form + ``projects//instances/``. + create_time: + Output only. The backup will contain an externally consistent + copy of the database at the timestamp specified by + ``create_time``. ``create_time`` is approximately the time the + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin. + CreateBackup] request is received. + size_bytes: + Output only. Size of the backup in bytes. + state: + Output only. The current state of the backup. + referencing_databases: + Output only. 
The names of the restored databases that + reference the backup. The database names are of the form ``pro + jects//instances//databases/``. + Referencing databases may exist in different instances. The + existence of any referencing database prevents the backup from + being deleted. When a restored database from the backup enters + the ``READY`` state, the reference to the backup is removed. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Backup) + ), +) +_sym_db.RegisterMessage(Backup) + +CreateBackupRequest = _reflection.GeneratedProtocolMessageType( + "CreateBackupRequest", + (_message.Message,), + dict( + DESCRIPTOR=_CREATEBACKUPREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The request for + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. + + + Attributes: + parent: + Required. The name of the instance in which the backup will be + created. This must be the same instance that contains the + database the backup will be created from. The backup will be + stored in the location(s) specified in the instance + configuration of this instance. Values are of the form + ``projects//instances/``. + backup_id: + Required. The id of the backup to be created. The + ``backup_id`` appended to ``parent`` forms the full backup + name of the form ``projects//instances//bac + kups/``. + backup: + Required. The backup to create. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupRequest) + ), +) +_sym_db.RegisterMessage(CreateBackupRequest) + +CreateBackupMetadata = _reflection.GeneratedProtocolMessageType( + "CreateBackupMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_CREATEBACKUPMETADATA, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""Metadata type for the operation returned by + [CreateBackup][google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup]. 
+ + + Attributes: + name: + The name of the backup being created. + database: + The name of the database the backup is created from. + progress: + The progress of the [CreateBackup][google.spanner.admin.databa + se.v1.DatabaseAdmin.CreateBackup] operation. + cancel_time: + The time at which cancellation of this operation was received. + [Operations.CancelOperation][google.longrunning.Operations.Can + celOperation] starts asynchronous cancellation on a long- + running operation. The server makes a best effort to cancel + the operation, but success is not guaranteed. Clients can use + [Operations.GetOperation][google.longrunning.Operations.GetOpe + ration] or other methods to check whether the cancellation + succeeded or whether the operation completed despite + cancellation. On successful cancellation, the operation is not + deleted; instead, it becomes an operation with an + [Operation.error][] value with a + [google.rpc.Status.code][google.rpc.Status.code] of 1, + corresponding to ``Code.CANCELLED``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.CreateBackupMetadata) + ), +) +_sym_db.RegisterMessage(CreateBackupMetadata) + +UpdateBackupRequest = _reflection.GeneratedProtocolMessageType( + "UpdateBackupRequest", + (_message.Message,), + dict( + DESCRIPTOR=_UPDATEBACKUPREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The request for + [UpdateBackup][google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup]. + + + Attributes: + backup: + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: \* ``backup.expire_time``. + update_mask: + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. 
The field mask must always be specified; this + prevents any future fields from being erased accidentally by + clients that do not know about them. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.UpdateBackupRequest) + ), +) +_sym_db.RegisterMessage(UpdateBackupRequest) + +GetBackupRequest = _reflection.GeneratedProtocolMessageType( + "GetBackupRequest", + (_message.Message,), + dict( + DESCRIPTOR=_GETBACKUPREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The request for + [GetBackup][google.spanner.admin.database.v1.DatabaseAdmin.GetBackup]. + + + Attributes: + name: + Required. Name of the backup. Values are of the form + ``projects//instances//backups/``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.GetBackupRequest) + ), +) +_sym_db.RegisterMessage(GetBackupRequest) + +DeleteBackupRequest = _reflection.GeneratedProtocolMessageType( + "DeleteBackupRequest", + (_message.Message,), + dict( + DESCRIPTOR=_DELETEBACKUPREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The request for + [DeleteBackup][google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup]. + + + Attributes: + name: + Required. Name of the backup to delete. Values are of the form + ``projects//instances//backups/``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.DeleteBackupRequest) + ), +) +_sym_db.RegisterMessage(DeleteBackupRequest) + +ListBackupsRequest = _reflection.GeneratedProtocolMessageType( + "ListBackupsRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTBACKUPSREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The request for + [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. + + + Attributes: + parent: + Required. The instance to list backups from. Values are of the + form ``projects//instances/``. 
+ filter: + An expression that filters the list of returned backups. A + filter expression consists of a field name, a comparison + operator, and a value for filtering. The value must be a + string, a number, or a boolean. The comparison operator must + be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or + ``:``. Colon ``:`` is the contains operator. Filter rules are + not case sensitive. The following fields in the + [Backup][google.spanner.admin.database.v1.Backup] are eligible + for filtering: - ``name`` - ``database`` - ``state`` - + ``create_time`` (and values are of the format YYYY-MM- + DDTHH:MM:SSZ) - ``expire_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) - ``size_bytes`` You can combine + multiple expressions by enclosing each expression in + parentheses. By default, expressions are combined with AND + logic, but you can specify AND, OR, and NOT logic explicitly. + Here are a few examples: - ``name:Howl`` - The backup's name + contains the string "howl". - ``database:prod`` - The + database's name contains the string "prod". - + ``state:CREATING`` - The backup is pending creation. - + ``state:READY`` - The backup is fully created and ready for + use. - ``(name:howl) AND (create_time < + \"2018-03-28T14:50:00Z\")`` - The backup name contains the + string "howl" and ``create_time`` of the backup is before + 2018-03-28T14:50:00Z. - ``expire_time < + \"2018-03-28T14:50:00Z\"`` - The backup ``expire_time`` is + before 2018-03-28T14:50:00Z. - ``size_bytes > 10000000000`` - + The backup's size is greater than 10GB + page_size: + Number of backups to be returned in the response. If 0 or + less, defaults to the server's maximum allowed page size. 
+ page_token: + If non-empty, ``page_token`` should contain a [next\_page\_tok + en][google.spanner.admin.database.v1.ListBackupsResponse.next\ + _page\_token] from a previous [ListBackupsResponse][google.spa + nner.admin.database.v1.ListBackupsResponse] to the same + ``parent`` and with the same ``filter``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupsRequest) + ), +) +_sym_db.RegisterMessage(ListBackupsRequest) + +ListBackupsResponse = _reflection.GeneratedProtocolMessageType( + "ListBackupsResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTBACKUPSRESPONSE, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The response for + [ListBackups][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups]. + + + Attributes: + backups: + The list of matching backups. Backups returned are ordered by + ``create_time`` in descending order, starting from the most + recent ``create_time``. + next_page_token: + \ ``next_page_token`` can be sent in a subsequent [ListBackups + ][google.spanner.admin.database.v1.DatabaseAdmin.ListBackups] + call to fetch more of the matching backups. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupsResponse) + ), +) +_sym_db.RegisterMessage(ListBackupsResponse) + +ListBackupOperationsRequest = _reflection.GeneratedProtocolMessageType( + "ListBackupOperationsRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTBACKUPOPERATIONSREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The request for + [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. + + + Attributes: + parent: + Required. The instance of the backup operations. Values are of + the form ``projects//instances/``. + filter: + An expression that filters the list of returned backup + operations. 
A filter expression consists of a field name, a + comparison operator, and a value for filtering. The value must + be a string, a number, or a boolean. The comparison operator + must be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, + or ``:``. Colon ``:`` is the contains operator. Filter rules + are not case sensitive. The following fields in the + [operation][google.longrunning.Operation] are eligible for + filtering: - ``name`` - The name of the long-running + operation - ``done`` - False if the operation is in progress, + else true. - ``metadata.@type`` - the type of metadata. For + example, the type string for [CreateBackupMetadata][goog + le.spanner.admin.database.v1.CreateBackupMetadata] is `` + type.googleapis.com/google.spanner.admin.database.v1.CreateBac + kupMetadata``. - ``metadata.`` - any field in + metadata.value. - ``error`` - Error associated with the long- + running operation. - ``response.@type`` - the type of + response. - ``response.`` - any field in + response.value. You can combine multiple expressions by + enclosing each expression in parentheses. By default, + expressions are combined with AND logic, but you can specify + AND, OR, and NOT logic explicitly. Here are a few examples: + - ``done:true`` - The operation is complete. - + ``metadata.database:prod`` - The database the backup was taken + from has a name containing the string "prod". - ``(metadat + a.@type=type.googleapis.com/google.spanner.admin.database.v1.C + reateBackupMetadata) AND`` ``(metadata.name:howl) AND`` + ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") + AND`` ``(error:*)`` - Returns operations where: - The + operation's metadata type is [CreateBackupMetadata][goog + le.spanner.admin.database.v1.CreateBackupMetadata]. - The + backup name contains the string "howl". - The operation + started before 2018-03-28T14:50:00Z. - The operation + resulted in an error. + page_size: + Number of operations to be returned in the response. 
If 0 or + less, defaults to the server's maximum allowed page size. + page_token: + If non-empty, ``page_token`` should contain a [next\_page\_tok + en][google.spanner.admin.database.v1.ListBackupOperationsRespo + nse.next\_page\_token] from a previous [ListBackupOperationsRe + sponse][google.spanner.admin.database.v1.ListBackupOperationsR + esponse] to the same ``parent`` and with the same ``filter``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupOperationsRequest) + ), +) +_sym_db.RegisterMessage(ListBackupOperationsRequest) + +ListBackupOperationsResponse = _reflection.GeneratedProtocolMessageType( + "ListBackupOperationsResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTBACKUPOPERATIONSRESPONSE, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""The response for + [ListBackupOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations]. + + + Attributes: + operations: + The list of matching backup [long-running + operations][google.longrunning.Operation]. Each operation's + name will be prefixed by the backup's name and the operation's + [metadata][google.longrunning.Operation.metadata] will be of + type [CreateBackupMetadata][google.spanner.admin.database.v1.C + reateBackupMetadata]. Operations returned include those that + are pending or have completed/failed/canceled within the last + 7 days. Operations returned are ordered by + ``operation.metadata.value.progress.start_time`` in descending + order starting from the most recently started operation. + next_page_token: + \ ``next_page_token`` can be sent in a subsequent [ListBackupO + perations][google.spanner.admin.database.v1.DatabaseAdmin.List + BackupOperations] call to fetch more of the matching metadata. 
+ """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListBackupOperationsResponse) + ), +) +_sym_db.RegisterMessage(ListBackupOperationsResponse) + +BackupInfo = _reflection.GeneratedProtocolMessageType( + "BackupInfo", + (_message.Message,), + dict( + DESCRIPTOR=_BACKUPINFO, + __module__="google.cloud.spanner.admin.database_v1.proto.backup_pb2", + __doc__="""Information about a backup. + + + Attributes: + backup: + Name of the backup. + create_time: + The backup contains an externally consistent copy of + ``source_database`` at the timestamp specified by + ``create_time``. + source_database: + Name of the database the backup was created from. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.BackupInfo) + ), +) +_sym_db.RegisterMessage(BackupInfo) + + +DESCRIPTOR._options = None +_BACKUP.fields_by_name["create_time"]._options = None +_BACKUP.fields_by_name["size_bytes"]._options = None +_BACKUP.fields_by_name["state"]._options = None +_BACKUP.fields_by_name["referencing_databases"]._options = None +_BACKUP._options = None +_CREATEBACKUPREQUEST.fields_by_name["parent"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["backup_id"]._options = None +_CREATEBACKUPREQUEST.fields_by_name["backup"]._options = None +_UPDATEBACKUPREQUEST.fields_by_name["backup"]._options = None +_UPDATEBACKUPREQUEST.fields_by_name["update_mask"]._options = None +_GETBACKUPREQUEST.fields_by_name["name"]._options = None +_DELETEBACKUPREQUEST.fields_by_name["name"]._options = None +_LISTBACKUPSREQUEST.fields_by_name["parent"]._options = None +_LISTBACKUPOPERATIONSREQUEST.fields_by_name["parent"]._options = None +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py b/google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py new file mode 100644 index 0000000000..07cb78fe03 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/proto/backup_pb2_grpc.py @@ -0,0 
+1,2 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc diff --git a/google/cloud/spanner_admin_database_v1/proto/common.proto b/google/cloud/spanner_admin_database_v1/proto/common.proto new file mode 100644 index 0000000000..4914cb8ac7 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/proto/common.proto @@ -0,0 +1,43 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.spanner.admin.database.v1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; +option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database"; +option java_multiple_files = true; +option java_outer_classname = "CommonProto"; +option java_package = "com.google.spanner.admin.database.v1"; +option php_namespace = "Google\\Cloud\\Spanner\\Admin\\Database\\V1"; + +// Encapsulates progress related information for a Cloud Spanner long +// running operation. +message OperationProgress { + // Percent completion of the operation. + // Values are between 0 and 100 inclusive. + int32 progress_percent = 1; + + // Time the request was received. + google.protobuf.Timestamp start_time = 2; + + // If set, the time at which this operation failed or was completed + // successfully. 
+ google.protobuf.Timestamp end_time = 3; +} diff --git a/google/cloud/spanner_admin_database_v1/proto/common_pb2.py b/google/cloud/spanner_admin_database_v1/proto/common_pb2.py new file mode 100644 index 0000000000..6dc9895d39 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/proto/common_pb2.py @@ -0,0 +1,151 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/cloud/spanner/admin/database_v1/proto/common.proto + +import sys + +_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database + +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name="google/cloud/spanner/admin/database_v1/proto/common.proto", + package="google.spanner.admin.database.v1", + syntax="proto3", + serialized_options=_b( + "\n$com.google.spanner.admin.database.v1B\013CommonProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1" + ), + serialized_pb=_b( + '\n9google/cloud/spanner/admin/database_v1/proto/common.proto\x12 google.spanner.admin.database.v1\x1a\x1fgoogle/api/field_behavior.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1cgoogle/api/annotations.proto"\x8b\x01\n\x11OperationProgress\x12\x18\n\x10progress_percent\x18\x01 \x01(\x05\x12.\n\nstart_time\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12,\n\x08\x65nd_time\x18\x03 
\x01(\x0b\x32\x1a.google.protobuf.TimestampB\xd1\x01\n$com.google.spanner.admin.database.v1B\x0b\x43ommonProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\xaa\x02&Google.Cloud.Spanner.Admin.Database.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Database\\V1b\x06proto3' + ), + dependencies=[ + google_dot_api_dot_field__behavior__pb2.DESCRIPTOR, + google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_api_dot_annotations__pb2.DESCRIPTOR, + ], +) + + +_OPERATIONPROGRESS = _descriptor.Descriptor( + name="OperationProgress", + full_name="google.spanner.admin.database.v1.OperationProgress", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="progress_percent", + full_name="google.spanner.admin.database.v1.OperationProgress.progress_percent", + index=0, + number=1, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="start_time", + full_name="google.spanner.admin.database.v1.OperationProgress.start_time", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="end_time", + full_name="google.spanner.admin.database.v1.OperationProgress.end_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + 
is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=192, + serialized_end=331, +) + +_OPERATIONPROGRESS.fields_by_name[ + "start_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_OPERATIONPROGRESS.fields_by_name[ + "end_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +DESCRIPTOR.message_types_by_name["OperationProgress"] = _OPERATIONPROGRESS +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +OperationProgress = _reflection.GeneratedProtocolMessageType( + "OperationProgress", + (_message.Message,), + dict( + DESCRIPTOR=_OPERATIONPROGRESS, + __module__="google.cloud.spanner.admin.database_v1.proto.common_pb2", + __doc__="""Encapsulates progress related information for a Cloud + Spanner long running operation. + + + Attributes: + progress_percent: + Percent completion of the operation. Values are between 0 and + 100 inclusive. + start_time: + Time the request was received. + end_time: + If set, the time at which this operation failed or was + completed successfully. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.OperationProgress) + ), +) +_sym_db.RegisterMessage(OperationProgress) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope) diff --git a/google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py b/google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py new file mode 100644 index 0000000000..07cb78fe03 --- /dev/null +++ b/google/cloud/spanner_admin_database_v1/proto/common_pb2_grpc.py @@ -0,0 +1,2 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+import grpc diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto index 5ee127d1ef..d48adc8aba 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin.proto @@ -25,6 +25,8 @@ import "google/iam/v1/policy.proto"; import "google/longrunning/operations.proto"; import "google/protobuf/empty.proto"; import "google/protobuf/timestamp.proto"; +import "google/spanner/admin/database/v1/backup.proto"; +import "google/spanner/admin/database/v1/common.proto"; option csharp_namespace = "Google.Cloud.Spanner.Admin.Database.V1"; option go_package = "google.golang.org/genproto/googleapis/spanner/admin/database/v1;database"; @@ -41,7 +43,8 @@ option (google.api.resource_definition) = { // // The Cloud Spanner Database Admin API can be used to create, drop, and // list databases. It also enables updating the schema of pre-existing -// databases. +// databases. It can be also used to create, delete and list backups for a +// database and to restore from an existing backup. service DatabaseAdmin { option (google.api.default_host) = "spanner.googleapis.com"; option (google.api.oauth_scopes) = @@ -104,6 +107,8 @@ service DatabaseAdmin { } // Drops (aka deletes) a Cloud Spanner database. + // Completed backups for the database will be retained according to their + // `expire_time`. rpc DropDatabase(DropDatabaseRequest) returns (google.protobuf.Empty) { option (google.api.http) = { delete: "/v1/{database=projects/*/instances/*/databases/*}" @@ -121,11 +126,13 @@ service DatabaseAdmin { option (google.api.method_signature) = "database"; } - // Sets the access control policy on a database resource. + // Sets the access control policy on a database or backup resource. // Replaces any existing policy. 
// // Authorization requires `spanner.databases.setIamPolicy` // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + // For backups, authorization requires `spanner.backups.setIamPolicy` + // permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. rpc SetIamPolicy(google.iam.v1.SetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy" @@ -138,12 +145,14 @@ service DatabaseAdmin { option (google.api.method_signature) = "resource,policy"; } - // Gets the access control policy for a database resource. - // Returns an empty policy if a database exists but does - // not have a policy set. + // Gets the access control policy for a database or backup resource. + // Returns an empty policy if a database or backup exists but does not have a + // policy set. // // Authorization requires `spanner.databases.getIamPolicy` permission on // [resource][google.iam.v1.GetIamPolicyRequest.resource]. + // For backups, authorization requires `spanner.backups.getIamPolicy` + // permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. rpc GetIamPolicy(google.iam.v1.GetIamPolicyRequest) returns (google.iam.v1.Policy) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy" @@ -156,12 +165,16 @@ service DatabaseAdmin { option (google.api.method_signature) = "resource"; } - // Returns permissions that the caller has on the specified database resource. + // Returns permissions that the caller has on the specified database or backup + // resource. // // Attempting this RPC on a non-existent Cloud Spanner database will // result in a NOT_FOUND error if the user has // `spanner.databases.list` permission on the containing Cloud // Spanner instance. Otherwise returns an empty set of permissions. 
+ // Calling this method on a backup that does not exist will + // result in a NOT_FOUND error if the user has + // `spanner.backups.list` permission on the containing instance. rpc TestIamPermissions(google.iam.v1.TestIamPermissionsRequest) returns (google.iam.v1.TestIamPermissionsResponse) { option (google.api.http) = { post: "/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions" @@ -173,6 +186,139 @@ service DatabaseAdmin { }; option (google.api.method_signature) = "resource,permissions"; } + + // Starts creating a new Cloud Spanner Backup. + // The returned backup [long-running operation][google.longrunning.Operation] + // will have a name of the format + // `projects//instances//backups//operations/` + // and can be used to track creation of the backup. The + // [metadata][google.longrunning.Operation.metadata] field type is + // [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + // [response][google.longrunning.Operation.response] field type is + // [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the + // creation and delete the backup. + // There can be only one pending backup creation per database. Backup creation + // of different databases can run concurrently. + rpc CreateBackup(CreateBackupRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/backups" + body: "backup" + }; + option (google.api.method_signature) = "parent,backup,backup_id"; + option (google.longrunning.operation_info) = { + response_type: "Backup" + metadata_type: "google.spanner.admin.database.v1.CreateBackupMetadata" + }; + } + + // Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. 
+ rpc GetBackup(GetBackupRequest) returns (Backup) { + option (google.api.http) = { + get: "/v1/{name=projects/*/instances/*/backups/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + rpc UpdateBackup(UpdateBackupRequest) returns (Backup) { + option (google.api.http) = { + patch: "/v1/{backup.name=projects/*/instances/*/backups/*}" + body: "backup" + }; + option (google.api.method_signature) = "backup,update_mask"; + } + + // Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + rpc DeleteBackup(DeleteBackupRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + delete: "/v1/{name=projects/*/instances/*/backups/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists completed and pending backups. + // Backups returned are ordered by `create_time` in descending order, + // starting from the most recent `create_time`. + rpc ListBackups(ListBackupsRequest) returns (ListBackupsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/backups" + }; + option (google.api.method_signature) = "parent"; + } + + // Create a new database by restoring from a completed backup. The new + // database must be in the same project and in an instance with the same + // instance configuration as the instance containing + // the backup. The returned database [long-running + // operation][google.longrunning.Operation] has a name of the format + // `projects//instances//databases//operations/`, + // and can be used to track the progress of the operation, and to cancel it. + // The [metadata][google.longrunning.Operation.metadata] field type is + // [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + // The [response][google.longrunning.Operation.response] type + // is [Database][google.spanner.admin.database.v1.Database], if + // successful. 
Cancelling the returned operation will stop the restore and + // delete the database. + // There can be only one database being restored into an instance at a time. + // Once the restore operation completes, a new restore operation can be + // initiated, without waiting for the optimize operation associated with the + // first restore to complete. + rpc RestoreDatabase(RestoreDatabaseRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1/{parent=projects/*/instances/*}/databases:restore" + body: "*" + }; + option (google.api.method_signature) = "parent,database_id,backup"; + option (google.longrunning.operation_info) = { + response_type: "google.spanner.admin.database.v1.Database" + metadata_type: "google.spanner.admin.database.v1.RestoreDatabaseMetadata" + }; + } + + // Lists database [longrunning-operations][google.longrunning.Operation]. + // A database operation has a name of the form + // `projects//instances//databases//operations/`. + // The long-running operation + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. Operations returned + // include those that have completed/failed/canceled within the last 7 days, + // and pending operations. + rpc ListDatabaseOperations(ListDatabaseOperationsRequest) returns (ListDatabaseOperationsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/databaseOperations" + }; + option (google.api.method_signature) = "parent"; + } + + // Lists the backup [long-running operations][google.longrunning.Operation] in + // the given instance. A backup operation has a name of the form + // `projects//instances//backups//operations/`. + // The long-running operation + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. 
Operations returned + // include those that have completed/failed/canceled within the last 7 days, + // and pending operations. Operations returned are ordered by + // `operation.metadata.value.progress.start_time` in descending order starting + // from the most recently started operation. + rpc ListBackupOperations(ListBackupOperationsRequest) returns (ListBackupOperationsResponse) { + option (google.api.http) = { + get: "/v1/{parent=projects/*/instances/*}/backupOperations" + }; + option (google.api.method_signature) = "parent"; + } +} + +// Information about the database restore. +message RestoreInfo { + // The type of the restore source. + RestoreSourceType source_type = 1; + + // Information about the source used to restore the database. + oneof source_info { + // Information about the backup used to restore the database. The backup + // may no longer exist. + BackupInfo backup_info = 2; + } } // A Cloud Spanner database. @@ -193,6 +339,16 @@ message Database { // The database is fully created and ready for use. READY = 2; + + // The database is fully created and ready for use, but is still + // being optimized for performance and cannot handle full load. + // + // In this state, the database still references the backup + // it was restore from, preventing the backup + // from being deleted. When optimizations are complete, the full performance + // of the database will be restored, and the database will transition to + // `READY` state. + READY_OPTIMIZING = 3; } // Required. The name of the database. Values are of the form @@ -200,10 +356,17 @@ message Database { // where `` is as specified in the `CREATE DATABASE` // statement. This name can be passed to other API methods to // identify the database. - string name = 1; + string name = 1 [(google.api.field_behavior) = REQUIRED]; // Output only. The current database state. - State state = 2; + State state = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. 
If exists, the time at which the database creation started. + google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Applicable only for restored databases. Contains information + // about the restore source. + RestoreInfo restore_info = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; } // The request for [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. @@ -256,11 +419,11 @@ message CreateDatabaseRequest { // database ID must be enclosed in backticks (`` ` ``). string create_statement = 2 [(google.api.field_behavior) = REQUIRED]; - // An optional list of DDL statements to run inside the newly created + // Optional. A list of DDL statements to run inside the newly created // database. Statements can create tables, indexes, etc. These // statements execute atomically with the creation of the database: // if there is an error in any statement, the database is not created. - repeated string extra_statements = 3; + repeated string extra_statements = 3 [(google.api.field_behavior) = OPTIONAL]; } // Metadata type for the operation returned by @@ -380,3 +543,184 @@ message GetDatabaseDdlResponse { // specified in the request. repeated string statements = 1; } + +// The request for +// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. +message ListDatabaseOperationsRequest { + // Required. The instance of the database operations. + // Values are of the form `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // An expression that filters the list of returned operations. + // + // A filter expression consists of a field name, a + // comparison operator, and a value for filtering. + // The value must be a string, a number, or a boolean. The comparison operator + // must be one of: `<`, `>`, `<=`, `>=`, `!=`, `=`, or `:`. 
+ // Colon `:` is the contains operator. Filter rules are not case sensitive. + // + // The following fields in the [Operation][google.longrunning.Operation] + // are eligible for filtering: + // + // * `name` - The name of the long-running operation + // * `done` - False if the operation is in progress, else true. + // * `metadata.@type` - the type of metadata. For example, the type string + // for [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata] is + // `type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata`. + // * `metadata.` - any field in metadata.value. + // * `error` - Error associated with the long-running operation. + // * `response.@type` - the type of response. + // * `response.` - any field in response.value. + // + // You can combine multiple expressions by enclosing each expression in + // parentheses. By default, expressions are combined with AND logic. However, + // you can specify AND, OR, and NOT logic explicitly. + // + // Here are a few examples: + // + // * `done:true` - The operation is complete. + // * `(metadata.@type=type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata) AND`
+ // `(metadata.source_type:BACKUP) AND`
+ // `(metadata.backup_info.backup:backup_howl) AND`
+ // `(metadata.name:restored_howl) AND`
+ // `(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") AND`
+ // `(error:*)` - Return operations where: + // * The operation's metadata type is [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + // * The database is restored from a backup. + // * The backup name contains "backup_howl". + // * The restored database's name contains "restored_howl". + // * The operation started before 2018-03-28T14:50:00Z. + // * The operation resulted in an error. + string filter = 2; + + // Number of operations to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + int32 page_size = 3; + + // If non-empty, `page_token` should contain a + // [next_page_token][google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token] + // from a previous [ListDatabaseOperationsResponse][google.spanner.admin.database.v1.ListDatabaseOperationsResponse] to the + // same `parent` and with the same `filter`. + string page_token = 4; +} + +// The response for +// [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. +message ListDatabaseOperationsResponse { + // The list of matching database [long-running + // operations][google.longrunning.Operation]. Each operation's name will be + // prefixed by the database's name. The operation's + // [metadata][google.longrunning.Operation.metadata] field type + // `metadata.type_url` describes the type of the metadata. + repeated google.longrunning.Operation operations = 1; + + // `next_page_token` can be sent in a subsequent + // [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations] + // call to fetch more of the matching metadata. + string next_page_token = 2; +} + +// The request for +// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. +message RestoreDatabaseRequest { + // Required. The name of the instance in which to create the + // restored database. 
This instance must be in the same project and + // have the same instance configuration as the instance containing + // the source backup. Values are of the form + // `projects//instances/`. + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Instance" + } + ]; + + // Required. The id of the database to create and restore to. This + // database must not already exist. The `database_id` appended to + // `parent` forms the full database name of the form + // `projects//instances//databases/`. + string database_id = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The source from which to restore. + oneof source { + // Name of the backup from which to restore. Values are of the form + // `projects//instances//backups/`. + string backup = 3 [(google.api.resource_reference) = { + type: "spanner.googleapis.com/Backup" + }]; + } +} + +// Metadata type for the long-running operation returned by +// [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. +message RestoreDatabaseMetadata { + // Name of the database being created and restored to. + string name = 1; + + // The type of the restore source. + RestoreSourceType source_type = 2; + + // Information about the source used to restore the database, as specified by + // `source` in [RestoreDatabaseRequest][google.spanner.admin.database.v1.RestoreDatabaseRequest]. + oneof source_info { + // Information about the backup used to restore the database. + BackupInfo backup_info = 3; + } + + // The progress of the + // [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase] + // operation. + OperationProgress progress = 4; + + // The time at which cancellation of this operation was received. + // [Operations.CancelOperation][google.longrunning.Operations.CancelOperation] + // starts asynchronous cancellation on a long-running operation. 
The server + // makes a best effort to cancel the operation, but success is not guaranteed. + // Clients can use + // [Operations.GetOperation][google.longrunning.Operations.GetOperation] or + // other methods to check whether the cancellation succeeded or whether the + // operation completed despite cancellation. On successful cancellation, + // the operation is not deleted; instead, it becomes an operation with + // an [Operation.error][google.longrunning.Operation.error] value with a + // [google.rpc.Status.code][google.rpc.Status.code] of 1, corresponding to `Code.CANCELLED`. + google.protobuf.Timestamp cancel_time = 5; + + // If exists, the name of the long-running operation that will be used to + // track the post-restore optimization process to optimize the performance of + // the restored database, and remove the dependency on the restore source. + // The name is of the form + // `projects//instances//databases//operations/` + // where the is the name of database being created and restored to. + // The metadata type of the long-running operation is + // [OptimizeRestoredDatabaseMetadata][google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. This long-running operation will be + // automatically created by the system after the RestoreDatabase long-running + // operation completes successfully. This operation will not be created if the + // restore was not successful. + string optimize_database_operation_name = 6; +} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored database. This long-running +// operation is automatically created by the system after the successful +// completion of a database restore, and cannot be cancelled. +message OptimizeRestoredDatabaseMetadata { + // Name of the restored database being optimized. + string name = 1; + + // The progress of the post-restore optimizations. 
+ OperationProgress progress = 2; +} + +// Indicates the type of the restore source. +enum RestoreSourceType { + // No restore associated. + TYPE_UNSPECIFIED = 0; + + // A backup was used as the source of the restore. + BACKUP = 1; +} diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py index 35fd22717e..125ab3f86b 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2.py @@ -5,6 +5,7 @@ import sys _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode("latin1")) +from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection @@ -26,6 +27,12 @@ ) from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2 from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.cloud.spanner_admin_database_v1.proto import ( + backup_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2, +) +from google.cloud.spanner_admin_database_v1.proto import ( + common_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2, +) DESCRIPTOR = _descriptor.FileDescriptor( @@ -36,7 +43,7 @@ "\n$com.google.spanner.admin.database.v1B\031SpannerDatabaseAdminProtoP\001ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\252\002&Google.Cloud.Spanner.Admin.Database.V1\312\002&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352AJ\n\037spanner.googleapis.com/Instance\022'projects/{project}/instances/{instance}" ), serialized_pb=_b( - '\nIgoogle/cloud/spanner/admin/database_v1/proto/spanner_database_admin.proto\x12 
google.spanner.admin.database.v1\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/iam/v1/iam_policy.proto\x1a\x1agoogle/iam/v1/policy.proto\x1a#google/longrunning/operations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto"\xf6\x01\n\x08\x44\x61tabase\x12\x0c\n\x04name\x18\x01 \x01(\t\x12?\n\x05state\x18\x02 \x01(\x0e\x32\x30.google.spanner.admin.database.v1.Database.State"7\n\x05State\x12\x15\n\x11STATE_UNSPECIFIED\x10\x00\x12\x0c\n\x08\x43REATING\x10\x01\x12\t\n\x05READY\x10\x02:b\xea\x41_\n\x1fspanner.googleapis.com/Database\x12\x82\xd3\xe4\x93\x02/\x12-/v1/{parent=projects/*/instances/*}/databases\xda\x41\x06parent\x12\xa4\x02\n\x0e\x43reateDatabase\x12\x37.google.spanner.admin.database.v1.CreateDatabaseRequest\x1a\x1d.google.longrunning.Operation"\xb9\x01\x82\xd3\xe4\x93\x02\x32"-/v1/{parent=projects/*/instances/*}/databases:\x01*\xda\x41\x17parent,create_statement\xca\x41\x64\n)google.spanner.admin.database.v1.Database\x12\x37google.spanner.admin.database.v1.CreateDatabaseMetadata\x12\xad\x01\n\x0bGetDatabase\x12\x34.google.spanner.admin.database.v1.GetDatabaseRequest\x1a*.google.spanner.admin.database.v1.Database"<\x82\xd3\xe4\x93\x02/\x12-/v1/{name=projects/*/instances/*/databases/*}\xda\x41\x04name\x12\x9d\x02\n\x11UpdateDatabaseDdl\x12:.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest\x1a\x1d.google.longrunning.Operation"\xac\x01\x82\xd3\xe4\x93\x02:25/v1/{database=projects/*/instances/*/databases/*}/ddl:\x01*\xda\x41\x13\x64\x61tabase,statements\xca\x41S\n\x15google.protobuf.Empty\x12:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata\x12\xa3\x01\n\x0c\x44ropDatabase\x12\x35.google.spanner.admin.database.v1.DropDatabaseRequest\x1a\x16.google.protobuf.Empty"D\x82\xd3\xe4\x93\x02\x33*1/v1/{database=projects/*/instances/*/databases/*}\xda\x41\x08\x64\x61tabase\x12\xcd\x01\n\x0eGetDatabaseDdl\x12\x37.google.sp
anner.admin.database.v1.GetDatabaseDdlRequest\x1a\x38.google.spanner.admin.database.v1.GetDatabaseDdlResponse"H\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{database=projects/*/instances/*/databases/*}/ddl\xda\x41\x08\x64\x61tabase\x12\xeb\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\x9f\x01\x82\xd3\xe4\x93\x02\x86\x01">/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy:\x01*ZA"/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy:\x01*ZA"\x82\xd3\xe4\x93\x02/\x12-/v1/{parent=projects/*/instances/*}/databases\xda\x41\x06parent\x12\xa4\x02\n\x0e\x43reateDatabase\x12\x37.google.spanner.admin.database.v1.CreateDatabaseRequest\x1a\x1d.google.longrunning.Operation"\xb9\x01\x82\xd3\xe4\x93\x02\x32"-/v1/{parent=projects/*/instances/*}/databases:\x01*\xda\x41\x17parent,create_statement\xca\x41\x64\n)google.spanner.admin.database.v1.Database\x12\x37google.spanner.admin.database.v1.CreateDatabaseMetadata\x12\xad\x01\n\x0bGetDatabase\x12\x34.google.spanner.admin.database.v1.GetDatabaseRequest\x1a*.google.spanner.admin.database.v1.Database"<\x82\xd3\xe4\x93\x02/\x12-/v1/{name=projects/*/instances/*/databases/*}\xda\x41\x04name\x12\x9d\x02\n\x11UpdateDatabaseDdl\x12:.google.spanner.admin.database.v1.UpdateDatabaseDdlRequest\x1a\x1d.google.longrunning.Operation"\xac\x01\x82\xd3\xe4\x93\x02:25/v1/{database=projects/*/instances/*/databases/*}/ddl:\x01*\xda\x41\x13\x64\x61tabase,statements\xca\x41S\n\x15google.protobuf.Empty\x12:google.spanner.admin.database.v1.UpdateDatabaseDdlMetadata\x12\xa3\x01\n\x0c\x44ropDatabase\x12\x35.google.spanner.admin.database.v1.DropDatabaseRequest\x1a\x16.google.protobuf.Empty"D\x82\xd3\xe4\x93\x02\x33*1/v1/{database=projects/*/instances/*/databases/*}\xda\x41\x08\x64\x61tabase\x12\xcd\x01\n\x0eGetDatabaseDdl\x12\x37.google.spanner.admin.database.v1.GetDatabaseDdlRequest\x1a\x38.google.spanner.admin.database.v1.GetDatabaseDdlResponse"H\x82\xd3\xe4\x93\x02\x37\x12\x35/v1/{database=projects/*/ins
tances/*/databases/*}/ddl\xda\x41\x08\x64\x61tabase\x12\xeb\x01\n\x0cSetIamPolicy\x12".google.iam.v1.SetIamPolicyRequest\x1a\x15.google.iam.v1.Policy"\x9f\x01\x82\xd3\xe4\x93\x02\x86\x01">/v1/{resource=projects/*/instances/*/databases/*}:setIamPolicy:\x01*ZA"/v1/{resource=projects/*/instances/*/databases/*}:getIamPolicy:\x01*ZA".google.spanner.admin.database.v1.ListBackupOperationsResponse"E\x82\xd3\xe4\x93\x02\x36\x12\x34/v1/{parent=projects/*/instances/*}/backupOperations\xda\x41\x06parent\x1ax\xca\x41\x16spanner.googleapis.com\xd2\x41\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.adminB\xac\x02\n$com.google.spanner.admin.database.v1B\x19SpannerDatabaseAdminProtoP\x01ZHgoogle.golang.org/genproto/googleapis/spanner/admin/database/v1;database\xaa\x02&Google.Cloud.Spanner.Admin.Database.V1\xca\x02&Google\\Cloud\\Spanner\\Admin\\Database\\V1\xea\x41J\n\x1fspanner.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}b\x06proto3' ), dependencies=[ google_dot_api_dot_annotations__pb2.DESCRIPTOR, @@ -48,8 +55,38 @@ google_dot_longrunning_dot_operations__pb2.DESCRIPTOR, google_dot_protobuf_dot_empty__pb2.DESCRIPTOR, google_dot_protobuf_dot_timestamp__pb2.DESCRIPTOR, + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.DESCRIPTOR, + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2.DESCRIPTOR, + ], +) + +_RESTORESOURCETYPE = _descriptor.EnumDescriptor( + name="RestoreSourceType", + full_name="google.spanner.admin.database.v1.RestoreSourceType", + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name="TYPE_UNSPECIFIED", + index=0, + number=0, + serialized_options=None, + type=None, + ), + _descriptor.EnumValueDescriptor( + name="BACKUP", index=1, number=1, serialized_options=None, type=None + ), ], + containing_type=None, + serialized_options=None, + serialized_start=3044, + serialized_end=3097, ) 
+_sym_db.RegisterEnumDescriptor(_RESTORESOURCETYPE) + +RestoreSourceType = enum_type_wrapper.EnumTypeWrapper(_RESTORESOURCETYPE) +TYPE_UNSPECIFIED = 0 +BACKUP = 1 _DATABASE_STATE = _descriptor.EnumDescriptor( @@ -71,15 +108,87 @@ _descriptor.EnumValueDescriptor( name="READY", index=2, number=2, serialized_options=None, type=None ), + _descriptor.EnumValueDescriptor( + name="READY_OPTIMIZING", + index=3, + number=3, + serialized_options=None, + type=None, + ), ], containing_type=None, serialized_options=None, - serialized_start=477, - serialized_end=532, + serialized_start=907, + serialized_end=984, ) _sym_db.RegisterEnumDescriptor(_DATABASE_STATE) +_RESTOREINFO = _descriptor.Descriptor( + name="RestoreInfo", + full_name="google.spanner.admin.database.v1.RestoreInfo", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="source_type", + full_name="google.spanner.admin.database.v1.RestoreInfo.source_type", + index=0, + number=1, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.spanner.admin.database.v1.RestoreInfo.backup_info", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.spanner.admin.database.v1.RestoreInfo.source_info", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=504, 
+ serialized_end=675, +) + + _DATABASE = _descriptor.Descriptor( name="Database", full_name="google.spanner.admin.database.v1.Database", @@ -102,7 +211,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\002"), file=DESCRIPTOR, ), _descriptor.FieldDescriptor( @@ -120,7 +229,43 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="create_time", + full_name="google.spanner.admin.database.v1.Database.create_time", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="restore_info", + full_name="google.spanner.admin.database.v1.Database.restore_info", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\003"), file=DESCRIPTOR, ), ], @@ -134,8 +279,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=386, - serialized_end=632, + serialized_start=678, + serialized_end=1084, ) @@ -211,8 +356,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=634, - serialized_end=752, + serialized_start=1086, + serialized_end=1204, ) @@ -268,8 +413,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=754, - serialized_end=865, + serialized_start=1206, + serialized_end=1317, ) @@ -333,7 +478,7 @@ containing_type=None, is_extension=False, extension_scope=None, - serialized_options=None, + serialized_options=_b("\340A\001"), file=DESCRIPTOR, ), ], @@ -345,8 +490,8 @@ 
syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=868, - serialized_end=1005, + serialized_start=1320, + serialized_end=1462, ) @@ -384,8 +529,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1007, - serialized_end=1087, + serialized_start=1464, + serialized_end=1544, ) @@ -425,8 +570,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1089, - serialized_end=1164, + serialized_start=1546, + serialized_end=1621, ) @@ -502,8 +647,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1167, - serialized_end=1299, + serialized_start=1624, + serialized_end=1756, ) @@ -577,8 +722,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1302, - serialized_end=1460, + serialized_start=1759, + serialized_end=1917, ) @@ -618,8 +763,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1462, - serialized_end=1542, + serialized_start=1919, + serialized_end=1999, ) @@ -659,8 +804,8 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1544, - serialized_end=1626, + serialized_start=2001, + serialized_end=2083, ) @@ -698,80 +843,603 @@ syntax="proto3", extension_ranges=[], oneofs=[], - serialized_start=1628, - serialized_end=1672, + serialized_start=2085, + serialized_end=2129, ) -_DATABASE.fields_by_name["state"].enum_type = _DATABASE_STATE -_DATABASE_STATE.containing_type = _DATABASE -_LISTDATABASESRESPONSE.fields_by_name["databases"].message_type = _DATABASE -_UPDATEDATABASEDDLMETADATA.fields_by_name[ - "commit_timestamps" -].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP -DESCRIPTOR.message_types_by_name["Database"] = _DATABASE -DESCRIPTOR.message_types_by_name["ListDatabasesRequest"] = _LISTDATABASESREQUEST -DESCRIPTOR.message_types_by_name["ListDatabasesResponse"] = _LISTDATABASESRESPONSE -DESCRIPTOR.message_types_by_name["CreateDatabaseRequest"] = _CREATEDATABASEREQUEST 
-DESCRIPTOR.message_types_by_name["CreateDatabaseMetadata"] = _CREATEDATABASEMETADATA -DESCRIPTOR.message_types_by_name["GetDatabaseRequest"] = _GETDATABASEREQUEST -DESCRIPTOR.message_types_by_name["UpdateDatabaseDdlRequest"] = _UPDATEDATABASEDDLREQUEST -DESCRIPTOR.message_types_by_name[ - "UpdateDatabaseDdlMetadata" -] = _UPDATEDATABASEDDLMETADATA -DESCRIPTOR.message_types_by_name["DropDatabaseRequest"] = _DROPDATABASEREQUEST -DESCRIPTOR.message_types_by_name["GetDatabaseDdlRequest"] = _GETDATABASEDDLREQUEST -DESCRIPTOR.message_types_by_name["GetDatabaseDdlResponse"] = _GETDATABASEDDLRESPONSE -_sym_db.RegisterFileDescriptor(DESCRIPTOR) -Database = _reflection.GeneratedProtocolMessageType( - "Database", - (_message.Message,), - dict( - DESCRIPTOR=_DATABASE, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""A Cloud Spanner database. - - - Attributes: - name: - Required. The name of the database. Values are of the form ``p - rojects//instances//databases/``, - where ```` is as specified in the ``CREATE - DATABASE`` statement. This name can be passed to other API - methods to identify the database. - state: - Output only. The current database state. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Database) - ), +_LISTDATABASEOPERATIONSREQUEST = _descriptor.Descriptor( + name="ListDatabaseOperationsRequest", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b( + "\340A\002\372A!\n\037spanner.googleapis.com/Instance" + ), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="filter", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsRequest.filter", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_size", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsRequest.page_size", + index=2, + number=3, + type=5, + cpp_type=1, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="page_token", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsRequest.page_token", + index=3, + number=4, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2132, + serialized_end=2275, ) -_sym_db.RegisterMessage(Database) -ListDatabasesRequest = _reflection.GeneratedProtocolMessageType( - "ListDatabasesRequest", - (_message.Message,), - dict( - DESCRIPTOR=_LISTDATABASESREQUEST, - __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", - __doc__="""The request for - [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. - - - Attributes: - parent: - Required. The instance whose databases should be listed. - Values are of the form - ``projects//instances/``. - page_size: - Number of databases to be returned in the response. If 0 or - less, defaults to the server's maximum allowed page size. - page_token: - If non-empty, ``page_token`` should contain a [next\_page\_tok - en][google.spanner.admin.database.v1.ListDatabasesResponse.nex - t\_page\_token] from a previous [ListDatabasesResponse][google - .spanner.admin.database.v1.ListDatabasesResponse]. 
- """, - # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesRequest) + +_LISTDATABASEOPERATIONSRESPONSE = _descriptor.Descriptor( + name="ListDatabaseOperationsResponse", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsResponse", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="operations", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsResponse.operations", + index=0, + number=1, + type=11, + cpp_type=10, + label=3, + has_default_value=False, + default_value=[], + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="next_page_token", + full_name="google.spanner.admin.database.v1.ListDatabaseOperationsResponse.next_page_token", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2277, + serialized_end=2385, +) + + +_RESTOREDATABASEREQUEST = _descriptor.Descriptor( + name="RestoreDatabaseRequest", + full_name="google.spanner.admin.database.v1.RestoreDatabaseRequest", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="parent", + full_name="google.spanner.admin.database.v1.RestoreDatabaseRequest.parent", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + serialized_options=_b( + "\340A\002\372A!\n\037spanner.googleapis.com/Instance" + ), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="database_id", + full_name="google.spanner.admin.database.v1.RestoreDatabaseRequest.database_id", + index=1, + number=2, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\340A\002"), + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="backup", + full_name="google.spanner.admin.database.v1.RestoreDatabaseRequest.backup", + index=2, + number=3, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=_b("\372A\037\n\035spanner.googleapis.com/Backup"), + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source", + full_name="google.spanner.admin.database.v1.RestoreDatabaseRequest.source", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=2388, + serialized_end=2559, +) + + +_RESTOREDATABASEMETADATA = _descriptor.Descriptor( + name="RestoreDatabaseMetadata", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + 
extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="source_type", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.source_type", + index=1, + number=2, + type=14, + cpp_type=8, + label=1, + has_default_value=False, + default_value=0, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="backup_info", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.backup_info", + index=2, + number=3, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.progress", + index=3, + number=4, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="cancel_time", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.cancel_time", + index=4, + number=5, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="optimize_database_operation_name", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.optimize_database_operation_name", + index=5, + number=6, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + 
message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name="source_info", + full_name="google.spanner.admin.database.v1.RestoreDatabaseMetadata.source_info", + index=0, + containing_type=None, + fields=[], + ) + ], + serialized_start=2562, + serialized_end=2921, +) + + +_OPTIMIZERESTOREDDATABASEMETADATA = _descriptor.Descriptor( + name="OptimizeRestoredDatabaseMetadata", + full_name="google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata", + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name="name", + full_name="google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.name", + index=0, + number=1, + type=9, + cpp_type=9, + label=1, + has_default_value=False, + default_value=_b("").decode("utf-8"), + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + _descriptor.FieldDescriptor( + name="progress", + full_name="google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata.progress", + index=1, + number=2, + type=11, + cpp_type=10, + label=1, + has_default_value=False, + default_value=None, + message_type=None, + enum_type=None, + containing_type=None, + is_extension=False, + extension_scope=None, + serialized_options=None, + file=DESCRIPTOR, + ), + ], + extensions=[], + nested_types=[], + enum_types=[], + serialized_options=None, + is_extendable=False, + syntax="proto3", + extension_ranges=[], + oneofs=[], + serialized_start=2923, + serialized_end=3042, +) + +_RESTOREINFO.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE +_RESTOREINFO.fields_by_name[ + "backup_info" 
+].message_type = ( + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._BACKUPINFO +) +_RESTOREINFO.oneofs_by_name["source_info"].fields.append( + _RESTOREINFO.fields_by_name["backup_info"] +) +_RESTOREINFO.fields_by_name[ + "backup_info" +].containing_oneof = _RESTOREINFO.oneofs_by_name["source_info"] +_DATABASE.fields_by_name["state"].enum_type = _DATABASE_STATE +_DATABASE.fields_by_name[ + "create_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_DATABASE.fields_by_name["restore_info"].message_type = _RESTOREINFO +_DATABASE_STATE.containing_type = _DATABASE +_LISTDATABASESRESPONSE.fields_by_name["databases"].message_type = _DATABASE +_UPDATEDATABASEDDLMETADATA.fields_by_name[ + "commit_timestamps" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_LISTDATABASEOPERATIONSRESPONSE.fields_by_name[ + "operations" +].message_type = google_dot_longrunning_dot_operations__pb2._OPERATION +_RESTOREDATABASEREQUEST.oneofs_by_name["source"].fields.append( + _RESTOREDATABASEREQUEST.fields_by_name["backup"] +) +_RESTOREDATABASEREQUEST.fields_by_name[ + "backup" +].containing_oneof = _RESTOREDATABASEREQUEST.oneofs_by_name["source"] +_RESTOREDATABASEMETADATA.fields_by_name["source_type"].enum_type = _RESTORESOURCETYPE +_RESTOREDATABASEMETADATA.fields_by_name[ + "backup_info" +].message_type = ( + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._BACKUPINFO +) +_RESTOREDATABASEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +_RESTOREDATABASEMETADATA.fields_by_name[ + "cancel_time" +].message_type = google_dot_protobuf_dot_timestamp__pb2._TIMESTAMP +_RESTOREDATABASEMETADATA.oneofs_by_name["source_info"].fields.append( + _RESTOREDATABASEMETADATA.fields_by_name["backup_info"] +) +_RESTOREDATABASEMETADATA.fields_by_name[ + "backup_info" +].containing_oneof = 
_RESTOREDATABASEMETADATA.oneofs_by_name["source_info"] +_OPTIMIZERESTOREDDATABASEMETADATA.fields_by_name[ + "progress" +].message_type = ( + google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_common__pb2._OPERATIONPROGRESS +) +DESCRIPTOR.message_types_by_name["RestoreInfo"] = _RESTOREINFO +DESCRIPTOR.message_types_by_name["Database"] = _DATABASE +DESCRIPTOR.message_types_by_name["ListDatabasesRequest"] = _LISTDATABASESREQUEST +DESCRIPTOR.message_types_by_name["ListDatabasesResponse"] = _LISTDATABASESRESPONSE +DESCRIPTOR.message_types_by_name["CreateDatabaseRequest"] = _CREATEDATABASEREQUEST +DESCRIPTOR.message_types_by_name["CreateDatabaseMetadata"] = _CREATEDATABASEMETADATA +DESCRIPTOR.message_types_by_name["GetDatabaseRequest"] = _GETDATABASEREQUEST +DESCRIPTOR.message_types_by_name["UpdateDatabaseDdlRequest"] = _UPDATEDATABASEDDLREQUEST +DESCRIPTOR.message_types_by_name[ + "UpdateDatabaseDdlMetadata" +] = _UPDATEDATABASEDDLMETADATA +DESCRIPTOR.message_types_by_name["DropDatabaseRequest"] = _DROPDATABASEREQUEST +DESCRIPTOR.message_types_by_name["GetDatabaseDdlRequest"] = _GETDATABASEDDLREQUEST +DESCRIPTOR.message_types_by_name["GetDatabaseDdlResponse"] = _GETDATABASEDDLRESPONSE +DESCRIPTOR.message_types_by_name[ + "ListDatabaseOperationsRequest" +] = _LISTDATABASEOPERATIONSREQUEST +DESCRIPTOR.message_types_by_name[ + "ListDatabaseOperationsResponse" +] = _LISTDATABASEOPERATIONSRESPONSE +DESCRIPTOR.message_types_by_name["RestoreDatabaseRequest"] = _RESTOREDATABASEREQUEST +DESCRIPTOR.message_types_by_name["RestoreDatabaseMetadata"] = _RESTOREDATABASEMETADATA +DESCRIPTOR.message_types_by_name[ + "OptimizeRestoredDatabaseMetadata" +] = _OPTIMIZERESTOREDDATABASEMETADATA +DESCRIPTOR.enum_types_by_name["RestoreSourceType"] = _RESTORESOURCETYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +RestoreInfo = _reflection.GeneratedProtocolMessageType( + "RestoreInfo", + (_message.Message,), + dict( + DESCRIPTOR=_RESTOREINFO, + 
__module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""Information about the database restore. + + + Attributes: + source_type: + The type of the restore source. + source_info: + Information about the source used to restore the database. + backup_info: + Information about the backup used to restore the database. The + backup may no longer exist. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreInfo) + ), +) +_sym_db.RegisterMessage(RestoreInfo) + +Database = _reflection.GeneratedProtocolMessageType( + "Database", + (_message.Message,), + dict( + DESCRIPTOR=_DATABASE, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""A Cloud Spanner database. + + + Attributes: + name: + Required. The name of the database. Values are of the form ``p + rojects//instances//databases/``, + where ```` is as specified in the ``CREATE + DATABASE`` statement. This name can be passed to other API + methods to identify the database. + state: + Output only. The current database state. + create_time: + Output only. If exists, the time at which the database + creation started. + restore_info: + Output only. Applicable only for restored databases. Contains + information about the restore source. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.Database) + ), +) +_sym_db.RegisterMessage(Database) + +ListDatabasesRequest = _reflection.GeneratedProtocolMessageType( + "ListDatabasesRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTDATABASESREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""The request for + [ListDatabases][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabases]. + + + Attributes: + parent: + Required. The instance whose databases should be listed. + Values are of the form + ``projects//instances/``. 
+ page_size: + Number of databases to be returned in the response. If 0 or + less, defaults to the server's maximum allowed page size. + page_token: + If non-empty, ``page_token`` should contain a [next\_page\_tok + en][google.spanner.admin.database.v1.ListDatabasesResponse.nex + t\_page\_token] from a previous [ListDatabasesResponse][google + .spanner.admin.database.v1.ListDatabasesResponse]. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabasesRequest) ), ) _sym_db.RegisterMessage(ListDatabasesRequest) @@ -822,7 +1490,7 @@ reserved word or if it contains a hyphen, the database ID must be enclosed in backticks (`````). extra_statements: - An optional list of DDL statements to run inside the newly + Optional. A list of DDL statements to run inside the newly created database. Statements can create tables, indexes, etc. These statements execute atomically with the creation of the database: if there is an error in any statement, the database @@ -1013,12 +1681,230 @@ ) _sym_db.RegisterMessage(GetDatabaseDdlResponse) +ListDatabaseOperationsRequest = _reflection.GeneratedProtocolMessageType( + "ListDatabaseOperationsRequest", + (_message.Message,), + dict( + DESCRIPTOR=_LISTDATABASEOPERATIONSREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""The request for + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. + + + Attributes: + parent: + Required. The instance of the database operations. Values are + of the form ``projects//instances/``. + filter: + An expression that filters the list of returned operations. A + filter expression consists of a field name, a comparison + operator, and a value for filtering. The value must be a + string, a number, or a boolean. The comparison operator must + be one of: ``<``, ``>``, ``<=``, ``>=``, ``!=``, ``=``, or + ``:``. Colon ``:`` is the contains operator. 
Filter rules are + not case sensitive. The following fields in the + [Operation][google.longrunning.Operation] are eligible for + filtering: - ``name`` - The name of the long-running + operation - ``done`` - False if the operation is in progress, + else true. - ``metadata.@type`` - the type of metadata. For + example, the type string for [RestoreDatabaseMetadata][g + oogle.spanner.admin.database.v1.RestoreDatabaseMetadata] is + ``type.googleapis.com/google.spanner.admin.database.v1.Restore + DatabaseMetadata``. - ``metadata.`` - any field + in metadata.value. - ``error`` - Error associated with the + long-running operation. - ``response.@type`` - the type of + response. - ``response.`` - any field in + response.value. You can combine multiple expressions by + enclosing each expression in parentheses. By default, + expressions are combined with AND logic. However, you can + specify AND, OR, and NOT logic explicitly. Here are a few + examples: - ``done:true`` - The operation is complete. - `` + (metadata.@type=type.googleapis.com/google.spanner.admin.datab + ase.v1.RestoreDatabaseMetadata) AND`` + ``(metadata.source_type:BACKUP) AND`` + ``(metadata.backup_info.backup:backup_howl) AND`` + ``(metadata.name:restored_howl) AND`` + ``(metadata.progress.start_time < \"2018-03-28T14:50:00Z\") + AND`` ``(error:*)`` - Return operations where: - The + operation's metadata type is [RestoreDatabaseMetadata][g + oogle.spanner.admin.database.v1.RestoreDatabaseMetadata]. - + The database is restored from a backup. - The backup name + contains "backup\_howl". - The restored database's name + contains "restored\_howl". - The operation started before + 2018-03-28T14:50:00Z. - The operation resulted in an + error. + page_size: + Number of operations to be returned in the response. If 0 or + less, defaults to the server's maximum allowed page size. 
+ page_token: + If non-empty, ``page_token`` should contain a [next\_page\_tok + en][google.spanner.admin.database.v1.ListDatabaseOperationsRes + ponse.next\_page\_token] from a previous [ListDatabaseOperatio + nsResponse][google.spanner.admin.database.v1.ListDatabaseOpera + tionsResponse] to the same ``parent`` and with the same + ``filter``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseOperationsRequest) + ), +) +_sym_db.RegisterMessage(ListDatabaseOperationsRequest) + +ListDatabaseOperationsResponse = _reflection.GeneratedProtocolMessageType( + "ListDatabaseOperationsResponse", + (_message.Message,), + dict( + DESCRIPTOR=_LISTDATABASEOPERATIONSRESPONSE, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""The response for + [ListDatabaseOperations][google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations]. + + + Attributes: + operations: + The list of matching database [long-running + operations][google.longrunning.Operation]. Each operation's + name will be prefixed by the database's name. The operation's + [metadata][google.longrunning.Operation.metadata] field type + ``metadata.type_url`` describes the type of the metadata. + next_page_token: + \ ``next_page_token`` can be sent in a subsequent [ListDatabas + eOperations][google.spanner.admin.database.v1.DatabaseAdmin.Li + stDatabaseOperations] call to fetch more of the matching + metadata. 
+ """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.ListDatabaseOperationsResponse) + ), +) +_sym_db.RegisterMessage(ListDatabaseOperationsResponse) + +RestoreDatabaseRequest = _reflection.GeneratedProtocolMessageType( + "RestoreDatabaseRequest", + (_message.Message,), + dict( + DESCRIPTOR=_RESTOREDATABASEREQUEST, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""The request for + [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. + + + Attributes: + parent: + Required. The name of the instance in which to create the + restored database. This instance must be in the same project + and have the same instance configuration as the instance + containing the source backup. Values are of the form + ``projects//instances/``. + database_id: + Required. The id of the database to create and restore to. + This database must not already exist. The ``database_id`` + appended to ``parent`` forms the full database name of the + form ``projects//instances//databases/``. + source: + Required. The source from which to restore. + backup: + Name of the backup from which to restore. Values are of the + form + ``projects//instances//backups/``. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseRequest) + ), +) +_sym_db.RegisterMessage(RestoreDatabaseRequest) + +RestoreDatabaseMetadata = _reflection.GeneratedProtocolMessageType( + "RestoreDatabaseMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_RESTOREDATABASEMETADATA, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""Metadata type for the long-running operation returned by + [RestoreDatabase][google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase]. + + + Attributes: + name: + Name of the database being created and restored to. + source_type: + The type of the restore source. 
+ source_info: + Information about the source used to restore the database, as + specified by ``source`` in [RestoreDatabaseRequest][google.spa + nner.admin.database.v1.RestoreDatabaseRequest]. + backup_info: + Information about the backup used to restore the database. + progress: + The progress of the [RestoreDatabase][google.spanner.admin.dat + abase.v1.DatabaseAdmin.RestoreDatabase] operation. + cancel_time: + The time at which cancellation of this operation was received. + [Operations.CancelOperation][google.longrunning.Operations.Can + celOperation] starts asynchronous cancellation on a long- + running operation. The server makes a best effort to cancel + the operation, but success is not guaranteed. Clients can use + [Operations.GetOperation][google.longrunning.Operations.GetOpe + ration] or other methods to check whether the cancellation + succeeded or whether the operation completed despite + cancellation. On successful cancellation, the operation is not + deleted; instead, it becomes an operation with an + [Operation.error][google.longrunning.Operation.error] value + with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + corresponding to ``Code.CANCELLED``. + optimize_database_operation_name: + If exists, the name of the long-running operation that will be + used to track the post-restore optimization process to + optimize the performance of the restored database, and remove + the dependency on the restore source. The name is of the form + ``projects//instances//databases/ + /operations/`` where the is the name of database + being created and restored to. The metadata type of the long- + running operation is [OptimizeRestoredDatabaseMetadata][google + .spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata]. + This long-running operation will be automatically created by + the system after the RestoreDatabase long-running operation + completes successfully. This operation will not be created if + the restore was not successful. 
+ """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.RestoreDatabaseMetadata) + ), +) +_sym_db.RegisterMessage(RestoreDatabaseMetadata) + +OptimizeRestoredDatabaseMetadata = _reflection.GeneratedProtocolMessageType( + "OptimizeRestoredDatabaseMetadata", + (_message.Message,), + dict( + DESCRIPTOR=_OPTIMIZERESTOREDDATABASEMETADATA, + __module__="google.cloud.spanner.admin.database_v1.proto.spanner_database_admin_pb2", + __doc__="""Metadata type for the long-running operation used to track + the progress of optimizations performed on a newly restored database. + This long-running operation is automatically created by the system after + the successful completion of a database restore, and cannot be + cancelled. + + + Attributes: + name: + Name of the restored database being optimized. + progress: + The progress of the post-restore optimizations. + """, + # @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata) + ), +) +_sym_db.RegisterMessage(OptimizeRestoredDatabaseMetadata) + DESCRIPTOR._options = None +_DATABASE.fields_by_name["name"]._options = None +_DATABASE.fields_by_name["state"]._options = None +_DATABASE.fields_by_name["create_time"]._options = None +_DATABASE.fields_by_name["restore_info"]._options = None _DATABASE._options = None _LISTDATABASESREQUEST.fields_by_name["parent"]._options = None _CREATEDATABASEREQUEST.fields_by_name["parent"]._options = None _CREATEDATABASEREQUEST.fields_by_name["create_statement"]._options = None +_CREATEDATABASEREQUEST.fields_by_name["extra_statements"]._options = None _CREATEDATABASEMETADATA.fields_by_name["database"]._options = None _GETDATABASEREQUEST.fields_by_name["name"]._options = None _UPDATEDATABASEDDLREQUEST.fields_by_name["database"]._options = None @@ -1026,6 +1912,10 @@ _UPDATEDATABASEDDLMETADATA.fields_by_name["database"]._options = None _DROPDATABASEREQUEST.fields_by_name["database"]._options = None 
_GETDATABASEDDLREQUEST.fields_by_name["database"]._options = None +_LISTDATABASEOPERATIONSREQUEST.fields_by_name["parent"]._options = None +_RESTOREDATABASEREQUEST.fields_by_name["parent"]._options = None +_RESTOREDATABASEREQUEST.fields_by_name["database_id"]._options = None +_RESTOREDATABASEREQUEST.fields_by_name["backup"]._options = None _DATABASEADMIN = _descriptor.ServiceDescriptor( name="DatabaseAdmin", @@ -1035,8 +1925,8 @@ serialized_options=_b( "\312A\026spanner.googleapis.com\322A\\https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/spanner.admin" ), - serialized_start=1675, - serialized_end=3896, + serialized_start=3100, + serialized_end=7054, methods=[ _descriptor.MethodDescriptor( name="ListDatabases", @@ -1137,6 +2027,94 @@ '\202\323\344\223\002\222\001"D/v1/{resource=projects/*/instances/*/databases/*}:testIamPermissions:\001*ZG"B/v1/{resource=projects/*/instances/*/backups/*}:testIamPermissions:\001*\332A\024resource,permissions' ), ), + _descriptor.MethodDescriptor( + name="CreateBackup", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.CreateBackup", + index=9, + containing_service=None, + input_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._CREATEBACKUPREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=_b( + '\202\323\344\223\0025"+/v1/{parent=projects/*/instances/*}/backups:\006backup\332A\027parent,backup,backup_id\312A?\n\006Backup\0225google.spanner.admin.database.v1.CreateBackupMetadata' + ), + ), + _descriptor.MethodDescriptor( + name="GetBackup", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.GetBackup", + index=10, + containing_service=None, + input_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._GETBACKUPREQUEST, + output_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._BACKUP, + serialized_options=_b( + 
"\202\323\344\223\002-\022+/v1/{name=projects/*/instances/*/backups/*}\332A\004name" + ), + ), + _descriptor.MethodDescriptor( + name="UpdateBackup", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.UpdateBackup", + index=11, + containing_service=None, + input_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._UPDATEBACKUPREQUEST, + output_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._BACKUP, + serialized_options=_b( + "\202\323\344\223\002<22/v1/{backup.name=projects/*/instances/*/backups/*}:\006backup\332A\022backup,update_mask" + ), + ), + _descriptor.MethodDescriptor( + name="DeleteBackup", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.DeleteBackup", + index=12, + containing_service=None, + input_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._DELETEBACKUPREQUEST, + output_type=google_dot_protobuf_dot_empty__pb2._EMPTY, + serialized_options=_b( + "\202\323\344\223\002-*+/v1/{name=projects/*/instances/*/backups/*}\332A\004name" + ), + ), + _descriptor.MethodDescriptor( + name="ListBackups", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.ListBackups", + index=13, + containing_service=None, + input_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._LISTBACKUPSREQUEST, + output_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._LISTBACKUPSRESPONSE, + serialized_options=_b( + "\202\323\344\223\002-\022+/v1/{parent=projects/*/instances/*}/backups\332A\006parent" + ), + ), + _descriptor.MethodDescriptor( + name="RestoreDatabase", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.RestoreDatabase", + index=14, + containing_service=None, + input_type=_RESTOREDATABASEREQUEST, + output_type=google_dot_longrunning_dot_operations__pb2._OPERATION, + serialized_options=_b( + 
'\202\323\344\223\002:"5/v1/{parent=projects/*/instances/*}/databases:restore:\001*\332A\031parent,database_id,backup\312Ae\n)google.spanner.admin.database.v1.Database\0228google.spanner.admin.database.v1.RestoreDatabaseMetadata' + ), + ), + _descriptor.MethodDescriptor( + name="ListDatabaseOperations", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.ListDatabaseOperations", + index=15, + containing_service=None, + input_type=_LISTDATABASEOPERATIONSREQUEST, + output_type=_LISTDATABASEOPERATIONSRESPONSE, + serialized_options=_b( + "\202\323\344\223\0028\0226/v1/{parent=projects/*/instances/*}/databaseOperations\332A\006parent" + ), + ), + _descriptor.MethodDescriptor( + name="ListBackupOperations", + full_name="google.spanner.admin.database.v1.DatabaseAdmin.ListBackupOperations", + index=16, + containing_service=None, + input_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._LISTBACKUPOPERATIONSREQUEST, + output_type=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2._LISTBACKUPOPERATIONSRESPONSE, + serialized_options=_b( + "\202\323\344\223\0026\0224/v1/{parent=projects/*/instances/*}/backupOperations\332A\006parent" + ), + ), ], ) _sym_db.RegisterServiceDescriptor(_DATABASEADMIN) diff --git a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py index 2491691e6b..8ecb673158 100644 --- a/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py +++ b/google/cloud/spanner_admin_database_v1/proto/spanner_database_admin_pb2_grpc.py @@ -1,6 +1,9 @@ # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
import grpc +from google.cloud.spanner_admin_database_v1.proto import ( + backup_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2, +) from google.cloud.spanner_admin_database_v1.proto import ( spanner_database_admin_pb2 as google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2, ) @@ -17,7 +20,8 @@ class DatabaseAdminStub(object): The Cloud Spanner Database Admin API can be used to create, drop, and list databases. It also enables updating the schema of pre-existing - databases. + databases. It can be also used to create, delete and list backups for a + database and to restore from an existing backup. """ def __init__(self, channel): @@ -71,6 +75,46 @@ def __init__(self, channel): request_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.FromString, ) + self.CreateBackup = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/CreateBackup", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.CreateBackupRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.GetBackup = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/GetBackup", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.GetBackupRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.Backup.FromString, + ) + self.UpdateBackup = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/UpdateBackup", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.UpdateBackupRequest.SerializeToString, + 
response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.Backup.FromString, + ) + self.DeleteBackup = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/DeleteBackup", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.DeleteBackupRequest.SerializeToString, + response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString, + ) + self.ListBackups = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackups", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupsResponse.FromString, + ) + self.RestoreDatabase = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/RestoreDatabase", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.SerializeToString, + response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString, + ) + self.ListDatabaseOperations = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListDatabaseOperations", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.SerializeToString, + response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.FromString, + ) + self.ListBackupOperations = channel.unary_unary( + "/google.spanner.admin.database.v1.DatabaseAdmin/ListBackupOperations", + request_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsRequest.SerializeToString, + 
response_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsResponse.FromString, + ) class DatabaseAdminServicer(object): @@ -78,7 +122,8 @@ class DatabaseAdminServicer(object): The Cloud Spanner Database Admin API can be used to create, drop, and list databases. It also enables updating the schema of pre-existing - databases. + databases. It can be also used to create, delete and list backups for a + database and to restore from an existing backup. """ def ListDatabases(self, request, context): @@ -124,6 +169,8 @@ def UpdateDatabaseDdl(self, request, context): def DropDatabase(self, request, context): """Drops (aka deletes) a Cloud Spanner database. + Completed backups for the database will be retained according to their + `expire_time`. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -139,35 +186,144 @@ def GetDatabaseDdl(self, request, context): raise NotImplementedError("Method not implemented!") def SetIamPolicy(self, request, context): - """Sets the access control policy on a database resource. + """Sets the access control policy on a database or backup resource. Replaces any existing policy. Authorization requires `spanner.databases.setIamPolicy` permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. + For backups, authorization requires `spanner.backups.setIamPolicy` + permission on [resource][google.iam.v1.SetIamPolicyRequest.resource]. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def GetIamPolicy(self, request, context): - """Gets the access control policy for a database resource. - Returns an empty policy if a database exists but does - not have a policy set. + """Gets the access control policy for a database or backup resource. + Returns an empty policy if a database or backup exists but does not have a + policy set. 
Authorization requires `spanner.databases.getIamPolicy` permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. + For backups, authorization requires `spanner.backups.getIamPolicy` + permission on [resource][google.iam.v1.GetIamPolicyRequest.resource]. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") raise NotImplementedError("Method not implemented!") def TestIamPermissions(self, request, context): - """Returns permissions that the caller has on the specified database resource. + """Returns permissions that the caller has on the specified database or backup + resource. Attempting this RPC on a non-existent Cloud Spanner database will result in a NOT_FOUND error if the user has `spanner.databases.list` permission on the containing Cloud Spanner instance. Otherwise returns an empty set of permissions. + Calling this method on a backup that does not exist will + result in a NOT_FOUND error if the user has + `spanner.backups.list` permission on the containing instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def CreateBackup(self, request, context): + """Starts creating a new Cloud Spanner Backup. + The returned backup [long-running operation][google.longrunning.Operation] + will have a name of the format + `projects//instances//backups//operations/` + and can be used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.spanner.admin.database.v1.CreateBackupMetadata]. The + [response][google.longrunning.Operation.response] field type is + [Backup][google.spanner.admin.database.v1.Backup], if successful. Cancelling the returned operation will stop the + creation and delete the backup. + There can be only one pending backup creation per database. 
Backup creation + of different databases can run concurrently. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def GetBackup(self, request, context): + """Gets metadata on a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def UpdateBackup(self, request, context): + """Updates a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def DeleteBackup(self, request, context): + """Deletes a pending or completed [Backup][google.spanner.admin.database.v1.Backup]. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackups(self, request, context): + """Lists completed and pending backups. + Backups returned are ordered by `create_time` in descending order, + starting from the most recent `create_time`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def RestoreDatabase(self, request, context): + """Create a new database by restoring from a completed backup. The new + database must be in the same project and in an instance with the same + instance configuration as the instance containing + the backup. The returned database [long-running + operation][google.longrunning.Operation] has a name of the format + `projects//instances//databases//operations/`, + and can be used to track the progress of the operation, and to cancel it. 
+ The [metadata][google.longrunning.Operation.metadata] field type is + [RestoreDatabaseMetadata][google.spanner.admin.database.v1.RestoreDatabaseMetadata]. + The [response][google.longrunning.Operation.response] type + is [Database][google.spanner.admin.database.v1.Database], if + successful. Cancelling the returned operation will stop the restore and + delete the database. + There can be only one database being restored into an instance at a time. + Once the restore operation completes, a new restore operation can be + initiated, without waiting for the optimize operation associated with the + first restore to complete. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListDatabaseOperations(self, request, context): + """Lists database [longrunning-operations][google.longrunning.Operation]. + A database operation has a name of the form + `projects//instances//databases//operations/`. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + `metadata.type_url` describes the type of the metadata. Operations returned + include those that have completed/failed/canceled within the last 7 days, + and pending operations. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details("Method not implemented!") + raise NotImplementedError("Method not implemented!") + + def ListBackupOperations(self, request, context): + """Lists the backup [long-running operations][google.longrunning.Operation] in + the given instance. A backup operation has a name of the form + `projects//instances//backups//operations/`. + The long-running operation + [metadata][google.longrunning.Operation.metadata] field type + `metadata.type_url` describes the type of the metadata. Operations returned + include those that have completed/failed/canceled within the last 7 days, + and pending operations. 
Operations returned are ordered by + `operation.metadata.value.progress.start_time` in descending order starting + from the most recently started operation. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details("Method not implemented!") @@ -221,6 +377,46 @@ def add_DatabaseAdminServicer_to_server(servicer, server): request_deserializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsRequest.FromString, response_serializer=google_dot_iam_dot_v1_dot_iam__policy__pb2.TestIamPermissionsResponse.SerializeToString, ), + "CreateBackup": grpc.unary_unary_rpc_method_handler( + servicer.CreateBackup, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.CreateBackupRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "GetBackup": grpc.unary_unary_rpc_method_handler( + servicer.GetBackup, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.GetBackupRequest.FromString, + response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.Backup.SerializeToString, + ), + "UpdateBackup": grpc.unary_unary_rpc_method_handler( + servicer.UpdateBackup, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.UpdateBackupRequest.FromString, + response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.Backup.SerializeToString, + ), + "DeleteBackup": grpc.unary_unary_rpc_method_handler( + servicer.DeleteBackup, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.DeleteBackupRequest.FromString, + response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString, + ), + "ListBackups": grpc.unary_unary_rpc_method_handler( + servicer.ListBackups, + 
request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupsRequest.FromString, + response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupsResponse.SerializeToString, + ), + "RestoreDatabase": grpc.unary_unary_rpc_method_handler( + servicer.RestoreDatabase, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.RestoreDatabaseRequest.FromString, + response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString, + ), + "ListDatabaseOperations": grpc.unary_unary_rpc_method_handler( + servicer.ListDatabaseOperations, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsRequest.FromString, + response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_spanner__database__admin__pb2.ListDatabaseOperationsResponse.SerializeToString, + ), + "ListBackupOperations": grpc.unary_unary_rpc_method_handler( + servicer.ListBackupOperations, + request_deserializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsRequest.FromString, + response_serializer=google_dot_cloud_dot_spanner_dot_admin_dot_database__v1_dot_proto_dot_backup__pb2.ListBackupOperationsResponse.SerializeToString, + ), } generic_handler = grpc.method_handlers_generic_handler( "google.spanner.admin.database.v1.DatabaseAdmin", rpc_method_handlers diff --git a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py index d55c007030..f00bfbbe0a 100644 --- a/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py +++ b/google/cloud/spanner_admin_instance_v1/proto/spanner_instance_admin_pb2.py @@ -1340,8 +1340,8 @@ ), 
DESCRIPTOR=_INSTANCE, __module__="google.cloud.spanner.admin.instance_v1.proto.spanner_instance_admin_pb2", - __doc__="""An isolated set of Cloud Spanner resources on which databases can be - hosted. + __doc__="""An isolated set of Cloud Spanner resources on which + databases can be hosted. Attributes: diff --git a/synth.metadata b/synth.metadata index 8abfac8f6a..df0f13a1a6 100644 --- a/synth.metadata +++ b/synth.metadata @@ -1,20 +1,20 @@ { - "updateTime": "2020-03-03T13:26:45.038429Z", + "updateTime": "2020-03-12T12:14:50.181539Z", "sources": [ { "generator": { "name": "artman", - "version": "0.47.0", - "dockerImage": "googleapis/artman@sha256:b3e50d6b8de03920b9f065bbc3d210e2ca93a043446f1fa16cdf567393c09678" + "version": "1.1.0", + "dockerImage": "googleapis/artman@sha256:f54b7644a1d2e7a37b23f5c0dfe9bba473e41c675002a507a244389e27487ca9" } }, { "git": { "name": "googleapis", "remote": "https://github.com/googleapis/googleapis.git", - "sha": "4a180bfff8a21645b3a935c2756e8d6ab18a74e0", - "internalRef": "298484782", - "log": "4a180bfff8a21645b3a935c2756e8d6ab18a74e0\nautoml/v1beta1 publish proto updates\n\nPiperOrigin-RevId: 298484782\n\n6de6e938b7df1cd62396563a067334abeedb9676\nchore: use the latest gapic-generator and protoc-java-resource-name-plugin in Bazel workspace.\n\nPiperOrigin-RevId: 298474513\n\n244ab2b83a82076a1fa7be63b7e0671af73f5c02\nAdds service config definition for bigqueryreservation v1\n\nPiperOrigin-RevId: 298455048\n\n83c6f84035ee0f80eaa44d8b688a010461cc4080\nUpdate google/api/auth.proto to make AuthProvider to have JwtLocation\n\nPiperOrigin-RevId: 297918498\n\ne9e90a787703ec5d388902e2cb796aaed3a385b4\nDialogflow weekly v2/v2beta1 library update:\n - adding get validation result\n - adding field mask override control for output audio config\nImportant updates are also posted at:\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 297671458\n\n1a2b05cc3541a5f7714529c665aecc3ea042c646\nAdding .yaml and .json config 
files.\n\nPiperOrigin-RevId: 297570622\n\ndfe1cf7be44dee31d78f78e485d8c95430981d6e\nPublish `QueryOptions` proto.\n\nIntroduced a `query_options` input in `ExecuteSqlRequest`.\n\nPiperOrigin-RevId: 297497710\n\ndafc905f71e5d46f500b41ed715aad585be062c3\npubsub: revert pull init_rpc_timeout & max_rpc_timeout back to 25 seconds and reset multiplier to 1.0\n\nPiperOrigin-RevId: 297486523\n\nf077632ba7fee588922d9e8717ee272039be126d\nfirestore: add update_transform\n\nPiperOrigin-RevId: 297405063\n\n0aba1900ffef672ec5f0da677cf590ee5686e13b\ncluster: use square brace for cross-reference\n\nPiperOrigin-RevId: 297204568\n\n5dac2da18f6325cbaed54603c43f0667ecd50247\nRestore retry params in gapic config because securitycenter has non-standard default retry params.\nRestore a few retry codes for some idempotent methods.\n\nPiperOrigin-RevId: 297196720\n\n1eb61455530252bba8b2c8d4bc9832960e5a56f6\npubsub: v1 replace IAM HTTP rules\n\nPiperOrigin-RevId: 297188590\n\n80b2d25f8d43d9d47024ff06ead7f7166548a7ba\nDialogflow weekly v2/v2beta1 library update:\n - updates to mega agent api\n - adding field mask override control for output audio config\nImportant updates are also posted at:\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 297187629\n\n0b1876b35e98f560f9c9ca9797955f020238a092\nUse an older version of protoc-docs-plugin that is compatible with the specified gapic-generator and protobuf versions.\n\nprotoc-docs-plugin >=0.4.0 (see commit https://github.com/googleapis/protoc-docs-plugin/commit/979f03ede6678c487337f3d7e88bae58df5207af) is incompatible with protobuf 3.9.1.\n\nPiperOrigin-RevId: 296986742\n\n1e47e676cddbbd8d93f19ba0665af15b5532417e\nFix: Restore a method signature for UpdateCluster\n\nPiperOrigin-RevId: 296901854\n\n7f910bcc4fc4704947ccfd3ceed015d16b9e00c2\nUpdate Dataproc v1beta2 client.\n\nPiperOrigin-RevId: 296451205\n\nde287524405a3dce124d301634731584fc0432d7\nFix: Reinstate method signatures that had been missed off some 
RPCs\nFix: Correct resource types for two fields\n\nPiperOrigin-RevId: 296435091\n\ne5bc9566ae057fb4c92f8b7e047f1c8958235b53\nDeprecate the endpoint_uris field, as it is unused.\n\nPiperOrigin-RevId: 296357191\n\n8c12e2b4dca94e12bff9f538bdac29524ff7ef7a\nUpdate Dataproc v1 client.\n\nPiperOrigin-RevId: 296336662\n\n17567c4a1ef0a9b50faa87024d66f8acbb561089\nRemoving erroneous comment, a la https://github.com/googleapis/java-speech/pull/103\n\nPiperOrigin-RevId: 296332968\n\n3eaaaf8626ce5b0c0bc7eee05e143beffa373b01\nAdd BUILD.bazel for v1 secretmanager.googleapis.com\n\nPiperOrigin-RevId: 296274723\n\ne76149c3d992337f85eeb45643106aacae7ede82\nMove securitycenter v1 to use generate from annotations.\n\nPiperOrigin-RevId: 296266862\n\n203740c78ac69ee07c3bf6be7408048751f618f8\nAdd StackdriverLoggingConfig field to Cloud Tasks v2 API.\n\nPiperOrigin-RevId: 296256388\n\ne4117d5e9ed8bbca28da4a60a94947ca51cb2083\nCreate a Bazel BUILD file for the google.actions.type export.\n\nPiperOrigin-RevId: 296212567\n\na9639a0a9854fd6e1be08bba1ac3897f4f16cb2f\nAdd secretmanager.googleapis.com v1 protos\n\nPiperOrigin-RevId: 295983266\n\nce4f4c21d9dd2bfab18873a80449b9d9851efde8\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295861722\n\ncb61d6c2d070b589980c779b68ffca617f789116\nasset: v1p1beta1 remove SearchResources and SearchIamPolicies\n\nPiperOrigin-RevId: 295855449\n\nab2685d8d3a0e191dc8aef83df36773c07cb3d06\nfix: Dataproc v1 - AutoscalingPolicy annotation\n\nThis adds the second resource name pattern to the\nAutoscalingPolicy resource.\n\nCommitter: @lukesneeringer\nPiperOrigin-RevId: 295738415\n\n8a1020bf6828f6e3c84c3014f2c51cb62b739140\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295286165\n\n5cfa105206e77670369e4b2225597386aba32985\nAdd service control related proto build rule.\n\nPiperOrigin-RevId: 295262088\n\nee4dddf805072004ab19ac94df2ce669046eec26\nmonitoring v3: Add prefix \"https://cloud.google.com/\" into the link for 
global access\ncl 295167522, get ride of synth.py hacks\n\nPiperOrigin-RevId: 295238095\n\nd9835e922ea79eed8497db270d2f9f85099a519c\nUpdate some minor docs changes about user event proto\n\nPiperOrigin-RevId: 295185610\n\n5f311e416e69c170243de722023b22f3df89ec1c\nfix: use correct PHP package name in gapic configuration\n\nPiperOrigin-RevId: 295161330\n\n6cdd74dcdb071694da6a6b5a206e3a320b62dd11\npubsub: v1 add client config annotations and retry config\n\nPiperOrigin-RevId: 295158776\n\n5169f46d9f792e2934d9fa25c36d0515b4fd0024\nAdded cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 295026522\n\n56b55aa8818cd0a532a7d779f6ef337ba809ccbd\nFix: Resource annotations for CreateTimeSeriesRequest and ListTimeSeriesRequest should refer to valid resources. TimeSeries is not a named resource.\n\nPiperOrigin-RevId: 294931650\n\n0646bc775203077226c2c34d3e4d50cc4ec53660\nRemove unnecessary languages from bigquery-related artman configuration files.\n\nPiperOrigin-RevId: 294809380\n\n8b78aa04382e3d4147112ad6d344666771bb1909\nUpdate backend.proto for schemes and protocol\n\nPiperOrigin-RevId: 294788800\n\n80b8f8b3de2359831295e24e5238641a38d8488f\nAdds artman config files for bigquerystorage endpoints v1beta2, v1alpha2, v1\n\nPiperOrigin-RevId: 294763931\n\n2c17ac33b226194041155bb5340c3f34733f1b3a\nAdd parameter to sample generated for UpdateInstance. 
Related to https://github.com/googleapis/python-redis/issues/4\n\nPiperOrigin-RevId: 294734008\n\nd5e8a8953f2acdfe96fb15e85eb2f33739623957\nMove bigquery datatransfer to gapic v2.\n\nPiperOrigin-RevId: 294703703\n\nefd36705972cfcd7d00ab4c6dfa1135bafacd4ae\nfix: Add two annotations that we missed.\n\nPiperOrigin-RevId: 294664231\n\n8a36b928873ff9c05b43859b9d4ea14cd205df57\nFix: Define the \"bigquery.googleapis.com/Table\" resource in the BigQuery Storage API (v1beta2).\n\nPiperOrigin-RevId: 294459768\n\nc7a3caa2c40c49f034a3c11079dd90eb24987047\nFix: Define the \"bigquery.googleapis.com/Table\" resource in the BigQuery Storage API (v1).\n\nPiperOrigin-RevId: 294456889\n\n5006247aa157e59118833658084345ee59af7c09\nFix: Make deprecated fields optional\nFix: Deprecate SetLoggingServiceRequest.zone in line with the comments\nFeature: Add resource name method signatures where appropriate\n\nPiperOrigin-RevId: 294383128\n\neabba40dac05c5cbe0fca3a35761b17e372036c4\nFix: C# and PHP package/namespace capitalization for BigQuery Storage v1.\n\nPiperOrigin-RevId: 294382444\n\nf8d9a858a7a55eba8009a23aa3f5cc5fe5e88dde\nfix: artman configuration file for bigtable-admin\n\nPiperOrigin-RevId: 294322616\n\n0f29555d1cfcf96add5c0b16b089235afbe9b1a9\nAPI definition for (not-yet-launched) GCS gRPC.\n\nPiperOrigin-RevId: 294321472\n\nfcc86bee0e84dc11e9abbff8d7c3529c0626f390\nfix: Bigtable Admin v2\n\nChange LRO metadata from PartialUpdateInstanceMetadata\nto UpdateInstanceMetadata. 
(Otherwise, it will not build.)\n\nPiperOrigin-RevId: 294264582\n\n6d9361eae2ebb3f42d8c7ce5baf4bab966fee7c0\nrefactor: Add annotations to Bigtable Admin v2.\n\nPiperOrigin-RevId: 294243406\n\nad7616f3fc8e123451c8b3a7987bc91cea9e6913\nFix: Resource type in CreateLogMetricRequest should use logging.googleapis.com.\nFix: ListLogEntries should have a method signature for convenience of calling it.\n\nPiperOrigin-RevId: 294222165\n\n63796fcbb08712676069e20a3e455c9f7aa21026\nFix: Remove extraneous resource definition for cloudkms.googleapis.com/CryptoKey.\n\nPiperOrigin-RevId: 294176658\n\ne7d8a694f4559201e6913f6610069cb08b39274e\nDepend on the latest gapic-generator and resource names plugin.\n\nThis fixes the very old an very annoying bug: https://github.com/googleapis/gapic-generator/pull/3087\n\nPiperOrigin-RevId: 293903652\n\n806b2854a966d55374ee26bb0cef4e30eda17b58\nfix: correct capitalization of Ruby namespaces in SecurityCenter V1p1beta1\n\nPiperOrigin-RevId: 293903613\n\n1b83c92462b14d67a7644e2980f723112472e03a\nPublish annotations and grpc service config for Logging API.\n\nPiperOrigin-RevId: 293893514\n\ne46f761cd6ec15a9e3d5ed4ff321a4bcba8e8585\nGenerate the Bazel build file for recommendengine public api\n\nPiperOrigin-RevId: 293710856\n\n68477017c4173c98addac0373950c6aa9d7b375f\nMake `language_code` optional for UpdateIntentRequest and BatchUpdateIntentsRequest.\n\nThe comments and proto annotations describe this parameter as optional.\n\nPiperOrigin-RevId: 293703548\n\n16f823f578bca4e845a19b88bb9bc5870ea71ab2\nAdd BUILD.bazel files for managedidentities API\n\nPiperOrigin-RevId: 293698246\n\n2f53fd8178c9a9de4ad10fae8dd17a7ba36133f2\nAdd v1p1beta1 config file\n\nPiperOrigin-RevId: 293696729\n\n052b274138fce2be80f97b6dcb83ab343c7c8812\nAdd source field for user event and add field behavior annotations\n\nPiperOrigin-RevId: 293693115\n\n1e89732b2d69151b1b3418fff3d4cc0434f0dded\ndatacatalog: v1beta1 add three new RPCs to gapic v1beta1 
config\n\nPiperOrigin-RevId: 293692823\n\n9c8bd09bbdc7c4160a44f1fbab279b73cd7a2337\nchange the name of AccessApproval service to AccessApprovalAdmin\n\nPiperOrigin-RevId: 293690934\n\n2e23b8fbc45f5d9e200572ca662fe1271bcd6760\nAdd ListEntryGroups method, add http bindings to support entry group tagging, and update some comments.\n\nPiperOrigin-RevId: 293666452\n\n0275e38a4ca03a13d3f47a9613aac8c8b0d3f1f2\nAdd proto_package field to managedidentities API. It is needed for APIs that still depend on artman generation.\n\nPiperOrigin-RevId: 293643323\n\n4cdfe8278cb6f308106580d70648001c9146e759\nRegenerating public protos for Data Catalog to add new Custom Type Entry feature.\n\nPiperOrigin-RevId: 293614782\n\n45d2a569ab526a1fad3720f95eefb1c7330eaada\nEnable client generation for v1 ManagedIdentities API.\n\nPiperOrigin-RevId: 293515675\n\n2c17086b77e6f3bcf04a1f65758dfb0c3da1568f\nAdd the Actions on Google common types (//google/actions/type/*).\n\nPiperOrigin-RevId: 293478245\n\n781aadb932e64a12fb6ead7cd842698d99588433\nDialogflow weekly v2/v2beta1 library update:\n- Documentation updates\nImportant updates are also posted at\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 293443396\n\ne2602608c9138c2fca24162720e67f9307c30b95\nDialogflow weekly v2/v2beta1 library update:\n- Documentation updates\nImportant updates are also posted at\nhttps://cloud.google.com/dialogflow/docs/release-notes\n\nPiperOrigin-RevId: 293442964\n\nc8aef82028d06b7992278fa9294c18570dc86c3d\nAdd cc_proto_library and cc_grpc_library targets for Bigtable protos.\n\nAlso fix indentation of cc_grpc_library targets in Spanner and IAM protos.\n\nPiperOrigin-RevId: 293440538\n\ne2faab04f4cb7f9755072330866689b1943a16e9\ncloudtasks: v2 replace non-standard retry params in gapic config v2\n\nPiperOrigin-RevId: 293424055\n\ndfb4097ea628a8470292c6590a4313aee0c675bd\nerrorreporting: v1beta1 add legacy artman config for php\n\nPiperOrigin-RevId: 
293423790\n\nb18aed55b45bfe5b62476292c72759e6c3e573c6\nasset: v1p1beta1 updated comment for `page_size` limit.\n\nPiperOrigin-RevId: 293421386\n\nc9ef36b7956d9859a2fc86ad35fcaa16958ab44f\nbazel: Refactor CI build scripts\n\nPiperOrigin-RevId: 293387911\n\na8ed9d921fdddc61d8467bfd7c1668f0ad90435c\nfix: set Ruby module name for OrgPolicy\n\nPiperOrigin-RevId: 293257997\n\n6c7d28509bd8315de8af0889688ee20099594269\nredis: v1beta1 add UpgradeInstance and connect_mode field to Instance\n\nPiperOrigin-RevId: 293242878\n\nae0abed4fcb4c21f5cb67a82349a049524c4ef68\nredis: v1 add connect_mode field to Instance\n\nPiperOrigin-RevId: 293241914\n\n3f7a0d29b28ee9365771da2b66edf7fa2b4e9c56\nAdds service config definition for bigqueryreservation v1beta1\n\nPiperOrigin-RevId: 293234418\n\n0c88168d5ed6fe353a8cf8cbdc6bf084f6bb66a5\naddition of BUILD & configuration for accessapproval v1\n\nPiperOrigin-RevId: 293219198\n\n39bedc2e30f4778ce81193f6ba1fec56107bcfc4\naccessapproval: v1 publish protos\n\nPiperOrigin-RevId: 293167048\n\n69d9945330a5721cd679f17331a78850e2618226\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080182\n\nf6a1a6b417f39694275ca286110bc3c1ca4db0dc\nAdd file-level `Session` resource definition\n\nPiperOrigin-RevId: 293080178\n\n29d40b78e3dc1579b0b209463fbcb76e5767f72a\nExpose managedidentities/v1beta1/ API for client library usage.\n\nPiperOrigin-RevId: 292979741\n\na22129a1fb6e18056d576dfb7717aef74b63734a\nExpose managedidentities/v1/ API for client library usage.\n\nPiperOrigin-RevId: 292968186\n\n" + "sha": "34a5450c591b6be3d6566f25ac31caa5211b2f3f", + "internalRef": "300474272", + "log": "34a5450c591b6be3d6566f25ac31caa5211b2f3f\nIncreases the default timeout from 20s to 30s for MetricService\n\nPiperOrigin-RevId: 300474272\n\n5d8bffe87cd01ba390c32f1714230e5a95d5991d\nfeat: use the latest gapic-generator in WORKSPACE for bazel build.\n\nPiperOrigin-RevId: 300461878\n\nd631c651e3bcfac5d371e8560c27648f7b3e2364\nUpdated the GAPIC configs to 
include parameters for Backups APIs.\n\nPiperOrigin-RevId: 300443402\n\n678afc7055c1adea9b7b54519f3bdb228013f918\nAdding Game Servers v1beta API.\n\nPiperOrigin-RevId: 300433218\n\n80d2bd2c652a5e213302041b0620aff423132589\nEnable proto annotation and gapic v2 for talent API.\n\nPiperOrigin-RevId: 300393997\n\n85e454be7a353f7fe1bf2b0affb753305785b872\ndocs(google/maps/roads): remove mention of nonexported api\n\nPiperOrigin-RevId: 300367734\n\nbf839ae632e0f263a729569e44be4b38b1c85f9c\nAdding protocol buffer annotations and updated config info for v1 and v2.\n\nPiperOrigin-RevId: 300276913\n\n309b899ca18a4c604bce63882a161d44854da549\nPublish `Backup` APIs and protos.\n\nPiperOrigin-RevId: 300246038\n\neced64c3f122421350b4aca68a28e89121d20db8\nadd PHP client libraries\n\nPiperOrigin-RevId: 300193634\n\n7727af0e39df1ae9ad715895c8576d7b65cf6c6d\nfeat: use the latest gapic-generator and protoc-java-resource-name-plugin in googleapis/WORKSPACE.\n\nPiperOrigin-RevId: 300188410\n\n2a25aa351dd5b5fe14895266aff5824d90ce757b\nBreaking change: remove the ProjectOrTenant resource and its references.\n\nPiperOrigin-RevId: 300182152\n\na499dbb28546379415f51803505cfb6123477e71\nUpdate web risk v1 gapic config and BUILD file.\n\nPiperOrigin-RevId: 300152177\n\n52701da10fec2a5f9796e8d12518c0fe574488fe\nFix: apply appropriate namespace/package options for C#, PHP and Ruby.\n\nPiperOrigin-RevId: 300123508\n\n365c029b8cdb63f7751b92ab490f1976e616105c\nAdd CC targets to the kms protos.\n\nThese are needed by go/tink.\n\nPiperOrigin-RevId: 300038469\n\n4ba9aa8a4a1413b88dca5a8fa931824ee9c284e6\nExpose logo recognition API proto for GA.\n\nPiperOrigin-RevId: 299971671\n\n1c9fc2c9e03dadf15f16b1c4f570955bdcebe00e\nAdding ruby_package option to accessapproval.proto for the Ruby client libraries generation.\n\nPiperOrigin-RevId: 299955924\n\n1cc6f0a7bfb147e6f2ede911d9b01e7a9923b719\nbuild(google/maps/routes): generate api clients\n\nPiperOrigin-RevId: 
299955905\n\n29a47c965aac79e3fe8e3314482ca0b5967680f0\nIncrease timeout to 1hr for method `dropRange` in bigtable/admin/v2, which is\nsynced with the timeout setting in gapic_yaml.\n\nPiperOrigin-RevId: 299917154\n\n8f631c4c70a60a9c7da3749511ee4ad432b62898\nbuild(google/maps/roads/v1op): move go to monorepo pattern\n\nPiperOrigin-RevId: 299885195\n\nd66816518844ebbf63504c9e8dfc7133921dd2cd\nbuild(google/maps/roads/v1op): Add bazel build files to generate clients.\n\nPiperOrigin-RevId: 299851148\n\naf7dff701fabe029672168649c62356cf1bb43d0\nAdd LogPlayerReports and LogImpressions to Playable Locations service\n\nPiperOrigin-RevId: 299724050\n\nb6927fca808f38df32a642c560082f5bf6538ced\nUpdate BigQuery Connection API v1beta1 proto: added credential to CloudSqlProperties.\n\nPiperOrigin-RevId: 299503150\n\n91e1fb5ef9829c0c7a64bfa5bde330e6ed594378\nchore: update protobuf (protoc) version to 3.11.2\n\nPiperOrigin-RevId: 299404145\n\n30e36b4bee6749c4799f4fc1a51cc8f058ba167d\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 299399890\n\nffbb493674099f265693872ae250711b2238090c\nfeat: cloudbuild/v1 add new fields and annotate OUTPUT_OUT fields.\n\nPiperOrigin-RevId: 299397780\n\nbc973a15818e00c19e121959832676e9b7607456\nbazel: Fix broken common dependency\n\nPiperOrigin-RevId: 299397431\n\n71094a343e3b962e744aa49eb9338219537474e4\nchore: bigtable/admin/v2 publish retry config\n\nPiperOrigin-RevId: 299391875\n\n8f488efd7bda33885cb674ddd023b3678c40bd82\nfeat: Migrate logging to GAPIC v2; release new features.\n\nIMPORTANT: This is a breaking change for client libraries\nin all languages.\n\nCommitter: @lukesneeringer, @jskeet\nPiperOrigin-RevId: 299370279\n\n007605bf9ad3a1fd775014ebefbf7f1e6b31ee71\nUpdate API for bigqueryreservation v1beta1.\n- Adds flex capacity commitment plan to CapacityCommitment.\n- Adds methods for getting and updating BiReservations.\n- Adds methods for updating/splitting/merging CapacityCommitments.\n\nPiperOrigin-RevId: 
299368059\n\nf0b581b5bdf803e45201ecdb3688b60e381628a8\nfix: recommendationengine/v1beta1 update some comments\n\nPiperOrigin-RevId: 299181282\n\n10e9a0a833dc85ff8f05b2c67ebe5ac785fe04ff\nbuild: add generated BUILD file for Routes Preferred API\n\nPiperOrigin-RevId: 299164808\n\n86738c956a8238d7c77f729be78b0ed887a6c913\npublish v1p1beta1: update with absolute address in comments\n\nPiperOrigin-RevId: 299152383\n\n73d9f2ad4591de45c2e1f352bc99d70cbd2a6d95\npublish v1: update with absolute address in comments\n\nPiperOrigin-RevId: 299147194\n\nd2158f24cb77b0b0ccfe68af784c6a628705e3c6\npublish v1beta2: update with absolute address in comments\n\nPiperOrigin-RevId: 299147086\n\n7fca61292c11b4cd5b352cee1a50bf88819dd63b\npublish v1p2beta1: update with absolute address in comments\n\nPiperOrigin-RevId: 299146903\n\n583b7321624736e2c490e328f4b1957335779295\npublish v1p3beta1: update with absolute address in comments\n\nPiperOrigin-RevId: 299146674\n\n638253bf86d1ce1c314108a089b7351440c2f0bf\nfix: add java_multiple_files option for automl text_sentiment.proto\n\nPiperOrigin-RevId: 298971070\n\n373d655703bf914fb8b0b1cc4071d772bac0e0d1\nUpdate Recs AI Beta public bazel file\n\nPiperOrigin-RevId: 298961623\n\ndcc5d00fc8a8d8b56f16194d7c682027b2c66a3b\nfix: add java_multiple_files option for automl classification.proto\n\nPiperOrigin-RevId: 298953301\n\na3f791827266f3496a6a5201d58adc4bb265c2a3\nchore: automl/v1 publish annotations and retry config\n\nPiperOrigin-RevId: 298942178\n\n01c681586d8d6dbd60155289b587aee678530bd9\nMark return_immediately in PullRequest deprecated.\n\nPiperOrigin-RevId: 298893281\n\nc9f5e9c4bfed54bbd09227e990e7bded5f90f31c\nRemove out of date documentation for predicate support on the Storage API\n\nPiperOrigin-RevId: 298883309\n\nfd5b3b8238d783b04692a113ffe07c0363f5de0f\ngenerate webrisk v1 proto\n\nPiperOrigin-RevId: 298847934\n\n541b1ded4abadcc38e8178680b0677f65594ea6f\nUpdate cloud asset api v1p4beta1.\n\nPiperOrigin-RevId: 
298686266\n\nc0d171acecb4f5b0bfd2c4ca34fc54716574e300\n Updated to include the Notification v1 API.\n\nPiperOrigin-RevId: 298652775\n\n2346a9186c0bff2c9cc439f2459d558068637e05\nAdd Service Directory v1beta1 protos and configs\n\nPiperOrigin-RevId: 298625638\n\na78ed801b82a5c6d9c5368e24b1412212e541bb7\nPublishing v3 protos and configs.\n\nPiperOrigin-RevId: 298607357\n\n" } }, { diff --git a/tests/unit/gapic/v1/test_database_admin_client_v1.py b/tests/unit/gapic/v1/test_database_admin_client_v1.py index d828f8ae1c..dec787ae89 100644 --- a/tests/unit/gapic/v1/test_database_admin_client_v1.py +++ b/tests/unit/gapic/v1/test_database_admin_client_v1.py @@ -22,11 +22,13 @@ from google.rpc import status_pb2 from google.cloud import spanner_admin_database_v1 +from google.cloud.spanner_admin_database_v1.proto import backup_pb2 from google.cloud.spanner_admin_database_v1.proto import spanner_database_admin_pb2 from google.iam.v1 import iam_policy_pb2 from google.iam.v1 import policy_pb2 from google.longrunning import operations_pb2 from google.protobuf import empty_pb2 +from google.protobuf import field_mask_pb2 class MultiCallableStub(object): @@ -412,6 +414,386 @@ def test_test_iam_permissions_exception(self): with pytest.raises(CustomException): client.test_iam_permissions(resource, permissions) + def test_create_backup(self): + # Setup Expected Response + database = "database1789464955" + name = "name3373707" + size_bytes = 1796325715 + expected_response = { + "database": database, + "name": name, + "size_bytes": size_bytes, + } + expected_response = backup_pb2.Backup(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_create_backup", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = backup_pb2.CreateBackupRequest( + parent=parent, backup_id=backup_id, backup=backup + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_create_backup_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + name="operations/test_create_backup_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + backup_id = "backupId1355353272" + backup = {} + + response = client.create_backup(parent, backup_id, backup) + exception = response.exception() + assert exception.errors[0] == error + + def test_get_backup(self): + # Setup Expected Response + database = "database1789464955" + name_2 = "name2-1052831874" + size_bytes = 1796325715 + expected_response = { + "database": database, + "name": name_2, + "size_bytes": size_bytes, + } + expected_response = backup_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") + + response = 
client.get_backup(name) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = backup_pb2.GetBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_get_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") + + with pytest.raises(CustomException): + client.get_backup(name) + + def test_update_backup(self): + # Setup Expected Response + database = "database1789464955" + name = "name3373707" + size_bytes = 1796325715 + expected_response = { + "database": database, + "name": name, + "size_bytes": size_bytes, + } + expected_response = backup_pb2.Backup(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + backup = {} + update_mask = {} + + response = client.update_backup(backup, update_mask) + assert expected_response == response + + assert len(channel.requests) == 1 + expected_request = backup_pb2.UpdateBackupRequest( + backup=backup, update_mask=update_mask + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_update_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = 
spanner_admin_database_v1.DatabaseAdminClient() + + # Setup request + backup = {} + update_mask = {} + + with pytest.raises(CustomException): + client.update_backup(backup, update_mask) + + def test_delete_backup(self): + channel = ChannelStub() + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") + + client.delete_backup(name) + + assert len(channel.requests) == 1 + expected_request = backup_pb2.DeleteBackupRequest(name=name) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_delete_backup_exception(self): + # Mock the API response + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup request + name = client.backup_path("[PROJECT]", "[INSTANCE]", "[BACKUP]") + + with pytest.raises(CustomException): + client.delete_backup(name) + + def test_list_backups(self): + # Setup Expected Response + next_page_token = "" + backups_element = {} + backups = [backups_element] + expected_response = {"next_page_token": next_page_token, "backups": backups} + expected_response = backup_pb2.ListBackupsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_backups(parent) + resources = list(paged_list_response) + assert len(resources) 
== 1 + + assert expected_response.backups[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = backup_pb2.ListBackupsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_backups_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_backups(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_restore_database(self): + # Setup Expected Response + name = "name3373707" + expected_response = {"name": name} + expected_response = spanner_database_admin_pb2.Database(**expected_response) + operation = operations_pb2.Operation( + name="operations/test_restore_database", done=True + ) + operation.response.Pack(expected_response) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + database_id = "databaseId816491103" + + response = client.restore_database(parent, database_id) + result = response.result() + assert expected_response == result + + assert len(channel.requests) == 1 + expected_request = spanner_database_admin_pb2.RestoreDatabaseRequest( + parent=parent, database_id=database_id + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_restore_database_exception(self): + # Setup Response + error = status_pb2.Status() + operation = operations_pb2.Operation( + 
name="operations/test_restore_database_exception", done=True + ) + operation.error.CopyFrom(error) + + # Mock the API response + channel = ChannelStub(responses=[operation]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + database_id = "databaseId816491103" + + response = client.restore_database(parent, database_id) + exception = response.exception() + assert exception.errors[0] == error + + def test_list_database_operations(self): + # Setup Expected Response + next_page_token = "" + operations_element = {} + operations = [operations_element] + expected_response = { + "next_page_token": next_page_token, + "operations": operations, + } + expected_response = spanner_database_admin_pb2.ListDatabaseOperationsResponse( + **expected_response + ) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_database_operations(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.operations[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = spanner_database_admin_pb2.ListDatabaseOperationsRequest( + parent=parent + ) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_database_operations_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value 
= channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_database_operations(parent) + with pytest.raises(CustomException): + list(paged_list_response) + + def test_list_backup_operations(self): + # Setup Expected Response + next_page_token = "" + operations_element = {} + operations = [operations_element] + expected_response = { + "next_page_token": next_page_token, + "operations": operations, + } + expected_response = backup_pb2.ListBackupOperationsResponse(**expected_response) + + # Mock the API response + channel = ChannelStub(responses=[expected_response]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup Request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_backup_operations(parent) + resources = list(paged_list_response) + assert len(resources) == 1 + + assert expected_response.operations[0] == resources[0] + + assert len(channel.requests) == 1 + expected_request = backup_pb2.ListBackupOperationsRequest(parent=parent) + actual_request = channel.requests[0][1] + assert expected_request == actual_request + + def test_list_backup_operations_exception(self): + channel = ChannelStub(responses=[CustomException()]) + patch = mock.patch("google.api_core.grpc_helpers.create_channel") + with patch as create_channel: + create_channel.return_value = channel + client = spanner_admin_database_v1.DatabaseAdminClient() + + # Setup request + parent = client.instance_path("[PROJECT]", "[INSTANCE]") + + paged_list_response = client.list_backup_operations(parent) + with pytest.raises(CustomException): + list(paged_list_response) + def test_list_databases(self): # Setup Expected Response next_page_token = "" From 
39288e784826c5accca71096be11f99ad7f930f4 Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Fri, 13 Mar 2020 17:35:03 +1100 Subject: [PATCH 12/14] feat: add support for backups (#35) * feat: implement backup support * Apply suggestions from code review Co-Authored-By: skuruppu * refactor restore to use source Co-authored-by: larkee Co-authored-by: skuruppu --- google/cloud/spanner_v1/backup.py | 275 +++++++++++++ google/cloud/spanner_v1/database.py | 116 ++++++ google/cloud/spanner_v1/instance.py | 167 ++++++++ tests/unit/test_backup.py | 590 ++++++++++++++++++++++++++++ tests/unit/test_database.py | 251 +++++++++++- tests/unit/test_instance.py | 327 +++++++++++++++ 6 files changed, 1725 insertions(+), 1 deletion(-) create mode 100644 google/cloud/spanner_v1/backup.py create mode 100644 tests/unit/test_backup.py diff --git a/google/cloud/spanner_v1/backup.py b/google/cloud/spanner_v1/backup.py new file mode 100644 index 0000000000..2aaa1c0f5c --- /dev/null +++ b/google/cloud/spanner_v1/backup.py @@ -0,0 +1,275 @@ +# Copyright 2020 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""User friendly container for Cloud Spanner Backup.""" + +import re + +from google.cloud._helpers import _datetime_to_pb_timestamp, _pb_timestamp_to_datetime +from google.cloud.exceptions import NotFound + +from google.cloud.spanner_admin_database_v1.gapic import enums +from google.cloud.spanner_v1._helpers import _metadata_with_prefix + +_BACKUP_NAME_RE = re.compile( + r"^projects/(?P<project>[^/]+)/" + r"instances/(?P<instance_id>[a-z][-a-z0-9]*)/" + r"backups/(?P<backup_id>[a-z][a-z0-9_\-]*[a-z0-9])$" +) + + +class Backup(object): + """Representation of a Cloud Spanner Backup. + + We can use a :class:`Backup` to: + + * :meth:`create` the backup + * :meth:`update` the backup + * :meth:`delete` the backup + + :type backup_id: str + :param backup_id: The ID of the backup. + + :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` + :param instance: The instance that owns the backup. + + :type database: str + :param database: (Optional) The URI of the database that the backup is + for. Required if the create method needs to be called. + + :type expire_time: :class:`datetime.datetime` + :param expire_time: (Optional) The expire time that will be used to + create the backup. Required if the create method + needs to be called. + """ + + def __init__(self, backup_id, instance, database="", expire_time=None): + self.backup_id = backup_id + self._instance = instance + self._database = database + self._expire_time = expire_time + self._create_time = None + self._size_bytes = None + self._state = None + self._referencing_databases = None + + @property + def name(self): + """Backup name used in requests. + + The backup name is of the form + + ``"projects/../instances/../backups/{backup_id}"`` + + :rtype: str + :returns: The backup name. + """ + return self._instance.name + "/backups/" + self.backup_id + + @property + def database(self): + """Database name used in requests. 
+ + The database name is of the form + + ``"projects/../instances/../backups/{backup_id}"`` + + :rtype: str + :returns: The database name. + """ + return self._database + + @property + def expire_time(self): + """Expire time used in creation requests. + + :rtype: :class:`datetime.datetime` + :returns: a datetime object representing the expire time of + this backup + """ + return self._expire_time + + @property + def create_time(self): + """Create time of this backup. + + :rtype: :class:`datetime.datetime` + :returns: a datetime object representing the create time of + this backup + """ + return self._create_time + + @property + def size_bytes(self): + """Size of this backup in bytes. + + :rtype: int + :returns: the number size of this backup measured in bytes + """ + return self._size_bytes + + @property + def state(self): + """State of this backup. + + :rtype: :class:`~google.cloud.spanner_admin_database_v1.gapic.enums.Backup.State` + :returns: an enum describing the state of the backup + """ + return self._state + + @property + def referencing_databases(self): + """List of databases referencing this backup. + + :rtype: list of strings + :returns: a list of database path strings which specify the databases still + referencing this backup + """ + return self._referencing_databases + + @classmethod + def from_pb(cls, backup_pb, instance): + """Create an instance of this class from a protobuf message. + + :type backup_pb: :class:`~google.spanner.admin.database.v1.Backup` + :param backup_pb: A backup protobuf object. + + :type instance: :class:`~google.cloud.spanner_v1.instance.Instance` + :param instance: The instance that owns the backup. + + :rtype: :class:`Backup` + :returns: The backup parsed from the protobuf response. + :raises ValueError: + if the backup name does not match the expected format or if + the parsed project ID does not match the project ID on the + instance's client, or if the parsed instance ID does not match + the instance's ID. 
+ """ + match = _BACKUP_NAME_RE.match(backup_pb.name) + if match is None: + raise ValueError( + "Backup protobuf name was not in the expected format.", backup_pb.name + ) + if match.group("project") != instance._client.project: + raise ValueError( + "Project ID on backup does not match the project ID" + "on the instance's client" + ) + instance_id = match.group("instance_id") + if instance_id != instance.instance_id: + raise ValueError( + "Instance ID on database does not match the instance ID" + "on the instance" + ) + backup_id = match.group("backup_id") + return cls(backup_id, instance) + + def create(self): + """Create this backup within its instance. + + :rtype: :class:`~google.api_core.operation.Operation` + :returns: a future used to poll the status of the create request + :raises Conflict: if the backup already exists + :raises NotFound: if the instance owning the backup does not exist + :raises BadRequest: if the database or expire_time values are invalid + or expire_time is not set + """ + if not self._expire_time: + raise ValueError("expire_time not set") + if not self._database: + raise ValueError("database not set") + api = self._instance._client.database_admin_api + metadata = _metadata_with_prefix(self.name) + backup = { + "database": self._database, + "expire_time": _datetime_to_pb_timestamp(self.expire_time), + } + + future = api.create_backup( + self._instance.name, self.backup_id, backup, metadata=metadata + ) + return future + + def exists(self): + """Test whether this backup exists. + + :rtype: bool + :returns: True if the backup exists, else False. + """ + api = self._instance._client.database_admin_api + metadata = _metadata_with_prefix(self.name) + + try: + api.get_backup(self.name, metadata=metadata) + except NotFound: + return False + return True + + def reload(self): + """Reload this backup. + + Refresh the stored backup properties. 
+ + :raises NotFound: if the backup does not exist + """ + api = self._instance._client.database_admin_api + metadata = _metadata_with_prefix(self.name) + pb = api.get_backup(self.name, metadata=metadata) + self._database = pb.database + self._expire_time = _pb_timestamp_to_datetime(pb.expire_time) + self._create_time = _pb_timestamp_to_datetime(pb.create_time) + self._size_bytes = pb.size_bytes + self._state = enums.Backup.State(pb.state) + self._referencing_databases = pb.referencing_databases + + def update_expire_time(self, new_expire_time): + """Update the expire time of this backup. + + :type new_expire_time: :class:`datetime.datetime` + :param new_expire_time: the new expire time timestamp + """ + api = self._instance._client.database_admin_api + metadata = _metadata_with_prefix(self.name) + backup_update = { + "name": self.name, + "expire_time": _datetime_to_pb_timestamp(new_expire_time), + } + update_mask = {"paths": ["expire_time"]} + api.update_backup(backup_update, update_mask, metadata=metadata) + self._expire_time = new_expire_time + + def is_ready(self): + """Test whether this backup is ready for use. + + :rtype: bool + :returns: True if the backup state is READY, else False. 
+ """ + return self.state == enums.Backup.State.READY + + def delete(self): + """Delete this backup.""" + api = self._instance._client.database_admin_api + metadata = _metadata_with_prefix(self.name) + api.delete_backup(self.name, metadata=metadata) + + +class BackupInfo(object): + def __init__(self, backup, create_time, source_database): + self.backup = backup + self.create_time = _pb_timestamp_to_datetime(create_time) + self.source_database = source_database + + @classmethod + def from_pb(cls, pb): + return cls(pb.backup, pb.create_time, pb.source_database) diff --git a/google/cloud/spanner_v1/database.py b/google/cloud/spanner_v1/database.py index 9ee046e094..5785953bd7 100644 --- a/google/cloud/spanner_v1/database.py +++ b/google/cloud/spanner_v1/database.py @@ -30,11 +30,13 @@ import six # pylint: disable=ungrouped-imports +from google.cloud.spanner_admin_database_v1.gapic import enums from google.cloud.spanner_v1._helpers import ( _make_value_pb, _merge_query_options, _metadata_with_prefix, ) +from google.cloud.spanner_v1.backup import BackupInfo from google.cloud.spanner_v1.batch import Batch from google.cloud.spanner_v1.gapic.spanner_client import SpannerClient from google.cloud.spanner_v1.gapic.transports import spanner_grpc_transport @@ -49,6 +51,7 @@ TransactionSelector, TransactionOptions, ) +from google.cloud._helpers import _pb_timestamp_to_datetime # pylint: enable=ungrouped-imports @@ -62,6 +65,7 @@ r"databases/(?P[a-z][a-z0-9_\-]*[a-z0-9])$" ) +_DATABASE_METADATA_FILTER = "name:{0}/operations/" _RESOURCE_ROUTING_PERMISSIONS_WARNING = ( "The client library attempted to connect to an endpoint closer to your Cloud Spanner data " @@ -110,6 +114,9 @@ def __init__(self, database_id, instance, ddl_statements=(), pool=None): self._instance = instance self._ddl_statements = _check_ddl_statements(ddl_statements) self._local = threading.local() + self._state = None + self._create_time = None + self._restore_info = None if pool is None: pool = BurstyPool() @@ 
-179,6 +186,34 @@ def name(self): """ return self._instance.name + "/databases/" + self.database_id + @property + def state(self): + """State of this database. + + :rtype: :class:`~google.cloud.spanner_admin_database_v1.gapic.enums.Database.State` + :returns: an enum describing the state of the database + """ + return self._state + + @property + def create_time(self): + """Create time of this database. + + :rtype: :class:`datetime.datetime` + :returns: a datetime object representing the create time of + this database + """ + return self._create_time + + @property + def restore_info(self): + """Restore info for this database. + + :rtype: :class:`~google.cloud.spanner_v1.database.RestoreInfo` + :returns: an object representing the restore info for this database + """ + return self._restore_info + @property def ddl_statements(self): """DDL Statements used to define database schema. @@ -316,6 +351,10 @@ def reload(self): metadata = _metadata_with_prefix(self.name) response = api.get_database_ddl(self.name, metadata=metadata) self._ddl_statements = tuple(response.statements) + response = api.get_database(self.name, metadata=metadata) + self._state = enums.Database.State(response.state) + self._create_time = _pb_timestamp_to_datetime(response.create_time) + self._restore_info = response.restore_info def update_ddl(self, ddl_statements, operation_id=""): """Update DDL for this database. @@ -521,6 +560,73 @@ def run_in_transaction(self, func, *args, **kw): finally: self._local.transaction_running = False + def restore(self, source): + """Restore from a backup to this database. + + :type backup: :class:`~google.cloud.spanner_v1.backup.Backup` + :param backup: the path of the backup being restored from. 
+ + :rtype: :class:'~google.api_core.operation.Operation` + :returns: a future used to poll the status of the create request + :raises Conflict: if the database already exists + :raises NotFound: + if the instance owning the database does not exist, or + if the backup being restored from does not exist + :raises ValueError: if backup is not set + """ + if source is None: + raise ValueError("Restore source not specified") + api = self._instance._client.database_admin_api + metadata = _metadata_with_prefix(self.name) + future = api.restore_database( + self._instance.name, self.database_id, backup=source.name, metadata=metadata + ) + return future + + def is_ready(self): + """Test whether this database is ready for use. + + :rtype: bool + :returns: True if the database state is READY_OPTIMIZING or READY, else False. + """ + return ( + self.state == enums.Database.State.READY_OPTIMIZING + or self.state == enums.Database.State.READY + ) + + def is_optimized(self): + """Test whether this database has finished optimizing. + + :rtype: bool + :returns: True if the database state is READY, else False. + """ + return self.state == enums.Database.State.READY + + def list_database_operations(self, filter_="", page_size=None): + """List database operations for the database. + + :type filter_: str + :param filter_: + Optional. A string specifying a filter for which database operations to list. + + :type page_size: int + :param page_size: + Optional. The maximum number of operations in each page of results from this + request. Non-positive values are ignored. Defaults to a sensible value set + by the API. + + :type: :class:`~google.api_core.page_iterator.Iterator` + :returns: + Iterator of :class:`~google.api_core.operation.Operation` + resources within the current instance. 
+ """ + database_filter = _DATABASE_METADATA_FILTER.format(self.name) + if filter_: + database_filter = "({0}) AND ({1})".format(filter_, database_filter) + return self._instance.list_database_operations( + filter_=database_filter, page_size=page_size + ) + class BatchCheckout(object): """Context manager for using a batch from a database. @@ -906,3 +1012,13 @@ def _check_ddl_statements(value): raise ValueError("Do not pass a 'CREATE DATABASE' statement") return tuple(value) + + +class RestoreInfo(object): + def __init__(self, source_type, backup_info): + self.source_type = enums.RestoreSourceType(source_type) + self.backup_info = BackupInfo.from_pb(backup_info) + + @classmethod + def from_pb(cls, pb): + return cls(pb.source_type, pb.backup_info) diff --git a/google/cloud/spanner_v1/instance.py b/google/cloud/spanner_v1/instance.py index 05e596622c..4a14032c13 100644 --- a/google/cloud/spanner_v1/instance.py +++ b/google/cloud/spanner_v1/instance.py @@ -14,16 +14,23 @@ """User friendly container for Cloud Spanner Instance.""" +import google.api_core.operation import re from google.cloud.spanner_admin_instance_v1.proto import ( spanner_instance_admin_pb2 as admin_v1_pb2, ) +from google.cloud.spanner_admin_database_v1.proto import ( + backup_pb2, + spanner_database_admin_pb2, +) +from google.protobuf.empty_pb2 import Empty from google.protobuf.field_mask_pb2 import FieldMask # pylint: disable=ungrouped-imports from google.cloud.exceptions import NotFound from google.cloud.spanner_v1._helpers import _metadata_with_prefix +from google.cloud.spanner_v1.backup import Backup from google.cloud.spanner_v1.database import Database from google.cloud.spanner_v1.pool import BurstyPool @@ -36,6 +43,33 @@ DEFAULT_NODE_COUNT = 1 +_OPERATION_METADATA_MESSAGES = ( + backup_pb2.Backup, + backup_pb2.CreateBackupMetadata, + spanner_database_admin_pb2.CreateDatabaseMetadata, + spanner_database_admin_pb2.Database, + spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata, + 
spanner_database_admin_pb2.RestoreDatabaseMetadata, + spanner_database_admin_pb2.UpdateDatabaseDdlMetadata, +) + +_OPERATION_METADATA_TYPES = { + "type.googleapis.com/{}".format(message.DESCRIPTOR.full_name): message + for message in _OPERATION_METADATA_MESSAGES +} + +_OPERATION_RESPONSE_TYPES = { + backup_pb2.CreateBackupMetadata: backup_pb2.Backup, + spanner_database_admin_pb2.CreateDatabaseMetadata: spanner_database_admin_pb2.Database, + spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata: spanner_database_admin_pb2.Database, + spanner_database_admin_pb2.RestoreDatabaseMetadata: spanner_database_admin_pb2.Database, + spanner_database_admin_pb2.UpdateDatabaseDdlMetadata: Empty, +} + + +def _type_string_to_type_pb(type_string): + return _OPERATION_METADATA_TYPES.get(type_string, Empty) + class Instance(object): """Representation of a Cloud Spanner Instance. @@ -379,3 +413,136 @@ def _item_to_database(self, iterator, database_pb): :returns: The next database in the page. """ return Database.from_pb(database_pb, self, pool=BurstyPool()) + + def backup(self, backup_id, database="", expire_time=None): + """Factory to create a backup within this instance. + + :type backup_id: str + :param backup_id: The ID of the backup. + + :type database: :class:`~google.cloud.spanner_v1.database.Database` + :param database: + Optional. The database that will be used when creating the backup. + Required if the create method needs to be called. + + :type expire_time: :class:`datetime.datetime` + :param expire_time: + Optional. The expire time that will be used when creating the backup. + Required if the create method needs to be called. + """ + try: + return Backup( + backup_id, self, database=database.name, expire_time=expire_time + ) + except AttributeError: + return Backup(backup_id, self, database=database, expire_time=expire_time) + + def list_backups(self, filter_="", page_size=None): + """List backups for the instance. 
+ + :type filter_: str + :param filter_: + Optional. A string specifying a filter for which backups to list. + + :type page_size: int + :param page_size: + Optional. The maximum number of databases in each page of results + from this request. Non-positive values are ignored. Defaults to a + sensible value set by the API. + + :rtype: :class:`~google.api_core.page_iterator.Iterator` + :returns: + Iterator of :class:`~google.cloud.spanner_v1.backup.Backup` + resources within the current instance. + """ + metadata = _metadata_with_prefix(self.name) + page_iter = self._client.database_admin_api.list_backups( + self.name, filter_, page_size=page_size, metadata=metadata + ) + page_iter.item_to_value = self._item_to_backup + return page_iter + + def _item_to_backup(self, iterator, backup_pb): + """Convert a backup protobuf to the native object. + + :type iterator: :class:`~google.api_core.page_iterator.Iterator` + :param iterator: The iterator that is currently in use. + + :type backup_pb: :class:`~google.spanner.admin.database.v1.Backup` + :param backup_pb: A backup returned from the API. + + :rtype: :class:`~google.cloud.spanner_v1.backup.Backup` + :returns: The next backup in the page. + """ + return Backup.from_pb(backup_pb, self) + + def list_backup_operations(self, filter_="", page_size=None): + """List backup operations for the instance. + + :type filter_: str + :param filter_: + Optional. A string specifying a filter for which backup operations + to list. + + :type page_size: int + :param page_size: + Optional. The maximum number of operations in each page of results + from this request. Non-positive values are ignored. Defaults to a + sensible value set by the API. + + :rtype: :class:`~google.api_core.page_iterator.Iterator` + :returns: + Iterator of :class:`~google.api_core.operation.Operation` + resources within the current instance. 
+ """ + metadata = _metadata_with_prefix(self.name) + page_iter = self._client.database_admin_api.list_backup_operations( + self.name, filter_, page_size=page_size, metadata=metadata + ) + page_iter.item_to_value = self._item_to_operation + return page_iter + + def list_database_operations(self, filter_="", page_size=None): + """List database operations for the instance. + + :type filter_: str + :param filter_: + Optional. A string specifying a filter for which database operations + to list. + + :type page_size: int + :param page_size: + Optional. The maximum number of operations in each page of results + from this request. Non-positive values are ignored. Defaults to a + sensible value set by the API. + + :rtype: :class:`~google.api_core.page_iterator.Iterator` + :returns: + Iterator of :class:`~google.api_core.operation.Operation` + resources within the current instance. + """ + metadata = _metadata_with_prefix(self.name) + page_iter = self._client.database_admin_api.list_database_operations( + self.name, filter_, page_size=page_size, metadata=metadata + ) + page_iter.item_to_value = self._item_to_operation + return page_iter + + def _item_to_operation(self, iterator, operation_pb): + """Convert an operation protobuf to the native object. + + :type iterator: :class:`~google.api_core.page_iterator.Iterator` + :param iterator: The iterator that is currently in use. + + :type operation_pb: :class:`~google.longrunning.operations.Operation` + :param operation_pb: An operation returned from the API. + + :rtype: :class:`~google.api_core.operation.Operation` + :returns: The next operation in the page. 
+ """ + operations_client = self._client.database_admin_api.transport._operations_client + metadata_type = _type_string_to_type_pb(operation_pb.metadata.type_url) + response_type = _OPERATION_RESPONSE_TYPES[metadata_type] + return google.api_core.operation.from_gapic( + operation_pb, operations_client, response_type, metadata_type=metadata_type + ) diff --git a/tests/unit/test_backup.py b/tests/unit/test_backup.py new file mode 100644 index 0000000000..a3b559b763 --- /dev/null +++ b/tests/unit/test_backup.py @@ -0,0 +1,590 @@ +# Copyright 2020 Google LLC All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ + +import unittest + +import mock + + +class _BaseTest(unittest.TestCase): + PROJECT_ID = "project-id" + PARENT = "projects/" + PROJECT_ID + INSTANCE_ID = "instance-id" + INSTANCE_NAME = PARENT + "/instances/" + INSTANCE_ID + DATABASE_ID = "database_id" + DATABASE_NAME = INSTANCE_NAME + "/databases/" + DATABASE_ID + BACKUP_ID = "backup_id" + BACKUP_NAME = INSTANCE_NAME + "/backups/" + BACKUP_ID + + def _make_one(self, *args, **kwargs): + return self._get_target_class()(*args, **kwargs) + + @staticmethod + def _make_timestamp(): + import datetime + from google.cloud._helpers import UTC + + return datetime.datetime.utcnow().replace(tzinfo=UTC) + + +class TestBackup(_BaseTest): + def _get_target_class(self): + from google.cloud.spanner_v1.backup import Backup + + return Backup + + @staticmethod + def _make_database_admin_api(): + from google.cloud.spanner_v1.client import DatabaseAdminClient + + return mock.create_autospec(DatabaseAdminClient, instance=True) + + def test_ctor_defaults(self): + instance = _Instance(self.INSTANCE_NAME) + + backup = self._make_one(self.BACKUP_ID, instance) + + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertEqual(backup._database, "") + self.assertIsNone(backup._expire_time) + + def test_ctor_non_defaults(self): + instance = _Instance(self.INSTANCE_NAME) + timestamp = self._make_timestamp() + + backup = self._make_one( + self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp + ) + + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertEqual(backup._database, self.DATABASE_NAME) + self.assertIsNotNone(backup._expire_time) + self.assertIs(backup._expire_time, timestamp) + + def test_from_pb_project_mismatch(self): + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + + ALT_PROJECT = "ALT_PROJECT" + client = _Client(project=ALT_PROJECT) + instance = _Instance(self.INSTANCE_NAME, client) 
+ backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_class = self._get_target_class() + + with self.assertRaises(ValueError): + backup_class.from_pb(backup_pb, instance) + + def test_from_pb_instance_mismatch(self): + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + + ALT_INSTANCE = "/projects/%s/instances/ALT-INSTANCE" % (self.PROJECT_ID,) + client = _Client() + instance = _Instance(ALT_INSTANCE, client) + backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_class = self._get_target_class() + + with self.assertRaises(ValueError): + backup_class.from_pb(backup_pb, instance) + + def test_from_pb_invalid_name(self): + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client) + backup_pb = backup_pb2.Backup(name="invalid_format") + backup_class = self._get_target_class() + + with self.assertRaises(ValueError): + backup_class.from_pb(backup_pb, instance) + + def test_from_pb_success(self): + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client) + backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + backup_class = self._get_target_class() + + backup = backup_class.from_pb(backup_pb, instance) + + self.assertTrue(isinstance(backup, backup_class)) + self.assertEqual(backup._instance, instance) + self.assertEqual(backup.backup_id, self.BACKUP_ID) + self.assertEqual(backup._database, "") + self.assertIsNone(backup._expire_time) + + def test_name_property(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected_name = self.BACKUP_NAME + self.assertEqual(backup.name, expected_name) + + def test_database_property(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._database = self.DATABASE_NAME + self.assertEqual(backup.database, expected) + 
+ def test_expire_time_property(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._expire_time = self._make_timestamp() + self.assertEqual(backup.expire_time, expected) + + def test_create_time_property(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._create_time = self._make_timestamp() + self.assertEqual(backup.create_time, expected) + + def test_size_bytes_property(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._size_bytes = 10 + self.assertEqual(backup.size_bytes, expected) + + def test_state_property(self): + from google.cloud.spanner_admin_database_v1.gapic import enums + + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._state = enums.Backup.State.READY + self.assertEqual(backup.state, expected) + + def test_referencing_databases_property(self): + instance = _Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance) + expected = backup._referencing_databases = [self.DATABASE_NAME] + self.assertEqual(backup.referencing_databases, expected) + + def test_create_grpc_error(self): + from google.api_core.exceptions import GoogleAPICallError + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.create_backup.side_effect = Unknown("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp + ) + + from google.cloud._helpers import _datetime_to_pb_timestamp + + backup_pb = { + "database": self.DATABASE_NAME, + "expire_time": _datetime_to_pb_timestamp(timestamp), + } + + with self.assertRaises(GoogleAPICallError): + 
backup.create() + + api.create_backup.assert_called_once_with( + parent=self.INSTANCE_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, + metadata=[("google-cloud-resource-prefix", backup.name)], + ) + + def test_create_already_exists(self): + from google.cloud.exceptions import Conflict + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.create_backup.side_effect = Conflict("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp + ) + + from google.cloud._helpers import _datetime_to_pb_timestamp + + backup_pb = { + "database": self.DATABASE_NAME, + "expire_time": _datetime_to_pb_timestamp(timestamp), + } + + with self.assertRaises(Conflict): + backup.create() + + api.create_backup.assert_called_once_with( + parent=self.INSTANCE_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, + metadata=[("google-cloud-resource-prefix", backup.name)], + ) + + def test_create_instance_not_found(self): + from google.cloud.exceptions import NotFound + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.create_backup.side_effect = NotFound("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp + ) + + from google.cloud._helpers import _datetime_to_pb_timestamp + + backup_pb = { + "database": self.DATABASE_NAME, + "expire_time": _datetime_to_pb_timestamp(timestamp), + } + + with self.assertRaises(NotFound): + backup.create() + + api.create_backup.assert_called_once_with( + parent=self.INSTANCE_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, + metadata=[("google-cloud-resource-prefix", backup.name)], + ) + + def test_create_expire_time_not_set(self): + instance = 
_Instance(self.INSTANCE_NAME) + backup = self._make_one(self.BACKUP_ID, instance, database=self.DATABASE_NAME) + + with self.assertRaises(ValueError): + backup.create() + + def test_create_database_not_set(self): + instance = _Instance(self.INSTANCE_NAME) + timestamp = self._make_timestamp() + backup = self._make_one(self.BACKUP_ID, instance, expire_time=timestamp) + + with self.assertRaises(ValueError): + backup.create() + + def test_create_success(self): + op_future = object() + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.create_backup.return_value = op_future + + instance = _Instance(self.INSTANCE_NAME, client=client) + timestamp = self._make_timestamp() + backup = self._make_one( + self.BACKUP_ID, instance, database=self.DATABASE_NAME, expire_time=timestamp + ) + + from google.cloud._helpers import _datetime_to_pb_timestamp + + backup_pb = { + "database": self.DATABASE_NAME, + "expire_time": _datetime_to_pb_timestamp(timestamp), + } + + future = backup.create() + self.assertIs(future, op_future) + + api.create_backup.assert_called_once_with( + parent=self.INSTANCE_NAME, + backup_id=self.BACKUP_ID, + backup=backup_pb, + metadata=[("google-cloud-resource-prefix", backup.name)], + ) + + def test_exists_grpc_error(self): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.get_backup.side_effect = Unknown("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + with self.assertRaises(Unknown): + backup.exists() + + api.get_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_exists_not_found(self): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.get_backup.side_effect = 
NotFound("testing") + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + self.assertFalse(backup.exists()) + + api.get_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_exists_success(self): + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + + client = _Client() + backup_pb = backup_pb2.Backup(name=self.BACKUP_NAME) + api = client.database_admin_api = self._make_database_admin_api() + api.get_backup.return_value = backup_pb + + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + self.assertTrue(backup.exists()) + + api.get_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_delete_grpc_error(self): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.delete_backup.side_effect = Unknown("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + with self.assertRaises(Unknown): + backup.delete() + + api.delete_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_delete_not_found(self): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.delete_backup.side_effect = NotFound("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + with self.assertRaises(NotFound): + backup.delete() + + api.delete_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_delete_success(self): + from google.protobuf.empty_pb2 import Empty + + 
client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.delete_backup.return_value = Empty() + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + backup.delete() + + api.delete_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_reload_grpc_error(self): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.get_backup.side_effect = Unknown("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + with self.assertRaises(Unknown): + backup.reload() + + api.get_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_reload_not_found(self): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.get_backup.side_effect = NotFound("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + with self.assertRaises(NotFound): + backup.reload() + + api.get_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_reload_success(self): + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud._helpers import _datetime_to_pb_timestamp + + timestamp = self._make_timestamp() + + client = _Client() + backup_pb = backup_pb2.Backup( + name=self.BACKUP_NAME, + database=self.DATABASE_NAME, + expire_time=_datetime_to_pb_timestamp(timestamp), + create_time=_datetime_to_pb_timestamp(timestamp), + size_bytes=10, + state=1, + referencing_databases=[], + ) + api = 
client.database_admin_api = self._make_database_admin_api() + api.get_backup.return_value = backup_pb + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + + backup.reload() + self.assertEqual(backup.name, self.BACKUP_NAME) + self.assertEqual(backup.database, self.DATABASE_NAME) + self.assertEqual(backup.expire_time, timestamp) + self.assertEqual(backup.create_time, timestamp) + self.assertEqual(backup.size_bytes, 10) + self.assertEqual(backup.state, enums.Backup.State.CREATING) + self.assertEqual(backup.referencing_databases, []) + + api.get_backup.assert_called_once_with( + self.BACKUP_NAME, metadata=[("google-cloud-resource-prefix", backup.name)] + ) + + def test_update_expire_time_grpc_error(self): + from google.api_core.exceptions import Unknown + from google.cloud._helpers import _datetime_to_pb_timestamp + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.update_backup.side_effect = Unknown("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + expire_time = self._make_timestamp() + + with self.assertRaises(Unknown): + backup.update_expire_time(expire_time) + + backup_update = { + "name": self.BACKUP_NAME, + "expire_time": _datetime_to_pb_timestamp(expire_time), + } + update_mask = {"paths": ["expire_time"]} + api.update_backup.assert_called_once_with( + backup_update, + update_mask, + metadata=[("google-cloud-resource-prefix", backup.name)], + ) + + def test_update_expire_time_not_found(self): + from google.api_core.exceptions import NotFound + from google.cloud._helpers import _datetime_to_pb_timestamp + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.update_backup.side_effect = NotFound("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + backup = self._make_one(self.BACKUP_ID, instance) + expire_time = 
self._make_timestamp()
+
+        with self.assertRaises(NotFound):
+            backup.update_expire_time(expire_time)
+
+        backup_update = {
+            "name": self.BACKUP_NAME,
+            "expire_time": _datetime_to_pb_timestamp(expire_time),
+        }
+        update_mask = {"paths": ["expire_time"]}
+        api.update_backup.assert_called_once_with(
+            backup_update,
+            update_mask,
+            metadata=[("google-cloud-resource-prefix", backup.name)],
+        )
+
+    def test_update_expire_time_success(self):
+        from google.cloud._helpers import _datetime_to_pb_timestamp
+        from google.cloud.spanner_admin_database_v1.proto import backup_pb2
+
+        client = _Client()
+        api = client.database_admin_api = self._make_database_admin_api()
+        api.update_backup.return_value = backup_pb2.Backup(name=self.BACKUP_NAME)
+        instance = _Instance(self.INSTANCE_NAME, client=client)
+        backup = self._make_one(self.BACKUP_ID, instance)
+        expire_time = self._make_timestamp()
+
+        backup.update_expire_time(expire_time)
+
+        backup_update = {
+            "name": self.BACKUP_NAME,
+            "expire_time": _datetime_to_pb_timestamp(expire_time),
+        }
+        update_mask = {"paths": ["expire_time"]}
+        api.update_backup.assert_called_once_with(
+            backup_update,
+            update_mask,
+            metadata=[("google-cloud-resource-prefix", backup.name)],
+        )
+
+    def test_is_ready(self):
+        from google.cloud.spanner_admin_database_v1.gapic import enums
+
+        client = _Client()
+        instance = _Instance(self.INSTANCE_NAME, client=client)
+        backup = self._make_one(self.BACKUP_ID, instance)
+        backup._state = enums.Backup.State.READY
+        self.assertTrue(backup.is_ready())
+        backup._state = enums.Backup.State.CREATING
+        self.assertFalse(backup.is_ready())
+
+
+class TestBackupInfo(_BaseTest):
+    def test_from_pb(self):
+        from google.cloud.spanner_admin_database_v1.proto import backup_pb2
+        from google.cloud.spanner_v1.backup import BackupInfo
+        from google.cloud._helpers import _datetime_to_pb_timestamp
+
+        backup_name = "backup_name"
+        timestamp = self._make_timestamp()
+        database_name = "database_name"
+
+        pb = 
backup_pb2.BackupInfo( + backup=backup_name, + create_time=_datetime_to_pb_timestamp(timestamp), + source_database=database_name, + ) + backup_info = BackupInfo.from_pb(pb) + + self.assertEqual(backup_info.backup, backup_name) + self.assertEqual(backup_info.create_time, timestamp) + self.assertEqual(backup_info.source_database, database_name) + + +class _Client(object): + def __init__(self, project=TestBackup.PROJECT_ID): + self.project = project + self.project_name = "projects/" + self.project + + +class _Instance(object): + def __init__(self, name, client=None): + self.name = name + self.instance_id = name.rsplit("/", 1)[1] + self._client = client diff --git a/tests/unit/test_database.py b/tests/unit/test_database.py index 2d7e2e1888..4b343c2fd9 100644 --- a/tests/unit/test_database.py +++ b/tests/unit/test_database.py @@ -53,6 +53,8 @@ class _BaseTest(unittest.TestCase): SESSION_ID = "session_id" SESSION_NAME = DATABASE_NAME + "/sessions/" + SESSION_ID TRANSACTION_ID = b"transaction_id" + BACKUP_ID = "backup_id" + BACKUP_NAME = INSTANCE_NAME + "/backups/" + BACKUP_ID def _make_one(self, *args, **kwargs): return self._get_target_class()(*args, **kwargs) @@ -230,6 +232,33 @@ def test_name_property(self): expected_name = self.DATABASE_NAME self.assertEqual(database.name, expected_name) + def test_create_time_property(self): + instance = _Instance(self.INSTANCE_NAME) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + expected_create_time = database._create_time = self._make_timestamp() + self.assertEqual(database.create_time, expected_create_time) + + def test_state_property(self): + from google.cloud.spanner_admin_database_v1.gapic import enums + + instance = _Instance(self.INSTANCE_NAME) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + expected_state = database._state = enums.Database.State.READY + self.assertEqual(database.state, expected_state) + + def test_restore_info(self): + from 
google.cloud.spanner_v1.database import RestoreInfo + + instance = _Instance(self.INSTANCE_NAME) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + restore_info = database._restore_info = mock.create_autospec( + RestoreInfo, instance=True + ) + self.assertEqual(database.restore_info, restore_info) + def test_spanner_api_property_w_scopeless_creds(self): from google.cloud.spanner_admin_instance_v1.proto import ( spanner_instance_admin_pb2 as admin_v1_pb2, @@ -766,24 +795,41 @@ def test_reload_success(self): from google.cloud.spanner_admin_database_v1.proto import ( spanner_database_admin_pb2 as admin_v1_pb2, ) + from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud._helpers import _datetime_to_pb_timestamp from tests._fixtures import DDL_STATEMENTS + timestamp = self._make_timestamp() + restore_info = admin_v1_pb2.RestoreInfo() + client = _Client() ddl_pb = admin_v1_pb2.GetDatabaseDdlResponse(statements=DDL_STATEMENTS) api = client.database_admin_api = self._make_database_admin_api() api.get_database_ddl.return_value = ddl_pb + db_pb = admin_v1_pb2.Database( + state=2, + create_time=_datetime_to_pb_timestamp(timestamp), + restore_info=restore_info, + ) + api.get_database.return_value = db_pb instance = _Instance(self.INSTANCE_NAME, client=client) pool = _Pool() database = self._make_one(self.DATABASE_ID, instance, pool=pool) database.reload() - + self.assertEqual(database._state, enums.Database.State.READY) + self.assertEqual(database._create_time, timestamp) + self.assertEqual(database._restore_info, restore_info) self.assertEqual(database._ddl_statements, tuple(DDL_STATEMENTS)) api.get_database_ddl.assert_called_once_with( self.DATABASE_NAME, metadata=[("google-cloud-resource-prefix", database.name)], ) + api.get_database.assert_called_once_with( + self.DATABASE_NAME, + metadata=[("google-cloud-resource-prefix", database.name)], + ) def test_update_ddl_grpc_error(self): from google.api_core.exceptions 
import Unknown @@ -1195,6 +1241,180 @@ def nested_unit_of_work(): database.run_in_transaction(nested_unit_of_work) self.assertEqual(inner.call_count, 0) + def test_restore_backup_unspecified(self): + instance = _Instance(self.INSTANCE_NAME, client=_Client()) + database = self._make_one(self.DATABASE_ID, instance) + + with self.assertRaises(ValueError): + database.restore(None) + + def test_restore_grpc_error(self): + from google.api_core.exceptions import Unknown + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.restore_database.side_effect = Unknown("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + backup = _Backup(self.BACKUP_NAME) + + with self.assertRaises(Unknown): + database.restore(backup) + + api.restore_database.assert_called_once_with( + parent=self.INSTANCE_NAME, + database_id=self.DATABASE_ID, + backup=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", database.name)], + ) + + def test_restore_not_found(self): + from google.api_core.exceptions import NotFound + + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.restore_database.side_effect = NotFound("testing") + instance = _Instance(self.INSTANCE_NAME, client=client) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + backup = _Backup(self.BACKUP_NAME) + + with self.assertRaises(NotFound): + database.restore(backup) + + api.restore_database.assert_called_once_with( + parent=self.INSTANCE_NAME, + database_id=self.DATABASE_ID, + backup=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", database.name)], + ) + + def test_restore_success(self): + op_future = object() + client = _Client() + api = client.database_admin_api = self._make_database_admin_api() + api.restore_database.return_value = op_future + instance = _Instance(self.INSTANCE_NAME, client=client) 
+ pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + backup = _Backup(self.BACKUP_NAME) + + future = database.restore(backup) + + self.assertIs(future, op_future) + + api.restore_database.assert_called_once_with( + parent=self.INSTANCE_NAME, + database_id=self.DATABASE_ID, + backup=self.BACKUP_NAME, + metadata=[("google-cloud-resource-prefix", database.name)], + ) + + def test_is_ready(self): + from google.cloud.spanner_admin_database_v1.gapic import enums + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + database._state = enums.Database.State.READY + self.assertTrue(database.is_ready()) + database._state = enums.Database.State.READY_OPTIMIZING + self.assertTrue(database.is_ready()) + database._state = enums.Database.State.CREATING + self.assertFalse(database.is_ready()) + + def test_is_optimized(self): + from google.cloud.spanner_admin_database_v1.gapic import enums + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + database._state = enums.Database.State.READY + self.assertTrue(database.is_optimized()) + database._state = enums.Database.State.READY_OPTIMIZING + self.assertFalse(database.is_optimized()) + database._state = enums.Database.State.CREATING + self.assertFalse(database.is_optimized()) + + def test_list_database_operations_grpc_error(self): + from google.api_core.exceptions import Unknown + from google.cloud.spanner_v1.database import _DATABASE_METADATA_FILTER + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + instance.list_database_operations = mock.MagicMock( + side_effect=Unknown("testing") + ) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + + with self.assertRaises(Unknown): + database.list_database_operations() + + 
instance.list_database_operations.assert_called_once_with( + filter_=_DATABASE_METADATA_FILTER.format(database.name), page_size=None + ) + + def test_list_database_operations_not_found(self): + from google.api_core.exceptions import NotFound + from google.cloud.spanner_v1.database import _DATABASE_METADATA_FILTER + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + instance.list_database_operations = mock.MagicMock( + side_effect=NotFound("testing") + ) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + + with self.assertRaises(NotFound): + database.list_database_operations() + + instance.list_database_operations.assert_called_once_with( + filter_=_DATABASE_METADATA_FILTER.format(database.name), page_size=None + ) + + def test_list_database_operations_defaults(self): + from google.cloud.spanner_v1.database import _DATABASE_METADATA_FILTER + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + instance.list_database_operations = mock.MagicMock(return_value=[]) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + + database.list_database_operations() + + instance.list_database_operations.assert_called_once_with( + filter_=_DATABASE_METADATA_FILTER.format(database.name), page_size=None + ) + + def test_list_database_operations_explicit_filter(self): + from google.cloud.spanner_v1.database import _DATABASE_METADATA_FILTER + + client = _Client() + instance = _Instance(self.INSTANCE_NAME, client=client) + instance.list_database_operations = mock.MagicMock(return_value=[]) + pool = _Pool() + database = self._make_one(self.DATABASE_ID, instance, pool=pool) + + expected_filter_ = "({0}) AND ({1})".format( + "metadata.@type:type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata", + _DATABASE_METADATA_FILTER.format(database.name), + ) + page_size = 10 + database.list_database_operations( + 
filter_="metadata.@type:type.googleapis.com/google.spanner.admin.database.v1.RestoreDatabaseMetadata", + page_size=page_size, + ) + + instance.list_database_operations.assert_called_once_with( + filter_=expected_filter_, page_size=page_size + ) + class TestBatchCheckout(_BaseTest): def _get_target_class(self): @@ -1810,6 +2030,30 @@ def _make_instance_api(): return mock.create_autospec(InstanceAdminClient) +class TestRestoreInfo(_BaseTest): + def test_from_pb(self): + from google.cloud.spanner_v1.database import RestoreInfo + from google.cloud.spanner_admin_database_v1.gapic import enums + from google.cloud.spanner_admin_database_v1.proto import ( + backup_pb2, + spanner_database_admin_pb2 as admin_v1_pb2, + ) + from google.cloud._helpers import _datetime_to_pb_timestamp + + timestamp = self._make_timestamp() + restore_pb = admin_v1_pb2.RestoreInfo( + source_type=1, + backup_info=backup_pb2.BackupInfo( + backup="backup_path", + create_time=_datetime_to_pb_timestamp(timestamp), + source_database="database_path", + ), + ) + restore_info = RestoreInfo.from_pb(restore_pb) + self.assertEqual(restore_info.source_type, enums.RestoreSourceType.BACKUP) + self.assertEqual(restore_info.backup_info.create_time, timestamp) + + class _Client(object): def __init__(self, project=TestDatabase.PROJECT_ID): from google.cloud.spanner_v1.proto.spanner_pb2 import ExecuteSqlRequest @@ -1831,6 +2075,11 @@ def __init__(self, name, client=None, emulator_host=None): self.emulator_host = emulator_host +class _Backup(object): + def __init__(self, name): + self.name = name + + class _Database(object): def __init__(self, name, instance=None): self.name = name diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py index 0e7bc99df4..b71445d835 100644 --- a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -579,6 +579,333 @@ def test_list_databases_w_options(self): timeout=mock.ANY, ) + def test_backup_factory_defaults(self): + from google.cloud.spanner_v1.backup 
import Backup + + client = _Client(self.PROJECT) + instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) + BACKUP_ID = "backup-id" + + backup = instance.backup(BACKUP_ID) + + self.assertIsInstance(backup, Backup) + self.assertEqual(backup.backup_id, BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertEqual(backup._database, "") + self.assertIsNone(backup._expire_time) + + def test_backup_factory_explicit(self): + import datetime + from google.cloud._helpers import UTC + from google.cloud.spanner_v1.backup import Backup + + client = _Client(self.PROJECT) + instance = self._make_one(self.INSTANCE_ID, client, self.CONFIG_NAME) + BACKUP_ID = "backup-id" + DATABASE_NAME = "database-name" + timestamp = datetime.datetime.utcnow().replace(tzinfo=UTC) + + backup = instance.backup( + BACKUP_ID, database=DATABASE_NAME, expire_time=timestamp + ) + + self.assertIsInstance(backup, Backup) + self.assertEqual(backup.backup_id, BACKUP_ID) + self.assertIs(backup._instance, instance) + self.assertEqual(backup._database, DATABASE_NAME) + self.assertIs(backup._expire_time, timestamp) + + def test_list_backups_defaults(self): + from google.cloud.spanner_admin_database_v1.gapic import database_admin_client + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_v1.backup import Backup + + api = database_admin_client.DatabaseAdminClient(mock.Mock()) + client = _Client(self.PROJECT) + client.database_admin_api = api + instance = self._make_one(self.INSTANCE_ID, client) + + backups_pb = backup_pb2.ListBackupsResponse( + backups=[ + backup_pb2.Backup(name=instance.name + "/backups/op1"), + backup_pb2.Backup(name=instance.name + "/backups/op2"), + backup_pb2.Backup(name=instance.name + "/backups/op3"), + ] + ) + + ldo_api = api._inner_api_calls["list_backups"] = mock.Mock( + return_value=backups_pb + ) + + backups = instance.list_backups() + + for backup in backups: + self.assertIsInstance(backup, Backup) + + 
expected_metadata = [ + ("google-cloud-resource-prefix", instance.name), + ("x-goog-request-params", "parent={}".format(instance.name)), + ] + ldo_api.assert_called_once_with( + backup_pb2.ListBackupsRequest(parent=self.INSTANCE_NAME), + metadata=expected_metadata, + retry=mock.ANY, + timeout=mock.ANY, + ) + + def test_list_backups_w_options(self): + from google.cloud.spanner_admin_database_v1.gapic import database_admin_client + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.cloud.spanner_v1.backup import Backup + + api = database_admin_client.DatabaseAdminClient(mock.Mock()) + client = _Client(self.PROJECT) + client.database_admin_api = api + instance = self._make_one(self.INSTANCE_ID, client) + + backups_pb = backup_pb2.ListBackupsResponse( + backups=[ + backup_pb2.Backup(name=instance.name + "/backups/op1"), + backup_pb2.Backup(name=instance.name + "/backups/op2"), + backup_pb2.Backup(name=instance.name + "/backups/op3"), + ] + ) + + ldo_api = api._inner_api_calls["list_backups"] = mock.Mock( + return_value=backups_pb + ) + + backups = instance.list_backups(filter_="filter", page_size=10) + + for backup in backups: + self.assertIsInstance(backup, Backup) + + expected_metadata = [ + ("google-cloud-resource-prefix", instance.name), + ("x-goog-request-params", "parent={}".format(instance.name)), + ] + ldo_api.assert_called_once_with( + backup_pb2.ListBackupsRequest( + parent=self.INSTANCE_NAME, filter="filter", page_size=10 + ), + metadata=expected_metadata, + retry=mock.ANY, + timeout=mock.ANY, + ) + + def test_list_backup_operations_defaults(self): + from google.api_core.operation import Operation + from google.cloud.spanner_admin_database_v1.gapic import database_admin_client + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + + api = database_admin_client.DatabaseAdminClient(mock.Mock()) + client = 
_Client(self.PROJECT) + client.database_admin_api = api + instance = self._make_one(self.INSTANCE_ID, client) + + create_backup_metadata = Any() + create_backup_metadata.Pack(backup_pb2.CreateBackupMetadata()) + + operations_pb = backup_pb2.ListBackupOperationsResponse( + operations=[ + operations_pb2.Operation(name="op1", metadata=create_backup_metadata) + ] + ) + + ldo_api = api._inner_api_calls["list_backup_operations"] = mock.Mock( + return_value=operations_pb + ) + + operations = instance.list_backup_operations() + + for op in operations: + self.assertIsInstance(op, Operation) + + expected_metadata = [ + ("google-cloud-resource-prefix", instance.name), + ("x-goog-request-params", "parent={}".format(instance.name)), + ] + ldo_api.assert_called_once_with( + backup_pb2.ListBackupOperationsRequest(parent=self.INSTANCE_NAME), + metadata=expected_metadata, + retry=mock.ANY, + timeout=mock.ANY, + ) + + def test_list_backup_operations_w_options(self): + from google.api_core.operation import Operation + from google.cloud.spanner_admin_database_v1.gapic import database_admin_client + from google.cloud.spanner_admin_database_v1.proto import backup_pb2 + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + + api = database_admin_client.DatabaseAdminClient(mock.Mock()) + client = _Client(self.PROJECT) + client.database_admin_api = api + instance = self._make_one(self.INSTANCE_ID, client) + + create_backup_metadata = Any() + create_backup_metadata.Pack(backup_pb2.CreateBackupMetadata()) + + operations_pb = backup_pb2.ListBackupOperationsResponse( + operations=[ + operations_pb2.Operation(name="op1", metadata=create_backup_metadata) + ] + ) + + ldo_api = api._inner_api_calls["list_backup_operations"] = mock.Mock( + return_value=operations_pb + ) + + operations = instance.list_backup_operations(filter_="filter", page_size=10) + + for op in operations: + self.assertIsInstance(op, Operation) + + expected_metadata = [ + 
("google-cloud-resource-prefix", instance.name), + ("x-goog-request-params", "parent={}".format(instance.name)), + ] + ldo_api.assert_called_once_with( + backup_pb2.ListBackupOperationsRequest( + parent=self.INSTANCE_NAME, filter="filter", page_size=10 + ), + metadata=expected_metadata, + retry=mock.ANY, + timeout=mock.ANY, + ) + + def test_list_database_operations_defaults(self): + from google.api_core.operation import Operation + from google.cloud.spanner_admin_database_v1.gapic import database_admin_client + from google.cloud.spanner_admin_database_v1.proto import ( + spanner_database_admin_pb2, + ) + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + + api = database_admin_client.DatabaseAdminClient(mock.Mock()) + client = _Client(self.PROJECT) + client.database_admin_api = api + instance = self._make_one(self.INSTANCE_ID, client) + + create_database_metadata = Any() + create_database_metadata.Pack( + spanner_database_admin_pb2.CreateDatabaseMetadata() + ) + + optimize_database_metadata = Any() + optimize_database_metadata.Pack( + spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata() + ) + + databases_pb = spanner_database_admin_pb2.ListDatabaseOperationsResponse( + operations=[ + operations_pb2.Operation(name="op1", metadata=create_database_metadata), + operations_pb2.Operation( + name="op2", metadata=optimize_database_metadata + ), + ] + ) + + ldo_api = api._inner_api_calls["list_database_operations"] = mock.Mock( + return_value=databases_pb + ) + + operations = instance.list_database_operations() + + for op in operations: + self.assertIsInstance(op, Operation) + + expected_metadata = [ + ("google-cloud-resource-prefix", instance.name), + ("x-goog-request-params", "parent={}".format(instance.name)), + ] + ldo_api.assert_called_once_with( + spanner_database_admin_pb2.ListDatabaseOperationsRequest( + parent=self.INSTANCE_NAME + ), + metadata=expected_metadata, + retry=mock.ANY, + timeout=mock.ANY, + ) + + def 
test_list_database_operations_w_options(self): + from google.api_core.operation import Operation + from google.cloud.spanner_admin_database_v1.gapic import database_admin_client + from google.cloud.spanner_admin_database_v1.proto import ( + spanner_database_admin_pb2, + ) + from google.longrunning import operations_pb2 + from google.protobuf.any_pb2 import Any + + api = database_admin_client.DatabaseAdminClient(mock.Mock()) + client = _Client(self.PROJECT) + client.database_admin_api = api + instance = self._make_one(self.INSTANCE_ID, client) + + restore_database_metadata = Any() + restore_database_metadata.Pack( + spanner_database_admin_pb2.RestoreDatabaseMetadata() + ) + + update_database_metadata = Any() + update_database_metadata.Pack( + spanner_database_admin_pb2.UpdateDatabaseDdlMetadata() + ) + + databases_pb = spanner_database_admin_pb2.ListDatabaseOperationsResponse( + operations=[ + operations_pb2.Operation( + name="op1", metadata=restore_database_metadata + ), + operations_pb2.Operation(name="op2", metadata=update_database_metadata), + ] + ) + + ldo_api = api._inner_api_calls["list_database_operations"] = mock.Mock( + return_value=databases_pb + ) + + operations = instance.list_database_operations(filter_="filter", page_size=10) + + for op in operations: + self.assertIsInstance(op, Operation) + + expected_metadata = [ + ("google-cloud-resource-prefix", instance.name), + ("x-goog-request-params", "parent={}".format(instance.name)), + ] + ldo_api.assert_called_once_with( + spanner_database_admin_pb2.ListDatabaseOperationsRequest( + parent=self.INSTANCE_NAME, filter="filter", page_size=10 + ), + metadata=expected_metadata, + retry=mock.ANY, + timeout=mock.ANY, + ) + + def test_type_string_to_type_pb_hit(self): + from google.cloud.spanner_admin_database_v1.proto import ( + spanner_database_admin_pb2, + ) + from google.cloud.spanner_v1 import instance + + type_string = "type.googleapis.com/google.spanner.admin.database.v1.OptimizeRestoredDatabaseMetadata" + 
self.assertIn(type_string, instance._OPERATION_METADATA_TYPES) + self.assertEqual( + instance._type_string_to_type_pb(type_string), + spanner_database_admin_pb2.OptimizeRestoredDatabaseMetadata, + ) + + def test_type_string_to_type_pb_miss(self): + from google.cloud.spanner_v1 import instance + from google.protobuf.empty_pb2 import Empty + + self.assertEqual(instance._type_string_to_type_pb("invalid_string"), Empty) + class _Client(object): def __init__(self, project, timeout_seconds=None): From faf23af9c77378e82f0dbbc016142777baf6738e Mon Sep 17 00:00:00 2001 From: larkee <31196561+larkee@users.noreply.github.com> Date: Mon, 16 Mar 2020 14:17:05 +1100 Subject: [PATCH 13/14] chore: ensure next release is minor (#37) Release-As: 1.15.0 Co-authored-by: larkee From e378ba039fc5714c715386b3a1d9ba5b2c7ce306 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 18 Mar 2020 07:28:37 +1100 Subject: [PATCH 14/14] chore: release 1.15.0 (#40) * updated CHANGELOG.md [ci skip] * updated setup.py [ci skip] * Update CHANGELOG.md Tidy commit summaries Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> Co-authored-by: larkee <31196561+larkee@users.noreply.github.com> --- CHANGELOG.md | 25 ++++++++++++++++++++----- setup.py | 2 +- 2 files changed, 21 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f708046b40..f62aff853b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,20 +4,35 @@ [1]: https://pypi.org/project/google-cloud-spanner/#history +## [1.15.0](https://www.github.com/googleapis/python-spanner/compare/v1.14.0...v1.15.0) (2020-03-17) + + +### Features + +* Add emulator support ([#14](https://www.github.com/googleapis/python-spanner/issues/14)) ([b315593](https://www.github.com/googleapis/python-spanner/commit/b315593bd3e473d96cc3033f5bbf0da7487e38eb)) +* Export transaction._rolled_back as transaction.rolled_back 
([#16](https://www.github.com/googleapis/python-spanner/issues/16)) ([974ee92](https://www.github.com/googleapis/python-spanner/commit/974ee925df1962f559d6cb43318ee301e330e8f2)) +* Add support for backups ([#35](https://www.github.com/googleapis/python-spanner/issues/35)) ([39288e7](https://www.github.com/googleapis/python-spanner/commit/39288e784826c5accca71096be11f99ad7f930f4)) +* Implement query options versioning support ([#30](https://www.github.com/googleapis/python-spanner/issues/30)) ([5147921](https://www.github.com/googleapis/python-spanner/commit/514792151c2fe4fc7a6cf4ad0dd141c9090a634b)) + + +### Bug Fixes + +* Remove erroneous timeouts for batch_create_session calls ([#18](https://www.github.com/googleapis/python-spanner/issues/18)) ([997a034](https://www.github.com/googleapis/python-spanner/commit/997a03477b07ec39c718480d9bfe729404bf5748)) + ## [1.14.0](https://www.github.com/googleapis/python-spanner/compare/v1.13.0...v1.14.0) (2020-01-31) ### Features -* **spanner:** add deprecation warnings; add field_mask to get_instance; add endpoint_uris to Instance proto; update timeouts; make mutations optional for commits (via synth) ([62edbe1](https://www.github.com/googleapis/python-spanner/commit/62edbe12a0c5a74eacb8d87ca265a19e6d27f890)) -* **spanner:** add resource based routing implementation ([#10183](https://www.github.com/googleapis/google-cloud-python/issues/10183)) ([e072d5d](https://www.github.com/googleapis/python-spanner/commit/e072d5dd04d58fff7f62ce19ce42e906dfd11012)) -* **spanner:** un-deprecate resource name helper functions, add 3.8 tests (via synth) ([#10062](https://www.github.com/googleapis/google-cloud-python/issues/10062)) ([dbb79b0](https://www.github.com/googleapis/python-spanner/commit/dbb79b0d8b0c79f6ed1772f28e4eedb9d986b108)) +* Add deprecation warnings; add field_mask to get_instance; add endpoint_uris to Instance proto; update timeouts; make mutations optional for commits (via synth) 
([62edbe1](https://www.github.com/googleapis/python-spanner/commit/62edbe12a0c5a74eacb8d87ca265a19e6d27f890)) +* Add resource based routing implementation ([#10183](https://www.github.com/googleapis/google-cloud-python/issues/10183)) ([e072d5d](https://www.github.com/googleapis/python-spanner/commit/e072d5dd04d58fff7f62ce19ce42e906dfd11012)) +* Un-deprecate resource name helper functions, add 3.8 tests (via synth) ([#10062](https://www.github.com/googleapis/google-cloud-python/issues/10062)) ([dbb79b0](https://www.github.com/googleapis/python-spanner/commit/dbb79b0d8b0c79f6ed1772f28e4eedb9d986b108)) ### Bug Fixes -* be permssive about merging an empty struct ([#10079](https://www.github.com/googleapis/google-cloud-python/issues/10079)) ([cfae63d](https://www.github.com/googleapis/python-spanner/commit/cfae63d5a8b8332f8875307283da6075a544c838)) -* **spanner:** fix imports for doc samples ([#10283](https://www.github.com/googleapis/google-cloud-python/issues/10283)) ([55a21d9](https://www.github.com/googleapis/python-spanner/commit/55a21d97d0c863cbbbb2d973b6faa4aeba8e38bb)) +* Be permssive about merging an empty struct ([#10079](https://www.github.com/googleapis/google-cloud-python/issues/10079)) ([cfae63d](https://www.github.com/googleapis/python-spanner/commit/cfae63d5a8b8332f8875307283da6075a544c838)) +* Fix imports for doc samples ([#10283](https://www.github.com/googleapis/google-cloud-python/issues/10283)) ([55a21d9](https://www.github.com/googleapis/python-spanner/commit/55a21d97d0c863cbbbb2d973b6faa4aeba8e38bb)) ## 1.13.0 diff --git a/setup.py b/setup.py index cc86f650ea..3db2cc15f2 100644 --- a/setup.py +++ b/setup.py @@ -22,7 +22,7 @@ name = "google-cloud-spanner" description = "Cloud Spanner API client library" -version = "1.14.0" +version = "1.15.0" # Should be one of: # 'Development Status :: 3 - Alpha' # 'Development Status :: 4 - Beta'