Skip to content
This repository was archived by the owner on Mar 31, 2026. It is now read-only.

Commit ede4343

Browse files
authored
fix: Convert PBs in system test cleanup (#199)
Fixes #198, includes #201. Fix a bug in test_system.py around backup instance proto conversion.
1 parent 2b74f9c commit ede4343

2 files changed

Lines changed: 89 additions & 80 deletions

File tree

tests/system/test_system.py

Lines changed: 82 additions & 77 deletions
Original file line number | Diff line number | Diff line change
@@ -35,11 +35,13 @@
3535
from google.cloud.spanner_v1 import Type
3636

3737
from google.cloud._helpers import UTC
38+
from google.cloud.spanner_v1 import BurstyPool
39+
from google.cloud.spanner_v1 import COMMIT_TIMESTAMP
3840
from google.cloud.spanner_v1 import Client
3941
from google.cloud.spanner_v1 import KeyRange
4042
from google.cloud.spanner_v1 import KeySet
41-
from google.cloud.spanner_v1 import BurstyPool
42-
from google.cloud.spanner_v1 import COMMIT_TIMESTAMP
43+
from google.cloud.spanner_v1.instance import Backup
44+
from google.cloud.spanner_v1.instance import Instance
4345

4446
from test_utils.retry import RetryErrors
4547
from test_utils.retry import RetryInstanceState
@@ -115,14 +117,17 @@ def setUpModule():
115117

116118
# Delete test instances that are older than an hour.
117119
cutoff = int(time.time()) - 1 * 60 * 60
118-
for instance in Config.CLIENT.list_instances("labels.python-spanner-systests:true"):
120+
instance_pbs = Config.CLIENT.list_instances("labels.python-spanner-systests:true")
121+
for instance_pb in instance_pbs:
122+
instance = Instance.from_pb(instance_pb, Config.CLIENT)
119123
if "created" not in instance.labels:
120124
continue
121125
create_time = int(instance.labels["created"])
122126
if create_time > cutoff:
123127
continue
124128
# Instance cannot be deleted while backups exist.
125-
for backup in instance.list_backups():
129+
for backup_pb in instance.list_backups():
130+
backup = Backup.from_pb(backup_pb, instance)
126131
backup.delete()
127132
instance.delete()
128133

@@ -939,9 +944,9 @@ def test_batch_insert_then_read(self):
939944
)
940945

941946
def test_batch_insert_then_read_string_array_of_string(self):
942-
TABLE = "string_plus_array_of_string"
943-
COLUMNS = ["id", "name", "tags"]
944-
ROWDATA = [
947+
table = "string_plus_array_of_string"
948+
columns = ["id", "name", "tags"]
949+
rowdata = [
945950
(0, None, None),
946951
(1, "phred", ["yabba", "dabba", "do"]),
947952
(2, "bharney", []),
@@ -951,12 +956,12 @@ def test_batch_insert_then_read_string_array_of_string(self):
951956
retry(self._db.reload)()
952957

953958
with self._db.batch() as batch:
954-
batch.delete(TABLE, self.ALL)
955-
batch.insert(TABLE, COLUMNS, ROWDATA)
959+
batch.delete(table, self.ALL)
960+
batch.insert(table, columns, rowdata)
956961

957962
with self._db.snapshot(read_timestamp=batch.committed) as snapshot:
958-
rows = list(snapshot.read(TABLE, COLUMNS, self.ALL))
959-
self._check_rows_data(rows, expected=ROWDATA)
963+
rows = list(snapshot.read(table, columns, self.ALL))
964+
self._check_rows_data(rows, expected=rowdata)
960965

961966
def test_batch_insert_then_read_all_datatypes(self):
962967
retry = RetryInstanceState(_has_all_ddl)
@@ -1570,23 +1575,23 @@ def _read_w_concurrent_update(self, transaction, pkey):
15701575
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
15711576

15721577
def test_transaction_read_w_concurrent_updates(self):
1573-
PKEY = "read_w_concurrent_updates"
1574-
self._transaction_concurrency_helper(self._read_w_concurrent_update, PKEY)
1578+
pkey = "read_w_concurrent_updates"
1579+
self._transaction_concurrency_helper(self._read_w_concurrent_update, pkey)
15751580

15761581
def _query_w_concurrent_update(self, transaction, pkey):
1577-
SQL = "SELECT * FROM counters WHERE name = @name"
1582+
sql = "SELECT * FROM counters WHERE name = @name"
15781583
rows = list(
15791584
transaction.execute_sql(
1580-
SQL, params={"name": pkey}, param_types={"name": param_types.STRING}
1585+
sql, params={"name": pkey}, param_types={"name": param_types.STRING}
15811586
)
15821587
)
15831588
self.assertEqual(len(rows), 1)
15841589
pkey, value = rows[0]
15851590
transaction.update(COUNTERS_TABLE, COUNTERS_COLUMNS, [[pkey, value + 1]])
15861591

15871592
def test_transaction_query_w_concurrent_updates(self):
1588-
PKEY = "query_w_concurrent_updates"
1589-
self._transaction_concurrency_helper(self._query_w_concurrent_update, PKEY)
1593+
pkey = "query_w_concurrent_updates"
1594+
self._transaction_concurrency_helper(self._query_w_concurrent_update, pkey)
15901595

15911596
@unittest.skipIf(USE_EMULATOR, "Skipping concurrent transactions")
15921597
def test_transaction_read_w_abort(self):
@@ -1684,9 +1689,9 @@ def test_snapshot_read_w_various_staleness(self):
16841689
from datetime import datetime
16851690
from google.cloud._helpers import UTC
16861691

1687-
ROW_COUNT = 400
1688-
committed = self._set_up_table(ROW_COUNT)
1689-
all_data_rows = list(self._row_data(ROW_COUNT))
1692+
row_count = 400
1693+
committed = self._set_up_table(row_count)
1694+
all_data_rows = list(self._row_data(row_count))
16901695

16911696
before_reads = datetime.utcnow().replace(tzinfo=UTC)
16921697

@@ -1718,9 +1723,9 @@ def test_snapshot_read_w_various_staleness(self):
17181723
self._check_row_data(rows, all_data_rows)
17191724

17201725
def test_multiuse_snapshot_read_isolation_strong(self):
1721-
ROW_COUNT = 40
1722-
self._set_up_table(ROW_COUNT)
1723-
all_data_rows = list(self._row_data(ROW_COUNT))
1726+
row_count = 40
1727+
self._set_up_table(row_count)
1728+
all_data_rows = list(self._row_data(row_count))
17241729
with self._db.snapshot(multi_use=True) as strong:
17251730
before = list(strong.read(self.TABLE, self.COLUMNS, self.ALL))
17261731
self._check_row_data(before, all_data_rows)
@@ -1732,9 +1737,9 @@ def test_multiuse_snapshot_read_isolation_strong(self):
17321737
self._check_row_data(after, all_data_rows)
17331738

17341739
def test_multiuse_snapshot_read_isolation_read_timestamp(self):
1735-
ROW_COUNT = 40
1736-
committed = self._set_up_table(ROW_COUNT)
1737-
all_data_rows = list(self._row_data(ROW_COUNT))
1740+
row_count = 40
1741+
committed = self._set_up_table(row_count)
1742+
all_data_rows = list(self._row_data(row_count))
17381743

17391744
with self._db.snapshot(read_timestamp=committed, multi_use=True) as read_ts:
17401745

@@ -1748,10 +1753,10 @@ def test_multiuse_snapshot_read_isolation_read_timestamp(self):
17481753
self._check_row_data(after, all_data_rows)
17491754

17501755
def test_multiuse_snapshot_read_isolation_exact_staleness(self):
1751-
ROW_COUNT = 40
1756+
row_count = 40
17521757

1753-
self._set_up_table(ROW_COUNT)
1754-
all_data_rows = list(self._row_data(ROW_COUNT))
1758+
self._set_up_table(row_count)
1759+
all_data_rows = list(self._row_data(row_count))
17551760

17561761
time.sleep(1)
17571762
delta = datetime.timedelta(microseconds=1000)
@@ -1768,7 +1773,7 @@ def test_multiuse_snapshot_read_isolation_exact_staleness(self):
17681773
self._check_row_data(after, all_data_rows)
17691774

17701775
def test_read_w_index(self):
1771-
ROW_COUNT = 2000
1776+
row_count = 2000
17721777
# Indexed reads cannot return non-indexed columns
17731778
MY_COLUMNS = self.COLUMNS[0], self.COLUMNS[2]
17741779
EXTRA_DDL = ["CREATE INDEX contacts_by_last_name ON contacts(last_name)"]
@@ -1784,7 +1789,7 @@ def test_read_w_index(self):
17841789

17851790
# We want to make sure the operation completes.
17861791
operation.result(30) # raises on failure / timeout.
1787-
committed = self._set_up_table(ROW_COUNT, database=temp_db)
1792+
committed = self._set_up_table(row_count, database=temp_db)
17881793

17891794
with temp_db.snapshot(read_timestamp=committed) as snapshot:
17901795
rows = list(
@@ -1794,36 +1799,36 @@ def test_read_w_index(self):
17941799
)
17951800

17961801
expected = list(
1797-
reversed([(row[0], row[2]) for row in self._row_data(ROW_COUNT)])
1802+
reversed([(row[0], row[2]) for row in self._row_data(row_count)])
17981803
)
17991804
self._check_rows_data(rows, expected)
18001805

18011806
def test_read_w_single_key(self):
18021807
# [START spanner_test_single_key_read]
1803-
ROW_COUNT = 40
1804-
committed = self._set_up_table(ROW_COUNT)
1808+
row_count = 40
1809+
committed = self._set_up_table(row_count)
18051810

18061811
with self._db.snapshot(read_timestamp=committed) as snapshot:
18071812
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(0,)])))
18081813

1809-
all_data_rows = list(self._row_data(ROW_COUNT))
1814+
all_data_rows = list(self._row_data(row_count))
18101815
expected = [all_data_rows[0]]
18111816
self._check_row_data(rows, expected)
18121817
# [END spanner_test_single_key_read]
18131818

18141819
def test_empty_read(self):
18151820
# [START spanner_test_empty_read]
1816-
ROW_COUNT = 40
1817-
self._set_up_table(ROW_COUNT)
1821+
row_count = 40
1822+
self._set_up_table(row_count)
18181823
with self._db.snapshot() as snapshot:
18191824
rows = list(snapshot.read(self.TABLE, self.COLUMNS, KeySet(keys=[(40,)])))
18201825
self._check_row_data(rows, [])
18211826
# [END spanner_test_empty_read]
18221827

18231828
def test_read_w_multiple_keys(self):
1824-
ROW_COUNT = 40
1829+
row_count = 40
18251830
indices = [0, 5, 17]
1826-
committed = self._set_up_table(ROW_COUNT)
1831+
committed = self._set_up_table(row_count)
18271832

18281833
with self._db.snapshot(read_timestamp=committed) as snapshot:
18291834
rows = list(
@@ -1834,58 +1839,58 @@ def test_read_w_multiple_keys(self):
18341839
)
18351840
)
18361841

1837-
all_data_rows = list(self._row_data(ROW_COUNT))
1842+
all_data_rows = list(self._row_data(row_count))
18381843
expected = [row for row in all_data_rows if row[0] in indices]
18391844
self._check_row_data(rows, expected)
18401845

18411846
def test_read_w_limit(self):
1842-
ROW_COUNT = 3000
1843-
LIMIT = 100
1844-
committed = self._set_up_table(ROW_COUNT)
1847+
row_count = 3000
1848+
limit = 100
1849+
committed = self._set_up_table(row_count)
18451850

18461851
with self._db.snapshot(read_timestamp=committed) as snapshot:
1847-
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=LIMIT))
1852+
rows = list(snapshot.read(self.TABLE, self.COLUMNS, self.ALL, limit=limit))
18481853

1849-
all_data_rows = list(self._row_data(ROW_COUNT))
1850-
expected = all_data_rows[:LIMIT]
1854+
all_data_rows = list(self._row_data(row_count))
1855+
expected = all_data_rows[:limit]
18511856
self._check_row_data(rows, expected)
18521857

18531858
def test_read_w_ranges(self):
1854-
ROW_COUNT = 3000
1855-
START = 1000
1856-
END = 2000
1857-
committed = self._set_up_table(ROW_COUNT)
1859+
row_count = 3000
1860+
start = 1000
1861+
end = 2000
1862+
committed = self._set_up_table(row_count)
18581863
with self._db.snapshot(read_timestamp=committed, multi_use=True) as snapshot:
1859-
all_data_rows = list(self._row_data(ROW_COUNT))
1864+
all_data_rows = list(self._row_data(row_count))
18601865

1861-
single_key = KeyRange(start_closed=[START], end_open=[START + 1])
1866+
single_key = KeyRange(start_closed=[start], end_open=[start + 1])
18621867
keyset = KeySet(ranges=(single_key,))
18631868
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
1864-
expected = all_data_rows[START : START + 1]
1869+
expected = all_data_rows[start : start + 1]
18651870
self._check_rows_data(rows, expected)
18661871

1867-
closed_closed = KeyRange(start_closed=[START], end_closed=[END])
1872+
closed_closed = KeyRange(start_closed=[start], end_closed=[end])
18681873
keyset = KeySet(ranges=(closed_closed,))
18691874
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
1870-
expected = all_data_rows[START : END + 1]
1875+
expected = all_data_rows[start : end + 1]
18711876
self._check_row_data(rows, expected)
18721877

1873-
closed_open = KeyRange(start_closed=[START], end_open=[END])
1878+
closed_open = KeyRange(start_closed=[start], end_open=[end])
18741879
keyset = KeySet(ranges=(closed_open,))
18751880
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
1876-
expected = all_data_rows[START:END]
1881+
expected = all_data_rows[start:end]
18771882
self._check_row_data(rows, expected)
18781883

1879-
open_open = KeyRange(start_open=[START], end_open=[END])
1884+
open_open = KeyRange(start_open=[start], end_open=[end])
18801885
keyset = KeySet(ranges=(open_open,))
18811886
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
1882-
expected = all_data_rows[START + 1 : END]
1887+
expected = all_data_rows[start + 1 : end]
18831888
self._check_row_data(rows, expected)
18841889

1885-
open_closed = KeyRange(start_open=[START], end_closed=[END])
1890+
open_closed = KeyRange(start_open=[start], end_closed=[end])
18861891
keyset = KeySet(ranges=(open_closed,))
18871892
rows = list(snapshot.read(self.TABLE, self.COLUMNS, keyset))
1888-
expected = all_data_rows[START + 1 : END + 1]
1893+
expected = all_data_rows[start + 1 : end + 1]
18891894
self._check_row_data(rows, expected)
18901895

18911896
def test_read_partial_range_until_end(self):
@@ -2129,8 +2134,8 @@ def test_partition_read_w_index(self):
21292134
batch_txn.close()
21302135

21312136
def test_execute_sql_w_manual_consume(self):
2132-
ROW_COUNT = 3000
2133-
committed = self._set_up_table(ROW_COUNT)
2137+
row_count = 3000
2138+
committed = self._set_up_table(row_count)
21342139

21352140
with self._db.snapshot(read_timestamp=committed) as snapshot:
21362141
streamed = snapshot.execute_sql(self.SQL)
@@ -2154,9 +2159,9 @@ def _check_sql_results(
21542159
self._check_rows_data(rows, expected=expected)
21552160

21562161
def test_multiuse_snapshot_execute_sql_isolation_strong(self):
2157-
ROW_COUNT = 40
2158-
self._set_up_table(ROW_COUNT)
2159-
all_data_rows = list(self._row_data(ROW_COUNT))
2162+
row_count = 40
2163+
self._set_up_table(row_count)
2164+
all_data_rows = list(self._row_data(row_count))
21602165
with self._db.snapshot(multi_use=True) as strong:
21612166

21622167
before = list(strong.execute_sql(self.SQL))
@@ -2169,22 +2174,22 @@ def test_multiuse_snapshot_execute_sql_isolation_strong(self):
21692174
self._check_row_data(after, all_data_rows)
21702175

21712176
def test_execute_sql_returning_array_of_struct(self):
2172-
SQL = (
2177+
sql = (
21732178
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
21742179
"FROM (SELECT 'a' AS C1, 1 AS C2 "
21752180
"UNION ALL SELECT 'b' AS C1, 2 AS C2) "
21762181
"ORDER BY C1 ASC)"
21772182
)
21782183
self._check_sql_results(
21792184
self._db,
2180-
sql=SQL,
2185+
sql=sql,
21812186
params=None,
21822187
param_types=None,
21832188
expected=[[[["a", 1], ["b", 2]]]],
21842189
)
21852190

21862191
def test_execute_sql_returning_empty_array_of_struct(self):
2187-
SQL = (
2192+
sql = (
21882193
"SELECT ARRAY(SELECT AS STRUCT C1, C2 "
21892194
"FROM (SELECT 2 AS C1) X "
21902195
"JOIN (SELECT 1 AS C2) Y "
@@ -2194,7 +2199,7 @@ def test_execute_sql_returning_empty_array_of_struct(self):
21942199
self._db.snapshot(multi_use=True)
21952200

21962201
self._check_sql_results(
2197-
self._db, sql=SQL, params=None, param_types=None, expected=[[[]]]
2202+
self._db, sql=sql, params=None, param_types=None, expected=[[[]]]
21982203
)
21992204

22002205
def test_invalid_type(self):
@@ -2359,11 +2364,11 @@ def test_execute_sql_w_numeric_bindings(self):
23592364
self._bind_test_helper(TypeCode.NUMERIC, NUMERIC_1, [NUMERIC_1, NUMERIC_2])
23602365

23612366
def test_execute_sql_w_query_param_struct(self):
2362-
NAME = "Phred"
2363-
COUNT = 123
2364-
SIZE = 23.456
2365-
HEIGHT = 188.0
2366-
WEIGHT = 97.6
2367+
name = "Phred"
2368+
count = 123
2369+
size = 23.456
2370+
height = 188.0
2371+
weight = 97.6
23672372

23682373
record_type = param_types.Struct(
23692374
[
@@ -2416,9 +2421,9 @@ def test_execute_sql_w_query_param_struct(self):
24162421
self._check_sql_results(
24172422
self._db,
24182423
sql="SELECT @r.name, @r.count, @r.size, @r.nested.weight",
2419-
params={"r": (NAME, COUNT, SIZE, (HEIGHT, WEIGHT))},
2424+
params={"r": (name, count, size, (height, weight))},
24202425
param_types={"r": record_type},
2421-
expected=[(NAME, COUNT, SIZE, WEIGHT)],
2426+
expected=[(name, count, size, weight)],
24222427
order=False,
24232428
)
24242429

0 commit comments

Comments (0)