Skip to content

Commit aa95442

Browse files
committed
Use 'grpcio-tools' in a virtualenv to generate latest Bigtable V2 protos.
Closes googleapis#1928. This is a better hack for googleapis#1482, but we still really want googleapis#1384.
1 parent 90cd2e7 commit aa95442

16 files changed

+1413
-716
lines changed

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,3 +57,4 @@ scripts/pylintrc_reduced
5757
generated_python/
5858
cloud-bigtable-client/
5959
googleapis-pb/
60+
grpc_python_venv/

Makefile.bigtable_v2

Lines changed: 20 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,8 @@
1+
GRPCIO_VIRTUALENV=$(shell pwd)/grpc_python_venv
12
GENERATED_DIR=$(shell pwd)/generated_python
23
GENERATED_SUBDIR=_generated_v2
34
BIGTABLE_DIR=$(shell pwd)/gcloud/bigtable/$(GENERATED_SUBDIR)
4-
GRPC_PLUGIN=grpc_python_plugin
5-
PROTOC_CMD=protoc
6-
BIGTABLE_CHECKOUT_DIR=$(shell pwd)/cloud-bigtable-client
7-
BIGTABLE_PROTOS_DIR=$(BIGTABLE_CHECKOUT_DIR)/bigtable-protos/src/main/proto
5+
PROTOC_CMD=$(GRPCIO_VIRTUALENV)/bin/python -m grpc.tools.protoc
86
GOOGLEAPIS_PROTOS_DIR=$(shell pwd)/googleapis-pb
97

108
help:
@@ -15,41 +13,40 @@ help:
1513
@echo ' make clean Clean generated files '
1614

1715
generate:
16+
# Ensure we have a virtualenv w/ up-to-date grpcio/grpcio-tools
17+
[ -d $(GRPCIO_VIRTUALENV) ] || python2.7 -m virtualenv $(GRPCIO_VIRTUALENV)
18+
$(GRPCIO_VIRTUALENV)/bin/pip install --upgrade grpcio grpcio-tools
1819
# Retrieve git repos that have our *.proto files.
19-
[ -d $(BIGTABLE_CHECKOUT_DIR) ] || git clone https://github.com/GoogleCloudPlatform/cloud-bigtable-client --depth=1
20-
cd $(BIGTABLE_CHECKOUT_DIR) && git pull origin master
2120
[ -d googleapis-pb ] || git clone https://github.com/google/googleapis googleapis-pb --depth=1
2221
cd googleapis-pb && git pull origin master
2322
# Make the directory where our *_pb2.py files will go.
2423
mkdir -p $(GENERATED_DIR)
2524
# Generate all *_pb2.py files that require gRPC.
2625
$(PROTOC_CMD) \
27-
--proto_path=$(BIGTABLE_PROTOS_DIR) \
26+
--proto_path=$(GOOGLEAPIS_PROTOS_DIR) \
2827
--python_out=$(GENERATED_DIR) \
29-
--plugin=protoc-gen-grpc=$(GRPC_PLUGIN) \
30-
--grpc_out=$(GENERATED_DIR) \
31-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/bigtable.proto \
32-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_instance_admin.proto \
33-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_table_admin.proto
28+
--grpc_python_out=$(GENERATED_DIR) \
29+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/bigtable.proto \
30+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_instance_admin.proto \
31+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/bigtable_table_admin.proto
3432
# Generate all *_pb2.py files that do not require gRPC.
3533
$(PROTOC_CMD) \
36-
--proto_path=$(BIGTABLE_PROTOS_DIR) \
3734
--proto_path=$(GOOGLEAPIS_PROTOS_DIR) \
3835
--python_out=$(GENERATED_DIR) \
39-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/data.proto \
40-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/common.proto \
41-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/instance.proto \
42-
$(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/table.proto \
36+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/data.proto \
37+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/common.proto \
38+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/instance.proto \
39+
$(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/table.proto \
4340
# Move the newly generated *_pb2.py files into our library.
4441
cp $(GENERATED_DIR)/google/bigtable/v2/* $(BIGTABLE_DIR)
4542
cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR)
4643
cp $(GENERATED_DIR)/google/bigtable/admin/v2/* $(BIGTABLE_DIR)
4744
# Remove all existing *.proto files before we replace
4845
rm -f $(BIGTABLE_DIR)/*.proto
4946
# Copy over the *.proto files into our library.
50-
cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/v2/*.proto $(BIGTABLE_DIR)
51-
cp $(BIGTABLE_PROTOS_DIR)/google/bigtable/admin/v2/*.proto $(BIGTABLE_DIR)
52-
cp $(BIGTABLE_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR)
47+
cp $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/v2/*.proto $(BIGTABLE_DIR)
48+
cp $(GOOGLEAPIS_PROTOS_DIR)/google/bigtable/admin/v2/*.proto $(BIGTABLE_DIR)
49+
cp $(GOOGLEAPIS_PROTOS_DIR)/google/longrunning/operations.proto $(BIGTABLE_DIR)
5350
# Rename all *.proto files in our library with an
5451
# underscore and remove executable bit.
5552
cd $(BIGTABLE_DIR) && \
@@ -60,8 +57,8 @@ generate:
6057
# Separate the gRPC parts of the operations service from the
6158
# non-gRPC parts so that the protos from `googleapis-common-protos`
6259
# can be used without gRPC.
63-
PROTOC_CMD=$(PROTOC_CMD) GRPC_PLUGIN=$(GRPC_PLUGIN) \
64-
GENERATED_SUBDIR=$(GENERATED_SUBDIR) \
60+
GRPCIO_VIRTUALENV="$(GRPCIO_VIRTUALENV)" \
61+
GENERATED_SUBDIR=$(GENERATED_SUBDIR) \
6562
python scripts/make_operations_grpc.py
6663
# Rewrite the imports in the generated *_pb2.py files.
6764
python scripts/rewrite_imports.py $(BIGTABLE_DIR)/*pb2.py
@@ -70,6 +67,6 @@ check_generate:
7067
python scripts/check_generate.py
7168

7269
clean:
73-
rm -fr $(BIGTABLE_CHECKOUT_DIR) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR)
70+
rm -fr $(GRPCIO_VIRTUALENV) $(GOOGLEAPIS_PROTOS_DIR) $(GENERATED_DIR)
7471

7572
.PHONY: generate check_generate clean

gcloud/bigtable/_generated_v2/_bigtable.proto

Lines changed: 31 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -27,18 +27,12 @@ option java_package = "com.google.bigtable.v2";
2727

2828

2929
// Service for reading from and writing to existing Bigtable tables.
30-
//
31-
// Caution: This service is experimental. The details can change and the rpcs
32-
// may or may not be active.
3330
service Bigtable {
3431
// Streams back the contents of all requested rows, optionally
3532
// applying the same Reader filter to each. Depending on their size,
3633
// rows and cells may be broken up across multiple responses, but
3734
// atomicity of each row will still be preserved. See the
3835
// ReadRowsResponse documentation for details.
39-
//
40-
// Caution: This rpc is experimental. The details can change and the rpc
41-
// may or may not be active.
4236
rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
4337
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" };
4438
}
@@ -47,47 +41,33 @@ service Bigtable {
4741
// delimit contiguous sections of the table of approximately equal size,
4842
// which can be used to break up the data for distributed tasks like
4943
// mapreduces.
50-
//
51-
// Caution: This rpc is experimental. The details can change and the rpc
52-
// may or may not be active.
5344
rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
5445
option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" };
5546
}
5647

5748
// Mutates a row atomically. Cells already present in the row are left
58-
// unchanged unless explicitly changed by 'mutation'.
59-
//
60-
// Caution: This rpc is experimental. The details can change and the rpc
61-
// may or may not be active.
49+
// unchanged unless explicitly changed by `mutation`.
6250
rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
6351
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" };
6452
}
6553

6654
// Mutates multiple rows in a batch. Each individual row is mutated
6755
// atomically as in MutateRow, but the entire batch is not executed
6856
// atomically.
69-
//
70-
// Caution: This rpc is experimental. The details can change and the rpc
71-
// may or may not be active.
7257
rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
7358
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" };
7459
}
7560

7661
// Mutates a row atomically based on the output of a predicate Reader filter.
77-
//
78-
// Caution: This rpc is experimental. The details can change and the rpc
79-
// may or may not be active.
8062
rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
8163
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" };
8264
}
8365

84-
// Modifies a row atomically, reading the latest existing timestamp/value from
85-
// the specified columns and writing a new value at
86-
// max(existing timestamp, current server time) based on pre-defined
87-
// read/modify/write rules. Returns the new contents of all modified cells.
88-
//
89-
// Caution: This rpc is experimental. The details can change and the rpc
90-
// may or may not be active.
66+
// Modifies a row atomically. The method reads the latest existing timestamp
67+
// and value from the specified columns and writes a new entry based on
68+
// pre-defined read/modify/write rules. The new value for the timestamp is the
69+
// greater of the existing timestamp or the current server time. The method
70+
// returns the new contents of all modified cells.
9171
rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) {
9272
option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" };
9373
}
@@ -97,7 +77,7 @@ service Bigtable {
9777
message ReadRowsRequest {
9878
// The unique name of the table from which to read.
9979
// Values are of the form
100-
// projects/<project>/instances/<instance>/tables/<table>
80+
// projects/<project>/instances/<instance>/tables/<table>
10181
string table_name = 1;
10282

10383
// The row keys and/or ranges to read. If not specified, reads from all rows.
@@ -128,22 +108,22 @@ message ReadRowsResponse {
128108
// family as the previous CellChunk. The empty string can occur as a
129109
// column family name in a response so clients must check
130110
// explicitly for the presence of this message, not just for
131-
// family_name.value being non-empty.
111+
// `family_name.value` being non-empty.
132112
google.protobuf.StringValue family_name = 2;
133113

134114
// The column qualifier for this chunk of data. If this message
135115
// is not present, this CellChunk is a continuation of the same column
136116
// as the previous CellChunk. Column qualifiers may be empty so
137117
// clients must check for the presence of this message, not just
138-
// for qualifier.value being non-empty.
118+
// for `qualifier.value` being non-empty.
139119
google.protobuf.BytesValue qualifier = 3;
140120

141121
// The cell's stored timestamp, which also uniquely identifies it
142122
// within its column. Values are always expressed in
143123
// microseconds, but individual tables may set a coarser
144-
// "granularity" to further restrict the allowed values. For
124+
// granularity to further restrict the allowed values. For
145125
// example, a table which specifies millisecond granularity will
146-
// only allow values of "timestamp_micros" which are multiples of
126+
// only allow values of `timestamp_micros` which are multiples of
147127
// 1000. Timestamps are only set in the first CellChunk per cell
148128
// (for cells split into multiple chunks).
149129
int64 timestamp_micros = 4;
@@ -168,11 +148,11 @@ message ReadRowsResponse {
168148

169149
oneof row_status {
170150
// Indicates that the client should drop all previous chunks for
171-
// "row_key", as it will be re-read from the beginning.
151+
// `row_key`, as it will be re-read from the beginning.
172152
bool reset_row = 8;
173153

174154
// Indicates that the client can safely process all previous chunks for
175-
// "row_key", as its data has been fully read.
155+
// `row_key`, as its data has been fully read.
176156
bool commit_row = 9;
177157
}
178158
}
@@ -193,7 +173,7 @@ message ReadRowsResponse {
193173
message SampleRowKeysRequest {
194174
// The unique name of the table from which to sample row keys.
195175
// Values are of the form
196-
// projects/<project>/instances/<instance>/tables/<table>
176+
// projects/&lt;project&gt;/instances/&lt;instance&gt;/tables/&lt;table&gt;
197177
string table_name = 1;
198178
}
199179

@@ -209,17 +189,17 @@ message SampleRowKeysResponse {
209189
bytes row_key = 1;
210190

211191
// Approximate total storage space used by all rows in the table which precede
212-
// "row_key". Buffering the contents of all rows between two subsequent
192+
// `row_key`. Buffering the contents of all rows between two subsequent
213193
// samples would require space roughly equal to the difference in their
214-
// "offset_bytes" fields.
194+
// `offset_bytes` fields.
215195
int64 offset_bytes = 2;
216196
}
217197

218198
// Request message for Bigtable.MutateRow.
219199
message MutateRowRequest {
220200
// The unique name of the table to which the mutation should be applied.
221201
// Values are of the form
222-
// projects/<project>/instances/<instance>/tables/<table>
202+
// projects/<project>/instances/<instance>/tables/<table>
223203
string table_name = 1;
224204

225205
// The key of the row to which the mutation should be applied.
@@ -245,17 +225,17 @@ message MutateRowsRequest {
245225
// Changes to be atomically applied to the specified row. Mutations are
246226
// applied in order, meaning that earlier mutations can be masked by
247227
// later ones.
248-
// At least one mutation must be specified.
228+
// You must specify at least one mutation.
249229
repeated Mutation mutations = 2;
250230
}
251231

252232
// The unique name of the table to which the mutations should be applied.
253233
string table_name = 1;
254234

255-
// The row keys/mutations to be applied in bulk.
235+
// The row keys and corresponding mutations to be applied in bulk.
256236
// Each entry is applied as an atomic mutation, but the entries may be
257237
// applied in arbitrary order (even between entries for the same row).
258-
// At least one entry must be specified, and in total the entries may
238+
// At least one entry must be specified, and in total the entries can
259239
// contain at most 100000 mutations.
260240
repeated Entry entries = 2;
261241
}
@@ -283,36 +263,36 @@ message CheckAndMutateRowRequest {
283263
// The unique name of the table to which the conditional mutation should be
284264
// applied.
285265
// Values are of the form
286-
// projects/<project>/instances/<instance>/tables/<table>
266+
// projects/<project>/instances/<instance>/tables/<table>
287267
string table_name = 1;
288268

289269
// The key of the row to which the conditional mutation should be applied.
290270
bytes row_key = 2;
291271

292272
// The filter to be applied to the contents of the specified row. Depending
293-
// on whether or not any results are yielded, either "true_mutations" or
294-
// "false_mutations" will be executed. If unset, checks that the row contains
273+
// on whether or not any results are yielded, either `true_mutations` or
274+
// `false_mutations` will be executed. If unset, checks that the row contains
295275
// any values at all.
296276
RowFilter predicate_filter = 6;
297277

298-
// Changes to be atomically applied to the specified row if "predicate_filter"
299-
// yields at least one cell when applied to "row_key". Entries are applied in
278+
// Changes to be atomically applied to the specified row if `predicate_filter`
279+
// yields at least one cell when applied to `row_key`. Entries are applied in
300280
// order, meaning that earlier mutations can be masked by later ones.
301-
// Must contain at least one entry if "false_mutations" is empty, and at most
281+
// Must contain at least one entry if `false_mutations` is empty, and at most
302282
// 100000.
303283
repeated Mutation true_mutations = 4;
304284

305-
// Changes to be atomically applied to the specified row if "predicate_filter"
306-
// does not yield any cells when applied to "row_key". Entries are applied in
285+
// Changes to be atomically applied to the specified row if `predicate_filter`
286+
// does not yield any cells when applied to `row_key`. Entries are applied in
307287
// order, meaning that earlier mutations can be masked by later ones.
308-
// Must contain at least one entry if "true_mutations" is empty, and at most
288+
// Must contain at least one entry if `true_mutations` is empty, and at most
309289
// 100000.
310290
repeated Mutation false_mutations = 5;
311291
}
312292

313293
// Response message for Bigtable.CheckAndMutateRow.
314294
message CheckAndMutateRowResponse {
315-
// Whether or not the request's "predicate_filter" yielded any results for
295+
// Whether or not the request's `predicate_filter` yielded any results for
316296
// the specified row.
317297
bool predicate_matched = 1;
318298
}
@@ -322,7 +302,7 @@ message ReadModifyWriteRowRequest {
322302
// The unique name of the table to which the read/modify/write rules should be
323303
// applied.
324304
// Values are of the form
325-
// projects/<project>/instances/<instance>/tables/<table>
305+
// projects/&lt;project&gt;/instances/&lt;instance&gt;/tables/&lt;table&gt;
326306
string table_name = 1;
327307

328308
// The key of the row to which the read/modify/write rules should be applied.

0 commit comments

Comments
 (0)