responseObserver) {
io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(
getReadChangeStreamMethod(), responseObserver);
}
+ }
+
+ /**
+ * Base class for the server implementation of the service Bigtable.
+ *
+ *
+ * Service for reading from and writing to existing Bigtable tables.
+ *
+ */
+ public abstract static class BigtableImplBase implements io.grpc.BindableService, AsyncService {
@java.lang.Override
public final io.grpc.ServerServiceDefinition bindService() {
- return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
- .addMethod(
- getReadRowsMethod(),
- io.grpc.stub.ServerCalls.asyncServerStreamingCall(
- new MethodHandlers<
- com.google.bigtable.v2.ReadRowsRequest,
- com.google.bigtable.v2.ReadRowsResponse>(this, METHODID_READ_ROWS)))
- .addMethod(
- getSampleRowKeysMethod(),
- io.grpc.stub.ServerCalls.asyncServerStreamingCall(
- new MethodHandlers<
- com.google.bigtable.v2.SampleRowKeysRequest,
- com.google.bigtable.v2.SampleRowKeysResponse>(
- this, METHODID_SAMPLE_ROW_KEYS)))
- .addMethod(
- getMutateRowMethod(),
- io.grpc.stub.ServerCalls.asyncUnaryCall(
- new MethodHandlers<
- com.google.bigtable.v2.MutateRowRequest,
- com.google.bigtable.v2.MutateRowResponse>(this, METHODID_MUTATE_ROW)))
- .addMethod(
- getMutateRowsMethod(),
- io.grpc.stub.ServerCalls.asyncServerStreamingCall(
- new MethodHandlers<
- com.google.bigtable.v2.MutateRowsRequest,
- com.google.bigtable.v2.MutateRowsResponse>(this, METHODID_MUTATE_ROWS)))
- .addMethod(
- getCheckAndMutateRowMethod(),
- io.grpc.stub.ServerCalls.asyncUnaryCall(
- new MethodHandlers<
- com.google.bigtable.v2.CheckAndMutateRowRequest,
- com.google.bigtable.v2.CheckAndMutateRowResponse>(
- this, METHODID_CHECK_AND_MUTATE_ROW)))
- .addMethod(
- getPingAndWarmMethod(),
- io.grpc.stub.ServerCalls.asyncUnaryCall(
- new MethodHandlers<
- com.google.bigtable.v2.PingAndWarmRequest,
- com.google.bigtable.v2.PingAndWarmResponse>(this, METHODID_PING_AND_WARM)))
- .addMethod(
- getReadModifyWriteRowMethod(),
- io.grpc.stub.ServerCalls.asyncUnaryCall(
- new MethodHandlers<
- com.google.bigtable.v2.ReadModifyWriteRowRequest,
- com.google.bigtable.v2.ReadModifyWriteRowResponse>(
- this, METHODID_READ_MODIFY_WRITE_ROW)))
- .addMethod(
- getGenerateInitialChangeStreamPartitionsMethod(),
- io.grpc.stub.ServerCalls.asyncServerStreamingCall(
- new MethodHandlers<
- com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest,
- com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse>(
- this, METHODID_GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS)))
- .addMethod(
- getReadChangeStreamMethod(),
- io.grpc.stub.ServerCalls.asyncServerStreamingCall(
- new MethodHandlers<
- com.google.bigtable.v2.ReadChangeStreamRequest,
- com.google.bigtable.v2.ReadChangeStreamResponse>(
- this, METHODID_READ_CHANGE_STREAM)))
- .build();
+ return BigtableGrpc.bindService(this);
}
}
/**
- *
+ * A stub to allow clients to do asynchronous rpc calls to service Bigtable.
*
*
* Service for reading from and writing to existing Bigtable tables.
@@ -881,7 +831,7 @@ public void readChangeStream(
}
/**
- *
+ * A stub to allow clients to do synchronous rpc calls to service Bigtable.
*
*
* Service for reading from and writing to existing Bigtable tables.
@@ -1042,7 +992,7 @@ public java.util.Iterator readC
}
/**
- *
+ * A stub to allow clients to do ListenableFuture-style rpc calls to service Bigtable.
*
*
* Service for reading from and writing to existing Bigtable tables.
@@ -1137,10 +1087,10 @@ private static final class MethodHandlers
io.grpc.stub.ServerCalls.ServerStreamingMethod,
io.grpc.stub.ServerCalls.ClientStreamingMethod,
io.grpc.stub.ServerCalls.BidiStreamingMethod {
- private final BigtableImplBase serviceImpl;
+ private final AsyncService serviceImpl;
private final int methodId;
- MethodHandlers(BigtableImplBase serviceImpl, int methodId) {
+ MethodHandlers(AsyncService serviceImpl, int methodId) {
this.serviceImpl = serviceImpl;
this.methodId = methodId;
}
@@ -1220,6 +1170,70 @@ public io.grpc.stub.StreamObserver invoke(
}
}
+ public static final io.grpc.ServerServiceDefinition bindService(AsyncService service) {
+ return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor())
+ .addMethod(
+ getReadRowsMethod(),
+ io.grpc.stub.ServerCalls.asyncServerStreamingCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.ReadRowsRequest,
+ com.google.bigtable.v2.ReadRowsResponse>(service, METHODID_READ_ROWS)))
+ .addMethod(
+ getSampleRowKeysMethod(),
+ io.grpc.stub.ServerCalls.asyncServerStreamingCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.SampleRowKeysRequest,
+ com.google.bigtable.v2.SampleRowKeysResponse>(
+ service, METHODID_SAMPLE_ROW_KEYS)))
+ .addMethod(
+ getMutateRowMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.MutateRowRequest,
+ com.google.bigtable.v2.MutateRowResponse>(service, METHODID_MUTATE_ROW)))
+ .addMethod(
+ getMutateRowsMethod(),
+ io.grpc.stub.ServerCalls.asyncServerStreamingCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.MutateRowsRequest,
+ com.google.bigtable.v2.MutateRowsResponse>(service, METHODID_MUTATE_ROWS)))
+ .addMethod(
+ getCheckAndMutateRowMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.CheckAndMutateRowRequest,
+ com.google.bigtable.v2.CheckAndMutateRowResponse>(
+ service, METHODID_CHECK_AND_MUTATE_ROW)))
+ .addMethod(
+ getPingAndWarmMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.PingAndWarmRequest,
+ com.google.bigtable.v2.PingAndWarmResponse>(service, METHODID_PING_AND_WARM)))
+ .addMethod(
+ getReadModifyWriteRowMethod(),
+ io.grpc.stub.ServerCalls.asyncUnaryCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.ReadModifyWriteRowRequest,
+ com.google.bigtable.v2.ReadModifyWriteRowResponse>(
+ service, METHODID_READ_MODIFY_WRITE_ROW)))
+ .addMethod(
+ getGenerateInitialChangeStreamPartitionsMethod(),
+ io.grpc.stub.ServerCalls.asyncServerStreamingCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest,
+ com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse>(
+ service, METHODID_GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS)))
+ .addMethod(
+ getReadChangeStreamMethod(),
+ io.grpc.stub.ServerCalls.asyncServerStreamingCall(
+ new MethodHandlers<
+ com.google.bigtable.v2.ReadChangeStreamRequest,
+ com.google.bigtable.v2.ReadChangeStreamResponse>(
+ service, METHODID_READ_CHANGE_STREAM)))
+ .build();
+ }
+
private abstract static class BigtableBaseDescriptorSupplier
implements io.grpc.protobuf.ProtoFileDescriptorSupplier,
io.grpc.protobuf.ProtoServiceDescriptorSupplier {
diff --git a/owlbot.py b/owlbot.py
index 2daf394127..8b33b41998 100644
--- a/owlbot.py
+++ b/owlbot.py
@@ -109,4 +109,5 @@ def make_internal_only(sources):
'codecov.yaml'
# needed for extraFiles
'.github/release-please.yml',
+ 'renovate.json',
])
diff --git a/pom.xml b/pom.xml
index 6d1501a968..e07b065746 100644
--- a/pom.xml
+++ b/pom.xml
@@ -4,7 +4,7 @@
google-cloud-bigtable-parent
pom
- 2.19.2
+ 2.20.3
Google Cloud Bigtable Parent
https://github.com/googleapis/java-bigtable
@@ -153,33 +153,33 @@
com.google.api.grpc
proto-google-cloud-bigtable-v2
- 2.19.2
+ 2.20.3
com.google.api.grpc
proto-google-cloud-bigtable-admin-v2
- 2.19.2
+ 2.20.3
com.google.api.grpc
grpc-google-cloud-bigtable-v2
- 2.19.2
+ 2.20.3
com.google.api.grpc
grpc-google-cloud-bigtable-admin-v2
- 2.19.2
+ 2.20.3
com.google.cloud
google-cloud-bigtable
- 2.19.2
+ 2.20.3
com.google.cloud
google-cloud-conformance-tests
- 0.3.4
+ 0.3.5
com.google.truth
@@ -226,7 +226,7 @@
org.apache.maven.plugins
maven-javadoc-plugin
- 3.4.1
+ 3.5.0
aggregate
@@ -317,7 +317,7 @@
org.apache.maven.plugins
maven-javadoc-plugin
- 3.4.1
+ 3.5.0
com.microsoft.doclet.DocFxDoclet
diff --git a/proto-google-cloud-bigtable-admin-v2/pom.xml b/proto-google-cloud-bigtable-admin-v2/pom.xml
index d0ab4c144f..9135aefe07 100644
--- a/proto-google-cloud-bigtable-admin-v2/pom.xml
+++ b/proto-google-cloud-bigtable-admin-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigtable-admin-v2
- 2.19.2
+ 2.20.3
proto-google-cloud-bigtable-admin-v2
PROTO library for proto-google-cloud-bigtable-admin-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.19.2
+ 2.20.3
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.19.2
+ 2.20.3
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.19.2
+ 2.20.3
pom
import
diff --git a/proto-google-cloud-bigtable-v2/pom.xml b/proto-google-cloud-bigtable-v2/pom.xml
index 2d6de218d9..0b54c3e05f 100644
--- a/proto-google-cloud-bigtable-v2/pom.xml
+++ b/proto-google-cloud-bigtable-v2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigtable-v2
- 2.19.2
+ 2.20.3
proto-google-cloud-bigtable-v2
PROTO library for proto-google-cloud-bigtable-v2
com.google.cloud
google-cloud-bigtable-parent
- 2.19.2
+ 2.20.3
@@ -18,14 +18,14 @@
com.google.cloud
google-cloud-bigtable-deps-bom
- 2.19.2
+ 2.20.3
pom
import
com.google.cloud
google-cloud-bigtable-bom
- 2.19.2
+ 2.20.3
pom
import
diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java
index ba23c35367..87ba2cbdd2 100644
--- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java
+++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/BigtableProto.java
@@ -225,7 +225,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "able.v2.StreamContinuationTokensH\000\022,\n\010en"
+ "d_time\030\005 \001(\0132\032.google.protobuf.Timestamp"
+ "\0225\n\022heartbeat_duration\030\007 \001(\0132\031.google.pr"
- + "otobuf.DurationB\014\n\nstart_from\"\353\t\n\030ReadCh"
+ + "otobuf.DurationB\014\n\nstart_from\"\251\n\n\030ReadCh"
+ "angeStreamResponse\022N\n\013data_change\030\001 \001(\0132"
+ "7.google.bigtable.v2.ReadChangeStreamRes"
+ "ponse.DataChangeH\000\022K\n\theartbeat\030\002 \001(\01326."
@@ -253,99 +253,101 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "\003\032\221\001\n\tHeartbeat\022G\n\022continuation_token\030\001 "
+ "\001(\0132+.google.bigtable.v2.StreamContinuat"
+ "ionToken\022;\n\027estimated_low_watermark\030\002 \001("
- + "\0132\032.google.protobuf.Timestamp\032{\n\013CloseSt"
- + "ream\022\"\n\006status\030\001 \001(\0132\022.google.rpc.Status"
- + "\022H\n\023continuation_tokens\030\002 \003(\0132+.google.b"
- + "igtable.v2.StreamContinuationTokenB\017\n\rst"
- + "ream_record2\327\030\n\010Bigtable\022\233\002\n\010ReadRows\022#."
- + "google.bigtable.v2.ReadRowsRequest\032$.goo"
- + "gle.bigtable.v2.ReadRowsResponse\"\301\001\202\323\344\223\002"
- + ">\"9/v2/{table_name=projects/*/instances/"
- + "*/tables/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_n"
- + "ame\022,{table_name=projects/*/instances/*/"
- + "tables/*}\022\020\n\016app_profile_id\332A\ntable_name"
- + "\332A\031table_name,app_profile_id0\001\022\254\002\n\rSampl"
- + "eRowKeys\022(.google.bigtable.v2.SampleRowK"
- + "eysRequest\032).google.bigtable.v2.SampleRo"
- + "wKeysResponse\"\303\001\202\323\344\223\002@\022>/v2/{table_name="
- + "projects/*/instances/*/tables/*}:sampleR"
- + "owKeys\212\323\344\223\002N\022:\n\ntable_name\022,{table_name="
+ + "\0132\032.google.protobuf.Timestamp\032\270\001\n\013CloseS"
+ + "tream\022\"\n\006status\030\001 \001(\0132\022.google.rpc.Statu"
+ + "s\022H\n\023continuation_tokens\030\002 \003(\0132+.google."
+ + "bigtable.v2.StreamContinuationToken\022;\n\016n"
+ + "ew_partitions\030\003 \003(\0132#.google.bigtable.v2"
+ + ".StreamPartitionB\017\n\rstream_record2\327\030\n\010Bi"
+ + "gtable\022\233\002\n\010ReadRows\022#.google.bigtable.v2"
+ + ".ReadRowsRequest\032$.google.bigtable.v2.Re"
+ + "adRowsResponse\"\301\001\202\323\344\223\002>\"9/v2/{table_name"
+ + "=projects/*/instances/*/tables/*}:readRo"
+ + "ws:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=p"
+ + "rojects/*/instances/*/tables/*}\022\020\n\016app_p"
+ + "rofile_id\332A\ntable_name\332A\031table_name,app_"
+ + "profile_id0\001\022\254\002\n\rSampleRowKeys\022(.google."
+ + "bigtable.v2.SampleRowKeysRequest\032).googl"
+ + "e.bigtable.v2.SampleRowKeysResponse\"\303\001\202\323"
+ + "\344\223\002@\022>/v2/{table_name=projects/*/instanc"
+ + "es/*/tables/*}:sampleRowKeys\212\323\344\223\002N\022:\n\nta"
+ + "ble_name\022,{table_name=projects/*/instanc"
+ + "es/*/tables/*}\022\020\n\016app_profile_id\332A\ntable"
+ + "_name\332A\031table_name,app_profile_id0\001\022\301\002\n\t"
+ + "MutateRow\022$.google.bigtable.v2.MutateRow"
+ + "Request\032%.google.bigtable.v2.MutateRowRe"
+ + "sponse\"\346\001\202\323\344\223\002?\":/v2/{table_name=project"
+ + "s/*/instances/*/tables/*}:mutateRow:\001*\212\323"
+ + "\344\223\002N\022:\n\ntable_name\022,{table_name=projects"
+ + "/*/instances/*/tables/*}\022\020\n\016app_profile_"
+ + "id\332A\034table_name,row_key,mutations\332A+tabl"
+ + "e_name,row_key,mutations,app_profile_id\022"
+ + "\263\002\n\nMutateRows\022%.google.bigtable.v2.Muta"
+ + "teRowsRequest\032&.google.bigtable.v2.Mutat"
+ + "eRowsResponse\"\323\001\202\323\344\223\002@\";/v2/{table_name="
+ + "projects/*/instances/*/tables/*}:mutateR"
+ + "ows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name="
+ "projects/*/instances/*/tables/*}\022\020\n\016app_"
- + "profile_id\332A\ntable_name\332A\031table_name,app"
- + "_profile_id0\001\022\301\002\n\tMutateRow\022$.google.big"
- + "table.v2.MutateRowRequest\032%.google.bigta"
- + "ble.v2.MutateRowResponse\"\346\001\202\323\344\223\002?\":/v2/{"
+ + "profile_id\332A\022table_name,entries\332A!table_"
+ + "name,entries,app_profile_id0\001\022\255\003\n\021CheckA"
+ + "ndMutateRow\022,.google.bigtable.v2.CheckAn"
+ + "dMutateRowRequest\032-.google.bigtable.v2.C"
+ + "heckAndMutateRowResponse\"\272\002\202\323\344\223\002G\"B/v2/{"
+ "table_name=projects/*/instances/*/tables"
- + "/*}:mutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{t"
- + "able_name=projects/*/instances/*/tables/"
- + "*}\022\020\n\016app_profile_id\332A\034table_name,row_ke"
- + "y,mutations\332A+table_name,row_key,mutatio"
- + "ns,app_profile_id\022\263\002\n\nMutateRows\022%.googl"
- + "e.bigtable.v2.MutateRowsRequest\032&.google"
- + ".bigtable.v2.MutateRowsResponse\"\323\001\202\323\344\223\002@"
- + "\";/v2/{table_name=projects/*/instances/*"
- + "/tables/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_"
+ + "/*}:checkAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_"
+ "name\022,{table_name=projects/*/instances/*"
- + "/tables/*}\022\020\n\016app_profile_id\332A\022table_nam"
- + "e,entries\332A!table_name,entries,app_profi"
- + "le_id0\001\022\255\003\n\021CheckAndMutateRow\022,.google.b"
- + "igtable.v2.CheckAndMutateRowRequest\032-.go"
- + "ogle.bigtable.v2.CheckAndMutateRowRespon"
- + "se\"\272\002\202\323\344\223\002G\"B/v2/{table_name=projects/*/"
- + "instances/*/tables/*}:checkAndMutateRow:"
- + "\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=proj"
- + "ects/*/instances/*/tables/*}\022\020\n\016app_prof"
- + "ile_id\332ABtable_name,row_key,predicate_fi"
- + "lter,true_mutations,false_mutations\332AQta"
- + "ble_name,row_key,predicate_filter,true_m"
- + "utations,false_mutations,app_profile_id\022"
- + "\356\001\n\013PingAndWarm\022&.google.bigtable.v2.Pin"
- + "gAndWarmRequest\032\'.google.bigtable.v2.Pin"
- + "gAndWarmResponse\"\215\001\202\323\344\223\002+\"&/v2/{name=pro"
- + "jects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004na"
- + "me\022\035{name=projects/*/instances/*}\022\020\n\016app"
- + "_profile_id\332A\004name\332A\023name,app_profile_id"
- + "\022\335\002\n\022ReadModifyWriteRow\022-.google.bigtabl"
- + "e.v2.ReadModifyWriteRowRequest\032..google."
- + "bigtable.v2.ReadModifyWriteRowResponse\"\347"
- + "\001\202\323\344\223\002H\"C/v2/{table_name=projects/*/inst"
- + "ances/*/tables/*}:readModifyWriteRow:\001*\212"
- + "\323\344\223\002N\022:\n\ntable_name\022,{table_name=project"
- + "s/*/instances/*/tables/*}\022\020\n\016app_profile"
- + "_id\332A\030table_name,row_key,rules\332A\'table_n"
- + "ame,row_key,rules,app_profile_id\022\273\002\n%Gen"
- + "erateInitialChangeStreamPartitions\022@.goo"
- + "gle.bigtable.v2.GenerateInitialChangeStr"
- + "eamPartitionsRequest\032A.google.bigtable.v"
- + "2.GenerateInitialChangeStreamPartitionsR"
- + "esponse\"\212\001\202\323\344\223\002[\"V/v2/{table_name=projec"
- + "ts/*/instances/*/tables/*}:generateIniti"
- + "alChangeStreamPartitions:\001*\332A\ntable_name"
- + "\332A\031table_name,app_profile_id0\001\022\346\001\n\020ReadC"
- + "hangeStream\022+.google.bigtable.v2.ReadCha"
- + "ngeStreamRequest\032,.google.bigtable.v2.Re"
- + "adChangeStreamResponse\"u\202\323\344\223\002F\"A/v2/{tab"
- + "le_name=projects/*/instances/*/tables/*}"
- + ":readChangeStream:\001*\332A\ntable_name\332A\031tabl"
- + "e_name,app_profile_id0\001\032\333\002\312A\027bigtable.go"
- + "ogleapis.com\322A\275\002https://www.googleapis.c"
- + "om/auth/bigtable.data,https://www.google"
- + "apis.com/auth/bigtable.data.readonly,htt"
- + "ps://www.googleapis.com/auth/cloud-bigta"
- + "ble.data,https://www.googleapis.com/auth"
- + "/cloud-bigtable.data.readonly,https://ww"
- + "w.googleapis.com/auth/cloud-platform,htt"
- + "ps://www.googleapis.com/auth/cloud-platf"
- + "orm.read-onlyB\353\002\n\026com.google.bigtable.v2"
- + "B\rBigtableProtoP\001Z:google.golang.org/gen"
- + "proto/googleapis/bigtable/v2;bigtable\252\002\030"
- + "Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\"
- + "Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V"
- + "2\352AP\n%bigtableadmin.googleapis.com/Insta"
- + "nce\022\'projects/{project}/instances/{insta"
- + "nce}\352A\\\n\"bigtableadmin.googleapis.com/Ta"
- + "ble\0226projects/{project}/instances/{insta"
- + "nce}/tables/{table}b\006proto3"
+ + "/tables/*}\022\020\n\016app_profile_id\332ABtable_nam"
+ + "e,row_key,predicate_filter,true_mutation"
+ + "s,false_mutations\332AQtable_name,row_key,p"
+ + "redicate_filter,true_mutations,false_mut"
+ + "ations,app_profile_id\022\356\001\n\013PingAndWarm\022&."
+ + "google.bigtable.v2.PingAndWarmRequest\032\'."
+ + "google.bigtable.v2.PingAndWarmResponse\"\215"
+ + "\001\202\323\344\223\002+\"&/v2/{name=projects/*/instances/"
+ + "*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects"
+ + "/*/instances/*}\022\020\n\016app_profile_id\332A\004name"
+ + "\332A\023name,app_profile_id\022\335\002\n\022ReadModifyWri"
+ + "teRow\022-.google.bigtable.v2.ReadModifyWri"
+ + "teRowRequest\032..google.bigtable.v2.ReadMo"
+ + "difyWriteRowResponse\"\347\001\202\323\344\223\002H\"C/v2/{tabl"
+ + "e_name=projects/*/instances/*/tables/*}:"
+ + "readModifyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_nam"
+ + "e\022,{table_name=projects/*/instances/*/ta"
+ + "bles/*}\022\020\n\016app_profile_id\332A\030table_name,r"
+ + "ow_key,rules\332A\'table_name,row_key,rules,"
+ + "app_profile_id\022\273\002\n%GenerateInitialChange"
+ + "StreamPartitions\022@.google.bigtable.v2.Ge"
+ + "nerateInitialChangeStreamPartitionsReque"
+ + "st\032A.google.bigtable.v2.GenerateInitialC"
+ + "hangeStreamPartitionsResponse\"\212\001\202\323\344\223\002[\"V"
+ + "/v2/{table_name=projects/*/instances/*/t"
+ + "ables/*}:generateInitialChangeStreamPart"
+ + "itions:\001*\332A\ntable_name\332A\031table_name,app_"
+ + "profile_id0\001\022\346\001\n\020ReadChangeStream\022+.goog"
+ + "le.bigtable.v2.ReadChangeStreamRequest\032,"
+ + ".google.bigtable.v2.ReadChangeStreamResp"
+ + "onse\"u\202\323\344\223\002F\"A/v2/{table_name=projects/*"
+ + "/instances/*/tables/*}:readChangeStream:"
+ + "\001*\332A\ntable_name\332A\031table_name,app_profile"
+ + "_id0\001\032\333\002\312A\027bigtable.googleapis.com\322A\275\002ht"
+ + "tps://www.googleapis.com/auth/bigtable.d"
+ + "ata,https://www.googleapis.com/auth/bigt"
+ + "able.data.readonly,https://www.googleapi"
+ + "s.com/auth/cloud-bigtable.data,https://w"
+ + "ww.googleapis.com/auth/cloud-bigtable.da"
+ + "ta.readonly,https://www.googleapis.com/a"
+ + "uth/cloud-platform,https://www.googleapi"
+ + "s.com/auth/cloud-platform.read-onlyB\353\002\n\026"
+ + "com.google.bigtable.v2B\rBigtableProtoP\001Z"
+ + ":google.golang.org/genproto/googleapis/b"
+ + "igtable/v2;bigtable\252\002\030Google.Cloud.Bigta"
+ + "ble.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Goog"
+ + "le::Cloud::Bigtable::V2\352AP\n%bigtableadmi"
+ + "n.googleapis.com/Instance\022\'projects/{pro"
+ + "ject}/instances/{instance}\352A\\\n\"bigtablea"
+ + "dmin.googleapis.com/Table\0226projects/{pro"
+ + "ject}/instances/{instance}/tables/{table"
+ + "}b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -604,7 +606,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_bigtable_v2_ReadChangeStreamResponse_CloseStream_descriptor,
new java.lang.String[] {
- "Status", "ContinuationTokens",
+ "Status", "ContinuationTokens", "NewPartitions",
});
com.google.protobuf.ExtensionRegistry registry =
com.google.protobuf.ExtensionRegistry.newInstance();
diff --git a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java
index 083b170f36..0739e1101a 100644
--- a/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java
+++ b/proto-google-cloud-bigtable-v2/src/main/java/com/google/bigtable/v2/ReadChangeStreamResponse.java
@@ -6100,8 +6100,8 @@ public interface CloseStreamOrBuilder
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6111,8 +6111,8 @@ public interface CloseStreamOrBuilder
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6122,8 +6122,8 @@ public interface CloseStreamOrBuilder
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6133,8 +6133,8 @@ public interface CloseStreamOrBuilder
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6145,24 +6145,101 @@ public interface CloseStreamOrBuilder
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
*/
com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTokensOrBuilder(
int index);
+
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ java.util.List getNewPartitionsList();
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ com.google.bigtable.v2.StreamPartition getNewPartitions(int index);
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ int getNewPartitionsCount();
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ java.util.List extends com.google.bigtable.v2.StreamPartitionOrBuilder>
+ getNewPartitionsOrBuilderList();
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ com.google.bigtable.v2.StreamPartitionOrBuilder getNewPartitionsOrBuilder(int index);
}
/**
*
*
*
* A message indicating that the client should stop reading from the stream.
- * If status is OK and `continuation_tokens` is empty, the stream has finished
- * (for example if there was an `end_time` specified).
- * If `continuation_tokens` is present, then a change in partitioning requires
- * the client to open a new stream for each token to resume reading.
+ * If status is OK and `continuation_tokens` & `new_partitions` are empty, the
+ * stream has finished (for example if there was an `end_time` specified).
+ * If `continuation_tokens` & `new_partitions` are present, then a change in
+ * partitioning requires the client to open a new stream for each token to
+ * resume reading. Example:
+ * [B, D) ends
+ * |
+ * v
+ * new_partitions: [A, C) [C, E)
+ * continuation_tokens.partitions: [B,C) [C,D)
+ * ^---^ ^---^
+ * ^ ^
+ * | |
+ * | StreamContinuationToken 2
+ * |
+ * StreamContinuationToken 1
+ * To read the new partition [A,C), supply the continuation tokens whose
+ * ranges cover the new partition, for example ContinuationToken[A,B) &
+ * ContinuationToken[B,C).
*
*
* Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.CloseStream}
@@ -6179,6 +6256,7 @@ private CloseStream(com.google.protobuf.GeneratedMessageV3.Builder> builder) {
private CloseStream() {
continuationTokens_ = java.util.Collections.emptyList();
+ newPartitions_ = java.util.Collections.emptyList();
}
@java.lang.Override
@@ -6261,8 +6339,8 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6276,8 +6354,8 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6291,8 +6369,8 @@ public com.google.rpc.StatusOrBuilder getStatusOrBuilder() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6305,8 +6383,8 @@ public int getContinuationTokensCount() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6319,8 +6397,8 @@ public com.google.bigtable.v2.StreamContinuationToken getContinuationTokens(int
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6331,6 +6409,87 @@ public com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTo
return continuationTokens_.get(index);
}
+ public static final int NEW_PARTITIONS_FIELD_NUMBER = 3;
+
+ @SuppressWarnings("serial")
+ private java.util.List newPartitions_;
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ @java.lang.Override
+ public java.util.List getNewPartitionsList() {
+ return newPartitions_;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ @java.lang.Override
+ public java.util.List extends com.google.bigtable.v2.StreamPartitionOrBuilder>
+ getNewPartitionsOrBuilderList() {
+ return newPartitions_;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ @java.lang.Override
+ public int getNewPartitionsCount() {
+ return newPartitions_.size();
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ @java.lang.Override
+ public com.google.bigtable.v2.StreamPartition getNewPartitions(int index) {
+ return newPartitions_.get(index);
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ @java.lang.Override
+ public com.google.bigtable.v2.StreamPartitionOrBuilder getNewPartitionsOrBuilder(int index) {
+ return newPartitions_.get(index);
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -6351,6 +6510,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < continuationTokens_.size(); i++) {
output.writeMessage(2, continuationTokens_.get(i));
}
+ for (int i = 0; i < newPartitions_.size(); i++) {
+ output.writeMessage(3, newPartitions_.get(i));
+ }
getUnknownFields().writeTo(output);
}
@@ -6367,6 +6529,9 @@ public int getSerializedSize() {
size +=
com.google.protobuf.CodedOutputStream.computeMessageSize(2, continuationTokens_.get(i));
}
+ for (int i = 0; i < newPartitions_.size(); i++) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, newPartitions_.get(i));
+ }
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
@@ -6388,6 +6553,7 @@ public boolean equals(final java.lang.Object obj) {
if (!getStatus().equals(other.getStatus())) return false;
}
if (!getContinuationTokensList().equals(other.getContinuationTokensList())) return false;
+ if (!getNewPartitionsList().equals(other.getNewPartitionsList())) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@@ -6407,6 +6573,10 @@ public int hashCode() {
hash = (37 * hash) + CONTINUATION_TOKENS_FIELD_NUMBER;
hash = (53 * hash) + getContinuationTokensList().hashCode();
}
+ if (getNewPartitionsCount() > 0) {
+ hash = (37 * hash) + NEW_PARTITIONS_FIELD_NUMBER;
+ hash = (53 * hash) + getNewPartitionsList().hashCode();
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -6514,10 +6684,25 @@ protected Builder newBuilderForType(
*
*
* A message indicating that the client should stop reading from the stream.
- * If status is OK and `continuation_tokens` is empty, the stream has finished
- * (for example if there was an `end_time` specified).
- * If `continuation_tokens` is present, then a change in partitioning requires
- * the client to open a new stream for each token to resume reading.
+ * If status is OK and `continuation_tokens` & `new_partitions` are empty, the
+ * stream has finished (for example if there was an `end_time` specified).
+ * If `continuation_tokens` & `new_partitions` are present, then a change in
+ * partitioning requires the client to open a new stream for each token to
+ * resume reading. Example:
+ * [B, D) ends
+ * |
+ * v
+ * new_partitions: [A, C) [C, E)
+ * continuation_tokens.partitions: [B,C) [C,D)
+ * ^---^ ^---^
+ * ^ ^
+ * | |
+ * | StreamContinuationToken 2
+ * |
+ * StreamContinuationToken 1
+ * To read the new partition [A,C), supply the continuation tokens whose
+ * ranges cover the new partition, for example ContinuationToken[A,B) &
+ * ContinuationToken[B,C).
*
*
* Protobuf type {@code google.bigtable.v2.ReadChangeStreamResponse.CloseStream}
@@ -6565,6 +6750,13 @@ public Builder clear() {
continuationTokensBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
+ if (newPartitionsBuilder_ == null) {
+ newPartitions_ = java.util.Collections.emptyList();
+ } else {
+ newPartitions_ = null;
+ newPartitionsBuilder_.clear();
+ }
+ bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -6612,6 +6804,15 @@ private void buildPartialRepeatedFields(
} else {
result.continuationTokens_ = continuationTokensBuilder_.build();
}
+ if (newPartitionsBuilder_ == null) {
+ if (((bitField0_ & 0x00000004) != 0)) {
+ newPartitions_ = java.util.Collections.unmodifiableList(newPartitions_);
+ bitField0_ = (bitField0_ & ~0x00000004);
+ }
+ result.newPartitions_ = newPartitions_;
+ } else {
+ result.newPartitions_ = newPartitionsBuilder_.build();
+ }
}
private void buildPartial0(
@@ -6701,6 +6902,33 @@ public Builder mergeFrom(com.google.bigtable.v2.ReadChangeStreamResponse.CloseSt
}
}
}
+ if (newPartitionsBuilder_ == null) {
+ if (!other.newPartitions_.isEmpty()) {
+ if (newPartitions_.isEmpty()) {
+ newPartitions_ = other.newPartitions_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ } else {
+ ensureNewPartitionsIsMutable();
+ newPartitions_.addAll(other.newPartitions_);
+ }
+ onChanged();
+ }
+ } else {
+ if (!other.newPartitions_.isEmpty()) {
+ if (newPartitionsBuilder_.isEmpty()) {
+ newPartitionsBuilder_.dispose();
+ newPartitionsBuilder_ = null;
+ newPartitions_ = other.newPartitions_;
+ bitField0_ = (bitField0_ & ~0x00000004);
+ newPartitionsBuilder_ =
+ com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
+ ? getNewPartitionsFieldBuilder()
+ : null;
+ } else {
+ newPartitionsBuilder_.addAllMessages(other.newPartitions_);
+ }
+ }
+ }
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
@@ -6747,6 +6975,19 @@ public Builder mergeFrom(
}
break;
} // case 18
+ case 26:
+ {
+ com.google.bigtable.v2.StreamPartition m =
+ input.readMessage(
+ com.google.bigtable.v2.StreamPartition.parser(), extensionRegistry);
+ if (newPartitionsBuilder_ == null) {
+ ensureNewPartitionsIsMutable();
+ newPartitions_.add(m);
+ } else {
+ newPartitionsBuilder_.addMessage(m);
+ }
+ break;
+ } // case 26
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
@@ -6962,8 +7203,8 @@ private void ensureContinuationTokensIsMutable() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6980,8 +7221,8 @@ private void ensureContinuationTokensIsMutable() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -6997,8 +7238,8 @@ public int getContinuationTokensCount() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7014,8 +7255,8 @@ public com.google.bigtable.v2.StreamContinuationToken getContinuationTokens(int
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7038,8 +7279,8 @@ public Builder setContinuationTokens(
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7059,8 +7300,8 @@ public Builder setContinuationTokens(
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7082,8 +7323,8 @@ public Builder addContinuationTokens(com.google.bigtable.v2.StreamContinuationTo
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7106,8 +7347,8 @@ public Builder addContinuationTokens(
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7127,8 +7368,8 @@ public Builder addContinuationTokens(
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7148,8 +7389,8 @@ public Builder addContinuationTokens(
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7169,8 +7410,8 @@ public Builder addAllContinuationTokens(
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7189,8 +7430,8 @@ public Builder clearContinuationTokens() {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7209,8 +7450,8 @@ public Builder removeContinuationTokens(int index) {
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7223,8 +7464,8 @@ public com.google.bigtable.v2.StreamContinuationToken.Builder getContinuationTok
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7241,8 +7482,8 @@ public com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTo
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7259,8 +7500,8 @@ public com.google.bigtable.v2.StreamContinuationTokenOrBuilder getContinuationTo
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7273,8 +7514,8 @@ public com.google.bigtable.v2.StreamContinuationToken.Builder addContinuationTok
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7288,8 +7529,8 @@ public com.google.bigtable.v2.StreamContinuationToken.Builder addContinuationTok
*
*
*
- * If non-empty, contains the information needed to start reading the new
- * partition(s) that contain segments of this partition's row range.
+ * If non-empty, contains the information needed to resume reading their
+ * associated partitions.
*
*
* repeated .google.bigtable.v2.StreamContinuationToken continuation_tokens = 2;
@@ -7319,6 +7560,396 @@ public com.google.bigtable.v2.StreamContinuationToken.Builder addContinuationTok
return continuationTokensBuilder_;
}
+ private java.util.List<com.google.bigtable.v2.StreamPartition> newPartitions_ =
+ java.util.Collections.emptyList();
+
+ private void ensureNewPartitionsIsMutable() {
+ if (!((bitField0_ & 0x00000004) != 0)) {
+ newPartitions_ =
+ new java.util.ArrayList<com.google.bigtable.v2.StreamPartition>(newPartitions_);
+ bitField0_ |= 0x00000004;
+ }
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.bigtable.v2.StreamPartition,
+ com.google.bigtable.v2.StreamPartition.Builder,
+ com.google.bigtable.v2.StreamPartitionOrBuilder>
+ newPartitionsBuilder_;
+
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public java.util.List<com.google.bigtable.v2.StreamPartition> getNewPartitionsList() {
+ if (newPartitionsBuilder_ == null) {
+ return java.util.Collections.unmodifiableList(newPartitions_);
+ } else {
+ return newPartitionsBuilder_.getMessageList();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public int getNewPartitionsCount() {
+ if (newPartitionsBuilder_ == null) {
+ return newPartitions_.size();
+ } else {
+ return newPartitionsBuilder_.getCount();
+ }
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public com.google.bigtable.v2.StreamPartition getNewPartitions(int index) {
+ if (newPartitionsBuilder_ == null) {
+ return newPartitions_.get(index);
+ } else {
+ return newPartitionsBuilder_.getMessage(index);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder setNewPartitions(int index, com.google.bigtable.v2.StreamPartition value) {
+ if (newPartitionsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNewPartitionsIsMutable();
+ newPartitions_.set(index, value);
+ onChanged();
+ } else {
+ newPartitionsBuilder_.setMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder setNewPartitions(
+ int index, com.google.bigtable.v2.StreamPartition.Builder builderForValue) {
+ if (newPartitionsBuilder_ == null) {
+ ensureNewPartitionsIsMutable();
+ newPartitions_.set(index, builderForValue.build());
+ onChanged();
+ } else {
+ newPartitionsBuilder_.setMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder addNewPartitions(com.google.bigtable.v2.StreamPartition value) {
+ if (newPartitionsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNewPartitionsIsMutable();
+ newPartitions_.add(value);
+ onChanged();
+ } else {
+ newPartitionsBuilder_.addMessage(value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder addNewPartitions(int index, com.google.bigtable.v2.StreamPartition value) {
+ if (newPartitionsBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ ensureNewPartitionsIsMutable();
+ newPartitions_.add(index, value);
+ onChanged();
+ } else {
+ newPartitionsBuilder_.addMessage(index, value);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder addNewPartitions(
+ com.google.bigtable.v2.StreamPartition.Builder builderForValue) {
+ if (newPartitionsBuilder_ == null) {
+ ensureNewPartitionsIsMutable();
+ newPartitions_.add(builderForValue.build());
+ onChanged();
+ } else {
+ newPartitionsBuilder_.addMessage(builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder addNewPartitions(
+ int index, com.google.bigtable.v2.StreamPartition.Builder builderForValue) {
+ if (newPartitionsBuilder_ == null) {
+ ensureNewPartitionsIsMutable();
+ newPartitions_.add(index, builderForValue.build());
+ onChanged();
+ } else {
+ newPartitionsBuilder_.addMessage(index, builderForValue.build());
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder addAllNewPartitions(
+ java.lang.Iterable<? extends com.google.bigtable.v2.StreamPartition> values) {
+ if (newPartitionsBuilder_ == null) {
+ ensureNewPartitionsIsMutable();
+ com.google.protobuf.AbstractMessageLite.Builder.addAll(values, newPartitions_);
+ onChanged();
+ } else {
+ newPartitionsBuilder_.addAllMessages(values);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder clearNewPartitions() {
+ if (newPartitionsBuilder_ == null) {
+ newPartitions_ = java.util.Collections.emptyList();
+ bitField0_ = (bitField0_ & ~0x00000004);
+ onChanged();
+ } else {
+ newPartitionsBuilder_.clear();
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public Builder removeNewPartitions(int index) {
+ if (newPartitionsBuilder_ == null) {
+ ensureNewPartitionsIsMutable();
+ newPartitions_.remove(index);
+ onChanged();
+ } else {
+ newPartitionsBuilder_.remove(index);
+ }
+ return this;
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public com.google.bigtable.v2.StreamPartition.Builder getNewPartitionsBuilder(int index) {
+ return getNewPartitionsFieldBuilder().getBuilder(index);
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public com.google.bigtable.v2.StreamPartitionOrBuilder getNewPartitionsOrBuilder(int index) {
+ if (newPartitionsBuilder_ == null) {
+ return newPartitions_.get(index);
+ } else {
+ return newPartitionsBuilder_.getMessageOrBuilder(index);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public java.util.List<? extends com.google.bigtable.v2.StreamPartitionOrBuilder>
+ getNewPartitionsOrBuilderList() {
+ if (newPartitionsBuilder_ != null) {
+ return newPartitionsBuilder_.getMessageOrBuilderList();
+ } else {
+ return java.util.Collections.unmodifiableList(newPartitions_);
+ }
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public com.google.bigtable.v2.StreamPartition.Builder addNewPartitionsBuilder() {
+ return getNewPartitionsFieldBuilder()
+ .addBuilder(com.google.bigtable.v2.StreamPartition.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public com.google.bigtable.v2.StreamPartition.Builder addNewPartitionsBuilder(int index) {
+ return getNewPartitionsFieldBuilder()
+ .addBuilder(index, com.google.bigtable.v2.StreamPartition.getDefaultInstance());
+ }
+ /**
+ *
+ *
+ *
+ * If non-empty, contains the new partitions to start reading from, which
+ * are related to but not necessarily identical to the partitions for the
+ * above `continuation_tokens`.
+ *
+ *
+ * repeated .google.bigtable.v2.StreamPartition new_partitions = 3;
+ */
+ public java.util.List<com.google.bigtable.v2.StreamPartition.Builder>
+ getNewPartitionsBuilderList() {
+ return getNewPartitionsFieldBuilder().getBuilderList();
+ }
+
+ private com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.bigtable.v2.StreamPartition,
+ com.google.bigtable.v2.StreamPartition.Builder,
+ com.google.bigtable.v2.StreamPartitionOrBuilder>
+ getNewPartitionsFieldBuilder() {
+ if (newPartitionsBuilder_ == null) {
+ newPartitionsBuilder_ =
+ new com.google.protobuf.RepeatedFieldBuilderV3<
+ com.google.bigtable.v2.StreamPartition,
+ com.google.bigtable.v2.StreamPartition.Builder,
+ com.google.bigtable.v2.StreamPartitionOrBuilder>(
+ newPartitions_,
+ ((bitField0_ & 0x00000004) != 0),
+ getParentForChildren(),
+ isClean());
+ newPartitions_ = null;
+ }
+ return newPartitionsBuilder_;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(
final com.google.protobuf.UnknownFieldSet unknownFields) {
diff --git a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto
index c85e0cfc8c..098d17e3e7 100644
--- a/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto
+++ b/proto-google-cloud-bigtable-v2/src/main/proto/google/bigtable/v2/bigtable.proto
@@ -788,17 +788,37 @@ message ReadChangeStreamResponse {
}
// A message indicating that the client should stop reading from the stream.
- // If status is OK and `continuation_tokens` is empty, the stream has finished
- // (for example if there was an `end_time` specified).
- // If `continuation_tokens` is present, then a change in partitioning requires
- // the client to open a new stream for each token to resume reading.
+ // If status is OK and `continuation_tokens` & `new_partitions` are empty, the
+ // stream has finished (for example if there was an `end_time` specified).
+ // If `continuation_tokens` & `new_partitions` are present, then a change in
+ // partitioning requires the client to open a new stream for each token to
+ // resume reading. Example:
+ // [B, D) ends
+ // |
+ // v
+ // new_partitions: [A, C) [C, E)
+ // continuation_tokens.partitions: [B,C) [C,D)
+ // ^---^ ^---^
+ // ^ ^
+ // | |
+ // | StreamContinuationToken 2
+ // |
+ // StreamContinuationToken 1
+ // To read the new partition [A,C), supply the continuation tokens whose
+ // ranges cover the new partition, for example ContinuationToken[A,B) &
+ // ContinuationToken[B,C).
message CloseStream {
// The status of the stream.
google.rpc.Status status = 1;
- // If non-empty, contains the information needed to start reading the new
- // partition(s) that contain segments of this partition's row range.
+ // If non-empty, contains the information needed to resume reading their
+ // associated partitions.
repeated StreamContinuationToken continuation_tokens = 2;
+
+ // If non-empty, contains the new partitions to start reading from, which
+ // are related to but not necessarily identical to the partitions for the
+ // above `continuation_tokens`.
+ repeated StreamPartition new_partitions = 3;
}
// The data or control message on the stream.
diff --git a/renovate.json b/renovate.json
index 2543edb1a8..7494258c9a 100644
--- a/renovate.json
+++ b/renovate.json
@@ -68,6 +68,28 @@
"^com.fasterxml.jackson.core"
],
"groupName": "jackson dependencies"
+ },
+ {
+ "packagePatterns": [
+ "^com.google.cloud:google-cloud-shared-dependencies",
+ "^com.google.protobuf:protoc",
+ "^io.grpc:protoc-gen-grpc-java"
+ ],
+ "groupName": "shared dependencies"
+ }
+ ],
+ "regexManagers": [
+ {
+ "fileMatch": ["^pom\\.xml$"],
+ "matchStrings": ["\\<grpc\\.version\\>(?<currentValue>.*?)\\<\\/grpc\\.version\\>"],
+ "depNameTemplate": "io.grpc:protoc-gen-grpc-java",
+ "datasourceTemplate": "maven"
+ },
+ {
+ "fileMatch": ["^pom\\.xml$"],
+ "matchStrings": ["\\<protobuf\\.version\\>(?<currentValue>.*?)\\<\\/protobuf\\.version\\>"],
+ "depNameTemplate": "com.google.protobuf:protoc",
+ "datasourceTemplate": "maven"
}
],
"semanticCommits": true,
diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml
index d4cb9fac5f..7e407955ed 100644
--- a/samples/install-without-bom/pom.xml
+++ b/samples/install-without-bom/pom.xml
@@ -29,7 +29,7 @@
com.google.cloud
google-cloud-bigtable
- 2.19.1
+ 2.20.2
diff --git a/samples/native-image-sample/pom.xml b/samples/native-image-sample/pom.xml
index 1f8ff485ac..954eb06312 100644
--- a/samples/native-image-sample/pom.xml
+++ b/samples/native-image-sample/pom.xml
@@ -29,7 +29,7 @@
com.google.cloud
libraries-bom
- 26.8.0
+ 26.11.0
pom
import
diff --git a/samples/pom.xml b/samples/pom.xml
index 4254f54811..e05832ffc6 100644
--- a/samples/pom.xml
+++ b/samples/pom.xml
@@ -39,7 +39,7 @@
org.apache.maven.plugins
maven-deploy-plugin
- 3.0.0
+ 3.1.1
true
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index af5085f6a7..1011de9c95 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -28,7 +28,7 @@
com.google.cloud
google-cloud-bigtable
- 2.19.2
+ 2.20.3
diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml
index f58844f347..d072f99023 100644
--- a/samples/snippets/pom.xml
+++ b/samples/snippets/pom.xml
@@ -30,7 +30,7 @@
com.google.cloud
libraries-bom
- 26.8.0
+ 26.11.0
pom
import
diff --git a/test-proxy/pom.xml b/test-proxy/pom.xml
index e2326f96d7..162785dc7e 100644
--- a/test-proxy/pom.xml
+++ b/test-proxy/pom.xml
@@ -12,11 +12,11 @@
google-cloud-bigtable-parent
com.google.cloud
- 2.19.2
+ 2.20.3
- 2.19.2
+ 2.20.3
diff --git a/test-proxy/src/main/java/com/google/cloud/bigtable/testproxy/CbtTestProxy.java b/test-proxy/src/main/java/com/google/cloud/bigtable/testproxy/CbtTestProxy.java
index c668ca5e30..c14d4b4186 100644
--- a/test-proxy/src/main/java/com/google/cloud/bigtable/testproxy/CbtTestProxy.java
+++ b/test-proxy/src/main/java/com/google/cloud/bigtable/testproxy/CbtTestProxy.java
@@ -60,6 +60,7 @@
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Iterator;
+import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
@@ -488,7 +489,8 @@ public void readRows(ReadRowsRequest request, StreamObserver respons
/**
* Helper method to convert row from type com.google.cloud.bigtable.data.v2.models.Row to type
* com.google.bigtable.v2.Row. After conversion, row cells within the same column and family are
- * grouped and ordered; but the ordering of family and qualifier is not preserved.
+ * grouped and ordered; the ordering of qualifiers within the same family is preserved; but the
+ * ordering of families is not (the original order is not specified after all).
*
* @param row Logical row of type com.google.cloud.bigtable.data.v2.models.Row
* @return the converted row in RowResult Builder
@@ -502,7 +504,9 @@ private static RowResult.Builder convertRowResult(
row.getCells().stream()
.collect(
Collectors.groupingBy(
- RowCell::getFamily, Collectors.groupingBy(RowCell::getQualifier)));
+ RowCell::getFamily,
+ Collectors.groupingBy(
+ RowCell::getQualifier, LinkedHashMap::new, Collectors.toList())));
 for (Map.Entry<String, Map<ByteString, List<RowCell>>> e : grouped.entrySet()) {
Family.Builder family = rowBuilder.addFamiliesBuilder().setName(e.getKey());
diff --git a/versions.txt b/versions.txt
index 98b9735ca9..10506013e2 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,10 +1,10 @@
# Format:
# module:released-version:current-version
-google-cloud-bigtable:2.19.2:2.19.2
-grpc-google-cloud-bigtable-admin-v2:2.19.2:2.19.2
-grpc-google-cloud-bigtable-v2:2.19.2:2.19.2
-proto-google-cloud-bigtable-admin-v2:2.19.2:2.19.2
-proto-google-cloud-bigtable-v2:2.19.2:2.19.2
-google-cloud-bigtable-emulator:0.156.2:0.156.2
-google-cloud-bigtable-emulator-core:2.19.2:2.19.2
+google-cloud-bigtable:2.20.3:2.20.3
+grpc-google-cloud-bigtable-admin-v2:2.20.3:2.20.3
+grpc-google-cloud-bigtable-v2:2.20.3:2.20.3
+proto-google-cloud-bigtable-admin-v2:2.20.3:2.20.3
+proto-google-cloud-bigtable-v2:2.20.3:2.20.3
+google-cloud-bigtable-emulator:0.157.3:0.157.3
+google-cloud-bigtable-emulator-core:2.20.3:2.20.3