ReadRows
Reads rows from the stream in the format prescribed by the ReadSession. Each response contains one or more table rows, up to a maximum of 128 MB per response; read requests which attempt to read individual rows larger than 128 MB will fail. (Previous documentation stated this limit as 100 MiB; the limit wording was updated to 128 MB.)
Each request also returns a set of stream statistics reflecting the current state of the stream.
Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.
@@ -414,8 +414,8 @@ public final UnaryCallable
Each request also returns a set of stream statistics reflecting the current state of the
* stream.
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java
index 53797bb049..2da1b0f617 100644
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java
+++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java
@@ -33,6 +33,8 @@
import com.google.cloud.bigquery.storage.v1.StreamWriter.SingleConnectionOrConnectionPool.Kind;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import com.google.protobuf.ByteString;
import io.grpc.Status;
@@ -48,7 +50,9 @@
import java.util.Map.Entry;
import java.util.Objects;
import java.util.UUID;
+import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.locks.Lock;
@@ -75,8 +79,15 @@ public class StreamWriter implements AutoCloseable {
private static Pattern streamPatternDefaultStream = Pattern.compile(defaultStreamMatching);
// Cache of location info for a given dataset.
- private static Map
* Reads rows from the stream in the format prescribed by the ReadSession.
- * Each response contains one or more table rows, up to a maximum of 100 MiB
+ * Each response contains one or more table rows, up to a maximum of 128 MB
* per response; read requests which attempt to read individual rows larger
- * than 100 MiB will fail.
+ * than 128 MB will fail.
* Each request also returns a set of stream statistics reflecting the current
* state of the stream.
*
@@ -389,9 +386,9 @@ public void createReadSession(
*
*
* Reads rows from the stream in the format prescribed by the ReadSession.
- * Each response contains one or more table rows, up to a maximum of 100 MiB
+ * Each response contains one or more table rows, up to a maximum of 128 MB
* per response; read requests which attempt to read individual rows larger
- * than 100 MiB will fail.
+ * than 128 MB will fail.
* Each request also returns a set of stream statistics reflecting the current
* state of the stream.
*
@@ -475,8 +472,9 @@ protected BigQueryReadBlockingV2Stub build(
*
*/
public com.google.cloud.bigquery.storage.v1.ReadSession createReadSession(
- com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getCreateReadSessionMethod(), getCallOptions(), request);
}
@@ -485,9 +483,9 @@ public com.google.cloud.bigquery.storage.v1.ReadSession createReadSession(
*
*
* Reads rows from the stream in the format prescribed by the ReadSession.
- * Each response contains one or more table rows, up to a maximum of 100 MiB
+ * Each response contains one or more table rows, up to a maximum of 128 MB
* per response; read requests which attempt to read individual rows larger
- * than 100 MiB will fail.
+ * than 128 MB will fail.
* Each request also returns a set of stream statistics reflecting the current
* state of the stream.
*
@@ -517,8 +515,9 @@ public com.google.cloud.bigquery.storage.v1.ReadSession createReadSession(
*
*/
public com.google.cloud.bigquery.storage.v1.SplitReadStreamResponse splitReadStream(
- com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.SplitReadStreamRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getSplitReadStreamMethod(), getCallOptions(), request);
}
}
@@ -576,9 +575,9 @@ public com.google.cloud.bigquery.storage.v1.ReadSession createReadSession(
*
*
* Reads rows from the stream in the format prescribed by the ReadSession.
- * Each response contains one or more table rows, up to a maximum of 100 MiB
+ * Each response contains one or more table rows, up to a maximum of 128 MB
* per response; read requests which attempt to read individual rows larger
- * than 100 MiB will fail.
+ * than 128 MB will fail.
* Each request also returns a set of stream statistics reflecting the current
* state of the stream.
*
diff --git a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java
index 4dff99d0fd..fcbc2af218 100644
--- a/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java
+++ b/grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryWriteGrpc.java
@@ -27,9 +27,6 @@
* https://cloud.google.com/bigquery/docs/write-api
*
*/
-@javax.annotation.Generated(
- value = "by gRPC proto compiler",
- comments = "Source: google/cloud/bigquery/storage/v1/storage.proto")
@io.grpc.stub.annotations.GrpcGenerated
public final class BigQueryWriteGrpc {
@@ -734,8 +731,9 @@ protected BigQueryWriteBlockingV2Stub build(
*
*/
public com.google.cloud.bigquery.storage.v1.WriteStream createWriteStream(
- com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.CreateWriteStreamRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request);
}
@@ -786,8 +784,9 @@ public com.google.cloud.bigquery.storage.v1.WriteStream createWriteStream(
*
*/
public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream(
- com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getGetWriteStreamMethod(), getCallOptions(), request);
}
@@ -800,8 +799,9 @@ public com.google.cloud.bigquery.storage.v1.WriteStream getWriteStream(
*
*/
public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse finalizeWriteStream(
- com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request);
}
@@ -818,8 +818,9 @@ public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse finalize
*/
public com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsResponse
batchCommitWriteStreams(
- com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.BatchCommitWriteStreamsRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request);
}
@@ -836,8 +837,9 @@ public com.google.cloud.bigquery.storage.v1.FinalizeWriteStreamResponse finalize
*
*/
public com.google.cloud.bigquery.storage.v1.FlushRowsResponse flushRows(
- com.google.cloud.bigquery.storage.v1.FlushRowsRequest request) {
- return io.grpc.stub.ClientCalls.blockingUnaryCall(
+ com.google.cloud.bigquery.storage.v1.FlushRowsRequest request)
+ throws io.grpc.StatusException {
+ return io.grpc.stub.ClientCalls.blockingV2UnaryCall(
getChannel(), getFlushRowsMethod(), getCallOptions(), request);
}
}
diff --git a/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml b/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml
index 5a4f9e5e20..f9823bd91d 100644
--- a/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml
+++ b/grpc-google-cloud-bigquerystorage-v1alpha/pom.xml
@@ -4,13 +4,13 @@
* Arrow schema and data.
- * Arrow format is an experimental feature only selected for allowlisted
- * customers.
*
*
* Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData}
@@ -676,8 +674,6 @@ protected Builder newBuilderForType(
*
*
* Arrow schema and data.
- * Arrow format is an experimental feature only selected for allowlisted
- * customers.
*
*
* Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData}
@@ -1368,8 +1364,8 @@ public interface ProtoDataOrBuilder
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -1388,8 +1384,8 @@ public interface ProtoDataOrBuilder
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -1408,8 +1404,8 @@ public interface ProtoDataOrBuilder
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -1426,7 +1422,7 @@ public interface ProtoDataOrBuilder
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -1442,7 +1438,7 @@ public interface ProtoDataOrBuilder
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -1458,7 +1454,7 @@ public interface ProtoDataOrBuilder
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -1521,8 +1517,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -1544,8 +1540,8 @@ public boolean hasWriterSchema() {
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -1569,8 +1565,8 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() {
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -1595,7 +1591,7 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchema
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -1614,7 +1610,7 @@ public boolean hasRows() {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -1635,7 +1631,7 @@ public com.google.cloud.bigquery.storage.v1.ProtoRows getRows() {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2082,8 +2078,8 @@ public Builder mergeFrom(
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2104,8 +2100,8 @@ public boolean hasWriterSchema() {
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2132,8 +2128,8 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() {
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2162,8 +2158,8 @@ public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2190,8 +2186,8 @@ public Builder setWriterSchema(
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2226,8 +2222,8 @@ public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchem
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2253,8 +2249,8 @@ public Builder clearWriterSchema() {
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2275,8 +2271,8 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder getWriterSchemaB
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2301,8 +2297,8 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchema
*
*
*
- * The protocol buffer schema used to serialize the data. Provide this value
- * whenever:
+ * Optional. The protocol buffer schema used to serialize the data. Provide
+ * this value whenever:
*
* * You send the first request of an RPC connection.
*
@@ -2341,7 +2337,7 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchema
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2359,7 +2355,7 @@ public boolean hasRows() {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2383,7 +2379,7 @@ public com.google.cloud.bigquery.storage.v1.ProtoRows getRows() {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2409,7 +2405,7 @@ public Builder setRows(com.google.cloud.bigquery.storage.v1.ProtoRows value) {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2433,7 +2429,7 @@ public Builder setRows(
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2464,7 +2460,7 @@ public Builder mergeRows(com.google.cloud.bigquery.storage.v1.ProtoRows value) {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2487,7 +2483,7 @@ public Builder clearRows() {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2505,7 +2501,7 @@ public com.google.cloud.bigquery.storage.v1.ProtoRows.Builder getRowsBuilder() {
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2527,7 +2523,7 @@ public com.google.cloud.bigquery.storage.v1.ProtoRowsOrBuilder getRowsOrBuilder(
*
*
*
- * Serialized row data in protobuf message format.
+ * Required. Serialized row data in protobuf message format.
* Currently, the backend expects the serialized rows to adhere to
* proto2 semantics when appending rows, particularly with respect to
* how default values are encoded.
@@ -2908,8 +2904,7 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ProtoData getProto
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -2925,8 +2920,7 @@ public boolean hasArrowRows() {
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -2945,8 +2939,7 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData getArrow
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -3368,8 +3361,8 @@ public int getMissingValueInterpretationsValueOrThrow(java.lang.String key) {
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -3394,8 +3387,8 @@ public int getDefaultMissingValueInterpretationValue() {
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -4787,8 +4780,7 @@ public Builder clearProtoRows() {
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4804,8 +4796,7 @@ public boolean hasArrowRows() {
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4833,8 +4824,7 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData getArrow
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4858,8 +4848,7 @@ public Builder setArrowRows(
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4880,8 +4869,7 @@ public Builder setArrowRows(
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4917,8 +4905,7 @@ public Builder mergeArrowRows(
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4944,8 +4931,7 @@ public Builder clearArrowRows() {
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4959,8 +4945,7 @@ public Builder clearArrowRows() {
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -4983,8 +4968,7 @@ public Builder clearArrowRows() {
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -5678,8 +5662,8 @@ public Builder putAllMissingValueInterpretationsValue(
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -5704,8 +5688,8 @@ public int getDefaultMissingValueInterpretationValue() {
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -5733,8 +5717,8 @@ public Builder setDefaultMissingValueInterpretationValue(int value) {
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -5766,8 +5750,8 @@ public Builder setDefaultMissingValueInterpretationValue(int value) {
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -5799,8 +5783,8 @@ public Builder setDefaultMissingValueInterpretation(
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java
index c77b02d624..e17f00d96a 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java
@@ -203,8 +203,7 @@ public interface AppendRowsRequestOrBuilder
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -217,8 +216,7 @@ public interface AppendRowsRequestOrBuilder
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -231,8 +229,7 @@ public interface AppendRowsRequestOrBuilder
*
*
*
- * Rows in arrow format. This is an experimental feature only selected for
- * allowlisted customers.
+ * Rows in arrow format.
*
*
* .google.cloud.bigquery.storage.v1.AppendRowsRequest.ArrowData arrow_rows = 5;
@@ -534,8 +531,8 @@ public interface AppendRowsRequestOrBuilder
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
@@ -557,8 +554,8 @@ public interface AppendRowsRequestOrBuilder
*
* Optional. Default missing value interpretation for all columns in the
* table. When a value is specified on an `AppendRowsRequest`, it is applied
- * to all requests on the connection from that point forward, until a
- * subsequent `AppendRowsRequest` sets it to a different value.
+ * to all requests from that point forward, until a subsequent
+ * `AppendRowsRequest` sets it to a different value.
* `missing_value_interpretation` can override
* `default_missing_value_interpretation`. For example, if you want to write
* `NULL` instead of using default values for some columns, you can set
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java
index 82410f18e7..a9d6fc7a0b 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowProto.java
@@ -53,18 +53,25 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ ".proto\022 google.cloud.bigquery.storage.v1"
+ "\"(\n\013ArrowSchema\022\031\n\021serialized_schema\030\001 \001"
+ "(\014\"J\n\020ArrowRecordBatch\022\037\n\027serialized_rec"
- + "ord_batch\030\001 \001(\014\022\025\n\trow_count\030\002 \001(\003B\002\030\001\"\317"
- + "\001\n\031ArrowSerializationOptions\022h\n\022buffer_c"
+ + "ord_batch\030\001 \001(\014\022\025\n\trow_count\030\002 \001(\003B\002\030\001\"\354"
+ + "\003\n\031ArrowSerializationOptions\022h\n\022buffer_c"
+ "ompression\030\002 \001(\0162L.google.cloud.bigquery"
+ ".storage.v1.ArrowSerializationOptions.Co"
- + "mpressionCodec\"H\n\020CompressionCodec\022\033\n\027CO"
+ + "mpressionCodec\022v\n\031picos_timestamp_precis"
+ + "ion\030\003 \001(\0162S.google.cloud.bigquery.storag"
+ + "e.v1.ArrowSerializationOptions.PicosTime"
+ + "stampPrecision\"H\n\020CompressionCodec\022\033\n\027CO"
+ "MPRESSION_UNSPECIFIED\020\000\022\r\n\tLZ4_FRAME\020\001\022\010"
- + "\n\004ZSTD\020\002B\272\001\n$com.google.cloud.bigquery.s"
- + "torage.v1B\nArrowProtoP\001Z>cloud.google.co"
- + "m/go/bigquery/storage/apiv1/storagepb;st"
- + "oragepb\252\002 Google.Cloud.BigQuery.Storage."
- + "V1\312\002 Google\\Cloud\\BigQuery\\Storage\\V1b\006p"
- + "roto3"
+ + "\n\004ZSTD\020\002\"\242\001\n\027PicosTimestampPrecision\022)\n%"
+ + "PICOS_TIMESTAMP_PRECISION_UNSPECIFIED\020\000\022"
+ + "\036\n\032TIMESTAMP_PRECISION_MICROS\020\001\022\035\n\031TIMES"
+ + "TAMP_PRECISION_NANOS\020\002\022\035\n\031TIMESTAMP_PREC"
+ + "ISION_PICOS\020\003B\272\001\n$com.google.cloud.bigqu"
+ + "ery.storage.v1B\nArrowProtoP\001Z>cloud.goog"
+ + "le.com/go/bigquery/storage/apiv1/storage"
+ + "pb;storagepb\252\002 Google.Cloud.BigQuery.Sto"
+ + "rage.V1\312\002 Google\\Cloud\\BigQuery\\Storage\\"
+ + "V1b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -91,7 +98,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_bigquery_storage_v1_ArrowSerializationOptions_descriptor,
new java.lang.String[] {
- "BufferCompression",
+ "BufferCompression", "PicosTimestampPrecision",
});
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java
index 19035062b5..fdf9acac7c 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptions.java
@@ -41,6 +41,7 @@ private ArrowSerializationOptions(com.google.protobuf.GeneratedMessageV3.Builder
private ArrowSerializationOptions() {
bufferCompression_ = 0;
+ picosTimestampPrecision_ = 0;
}
@java.lang.Override
@@ -228,6 +229,205 @@ private CompressionCodec(int value) {
// @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec)
}
+ /**
+ *
+ *
+ *
 + * The precision of the timestamp value in the Arrow message. This precision
+ * will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+ *
+ *
+ * Protobuf enum {@code
+ * google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision}
+ */
+ public enum PicosTimestampPrecision implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ *
+ *
+ *
+ * Unspecified timestamp precision. The default precision is microseconds.
+ *
+ *
+ * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;
+ */
+ PICOS_TIMESTAMP_PRECISION_UNSPECIFIED(0),
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to microsecond
+ * level precision. The value will be encoded as Arrow TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_MICROS = 1;
+ */
+ TIMESTAMP_PRECISION_MICROS(1),
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to nanosecond
+ * level precision. The value will be encoded as Arrow TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_NANOS = 2;
+ */
+ TIMESTAMP_PRECISION_NANOS(2),
+ /**
+ *
+ *
+ *
+ * Read API will return full precision picosecond value. The value will be
+ * encoded as a string which conforms to ISO 8601 format.
+ *
+ *
+ * TIMESTAMP_PRECISION_PICOS = 3;
+ */
+ TIMESTAMP_PRECISION_PICOS(3),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+ *
+ *
+ *
+ * Unspecified timestamp precision. The default precision is microseconds.
+ *
+ *
+ * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;
+ */
+ public static final int PICOS_TIMESTAMP_PRECISION_UNSPECIFIED_VALUE = 0;
+
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to microsecond
+ * level precision. The value will be encoded as Arrow TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_MICROS = 1;
+ */
+ public static final int TIMESTAMP_PRECISION_MICROS_VALUE = 1;
+
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to nanosecond
+ * level precision. The value will be encoded as Arrow TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_NANOS = 2;
+ */
+ public static final int TIMESTAMP_PRECISION_NANOS_VALUE = 2;
+
+ /**
+ *
+ *
+ *
+ * Read API will return full precision picosecond value. The value will be
+ * encoded as a string which conforms to ISO 8601 format.
+ *
+ *
+ * TIMESTAMP_PRECISION_PICOS = 3;
+ */
+ public static final int TIMESTAMP_PRECISION_PICOS_VALUE = 3;
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static PicosTimestampPrecision valueOf(int value) {
+ return forNumber(value);
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ */
+ public static PicosTimestampPrecision forNumber(int value) {
+ switch (value) {
+ case 0:
+ return PICOS_TIMESTAMP_PRECISION_UNSPECIFIED;
+ case 1:
+ return TIMESTAMP_PRECISION_MICROS;
+ case 2:
+ return TIMESTAMP_PRECISION_NANOS;
+ case 3:
+ return TIMESTAMP_PRECISION_PICOS;
+ default:
+ return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return The enum numeric value on the wire for picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public int getPicosTimestampPrecisionValue() {
+ return picosTimestampPrecision_;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return The picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ getPicosTimestampPrecision() {
+ com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision result =
+ com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ .forNumber(picosTimestampPrecision_);
+ return result == null
+ ? com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ .UNRECOGNIZED
+ : result;
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -296,6 +544,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
.getNumber()) {
output.writeEnum(2, bufferCompression_);
}
+ if (picosTimestampPrecision_
+ != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED
+ .getNumber()) {
+ output.writeEnum(3, picosTimestampPrecision_);
+ }
getUnknownFields().writeTo(output);
}
@@ -311,6 +565,12 @@ public int getSerializedSize() {
.getNumber()) {
size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, bufferCompression_);
}
+ if (picosTimestampPrecision_
+ != com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED
+ .getNumber()) {
+ size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, picosTimestampPrecision_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
@@ -328,6 +588,7 @@ public boolean equals(final java.lang.Object obj) {
(com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions) obj;
if (bufferCompression_ != other.bufferCompression_) return false;
+ if (picosTimestampPrecision_ != other.picosTimestampPrecision_) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@@ -341,6 +602,8 @@ public int hashCode() {
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + BUFFER_COMPRESSION_FIELD_NUMBER;
hash = (53 * hash) + bufferCompression_;
+ hash = (37 * hash) + PICOS_TIMESTAMP_PRECISION_FIELD_NUMBER;
+ hash = (53 * hash) + picosTimestampPrecision_;
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -483,6 +746,7 @@ public Builder clear() {
super.clear();
bitField0_ = 0;
bufferCompression_ = 0;
+ picosTimestampPrecision_ = 0;
return this;
}
@@ -524,6 +788,9 @@ private void buildPartial0(
if (((from_bitField0_ & 0x00000001) != 0)) {
result.bufferCompression_ = bufferCompression_;
}
+ if (((from_bitField0_ & 0x00000002) != 0)) {
+ result.picosTimestampPrecision_ = picosTimestampPrecision_;
+ }
}
@java.lang.Override
@@ -576,6 +843,9 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ArrowSerialization
if (other.bufferCompression_ != 0) {
setBufferCompressionValue(other.getBufferCompressionValue());
}
+ if (other.picosTimestampPrecision_ != 0) {
+ setPicosTimestampPrecisionValue(other.getPicosTimestampPrecisionValue());
+ }
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
@@ -608,6 +878,12 @@ public Builder mergeFrom(
bitField0_ |= 0x00000001;
break;
} // case 16
+ case 24:
+ {
+ picosTimestampPrecision_ = input.readEnum();
+ bitField0_ |= 0x00000002;
+ break;
+ } // case 24
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
@@ -743,6 +1019,124 @@ public Builder clearBufferCompression() {
return this;
}
+ private int picosTimestampPrecision_ = 0;
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return The enum numeric value on the wire for picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public int getPicosTimestampPrecisionValue() {
+ return picosTimestampPrecision_;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @param value The enum numeric value on the wire for picosTimestampPrecision to set.
+ * @return This builder for chaining.
+ */
+ public Builder setPicosTimestampPrecisionValue(int value) {
+ picosTimestampPrecision_ = value;
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return The picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ getPicosTimestampPrecision() {
+ com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ result =
+ com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ .forNumber(picosTimestampPrecision_);
+ return result == null
+ ? com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ .UNRECOGNIZED
+ : result;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @param value The picosTimestampPrecision to set.
+ * @return This builder for chaining.
+ */
+ public Builder setPicosTimestampPrecision(
+ com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ picosTimestampPrecision_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearPicosTimestampPrecision() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ picosTimestampPrecision_ = 0;
+ onChanged();
+ return this;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java
index 16a84415f3..1fcd8f4b06 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ArrowSerializationOptionsOrBuilder.java
@@ -56,4 +56,37 @@ public interface ArrowSerializationOptionsOrBuilder
*/
com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.CompressionCodec
getBufferCompression();
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return The enum numeric value on the wire for picosTimestampPrecision.
+ */
+ int getPicosTimestampPrecisionValue();
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 3;
+ *
+ *
+ * @return The picosTimestampPrecision.
+ */
+ com.google.cloud.bigquery.storage.v1.ArrowSerializationOptions.PicosTimestampPrecision
+ getPicosTimestampPrecision();
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java
index 71d3587b71..eb249acb13 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroProto.java
@@ -53,13 +53,21 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "proto\022 google.cloud.bigquery.storage.v1\""
+ "\034\n\nAvroSchema\022\016\n\006schema\030\001 \001(\t\"A\n\010AvroRow"
+ "s\022\036\n\026serialized_binary_rows\030\001 \001(\014\022\025\n\trow"
- + "_count\030\002 \001(\003B\002\030\001\"A\n\030AvroSerializationOpt"
- + "ions\022%\n\035enable_display_name_attribute\030\001 "
- + "\001(\010B\271\001\n$com.google.cloud.bigquery.storag"
- + "e.v1B\tAvroProtoP\001Z>cloud.google.com/go/b"
- + "igquery/storage/apiv1/storagepb;storagep"
- + "b\252\002 Google.Cloud.BigQuery.Storage.V1\312\002 G"
- + "oogle\\Cloud\\BigQuery\\Storage\\V1b\006proto3"
+ + "_count\030\002 \001(\003B\002\030\001\"\335\002\n\030AvroSerializationOp"
+ + "tions\022%\n\035enable_display_name_attribute\030\001"
+ + " \001(\010\022u\n\031picos_timestamp_precision\030\002 \001(\0162"
+ + "R.google.cloud.bigquery.storage.v1.AvroS"
+ + "erializationOptions.PicosTimestampPrecis"
+ + "ion\"\242\001\n\027PicosTimestampPrecision\022)\n%PICOS"
+ + "_TIMESTAMP_PRECISION_UNSPECIFIED\020\000\022\036\n\032TI"
+ + "MESTAMP_PRECISION_MICROS\020\001\022\035\n\031TIMESTAMP_"
+ + "PRECISION_NANOS\020\002\022\035\n\031TIMESTAMP_PRECISION"
+ + "_PICOS\020\003B\271\001\n$com.google.cloud.bigquery.s"
+ + "torage.v1B\tAvroProtoP\001Z>cloud.google.com"
+ + "/go/bigquery/storage/apiv1/storagepb;sto"
+ + "ragepb\252\002 Google.Cloud.BigQuery.Storage.V"
+ + "1\312\002 Google\\Cloud\\BigQuery\\Storage\\V1b\006pr"
+ + "oto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -86,7 +94,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_bigquery_storage_v1_AvroSerializationOptions_descriptor,
new java.lang.String[] {
- "EnableDisplayNameAttribute",
+ "EnableDisplayNameAttribute", "PicosTimestampPrecision",
});
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java
index 6c8dbcf764..d687adb37a 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptions.java
@@ -39,7 +39,9 @@ private AvroSerializationOptions(com.google.protobuf.GeneratedMessageV3.Builder<
super(builder);
}
- private AvroSerializationOptions() {}
+ private AvroSerializationOptions() {
+ picosTimestampPrecision_ = 0;
+ }
@java.lang.Override
@SuppressWarnings({"unused"})
@@ -62,6 +64,205 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.Builder.class);
}
+ /**
+ *
+ *
+ *
+ * The precision of the timestamp value in the Avro message. This precision
+ * will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+ *
+ *
+ * Protobuf enum {@code
+ * google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision}
+ */
+ public enum PicosTimestampPrecision implements com.google.protobuf.ProtocolMessageEnum {
+ /**
+ *
+ *
+ *
+ * Unspecified timestamp precision. The default precision is microseconds.
+ *
+ *
+ * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;
+ */
+ PICOS_TIMESTAMP_PRECISION_UNSPECIFIED(0),
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to microsecond
+ * level precision. The value will be encoded as Avro TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_MICROS = 1;
+ */
+ TIMESTAMP_PRECISION_MICROS(1),
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to nanosecond
+ * level precision. The value will be encoded as Avro TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_NANOS = 2;
+ */
+ TIMESTAMP_PRECISION_NANOS(2),
+ /**
+ *
+ *
+ *
+ * Read API will return full precision picosecond value. The value will be
+ * encoded as a string which conforms to ISO 8601 format.
+ *
+ *
+ * TIMESTAMP_PRECISION_PICOS = 3;
+ */
+ TIMESTAMP_PRECISION_PICOS(3),
+ UNRECOGNIZED(-1),
+ ;
+
+ /**
+ *
+ *
+ *
+ * Unspecified timestamp precision. The default precision is microseconds.
+ *
+ *
+ * PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;
+ */
+ public static final int PICOS_TIMESTAMP_PRECISION_UNSPECIFIED_VALUE = 0;
+
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to microsecond
+ * level precision. The value will be encoded as Avro TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_MICROS = 1;
+ */
+ public static final int TIMESTAMP_PRECISION_MICROS_VALUE = 1;
+
+ /**
+ *
+ *
+ *
+ * Timestamp values returned by Read API will be truncated to nanosecond
+ * level precision. The value will be encoded as Avro TIMESTAMP type in a
+ * 64 bit integer.
+ *
+ *
+ * TIMESTAMP_PRECISION_NANOS = 2;
+ */
+ public static final int TIMESTAMP_PRECISION_NANOS_VALUE = 2;
+
+ /**
+ *
+ *
+ *
+ * Read API will return full precision picosecond value. The value will be
+ * encoded as a string which conforms to ISO 8601 format.
+ *
+ *
+ * TIMESTAMP_PRECISION_PICOS = 3;
+ */
+ public static final int TIMESTAMP_PRECISION_PICOS_VALUE = 3;
+
+ public final int getNumber() {
+ if (this == UNRECOGNIZED) {
+ throw new java.lang.IllegalArgumentException(
+ "Can't get the number of an unknown enum value.");
+ }
+ return value;
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ * @deprecated Use {@link #forNumber(int)} instead.
+ */
+ @java.lang.Deprecated
+ public static PicosTimestampPrecision valueOf(int value) {
+ return forNumber(value);
+ }
+
+ /**
+ * @param value The numeric wire value of the corresponding enum entry.
+ * @return The enum associated with the given numeric wire value.
+ */
+ public static PicosTimestampPrecision forNumber(int value) {
+ switch (value) {
+ case 0:
+ return PICOS_TIMESTAMP_PRECISION_UNSPECIFIED;
+ case 1:
+ return TIMESTAMP_PRECISION_MICROS;
+ case 2:
+ return TIMESTAMP_PRECISION_NANOS;
+ case 3:
+ return TIMESTAMP_PRECISION_PICOS;
+ default:
+ return null;
+ }
+ }
+
+ public static com.google.protobuf.Internal.EnumLiteMap
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return The enum numeric value on the wire for picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public int getPicosTimestampPrecisionValue() {
+ return picosTimestampPrecision_;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return The picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ getPicosTimestampPrecision() {
+ com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision result =
+ com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ .forNumber(picosTimestampPrecision_);
+ return result == null
+ ? com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ .UNRECOGNIZED
+ : result;
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -107,6 +356,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (enableDisplayNameAttribute_ != false) {
output.writeBool(1, enableDisplayNameAttribute_);
}
+ if (picosTimestampPrecision_
+ != com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED
+ .getNumber()) {
+ output.writeEnum(2, picosTimestampPrecision_);
+ }
getUnknownFields().writeTo(output);
}
@@ -119,6 +374,12 @@ public int getSerializedSize() {
if (enableDisplayNameAttribute_ != false) {
size += com.google.protobuf.CodedOutputStream.computeBoolSize(1, enableDisplayNameAttribute_);
}
+ if (picosTimestampPrecision_
+ != com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ .PICOS_TIMESTAMP_PRECISION_UNSPECIFIED
+ .getNumber()) {
+ size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, picosTimestampPrecision_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
@@ -136,6 +397,7 @@ public boolean equals(final java.lang.Object obj) {
(com.google.cloud.bigquery.storage.v1.AvroSerializationOptions) obj;
if (getEnableDisplayNameAttribute() != other.getEnableDisplayNameAttribute()) return false;
+ if (picosTimestampPrecision_ != other.picosTimestampPrecision_) return false;
if (!getUnknownFields().equals(other.getUnknownFields())) return false;
return true;
}
@@ -149,6 +411,8 @@ public int hashCode() {
hash = (19 * hash) + getDescriptor().hashCode();
hash = (37 * hash) + ENABLE_DISPLAY_NAME_ATTRIBUTE_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getEnableDisplayNameAttribute());
+ hash = (37 * hash) + PICOS_TIMESTAMP_PRECISION_FIELD_NUMBER;
+ hash = (53 * hash) + picosTimestampPrecision_;
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -291,6 +555,7 @@ public Builder clear() {
super.clear();
bitField0_ = 0;
enableDisplayNameAttribute_ = false;
+ picosTimestampPrecision_ = 0;
return this;
}
@@ -332,6 +597,9 @@ private void buildPartial0(
if (((from_bitField0_ & 0x00000001) != 0)) {
result.enableDisplayNameAttribute_ = enableDisplayNameAttribute_;
}
+ if (((from_bitField0_ & 0x00000002) != 0)) {
+ result.picosTimestampPrecision_ = picosTimestampPrecision_;
+ }
}
@java.lang.Override
@@ -384,6 +652,9 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AvroSerializationO
if (other.getEnableDisplayNameAttribute() != false) {
setEnableDisplayNameAttribute(other.getEnableDisplayNameAttribute());
}
+ if (other.picosTimestampPrecision_ != 0) {
+ setPicosTimestampPrecisionValue(other.getPicosTimestampPrecisionValue());
+ }
this.mergeUnknownFields(other.getUnknownFields());
onChanged();
return this;
@@ -416,6 +687,12 @@ public Builder mergeFrom(
bitField0_ |= 0x00000001;
break;
} // case 8
+ case 16:
+ {
+ picosTimestampPrecision_ = input.readEnum();
+ bitField0_ |= 0x00000002;
+ break;
+ } // case 16
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
@@ -518,6 +795,123 @@ public Builder clearEnableDisplayNameAttribute() {
return this;
}
+ private int picosTimestampPrecision_ = 0;
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return The enum numeric value on the wire for picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public int getPicosTimestampPrecisionValue() {
+ return picosTimestampPrecision_;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @param value The enum numeric value on the wire for picosTimestampPrecision to set.
+ * @return This builder for chaining.
+ */
+ public Builder setPicosTimestampPrecisionValue(int value) {
+ picosTimestampPrecision_ = value;
+ bitField0_ |= 0x00000002;
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return The picosTimestampPrecision.
+ */
+ @java.lang.Override
+ public com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ getPicosTimestampPrecision() {
+ com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision result =
+ com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ .forNumber(picosTimestampPrecision_);
+ return result == null
+ ? com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ .UNRECOGNIZED
+ : result;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @param value The picosTimestampPrecision to set.
+ * @return This builder for chaining.
+ */
+ public Builder setPicosTimestampPrecision(
+ com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ bitField0_ |= 0x00000002;
+ picosTimestampPrecision_ = value.getNumber();
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearPicosTimestampPrecision() {
+ bitField0_ = (bitField0_ & ~0x00000002);
+ picosTimestampPrecision_ = 0;
+ onChanged();
+ return this;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java
index c64a63d6e2..289136efc0 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AvroSerializationOptionsOrBuilder.java
@@ -45,4 +45,37 @@ public interface AvroSerializationOptionsOrBuilder
* @return The enableDisplayNameAttribute.
*/
boolean getEnableDisplayNameAttribute();
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return The enum numeric value on the wire for picosTimestampPrecision.
+ */
+ int getPicosTimestampPrecisionValue();
+
+ /**
+ *
+ *
+ *
+ * Optional. Set timestamp precision option. If not set, the default precision
+ * is microseconds.
+ *
+ *
+ *
+ * .google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision picos_timestamp_precision = 2;
+ *
+ *
+ * @return The picosTimestampPrecision.
+ */
+ com.google.cloud.bigquery.storage.v1.AvroSerializationOptions.PicosTimestampPrecision
+ getPicosTimestampPrecision();
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java
index f4b84c8a24..d999837dad 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StreamProto.java
@@ -121,7 +121,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "e.cloud.bigquery.storage.v1.TableSchemaB\003\340A\003\022P\n\n"
+ "write_mode\030\007 \001(\01627.google.cloud."
+ "bigquery.storage.v1.WriteStream.WriteModeB\003\340A\005\022\025\n"
- + "\010location\030\010 \001(\tB\003\340A\005\"F\n"
+ + "\010location\030\010 \001(\tB\003\340A\003\"F\n"
+ "\004Type\022\024\n"
+ "\020TYPE_UNSPECIFIED\020\000\022\r\n"
+ "\tCOMMITTED\020\001\022\013\n"
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java
index d251596836..21b404c06e 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchema.java
@@ -1776,6 +1776,83 @@ public com.google.protobuf.ByteString getDefaultValueExpressionBytes() {
}
}
+ public static final int TIMESTAMP_PRECISION_FIELD_NUMBER = 27;
+ private com.google.protobuf.Int64Value timestampPrecision_;
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the timestampPrecision field is set.
+ */
+ @java.lang.Override
+ public boolean hasTimestampPrecision() {
+ return ((bitField0_ & 0x00000001) != 0);
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The timestampPrecision.
+ */
+ @java.lang.Override
+ public com.google.protobuf.Int64Value getTimestampPrecision() {
+ return timestampPrecision_ == null
+ ? com.google.protobuf.Int64Value.getDefaultInstance()
+ : timestampPrecision_;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ @java.lang.Override
+ public com.google.protobuf.Int64ValueOrBuilder getTimestampPrecisionOrBuilder() {
+ return timestampPrecision_ == null
+ ? com.google.protobuf.Int64Value.getDefaultInstance()
+ : timestampPrecision_;
+ }
+
public static final int RANGE_ELEMENT_TYPE_FIELD_NUMBER = 11;
private com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType rangeElementType_;
@@ -1799,7 +1876,7 @@ public com.google.protobuf.ByteString getDefaultValueExpressionBytes() {
*/
@java.lang.Override
public boolean hasRangeElementType() {
- return ((bitField0_ & 0x00000001) != 0);
+ return ((bitField0_ & 0x00000002) != 0);
}
/**
@@ -1899,9 +1976,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(defaultValueExpression_)) {
com.google.protobuf.GeneratedMessageV3.writeString(output, 10, defaultValueExpression_);
}
- if (((bitField0_ & 0x00000001) != 0)) {
+ if (((bitField0_ & 0x00000002) != 0)) {
output.writeMessage(11, getRangeElementType());
}
+ if (((bitField0_ & 0x00000001) != 0)) {
+ output.writeMessage(27, getTimestampPrecision());
+ }
getUnknownFields().writeTo(output);
}
@@ -1942,9 +2022,12 @@ public int getSerializedSize() {
if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(defaultValueExpression_)) {
size += com.google.protobuf.GeneratedMessageV3.computeStringSize(10, defaultValueExpression_);
}
- if (((bitField0_ & 0x00000001) != 0)) {
+ if (((bitField0_ & 0x00000002) != 0)) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(11, getRangeElementType());
}
+ if (((bitField0_ & 0x00000001) != 0)) {
+ size += com.google.protobuf.CodedOutputStream.computeMessageSize(27, getTimestampPrecision());
+ }
size += getUnknownFields().getSerializedSize();
memoizedSize = size;
return size;
@@ -1970,6 +2053,10 @@ public boolean equals(final java.lang.Object obj) {
if (getPrecision() != other.getPrecision()) return false;
if (getScale() != other.getScale()) return false;
if (!getDefaultValueExpression().equals(other.getDefaultValueExpression())) return false;
+ if (hasTimestampPrecision() != other.hasTimestampPrecision()) return false;
+ if (hasTimestampPrecision()) {
+ if (!getTimestampPrecision().equals(other.getTimestampPrecision())) return false;
+ }
if (hasRangeElementType() != other.hasRangeElementType()) return false;
if (hasRangeElementType()) {
if (!getRangeElementType().equals(other.getRangeElementType())) return false;
@@ -2005,6 +2092,10 @@ public int hashCode() {
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getScale());
hash = (37 * hash) + DEFAULT_VALUE_EXPRESSION_FIELD_NUMBER;
hash = (53 * hash) + getDefaultValueExpression().hashCode();
+ if (hasTimestampPrecision()) {
+ hash = (37 * hash) + TIMESTAMP_PRECISION_FIELD_NUMBER;
+ hash = (53 * hash) + getTimestampPrecision().hashCode();
+ }
if (hasRangeElementType()) {
hash = (37 * hash) + RANGE_ELEMENT_TYPE_FIELD_NUMBER;
hash = (53 * hash) + getRangeElementType().hashCode();
@@ -2152,6 +2243,7 @@ private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
getFieldsFieldBuilder();
+ getTimestampPrecisionFieldBuilder();
getRangeElementTypeFieldBuilder();
}
}
@@ -2175,6 +2267,11 @@ public Builder clear() {
precision_ = 0L;
scale_ = 0L;
defaultValueExpression_ = "";
+ timestampPrecision_ = null;
+ if (timestampPrecisionBuilder_ != null) {
+ timestampPrecisionBuilder_.dispose();
+ timestampPrecisionBuilder_ = null;
+ }
rangeElementType_ = null;
if (rangeElementTypeBuilder_ != null) {
rangeElementTypeBuilder_.dispose();
@@ -2256,9 +2353,16 @@ private void buildPartial0(com.google.cloud.bigquery.storage.v1.TableFieldSchema
}
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000200) != 0)) {
+ result.timestampPrecision_ =
+ timestampPrecisionBuilder_ == null
+ ? timestampPrecision_
+ : timestampPrecisionBuilder_.build();
+ to_bitField0_ |= 0x00000001;
+ }
+ if (((from_bitField0_ & 0x00000400) != 0)) {
result.rangeElementType_ =
rangeElementTypeBuilder_ == null ? rangeElementType_ : rangeElementTypeBuilder_.build();
- to_bitField0_ |= 0x00000001;
+ to_bitField0_ |= 0x00000002;
}
result.bitField0_ |= to_bitField0_;
}
@@ -2366,6 +2470,9 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.TableFieldSchema o
bitField0_ |= 0x00000100;
onChanged();
}
+ if (other.hasTimestampPrecision()) {
+ mergeTimestampPrecision(other.getTimestampPrecision());
+ }
if (other.hasRangeElementType()) {
mergeRangeElementType(other.getRangeElementType());
}
@@ -2461,9 +2568,16 @@ public Builder mergeFrom(
{
input.readMessage(
getRangeElementTypeFieldBuilder().getBuilder(), extensionRegistry);
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
break;
} // case 90
+ case 218:
+ {
+ input.readMessage(
+ getTimestampPrecisionFieldBuilder().getBuilder(), extensionRegistry);
+ bitField0_ |= 0x00000200;
+ break;
+ } // case 218
default:
{
if (!super.parseUnknownField(input, extensionRegistry, tag)) {
@@ -3768,6 +3882,272 @@ public Builder setDefaultValueExpressionBytes(com.google.protobuf.ByteString val
return this;
}
+ private com.google.protobuf.Int64Value timestampPrecision_;
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Int64Value,
+ com.google.protobuf.Int64Value.Builder,
+ com.google.protobuf.Int64ValueOrBuilder>
+ timestampPrecisionBuilder_;
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the timestampPrecision field is set.
+ */
+ public boolean hasTimestampPrecision() {
+ return ((bitField0_ & 0x00000200) != 0);
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The timestampPrecision.
+ */
+ public com.google.protobuf.Int64Value getTimestampPrecision() {
+ if (timestampPrecisionBuilder_ == null) {
+ return timestampPrecision_ == null
+ ? com.google.protobuf.Int64Value.getDefaultInstance()
+ : timestampPrecision_;
+ } else {
+ return timestampPrecisionBuilder_.getMessage();
+ }
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setTimestampPrecision(com.google.protobuf.Int64Value value) {
+ if (timestampPrecisionBuilder_ == null) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ timestampPrecision_ = value;
+ } else {
+ timestampPrecisionBuilder_.setMessage(value);
+ }
+ bitField0_ |= 0x00000200;
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder setTimestampPrecision(com.google.protobuf.Int64Value.Builder builderForValue) {
+ if (timestampPrecisionBuilder_ == null) {
+ timestampPrecision_ = builderForValue.build();
+ } else {
+ timestampPrecisionBuilder_.setMessage(builderForValue.build());
+ }
+ bitField0_ |= 0x00000200;
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder mergeTimestampPrecision(com.google.protobuf.Int64Value value) {
+ if (timestampPrecisionBuilder_ == null) {
+ if (((bitField0_ & 0x00000200) != 0)
+ && timestampPrecision_ != null
+ && timestampPrecision_ != com.google.protobuf.Int64Value.getDefaultInstance()) {
+ getTimestampPrecisionBuilder().mergeFrom(value);
+ } else {
+ timestampPrecision_ = value;
+ }
+ } else {
+ timestampPrecisionBuilder_.mergeFrom(value);
+ }
+ if (timestampPrecision_ != null) {
+ bitField0_ |= 0x00000200;
+ onChanged();
+ }
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public Builder clearTimestampPrecision() {
+ bitField0_ = (bitField0_ & ~0x00000200);
+ timestampPrecision_ = null;
+ if (timestampPrecisionBuilder_ != null) {
+ timestampPrecisionBuilder_.dispose();
+ timestampPrecisionBuilder_ = null;
+ }
+ onChanged();
+ return this;
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.protobuf.Int64Value.Builder getTimestampPrecisionBuilder() {
+ bitField0_ |= 0x00000200;
+ onChanged();
+ return getTimestampPrecisionFieldBuilder().getBuilder();
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ public com.google.protobuf.Int64ValueOrBuilder getTimestampPrecisionOrBuilder() {
+ if (timestampPrecisionBuilder_ != null) {
+ return timestampPrecisionBuilder_.getMessageOrBuilder();
+ } else {
+ return timestampPrecision_ == null
+ ? com.google.protobuf.Int64Value.getDefaultInstance()
+ : timestampPrecision_;
+ }
+ }
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ private com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Int64Value,
+ com.google.protobuf.Int64Value.Builder,
+ com.google.protobuf.Int64ValueOrBuilder>
+ getTimestampPrecisionFieldBuilder() {
+ if (timestampPrecisionBuilder_ == null) {
+ timestampPrecisionBuilder_ =
+ new com.google.protobuf.SingleFieldBuilderV3<
+ com.google.protobuf.Int64Value,
+ com.google.protobuf.Int64Value.Builder,
+ com.google.protobuf.Int64ValueOrBuilder>(
+ getTimestampPrecision(), getParentForChildren(), isClean());
+ timestampPrecision_ = null;
+ }
+ return timestampPrecisionBuilder_;
+ }
+
private com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType
rangeElementType_;
private com.google.protobuf.SingleFieldBuilderV3<
@@ -3795,7 +4175,7 @@ public Builder setDefaultValueExpressionBytes(com.google.protobuf.ByteString val
* @return Whether the rangeElementType field is set.
*/
public boolean hasRangeElementType() {
- return ((bitField0_ & 0x00000200) != 0);
+ return ((bitField0_ & 0x00000400) != 0);
}
/**
@@ -3854,7 +4234,7 @@ public Builder setRangeElementType(
} else {
rangeElementTypeBuilder_.setMessage(value);
}
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
onChanged();
return this;
}
@@ -3883,7 +4263,7 @@ public Builder setRangeElementType(
} else {
rangeElementTypeBuilder_.setMessage(builderForValue.build());
}
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
onChanged();
return this;
}
@@ -3907,7 +4287,7 @@ public Builder setRangeElementType(
public Builder mergeRangeElementType(
com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType value) {
if (rangeElementTypeBuilder_ == null) {
- if (((bitField0_ & 0x00000200) != 0)
+ if (((bitField0_ & 0x00000400) != 0)
&& rangeElementType_ != null
&& rangeElementType_
!= com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType
@@ -3920,7 +4300,7 @@ public Builder mergeRangeElementType(
rangeElementTypeBuilder_.mergeFrom(value);
}
if (rangeElementType_ != null) {
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
onChanged();
}
return this;
@@ -3943,7 +4323,7 @@ public Builder mergeRangeElementType(
*
*/
public Builder clearRangeElementType() {
- bitField0_ = (bitField0_ & ~0x00000200);
+ bitField0_ = (bitField0_ & ~0x00000400);
rangeElementType_ = null;
if (rangeElementTypeBuilder_ != null) {
rangeElementTypeBuilder_.dispose();
@@ -3971,7 +4351,7 @@ public Builder clearRangeElementType() {
*/
public com.google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementType.Builder
getRangeElementTypeBuilder() {
- bitField0_ |= 0x00000200;
+ bitField0_ |= 0x00000400;
onChanged();
return getRangeElementTypeFieldBuilder().getBuilder();
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java
index fed4f83767..3cbe66a9d1 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableFieldSchemaOrBuilder.java
@@ -320,6 +320,67 @@ public interface TableFieldSchemaOrBuilder
*/
com.google.protobuf.ByteString getDefaultValueExpressionBytes();
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return Whether the timestampPrecision field is set.
+ */
+ boolean hasTimestampPrecision();
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ *
+ * @return The timestampPrecision.
+ */
+ com.google.protobuf.Int64Value getTimestampPrecision();
+
+ /**
+ *
+ *
+ *
+ * Optional. Precision (maximum number of total digits in base 10) for seconds
+ * of TIMESTAMP type.
+ *
+ * Possible values include:
+ *
+ * * 6 (Default, for TIMESTAMP type with microsecond precision)
+ * * 12 (For TIMESTAMP type with picosecond precision)
+ *
+ *
+ *
+ * .google.protobuf.Int64Value timestamp_precision = 27 [(.google.api.field_behavior) = OPTIONAL];
+ *
+ */
+ com.google.protobuf.Int64ValueOrBuilder getTimestampPrecisionOrBuilder();
+
/**
*
*
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java
index e377772d49..c17623b1a5 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/TableProto.java
@@ -50,30 +50,32 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
static {
java.lang.String[] descriptorData = {
"\n"
- + ",google/cloud/bigquery/storage/v1/table.proto\022"
- + " google.cloud.bigquery.storage.v1\032\037google/api/field_behavior.proto\"Q\n"
+ + ",google/cloud/bigquery/storage/v1/table.proto\022 google.cloud.bigquery.storage.v1"
+ + "\032\037google/api/field_behavior.proto\032\036google/protobuf/wrappers.proto\"Q\n"
+ "\013TableSchema\022B\n"
+ "\006fields\030\001"
- + " \003(\01322.google.cloud.bigquery.storage.v1.TableFieldSchema\"\375\006\n"
+ + " \003(\01322.google.cloud.bigquery.storage.v1.TableFieldSchema\"\274\007\n"
+ "\020TableFieldSchema\022\021\n"
+ "\004name\030\001 \001(\tB\003\340A\002\022J\n"
- + "\004type\030\002"
- + " \001(\01627.google.cloud.bigquery.storage.v1.TableFieldSchema.TypeB\003\340A\002\022J\n"
- + "\004mode\030\003"
- + " \001(\01627.google.cloud.bigquery.storage.v1.TableFieldSchema.ModeB\003\340A\001\022G\n"
- + "\006fields\030\004"
- + " \003(\01322.google.cloud.bigquery.storage.v1.TableFieldSchemaB\003\340A\001\022\030\n"
+ + "\004type\030\002 \001"
+ + "(\01627.google.cloud.bigquery.storage.v1.TableFieldSchema.TypeB\003\340A\002\022J\n"
+ + "\004mode\030\003 \001(\01627"
+ + ".google.cloud.bigquery.storage.v1.TableFieldSchema.ModeB\003\340A\001\022G\n"
+ + "\006fields\030\004 \003(\01322.g"
+ + "oogle.cloud.bigquery.storage.v1.TableFieldSchemaB\003\340A\001\022\030\n"
+ "\013description\030\006 \001(\tB\003\340A\001\022\027\n\n"
+ "max_length\030\007 \001(\003B\003\340A\001\022\026\n"
+ "\tprecision\030\010 \001(\003B\003\340A\001\022\022\n"
+ "\005scale\030\t \001(\003B\003\340A\001\022%\n"
+ "\030default_value_expression\030\n"
- + " \001(\tB\003\340A\001\022d\n"
- + "\022range_element_type\030\013 \001(\0132C.google.cloud.b"
- + "igquery.storage.v1.TableFieldSchema.FieldElementTypeB\003\340A\001\032^\n"
+ + " \001(\tB\003\340A\001\022=\n"
+ + "\023timestamp_precision\030\033"
+ + " \001(\0132\033.google.protobuf.Int64ValueB\003\340A\001\022d\n"
+ + "\022range_element_type\030\013 \001(\0132C"
+ + ".google.cloud.bigquery.storage.v1.TableFieldSchema.FieldElementTypeB\003\340A\001\032^\n"
+ "\020FieldElementType\022J\n"
- + "\004type\030\001"
- + " \001(\01627.google.cloud.bigquery.storage.v1.TableFieldSchema.TypeB\003\340A\002\"\340\001\n"
+ + "\004type\030\001 \001(\01627.google.clou"
+ + "d.bigquery.storage.v1.TableFieldSchema.TypeB\003\340A\002\"\340\001\n"
+ "\004Type\022\024\n"
+ "\020TYPE_UNSPECIFIED\020\000\022\n\n"
+ "\006STRING\020\001\022\t\n"
@@ -100,16 +102,17 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "\010REQUIRED\020\002\022\014\n"
+ "\010REPEATED\020\003B\272\001\n"
+ "$com.google.cloud.bigquery.storage.v1B\n"
- + "TableProtoP\001Z>cloud.googl"
- + "e.com/go/bigquery/storage/apiv1/storagepb;storagepb\252\002"
- + " Google.Cloud.BigQuery.Storage.V1\312\002 Google\\Cloud\\BigQuery\\Storage\\V"
- + "1b\006proto3"
+ + "TableProto"
+ + "P\001Z>cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb\252\002"
+ + " Google.Cloud.BigQuery.Storage.V1\312\002 Google\\Cloud\\Big"
+ + "Query\\Storage\\V1b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
com.google.api.FieldBehaviorProto.getDescriptor(),
+ com.google.protobuf.WrappersProto.getDescriptor(),
});
internal_static_google_cloud_bigquery_storage_v1_TableSchema_descriptor =
getDescriptor().getMessageTypes().get(0);
@@ -134,6 +137,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"Precision",
"Scale",
"DefaultValueExpression",
+ "TimestampPrecision",
"RangeElementType",
});
internal_static_google_cloud_bigquery_storage_v1_TableFieldSchema_FieldElementType_descriptor =
@@ -152,6 +156,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor(
descriptor, registry);
com.google.api.FieldBehaviorProto.getDescriptor();
+ com.google.protobuf.WrappersProto.getDescriptor();
}
// @@protoc_insertion_point(outer_class_scope)
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java
index 98f3bd789e..722a2b0f79 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStream.java
@@ -724,12 +724,12 @@ public com.google.cloud.bigquery.storage.v1.WriteStream.WriteMode getWriteMode()
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return The location.
*/
@@ -750,12 +750,12 @@ public java.lang.String getLocation() {
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return The bytes for location.
*/
@@ -2336,12 +2336,12 @@ public Builder clearWriteMode() {
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return The location.
*/
@@ -2361,12 +2361,12 @@ public java.lang.String getLocation() {
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return The bytes for location.
*/
@@ -2386,12 +2386,12 @@ public com.google.protobuf.ByteString getLocationBytes() {
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @param value The location to set.
* @return This builder for chaining.
@@ -2410,12 +2410,12 @@ public Builder setLocation(java.lang.String value) {
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return This builder for chaining.
*/
@@ -2430,12 +2430,12 @@ public Builder clearLocation() {
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @param value The bytes for location to set.
* @return This builder for chaining.
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java
index e52bfea3ec..1bff37c43c 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/WriteStreamOrBuilder.java
@@ -260,12 +260,12 @@ public interface WriteStreamOrBuilder
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return The location.
*/
@@ -275,12 +275,12 @@ public interface WriteStreamOrBuilder
*
*
*
- * Immutable. The geographic location where the stream's dataset resides. See
- * https://cloud.google.com/bigquery/docs/locations for supported
+ * Output only. The geographic location where the stream's dataset resides.
+ * See https://cloud.google.com/bigquery/docs/locations for supported
* locations.
*
*
- * string location = 8 [(.google.api.field_behavior) = IMMUTABLE];
+ * string location = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
*
* @return The bytes for location.
*/
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto
index f4f17c3cdf..0132aab193 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/arrow.proto
@@ -58,7 +58,32 @@ message ArrowSerializationOptions {
ZSTD = 2;
}
+ // The precision of the timestamp value in the Arrow message. This precision
+ // will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+ enum PicosTimestampPrecision {
+ // Unspecified timestamp precision. The default precision is microseconds.
+ PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;
+
+ // Timestamp values returned by Read API will be truncated to microsecond
+ // level precision. The value will be encoded as Arrow TIMESTAMP type in a
+ // 64 bit integer.
+ TIMESTAMP_PRECISION_MICROS = 1;
+
+ // Timestamp values returned by Read API will be truncated to nanosecond
+ // level precision. The value will be encoded as Arrow TIMESTAMP type in a
+ // 64 bit integer.
+ TIMESTAMP_PRECISION_NANOS = 2;
+
+ // Read API will return full precision picosecond value. The value will be
+ // encoded as a string which conforms to ISO 8601 format.
+ TIMESTAMP_PRECISION_PICOS = 3;
+ }
+
// The compression codec to use for Arrow buffers in serialized record
// batches.
CompressionCodec buffer_compression = 2;
+
+ // Optional. Set timestamp precision option. If not set, the default precision
+ // is microseconds.
+ PicosTimestampPrecision picos_timestamp_precision = 3;
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto
index ddf7c15ae2..6082fa58d8 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto
@@ -42,6 +42,27 @@ message AvroRows {
// Contains options specific to Avro Serialization.
message AvroSerializationOptions {
+ // The precision of the timestamp value in the Avro message. This precision
+ // will **only** be applied to the column(s) with the `TIMESTAMP_PICOS` type.
+ enum PicosTimestampPrecision {
+ // Unspecified timestamp precision. The default precision is microseconds.
+ PICOS_TIMESTAMP_PRECISION_UNSPECIFIED = 0;
+
+ // Timestamp values returned by Read API will be truncated to microsecond
+ // level precision. The value will be encoded as Avro TIMESTAMP type in a
+ // 64 bit integer.
+ TIMESTAMP_PRECISION_MICROS = 1;
+
+ // Timestamp values returned by Read API will be truncated to nanosecond
+ // level precision. The value will be encoded as Avro TIMESTAMP type in a
+ // 64 bit integer.
+ TIMESTAMP_PRECISION_NANOS = 2;
+
+ // Read API will return full precision picosecond value. The value will be
+ // encoded as a string which conforms to ISO 8601 format.
+ TIMESTAMP_PRECISION_PICOS = 3;
+ }
+
// Enable displayName attribute in Avro schema.
//
// The Avro specification requires field names to be alphanumeric. By
@@ -53,4 +74,8 @@ message AvroSerializationOptions {
// value and populates a "displayName" attribute for every avro field with the
// original column name.
bool enable_display_name_attribute = 1;
+
+ // Optional. Sets the timestamp precision option. If not set, the default
+ // precision is microseconds.
+ PicosTimestampPrecision picos_timestamp_precision = 2;
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
index c9dc3f3d46..dc0ae7f9f4 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
@@ -78,9 +78,9 @@ service BigQueryRead {
}
// Reads rows from the stream in the format prescribed by the ReadSession.
- // Each response contains one or more table rows, up to a maximum of 100 MiB
+ // Each response contains one or more table rows, up to a maximum of 128 MB
// per response; read requests which attempt to read individual rows larger
- // than 100 MiB will fail.
+ // than 128 MB will fail.
//
// Each request also returns a set of stream statistics reflecting the current
// state of the stream.
@@ -423,8 +423,6 @@ message CreateWriteStreamRequest {
// Requests larger than this return an error, typically `INVALID_ARGUMENT`.
message AppendRowsRequest {
// Arrow schema and data.
- // Arrow format is an experimental feature only selected for allowlisted
- // customers.
message ArrowData {
// Optional. Arrow Schema used to serialize the data.
ArrowSchema writer_schema = 1;
@@ -436,8 +434,8 @@ message AppendRowsRequest {
// ProtoData contains the data rows and schema when constructing append
// requests.
message ProtoData {
- // The protocol buffer schema used to serialize the data. Provide this value
- // whenever:
+ // Optional. The protocol buffer schema used to serialize the data. Provide
+ // this value whenever:
//
// * You send the first request of an RPC connection.
//
@@ -446,7 +444,7 @@ message AppendRowsRequest {
// * You specify a new destination table.
ProtoSchema writer_schema = 1;
- // Serialized row data in protobuf message format.
+ // Required. Serialized row data in protobuf message format.
// Currently, the backend expects the serialized rows to adhere to
// proto2 semantics when appending rows, particularly with respect to
// how default values are encoded.
@@ -522,8 +520,7 @@ message AppendRowsRequest {
// Rows in proto format.
ProtoData proto_rows = 4;
- // Rows in arrow format. This is an experimental feature only selected for
- // allowlisted customers.
+ // Rows in arrow format.
ArrowData arrow_rows = 5;
}
@@ -553,8 +550,8 @@ message AppendRowsRequest {
// Optional. Default missing value interpretation for all columns in the
// table. When a value is specified on an `AppendRowsRequest`, it is applied
- // to all requests on the connection from that point forward, until a
- // subsequent `AppendRowsRequest` sets it to a different value.
+ // to all requests from that point forward, until a subsequent
+ // `AppendRowsRequest` sets it to a different value.
// `missing_value_interpretation` can override
// `default_missing_value_interpretation`. For example, if you want to write
// `NULL` instead of using default values for some columns, you can set
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto
index 2e52a0732b..f0d1dfef5c 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto
@@ -328,8 +328,8 @@ message WriteStream {
// Immutable. Mode of the stream.
WriteMode write_mode = 7 [(google.api.field_behavior) = IMMUTABLE];
- // Immutable. The geographic location where the stream's dataset resides. See
- // https://cloud.google.com/bigquery/docs/locations for supported
+ // Output only. The geographic location where the stream's dataset resides.
+ // See https://cloud.google.com/bigquery/docs/locations for supported
// locations.
- string location = 8 [(google.api.field_behavior) = IMMUTABLE];
+ string location = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto
index eb75d70672..30c30228c2 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/table.proto
@@ -17,6 +17,7 @@ syntax = "proto3";
package google.cloud.bigquery.storage.v1;
import "google/api/field_behavior.proto";
+import "google/protobuf/wrappers.proto";
option csharp_namespace = "Google.Cloud.BigQuery.Storage.V1";
option go_package = "cloud.google.com/go/bigquery/storage/apiv1/storagepb;storagepb";
@@ -178,6 +179,16 @@ message TableFieldSchema {
// (https://cloud.google.com/bigquery/docs/default-values) for this field.
string default_value_expression = 10 [(google.api.field_behavior) = OPTIONAL];
+ // Optional. Precision (maximum number of total digits in base 10) for seconds
+ // of TIMESTAMP type.
+ //
+ // Possible values include:
+ //
+ // * 6 (Default, for TIMESTAMP type with microsecond precision)
+ // * 12 (For TIMESTAMP type with picosecond precision)
+ google.protobuf.Int64Value timestamp_precision = 27
+ [(google.api.field_behavior) = OPTIONAL];
+
// Optional. The subtype of the RANGE, if the type of this field is RANGE. If
// the type is RANGE, this field is required. Possible values for the field
// element type of a RANGE include:
diff --git a/proto-google-cloud-bigquerystorage-v1alpha/pom.xml b/proto-google-cloud-bigquerystorage-v1alpha/pom.xml
index 943afc8dea..afe8c3d3ee 100644
--- a/proto-google-cloud-bigquerystorage-v1alpha/pom.xml
+++ b/proto-google-cloud-bigquerystorage-v1alpha/pom.xml
@@ -4,13 +4,13 @@