diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 627419201d..e1f4bd0e65 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -4,11 +4,11 @@ # For syntax help see: # https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners#codeowners-syntax -# The @googleapis/api-bigtable is the default owner for changes in this repo -* @googleapis/cloud-sdk-java-team @googleapis/api-bigtable +# The @googleapis/bigtable-team is the default owner for changes in this repo +* @googleapis/cloud-sdk-java-team @googleapis/bigtable-team # for handwritten libraries, keep codeowner_team in .repo-metadata.json as owner -**/*.java @googleapis/api-bigtable @googleapis/cloud-sdk-java-team +**/*.java @googleapis/bigtable-team @googleapis/cloud-sdk-java-team # The java-samples-reviewers team is the default owner for samples changes @@ -18,5 +18,5 @@ samples/**/*.java @googleapis/java-samples-reviewers samples/snippets/generated/ @googleapis/cloud-sdk-java-team # Admin Module (Cloud Java Team ownership) -**/com/google/cloud/bigtable/admin/** @googleapis/api-bigtable @googleapis/cloud-sdk-java-team -**/com/google/bigtable/admin/** @googleapis/api-bigtable @googleapis/cloud-sdk-java-team +**/com/google/cloud/bigtable/admin/** @googleapis/bigtable-team @googleapis/cloud-sdk-java-team +**/com/google/bigtable/admin/** @googleapis/bigtable-team @googleapis/cloud-sdk-java-team diff --git a/.github/sync-repo-settings.yaml b/.github/sync-repo-settings.yaml index 7be7e5e5f1..1005971cae 100644 --- a/.github/sync-repo-settings.yaml +++ b/.github/sync-repo-settings.yaml @@ -224,5 +224,5 @@ permissionRules: permission: admin - team: yoshi-java-admins permission: admin - - team: yoshi-java + - team: cloud-sdk-java-team permission: push diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml index aab36acb20..cb086cb986 100644 --- a/.github/workflows/hermetic_library_generation.yaml +++ 
b/.github/workflows/hermetic_library_generation.yaml @@ -37,7 +37,7 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} - - uses: googleapis/sdk-platform-java/.github/scripts@v2.66.0 + - uses: googleapis/sdk-platform-java/.github/scripts@v2.67.0 if: env.SHOULD_RUN == 'true' with: base_ref: ${{ github.base_ref }} diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml index 716fd44a54..bb458025c6 100644 --- a/.github/workflows/unmanaged_dependency_check.yaml +++ b/.github/workflows/unmanaged_dependency_check.yaml @@ -14,6 +14,6 @@ jobs: shell: bash run: .kokoro/build.sh - name: Unmanaged dependency check - uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.56.1 + uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.57.0 with: bom-path: google-cloud-bigtable-bom/pom.xml diff --git a/.kokoro/presubmit/graalvm-native-a.cfg b/.kokoro/presubmit/graalvm-native-a.cfg index af4115f37f..b113cf7c79 100644 --- a/.kokoro/presubmit/graalvm-native-a.cfg +++ b/.kokoro/presubmit/graalvm-native-a.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.56.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-b.cfg b/.kokoro/presubmit/graalvm-native-b.cfg index 33056bc067..1f91aa6783 100644 --- a/.kokoro/presubmit/graalvm-native-b.cfg +++ b/.kokoro/presubmit/graalvm-native-b.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.56.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-c.cfg b/.kokoro/presubmit/graalvm-native-c.cfg index 8a6c25f495..53a7b7d63b 100644 --- a/.kokoro/presubmit/graalvm-native-c.cfg +++ b/.kokoro/presubmit/graalvm-native-c.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.56.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.57.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.repo-metadata.json b/.repo-metadata.json index d40cb5f9c0..8ac2726bf0 100644 --- a/.repo-metadata.json +++ b/.repo-metadata.json @@ -13,7 +13,7 @@ "api_id": "bigtable.googleapis.com", "library_type": "GAPIC_COMBO", "requires_billing": true, - "codeowner_team": "@googleapis/api-bigtable", + "codeowner_team": "@googleapis/bigtable-team", "excluded_poms": "google-cloud-bigtable-bom", "issue_tracker": "https://issuetracker.google.com/savedsearches/559777", "extra_versioned_modules": "google-cloud-bigtable-emulator,google-cloud-bigtable-emulator-core", diff --git a/CHANGELOG.md b/CHANGELOG.md index 0b0e32dff8..0f3db70c60 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Changelog +## [2.74.0](https://github.com/googleapis/java-bigtable/compare/v2.73.1...v2.74.0) (2026-03-02) + + +### Features + +* Add awaitOptimizeRestoredTable helper for Bigtable Admin ([#2781](https://github.com/googleapis/java-bigtable/issues/2781)) 
([cf15d45](https://github.com/googleapis/java-bigtable/commit/cf15d45a8f4c0ee385d3e53a0bae153ee1064999)) +* Add TieredStorageConfig to table admin api ([f05a1a3](https://github.com/googleapis/java-bigtable/commit/f05a1a3b0bb730e62c349dc8a7a1a82b0cf00fa7)) +* **Bigtable:** Add support for creating instances with tags ([#2733](https://github.com/googleapis/java-bigtable/issues/2733)) ([bc46174](https://github.com/googleapis/java-bigtable/commit/bc461749a0aa702f65c26774dd4696d47ef88eae)) +* Expose generated GAPIC admin client and freeze legacy surface ([#2806](https://github.com/googleapis/java-bigtable/issues/2806)) ([c620710](https://github.com/googleapis/java-bigtable/commit/c62071092d67f8ccfebe3166ca826fb001c76e28)) + + +### Bug Fixes + +* **deps:** Update the Java code generator (gapic-generator-java) to 2.67.0 ([f05a1a3](https://github.com/googleapis/java-bigtable/commit/f05a1a3b0bb730e62c349dc8a7a1a82b0cf00fa7)) +* Ensure that per attempt metrics tracer is below the retries ([#2793](https://github.com/googleapis/java-bigtable/issues/2793)) ([1f39032](https://github.com/googleapis/java-bigtable/commit/1f390328b23855ee39e2c3dacf8a0eed8d962b08)) + + +### Dependencies + +* Update shared dependencies ([#2814](https://github.com/googleapis/java-bigtable/issues/2814)) ([dde68fe](https://github.com/googleapis/java-bigtable/commit/dde68fe0ee5c5a491a5ae5382babea57e901605c)) + ## [2.73.1](https://github.com/googleapis/java-bigtable/compare/v2.73.0...v2.73.1) (2026-02-17) diff --git a/README.md b/README.md index 0177e0de23..06e628ea43 100644 --- a/README.md +++ b/README.md @@ -49,20 +49,20 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.74.0') +implementation platform('com.google.cloud:libraries-bom:26.76.0') implementation 'com.google.cloud:google-cloud-bigtable' ``` If you are using Gradle without BOM, add 
this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-bigtable:2.73.1' +implementation 'com.google.cloud:google-cloud-bigtable:2.74.0' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-bigtable" % "2.73.1" +libraryDependencies += "com.google.cloud" % "google-cloud-bigtable" % "2.74.0" ``` ## Authentication @@ -452,7 +452,7 @@ Java is a registered trademark of Oracle and/or its affiliates. [javadocs]: https://cloud.google.com/java/docs/reference/google-cloud-bigtable/latest/history [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigtable.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigtable/2.73.1 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigtable/2.74.0 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/generation_config.yaml b/generation_config.yaml index 206787fb31..bc0c86ab19 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,6 +1,6 @@ -gapic_generator_version: 2.66.0 -googleapis_commitish: fa4dc54cf123a351f3215b384a7dc7c9f36005b7 -libraries_bom_version: 26.74.0 +gapic_generator_version: 2.67.0 +googleapis_commitish: d420134ab324c0cbe0f4ae06ad9697dac77f26ad +libraries_bom_version: 26.76.0 template_excludes: - .gitignore - .kokoro/presubmit/integration.cfg @@ -27,7 +27,7 @@ libraries: issue_tracker: https://issuetracker.google.com/savedsearches/559777 release_level: stable distribution_name: com.google.cloud:google-cloud-bigtable - codeowner_team: '@googleapis/api-bigtable' + codeowner_team: '@googleapis/bigtable-team' 
api_id: bigtable.googleapis.com library_type: GAPIC_COMBO extra_versioned_modules: google-cloud-bigtable-emulator,google-cloud-bigtable-emulator-core diff --git a/google-cloud-bigtable-bom/pom.xml b/google-cloud-bigtable-bom/pom.xml index cec378cb4d..a9a033cbbf 100644 --- a/google-cloud-bigtable-bom/pom.xml +++ b/google-cloud-bigtable-bom/pom.xml @@ -3,12 +3,12 @@ 4.0.0 com.google.cloud google-cloud-bigtable-bom - 2.73.1 + 2.74.0 pom com.google.cloud sdk-platform-java-config - 3.56.1 + 3.57.0 @@ -63,37 +63,37 @@ com.google.cloud google-cloud-bigtable - 2.73.1 + 2.74.0 com.google.cloud google-cloud-bigtable-emulator - 0.210.1 + 0.211.0 com.google.cloud google-cloud-bigtable-emulator-core - 0.210.1 + 0.211.0 com.google.api.grpc grpc-google-cloud-bigtable-admin-v2 - 2.73.1 + 2.74.0 com.google.api.grpc grpc-google-cloud-bigtable-v2 - 2.73.1 + 2.74.0 com.google.api.grpc proto-google-cloud-bigtable-admin-v2 - 2.73.1 + 2.74.0 com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.73.1 + 2.74.0 diff --git a/google-cloud-bigtable-deps-bom/pom.xml b/google-cloud-bigtable-deps-bom/pom.xml index 99be3a9bc7..d586fcdd90 100644 --- a/google-cloud-bigtable-deps-bom/pom.xml +++ b/google-cloud-bigtable-deps-bom/pom.xml @@ -7,13 +7,13 @@ com.google.cloud sdk-platform-java-config - 3.56.1 + 3.57.0 com.google.cloud google-cloud-bigtable-deps-bom - 2.73.1 + 2.74.0 pom Google Cloud Bigtable Dependency BOM @@ -68,7 +68,7 @@ com.google.cloud gapic-libraries-bom - 1.77.0 + 1.80.0 pom import diff --git a/google-cloud-bigtable-emulator-core/pom.xml b/google-cloud-bigtable-emulator-core/pom.xml index 4e7f599e89..48c3bebdfd 100644 --- a/google-cloud-bigtable-emulator-core/pom.xml +++ b/google-cloud-bigtable-emulator-core/pom.xml @@ -7,12 +7,12 @@ google-cloud-bigtable-parent com.google.cloud - 2.73.1 + 2.74.0 Google Cloud Java - Bigtable Emulator Core google-cloud-bigtable-emulator-core - 0.210.1 + 0.211.0 A Java wrapper for the Cloud Bigtable emulator. 
diff --git a/google-cloud-bigtable-emulator/pom.xml b/google-cloud-bigtable-emulator/pom.xml index 2601f88fa2..a0a1fbc2a6 100644 --- a/google-cloud-bigtable-emulator/pom.xml +++ b/google-cloud-bigtable-emulator/pom.xml @@ -5,7 +5,7 @@ 4.0.0 google-cloud-bigtable-emulator - 0.210.1 + 0.211.0 Google Cloud Java - Bigtable Emulator https://github.com/googleapis/java-bigtable @@ -14,7 +14,7 @@ com.google.cloud google-cloud-bigtable-parent - 2.73.1 + 2.74.0 scm:git:git@github.com:googleapis/java-bigtable.git @@ -81,14 +81,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.73.1 + 2.74.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.73.1 + 2.74.0 pom import @@ -99,7 +99,7 @@ com.google.cloud google-cloud-bigtable-emulator-core - 0.210.1 + 0.211.0 diff --git a/google-cloud-bigtable/pom.xml b/google-cloud-bigtable/pom.xml index 0f682ff143..cbc566f890 100644 --- a/google-cloud-bigtable/pom.xml +++ b/google-cloud-bigtable/pom.xml @@ -2,7 +2,7 @@ 4.0.0 google-cloud-bigtable - 2.73.1 + 2.74.0 jar Google Cloud Bigtable https://github.com/googleapis/java-bigtable @@ -12,11 +12,11 @@ com.google.cloud google-cloud-bigtable-parent - 2.73.1 + 2.74.0 - 2.73.1 + 2.74.0 google-cloud-bigtable @@ -45,8 +45,8 @@ batch-bigtable.googleapis.com:443 - 1.65.0 - 3.25.5 + 1.76.3 + 4.33.2 @@ -54,14 +54,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.73.1 + 2.74.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.73.1 + 2.74.0 pom import @@ -136,10 +136,6 @@ com.google.protobuf protobuf-java-util - - com.google.code.gson - gson - io.opencensus opencensus-api @@ -147,7 +143,6 @@ io.grpc grpc-alts - runtime org.checkerframework @@ -260,6 +255,11 @@ + + com.google.api.grpc + grpc-google-cloud-monitoring-v3 + test + com.google.api gax @@ -343,11 +343,26 @@ junit test + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter + test + org.mockito mockito-core test + + org.mockito + mockito-junit-jupiter + test + com.google.guava 
guava-testlib diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java index 0e9f25aaaa..a07fcc36c1 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/Version.java @@ -20,6 +20,6 @@ @InternalApi("For internal use only") public final class Version { // {x-version-update-start:google-cloud-bigtable:current} - public static String VERSION = "2.73.1"; + public static String VERSION = "2.74.0"; // {x-version-update-end} } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClient.java index 0e5a4c9433..cdb95062e3 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClient.java @@ -19,6 +19,7 @@ import com.google.api.core.ApiFunction; import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; +import com.google.api.core.ObsoleteApi; import com.google.api.gax.longrunning.OperationFuture; import com.google.api.gax.rpc.ApiExceptions; import com.google.api.gax.rpc.NotFoundException; @@ -72,6 +73,7 @@ import com.google.cloud.bigtable.admin.v2.stub.EnhancedBigtableTableAdminStub; import com.google.cloud.bigtable.data.v2.internal.TableAdminRequestContext; import com.google.common.base.Preconditions; +import com.google.common.base.Strings; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Lists; @@ -102,13 +104,30 @@ * // One instance per application. 
* BigtableTableAdminClient client = BigtableTableAdminClient.create("[PROJECT]", "[INSTANCE]"); * - * CreateTableRequest request = - * CreateTableRequest.of("my-table") - * .addFamily("cf1") - * .addFamily("cf2", GCRULES.maxVersions(10)) - * .addSplit(ByteString.copyFromUtf8("b")) - * .addSplit(ByteString.copyFromUtf8("q")); - * client.createTable(request); + * com.google.bigtable.admin.v2.CreateTableRequest request = + * com.google.bigtable.admin.v2.CreateTableRequest.newBuilder() + * .setParent(InstanceName.of("[PROJECT]", "[INSTANCE]").toString()) + * .setTableId("my-table") + * .setTable( + * com.google.bigtable.admin.v2.Table.newBuilder() + * .putColumnFamilies("cf1", com.google.bigtable.admin.v2.ColumnFamily.getDefaultInstance()) + * .putColumnFamilies( + * "cf2", + * com.google.bigtable.admin.v2.ColumnFamily.newBuilder() + * .setGcRule(GcRuleBuilder.maxVersions(10)) + * .build()) + * .build()) + * .addInitialSplits( + * com.google.bigtable.admin.v2.CreateTableRequest.Split.newBuilder() + * .setKey(ByteString.copyFromUtf8("b")) + * .build()) + * .addInitialSplits( + * com.google.bigtable.admin.v2.CreateTableRequest.Split.newBuilder() + * .setKey(ByteString.copyFromUtf8("q")) + * .build()) + * .build(); + * + * client.getBaseClient().createTable(request); * * // Cleanup during application shutdown. * client.close(); @@ -150,6 +169,7 @@ public final class BigtableTableAdminClient implements AutoCloseable { private final EnhancedBigtableTableAdminStub stub; private final String projectId; private final String instanceId; + private final BaseBigtableTableAdminClient baseClient; /** Constructs an instance of BigtableTableAdminClient with the given project and instance IDs. 
*/ public static BigtableTableAdminClient create( @@ -189,6 +209,7 @@ private BigtableTableAdminClient( this.projectId = projectId; this.instanceId = instanceId; this.stub = stub; + this.baseClient = BaseBigtableTableAdminClient.create(stub); } /** Gets the project ID of the instance whose tables this client manages. */ @@ -201,13 +222,24 @@ public String getInstanceId() { return instanceId; } + /** + * Returns the modern autogenerated client. This provides access to the newest features and + * proto-based methods. + */ + public BaseBigtableTableAdminClient getBaseClient() { + return baseClient; + } + @Override public void close() { stub.close(); } /** - * Creates a new table with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createTable(com.google.bigtable.admin.v2.CreateTableRequest)}. + * + *

Creates a new table with the specified configuration. * *

Sample code: * @@ -231,12 +263,16 @@ public void close() { * @see CreateTableRequest for available options. * @see GCRules for the documentation on available garbage collection rules. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Table createTable(CreateTableRequest request) { return ApiExceptions.callAndTranslateApiException(createTableAsync(request)); } /** - * Asynchronously creates a new table with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createTableCallable()}. + * + *

Asynchronously creates a new table with the specified configuration. * *

Sample code: * @@ -275,13 +311,17 @@ public Table createTable(CreateTableRequest request) { * @see GCRules for the documentation on available garbage collection rules. */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture createTableAsync(CreateTableRequest request) { return transformToTableResponse( this.stub.createTableCallable().futureCall(request.toProto(projectId, instanceId))); } /** - * Update a table with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateTable(com.google.bigtable.admin.v2.UpdateTableRequest)}. + * + *

Update a table with the specified configuration. * *

Sample code: * @@ -301,12 +341,16 @@ public ApiFuture

createTableAsync(CreateTableRequest request) { * * @see UpdateTableRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Table updateTable(UpdateTableRequest request) { return ApiExceptions.callAndTranslateApiException(updateTableAsync(request)); } /** - * Asynchronously update a table with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateTableOperationCallable()}. + * + *

Asynchronously update a table with the specified configuration. * *

Sample code: * @@ -334,6 +378,7 @@ public Table updateTable(UpdateTableRequest request) { * * @see UpdateTableRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture

updateTableAsync(UpdateTableRequest request) { return ApiFutures.transform( stub.updateTableOperationCallable().futureCall(request.toProto(projectId, instanceId)), @@ -347,7 +392,10 @@ public Table apply(com.google.bigtable.admin.v2.Table tableProto) { } /** - * Creates, updates and drops column families as specified in the request. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#modifyColumnFamilies(com.google.bigtable.admin.v2.ModifyColumnFamiliesRequest)}. + * + *

Creates, updates and drops column families as specified in the request. * *

Sample code: * @@ -380,12 +428,16 @@ public Table apply(com.google.bigtable.admin.v2.Table tableProto) { * * @see ModifyColumnFamiliesRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Table modifyFamilies(ModifyColumnFamiliesRequest request) { return ApiExceptions.callAndTranslateApiException(modifyFamiliesAsync(request)); } /** - * Asynchronously creates, updates, and drops column families as specified in the request. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#modifyColumnFamiliesCallable()}. + * + *

Asynchronously creates, updates, and drops column families as specified in the request. * *

Sample code: * @@ -432,6 +484,7 @@ public Table modifyFamilies(ModifyColumnFamiliesRequest request) { * @see ModifyColumnFamiliesRequest for available options. */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture

modifyFamiliesAsync(ModifyColumnFamiliesRequest request) { return transformToTableResponse( this.stub @@ -440,7 +493,10 @@ public ApiFuture
modifyFamiliesAsync(ModifyColumnFamiliesRequest request) } /** - * Deletes the table specified by the table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteTable(com.google.bigtable.admin.v2.DeleteTableRequest)}. + * + *

Deletes the table specified by the table ID. * *

Sample code: * @@ -448,12 +504,16 @@ public ApiFuture

modifyFamiliesAsync(ModifyColumnFamiliesRequest request) * client.deleteTable("my-table"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void deleteTable(String tableId) { ApiExceptions.callAndTranslateApiException(deleteTableAsync(tableId)); } /** - * Asynchronously deletes the table specified by the table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteTableCallable()}. + * + *

Asynchronously deletes the table specified by the table ID. * *

Sample code: * @@ -476,6 +536,7 @@ public void deleteTable(String tableId) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture deleteTableAsync(String tableId) { DeleteTableRequest request = DeleteTableRequest.newBuilder().setName(getTableName(tableId)).build(); @@ -484,7 +545,10 @@ public ApiFuture deleteTableAsync(String tableId) { } /** - * Checks if the table specified by the table ID exists. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getTable(com.google.bigtable.admin.v2.GetTableRequest)}. + * + *

Checks if the table specified by the table ID exists. * *

Sample code: * @@ -494,12 +558,16 @@ public ApiFuture deleteTableAsync(String tableId) { * } * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public boolean exists(String tableId) { return ApiExceptions.callAndTranslateApiException(existsAsync(tableId)); } /** - * Asynchronously checks if the table specified by the table ID exists. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getTableCallable()}. + * + *

Asynchronously checks if the table specified by the table ID exists. * *

Sample code: * @@ -525,6 +593,7 @@ public boolean exists(String tableId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture existsAsync(String tableId) { ApiFuture

protoFuture = @@ -554,7 +623,10 @@ public Boolean apply(NotFoundException ignored) { } /** - * Gets the table metadata by table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getTable(com.google.bigtable.admin.v2.GetTableRequest)}. + * + *

Gets the table metadata by table ID. * *

Sample code: * @@ -569,12 +641,16 @@ public Boolean apply(NotFoundException ignored) { * } * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Table getTable(String tableId) { return ApiExceptions.callAndTranslateApiException(getTableAsync(tableId)); } /** - * Asynchronously gets the table metadata by table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getTableCallable()}. + * + *

Asynchronously gets the table metadata by table ID. * *

Sample code: * @@ -601,6 +677,7 @@ public Table getTable(String tableId) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture

getTableAsync(String tableId) { return getTableAsync(tableId, com.google.bigtable.admin.v2.Table.View.SCHEMA_VIEW); } @@ -614,21 +691,29 @@ private ApiFuture
getTableAsync( } /** - * Gets the current encryption info for the table across all of the clusters. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getTable(com.google.bigtable.admin.v2.GetTableRequest)}. + * + *

Gets the current encryption info for the table across all of the clusters. * *

The returned Map will be keyed by cluster id and contain a status for all of the keys in * use. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Map> getEncryptionInfo(String tableId) { return ApiExceptions.callAndTranslateApiException(getEncryptionInfoAsync(tableId)); } /** - * Asynchronously gets the current encryption info for the table across all of the clusters. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getTableCallable()}. + * + *

Asynchronously gets the current encryption info for the table across all of the clusters. * *

The returned Map will be keyed by cluster id and contain a status for all of the keys in * use. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture>> getEncryptionInfoAsync(String tableId) { GetTableRequest request = GetTableRequest.newBuilder() @@ -660,7 +745,10 @@ public Map> apply(com.google.bigtable.admin.v2.Tabl } /** - * Lists all table IDs in the instance. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listTables(com.google.bigtable.admin.v2.ListTablesRequest)}. + * + *

Lists all table IDs in the instance. * *

Sample code: * @@ -672,12 +760,16 @@ public Map> apply(com.google.bigtable.admin.v2.Tabl * } * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List listTables() { return ApiExceptions.callAndTranslateApiException(listTablesAsync()); } /** - * Asynchronously lists all table IDs in the instance. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listTablesPagedCallable()}. + * + *

Asynchronously lists all table IDs in the instance. * *

Sample code: * @@ -702,6 +794,7 @@ public List listTables() { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture> listTablesAsync() { ListTablesRequest request = ListTablesRequest.newBuilder() @@ -765,7 +858,10 @@ public List apply(List protos) { } /** - * Drops rows by the specified row key prefix and table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#dropRowRange(com.google.bigtable.admin.v2.DropRowRangeRequest)}. + * + *

Drops rows by the specified row key prefix and table ID. * *

Please note that this method is considered part of the admin API and is rate limited. * @@ -775,12 +871,16 @@ public List apply(List protos) { * client.dropRowRange("my-table", "prefix"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void dropRowRange(String tableId, String rowKeyPrefix) { ApiExceptions.callAndTranslateApiException(dropRowRangeAsync(tableId, rowKeyPrefix)); } /** - * Asynchronously drops rows by the specified row key prefix and table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#dropRowRangeCallable()}. + * + *

Asynchronously drops rows by the specified row key prefix and table ID. * *

Please note that this method is considered part of the admin API and is rate limited. * @@ -805,12 +905,16 @@ public void dropRowRange(String tableId, String rowKeyPrefix) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture dropRowRangeAsync(String tableId, String rowKeyPrefix) { return dropRowRangeAsync(tableId, ByteString.copyFromUtf8(rowKeyPrefix)); } /** - * Drops rows by the specified row key prefix and table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#dropRowRange(com.google.bigtable.admin.v2.DropRowRangeRequest)}. + * + *

Drops rows by the specified row key prefix and table ID. * *

Please note that this method is considered part of the admin API and is rate limited. * @@ -821,12 +925,16 @@ public ApiFuture dropRowRangeAsync(String tableId, String rowKeyPrefix) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void dropRowRange(String tableId, ByteString rowKeyPrefix) { ApiExceptions.callAndTranslateApiException(dropRowRangeAsync(tableId, rowKeyPrefix)); } /** - * Asynchronously drops rows by the specified row key prefix and table ID. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#dropRowRangeCallable()}. + * + *

Asynchronously drops rows by the specified row key prefix and table ID. * *

Please note that this method is considered part of the admin API and is rate limited. * @@ -851,6 +959,7 @@ public void dropRowRange(String tableId, ByteString rowKeyPrefix) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture dropRowRangeAsync(String tableId, ByteString rowKeyPrefix) { DropRowRangeRequest request = DropRowRangeRequest.newBuilder() @@ -862,7 +971,10 @@ public ApiFuture dropRowRangeAsync(String tableId, ByteString rowKeyPrefix } /** - * Drops all data in the table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#dropRowRange(com.google.bigtable.admin.v2.DropRowRangeRequest)}. + * + *

Drops all data in the table. * *

Sample code: * @@ -870,12 +982,16 @@ public ApiFuture dropRowRangeAsync(String tableId, ByteString rowKeyPrefix * client.dropAllRows("my-table"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void dropAllRows(String tableId) { ApiExceptions.callAndTranslateApiException(dropAllRowsAsync(tableId)); } /** - * Asynchronously drops all data in the table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#dropRowRangeCallable()}. + * + *

Asynchronously drops all data in the table. * *

Sample code: * @@ -898,6 +1014,7 @@ public void dropAllRows(String tableId) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture dropAllRowsAsync(String tableId) { DropRowRangeRequest request = DropRowRangeRequest.newBuilder() @@ -909,7 +1026,10 @@ public ApiFuture dropAllRowsAsync(String tableId) { } /** - * Blocks the current thread until replication has caught up to the point when this method was + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#checkConsistency(com.google.bigtable.admin.v2.CheckConsistencyRequest)}. + * + *

Blocks the current thread until replication has caught up to the point when this method was * called. This allows callers to make sure that their mutations have been replicated across all * of their clusters. * @@ -921,6 +1041,7 @@ public ApiFuture dropAllRowsAsync(String tableId) { * * @throws com.google.api.gax.retrying.PollException when polling exceeds the total timeout */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void awaitReplication(String tableId) { // TODO(igorbernstein2): remove usage of typesafe names com.google.bigtable.admin.v2.TableName tableName = @@ -936,7 +1057,10 @@ public void awaitConsistency(ConsistencyRequest consistencyRequest) { } /** - * Creates a backup with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createBackup(com.google.bigtable.admin.v2.CreateBackupRequest)}. + * + *

Creates a backup with the specified configuration. * *

Sample code * @@ -948,12 +1072,16 @@ public void awaitConsistency(ConsistencyRequest consistencyRequest) { * Backup response = client.createBackup(request); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Backup createBackup(CreateBackupRequest request) { return ApiExceptions.callAndTranslateApiException(createBackupAsync(request)); } /** - * Creates a backup with the specified configuration asynchronously. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createBackupOperationCallable()}. + * + *

Creates a backup with the specified configuration asynchronously. * *

Sample code * @@ -979,6 +1107,7 @@ public Backup createBackup(CreateBackupRequest request) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture createBackupAsync(CreateBackupRequest request) { return ApiFutures.transform( stub.createBackupOperationCallable().futureCall(request.toProto(projectId, instanceId)), @@ -992,7 +1121,10 @@ public Backup apply(com.google.bigtable.admin.v2.Backup backupProto) { } /** - * Gets a backup with the specified backup ID in the specified cluster. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getBackup(com.google.bigtable.admin.v2.GetBackupRequest)}. + * + *

Gets a backup with the specified backup ID in the specified cluster. * *

Sample code * @@ -1000,12 +1132,16 @@ public Backup apply(com.google.bigtable.admin.v2.Backup backupProto) { * Backup backup = client.getBackup(clusterId, backupId); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Backup getBackup(String clusterId, String backupId) { return ApiExceptions.callAndTranslateApiException(getBackupAsync(clusterId, backupId)); } /** - * Gets a backup with the specified backup ID in the specified cluster asynchronously. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getBackupCallable()}. + * + *

Gets a backup with the specified backup ID in the specified cluster asynchronously. * *

Sample code * @@ -1027,6 +1163,7 @@ public Backup getBackup(String clusterId, String backupId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture getBackupAsync(String clusterId, String backupId) { GetBackupRequest request = GetBackupRequest.newBuilder() @@ -1044,7 +1181,10 @@ public Backup apply(com.google.bigtable.admin.v2.Backup backup) { } /** - * Lists backups in the specified cluster. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listBackups(com.google.bigtable.admin.v2.ListBackupsRequest)}. + * + *

Lists backups in the specified cluster. * *

Sample code * @@ -1052,12 +1192,16 @@ public Backup apply(com.google.bigtable.admin.v2.Backup backup) { * List backups = client.listBackups(clusterId); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List listBackups(String clusterId) { return ApiExceptions.callAndTranslateApiException(listBackupsAsync(clusterId)); } /** - * Lists backups in the specified cluster asynchronously. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listBackupsPagedCallable()}. + * + *

Lists backups in the specified cluster asynchronously. * *

Sample code: * @@ -1082,6 +1226,7 @@ public List listBackups(String clusterId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture> listBackupsAsync(String clusterId) { ListBackupsRequest request = ListBackupsRequest.newBuilder() @@ -1145,7 +1290,10 @@ public List apply(List protos) { } /** - * Deletes a backup with the specified backup ID in the specified cluster. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteBackup(com.google.bigtable.admin.v2.DeleteBackupRequest)}. + * + *

Deletes a backup with the specified backup ID in the specified cluster. * *

Sample code * @@ -1153,12 +1301,16 @@ public List apply(List protos) { * client.deleteBackup(clusterId, backupId); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void deleteBackup(String clusterId, String backupId) { ApiExceptions.callAndTranslateApiException(deleteBackupAsync(clusterId, backupId)); } /** - * Deletes a backup with the specified backup ID in the specified cluster asynchronously. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteBackupCallable()}. + * + *

Deletes a backup with the specified backup ID in the specified cluster asynchronously. * *

Sample code * @@ -1180,6 +1332,7 @@ public void deleteBackup(String clusterId, String backupId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture deleteBackupAsync(String clusterId, String backupId) { DeleteBackupRequest request = DeleteBackupRequest.newBuilder() @@ -1190,7 +1343,10 @@ public ApiFuture deleteBackupAsync(String clusterId, String backupId) { } /** - * Updates a backup with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateBackup(com.google.bigtable.admin.v2.UpdateBackupRequest)}. + * + *

Updates a backup with the specified configuration. * *

Sample code * @@ -1198,12 +1354,16 @@ public ApiFuture deleteBackupAsync(String clusterId, String backupId) { * Backup backup = client.updateBackup(clusterId, backupId); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Backup updateBackup(UpdateBackupRequest request) { return ApiExceptions.callAndTranslateApiException(updateBackupAsync(request)); } /** - * Updates a backup with the specified configuration asynchronously. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateBackupCallable()}. + * + *

Updates a backup with the specified configuration asynchronously. * *

Sample code * @@ -1225,6 +1385,7 @@ public Backup updateBackup(UpdateBackupRequest request) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture updateBackupAsync(UpdateBackupRequest request) { return ApiFutures.transform( stub.updateBackupCallable().futureCall(request.toProto(projectId, instanceId)), @@ -1238,7 +1399,10 @@ public Backup apply(com.google.bigtable.admin.v2.Backup proto) { } /** - * Restores a backup to a new table with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#restoreTable(com.google.bigtable.admin.v2.RestoreTableRequest)}. + * + *

Restores a backup to a new table with the specified configuration. * *

Sample code * @@ -1247,12 +1411,18 @@ public Backup apply(com.google.bigtable.admin.v2.Backup proto) { * client.restoreTable(RestoreTableRequest.of(clusterId, backupId).setTableId(tableId)); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public RestoredTableResult restoreTable(RestoreTableRequest request) throws ExecutionException, InterruptedException { return ApiExceptions.callAndTranslateApiException(restoreTableAsync(request)); } - /** Restores a backup to a new table with the specified configuration asynchronously. + /** + *

This method is obsolete. For the recommended proto-based approach, please see + * {@link com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#restoreTableAsync(com.google.bigtable.admin.v2.RestoreTableRequest)}. + * + *

Restores a backup to a new table with the specified configuration asynchronously. + * *

Sample code * *

{@code
@@ -1273,7 +1443,8 @@ public RestoredTableResult restoreTable(RestoreTableRequest request)
    *   MoreExecutors.directExecutor()
    * );
    * 
- * */ + */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture restoreTableAsync(RestoreTableRequest request) { final OperationFuture future = this.stub @@ -1296,6 +1467,37 @@ public ApiFuture apply(com.google.bigtable.admin.v2.Table t MoreExecutors.directExecutor()); } + /** + * Awaits the completion of the "Optimize Restored Table" operation. + * + *

This method blocks until the restore operation is complete, extracts the optimization token, + * and returns an ApiFuture for the optimization phase. + * + * @param restoreFuture The future returned by restoreTableAsync(). + * @return An ApiFuture that tracks the optimization progress. + */ + public ApiFuture awaitOptimizeRestoredTable(ApiFuture restoreFuture) { + // 1. Block and wait for the restore operation to complete + RestoredTableResult result; + try { + result = restoreFuture.get(); + } catch (Exception e) { + throw new RuntimeException("Restore operation failed", e); + } + + // 2. Extract the operation token from the result + // (RestoredTableResult already wraps the OptimizeRestoredTableOperationToken) + OptimizeRestoredTableOperationToken token = result.getOptimizeRestoredTableOperationToken(); + + if (token == null || Strings.isNullOrEmpty(token.getOperationName())) { + // If there is no optimization operation, return immediate success. + return ApiFutures.immediateFuture(Empty.getDefaultInstance()); + } + + // 3. Return the future for the optimization operation + return stub.awaitOptimizeRestoredTableCallable().resumeFutureCall(token.getOperationName()); + } + /** * Awaits a restored table is fully optimized. * @@ -1345,7 +1547,10 @@ public ApiFuture awaitOptimizeRestoredTableAsync( } /** - * Copy an existing backup to a new backup in a Cloud Bigtable cluster with the specified + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#copyBackup(com.google.bigtable.admin.v2.CopyBackupRequest)}. + * + *

Copy an existing backup to a new backup in a Cloud Bigtable cluster with the specified * configuration. * *

Sample code Note: You want to create the client with project and instance where you want the @@ -1382,12 +1587,16 @@ public ApiFuture awaitOptimizeRestoredTableAsync( * Backup response = client.copyBackup(request); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Backup copyBackup(CopyBackupRequest request) { return ApiExceptions.callAndTranslateApiException(copyBackupAsync(request)); } /** - * Creates a copy of a backup from an existing backup in a Cloud Bigtable cluster with the + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#copyBackupOperationCallable()}. + * + *

Creates a copy of a backup from an existing backup in a Cloud Bigtable cluster with the * specified configuration asynchronously. * *

Sample code @@ -1414,6 +1623,7 @@ public Backup copyBackup(CopyBackupRequest request) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture copyBackupAsync(CopyBackupRequest request) { return ApiFutures.transform( stub.copyBackupOperationCallable().futureCall(request.toProto(projectId, instanceId)), @@ -1427,9 +1637,12 @@ public Backup apply(com.google.bigtable.admin.v2.Backup backupProto) { } /** - * Returns a future that is resolved when replication has caught up to the point when this method - * was called. This allows callers to make sure that their mutations have been replicated across - * all of their clusters. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#generateConsistencyTokenCallable()}. + * + *

Returns a future that is resolved when replication has caught up to the point when this + * method was called. This allows callers to make sure that their mutations have been replicated + * across all of their clusters. * *

Sample code: * @@ -1453,6 +1666,7 @@ public Backup apply(com.google.bigtable.admin.v2.Backup backupProto) { * } */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture awaitReplicationAsync(final String tableId) { // TODO(igorbernstein2): remove usage of typesafe names com.google.bigtable.admin.v2.TableName tableName = @@ -1485,7 +1699,10 @@ public ApiFuture waitForConsistencyAsync(String tableId, String consistenc } /** - * Creates a new authorized view with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createAuthorizedView(com.google.bigtable.admin.v2.CreateAuthorizedViewRequest)}. + * + *

Creates a new authorized view with the specified configuration. * *

Sample code: * @@ -1504,12 +1721,16 @@ public ApiFuture waitForConsistencyAsync(String tableId, String consistenc * * @see CreateAuthorizedViewRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public AuthorizedView createAuthorizedView(CreateAuthorizedViewRequest request) { return ApiExceptions.callAndTranslateApiException(createAuthorizedViewAsync(request)); } /** - * Asynchronously creates a new authorized view with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createAuthorizedViewOperationCallable()}. + * + *

Asynchronously creates a new authorized view with the specified configuration. * *

Sample code: * @@ -1542,6 +1763,7 @@ public AuthorizedView createAuthorizedView(CreateAuthorizedViewRequest request) * * @see CreateAuthorizedViewRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture createAuthorizedViewAsync(CreateAuthorizedViewRequest request) { return ApiFutures.transform( stub.createAuthorizedViewOperationCallable() @@ -1557,7 +1779,10 @@ public AuthorizedView apply( } /** - * Updates an existing authorized view with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateAuthorizedView(com.google.bigtable.admin.v2.UpdateAuthorizedViewRequest)}. + * + *

Updates an existing authorized view with the specified configuration. * *

Sample code: * @@ -1572,12 +1797,16 @@ public AuthorizedView apply( * * @see UpdateAuthorizedViewRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public AuthorizedView updateAuthorizedView(UpdateAuthorizedViewRequest request) { return ApiExceptions.callAndTranslateApiException(updateAuthorizedViewAsync(request)); } /** - * Asynchronously updates an existing authorized view with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateAuthorizedViewOperationCallable()}. + * + *

Asynchronously updates an existing authorized view with the specified configuration. * *

Sample code: * @@ -1606,6 +1835,7 @@ public AuthorizedView updateAuthorizedView(UpdateAuthorizedViewRequest request) * * @see UpdateAuthorizedViewRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture updateAuthorizedViewAsync(UpdateAuthorizedViewRequest request) { return ApiFutures.transform( stub.updateAuthorizedViewOperationCallable() @@ -1621,7 +1851,10 @@ public AuthorizedView apply( } /** - * Gets an authorized view with the specified authorized view ID in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getAuthorizedView(com.google.bigtable.admin.v2.GetAuthorizedViewRequest)}. + * + *

Gets an authorized view with the specified authorized view ID in the specified table. * *

Sample code: * @@ -1629,14 +1862,18 @@ public AuthorizedView apply( * AuthorizedView authorizedView = client.getAuthorizedView("my-table", "my-authorized-view"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public AuthorizedView getAuthorizedView(String tableId, String authorizedViewId) { return ApiExceptions.callAndTranslateApiException( getAuthorizedViewAsync(tableId, authorizedViewId)); } /** - * Asynchronously gets an authorized view with the specified authorized view ID in the specified - * table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getAuthorizedViewCallable()}. + * + *

Asynchronously gets an authorized view with the specified authorized view ID in the + * specified table. * *

Sample code: * @@ -1658,6 +1895,7 @@ public AuthorizedView getAuthorizedView(String tableId, String authorizedViewId) * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture getAuthorizedViewAsync(String tableId, String authorizedViewId) { GetAuthorizedViewRequest request = GetAuthorizedViewRequest.newBuilder() @@ -1677,7 +1915,10 @@ public AuthorizedView apply( } /** - * Lists all authorized view IDs in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listAuthorizedViews(com.google.bigtable.admin.v2.ListAuthorizedViewsRequest)}. + * + *

Lists all authorized view IDs in the specified table. * *

Sample code: * @@ -1685,12 +1926,16 @@ public AuthorizedView apply( * List authorizedViews = client.listAuthorizedViews("my-table"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List listAuthorizedViews(String tableId) { return ApiExceptions.callAndTranslateApiException(listAuthorizedViewsAsync(tableId)); } /** - * Asynchronously lists all authorized view IDs in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listAuthorizedViewsPagedCallable()}. + * + *

Asynchronously lists all authorized view IDs in the specified table. * *

Sample code: * @@ -1715,6 +1960,7 @@ public List listAuthorizedViews(String tableId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture> listAuthorizedViewsAsync(String tableId) { ListAuthorizedViewsRequest request = ListAuthorizedViewsRequest.newBuilder() @@ -1780,9 +2026,12 @@ public List apply(List prot } /** - * Deletes an authorized view with the specified authorized view ID in the specified table. Note - * that the deletion is prohibited if the authorized view has deletion_protection field set to - * true. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteAuthorizedView(com.google.bigtable.admin.v2.DeleteAuthorizedViewRequest)}. + * + *

Deletes an authorized view with the specified authorized view ID in the specified table. + * Note that the deletion is prohibited if the authorized view has deletion_protection field set + * to true. * *

Sample code: * @@ -1790,13 +2039,17 @@ public List apply(List prot * client.deleteAuthorizedView("my-table", "my-authorized-view"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void deleteAuthorizedView(String tableId, String authorizedViewId) { ApiExceptions.callAndTranslateApiException( deleteAuthorizedViewAsync(tableId, authorizedViewId)); } /** - * Asynchronously deletes an authorized view with the specified authorized view ID in the + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteAuthorizedViewCallable()}. + * + *

Asynchronously deletes an authorized view with the specified authorized view ID in the * specified table. Note that the deletion is prohibited if the authorized view has * deletion_protection field set to true. * @@ -1820,6 +2073,7 @@ public void deleteAuthorizedView(String tableId, String authorizedViewId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture deleteAuthorizedViewAsync(String tableId, String authorizedViewId) { DeleteAuthorizedViewRequest request = DeleteAuthorizedViewRequest.newBuilder() @@ -1831,7 +2085,10 @@ public ApiFuture deleteAuthorizedViewAsync(String tableId, String authoriz } /** - * Creates a new schema bundle with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createSchemaBundle(com.google.bigtable.admin.v2.CreateSchemaBundleRequest)}. + * + *

Creates a new schema bundle with the specified configuration. * *

Sample code: * @@ -1849,12 +2106,16 @@ public ApiFuture deleteAuthorizedViewAsync(String tableId, String authoriz * * @see CreateSchemaBundleRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public SchemaBundle createSchemaBundle(CreateSchemaBundleRequest request) { return ApiExceptions.callAndTranslateApiException(createSchemaBundleAsync(request)); } /** - * Asynchronously creates a new schema bundle with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#createSchemaBundleOperationCallable()}. + * + *

Asynchronously creates a new schema bundle with the specified configuration. * *

Sample code: * @@ -1885,6 +2146,7 @@ public SchemaBundle createSchemaBundle(CreateSchemaBundleRequest request) { * * @see CreateSchemaBundleRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture createSchemaBundleAsync(CreateSchemaBundleRequest request) { return ApiFutures.transform( stub.createSchemaBundleOperationCallable() @@ -1899,7 +2161,10 @@ public SchemaBundle apply(com.google.bigtable.admin.v2.SchemaBundle schemaBundle } /** - * Updates an existing schema bundle with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateSchemaBundle(com.google.bigtable.admin.v2.UpdateSchemaBundleRequest)}. + * + *

Updates an existing schema bundle with the specified configuration. * *

Sample code: * @@ -1913,12 +2178,16 @@ public SchemaBundle apply(com.google.bigtable.admin.v2.SchemaBundle schemaBundle * * @see UpdateSchemaBundleRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public SchemaBundle updateSchemaBundle(UpdateSchemaBundleRequest request) { return ApiExceptions.callAndTranslateApiException(updateSchemaBundleAsync(request)); } /** - * Asynchronously updates an existing schema bundle with the specified configuration. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#updateSchemaBundleOperationCallable()}. + * + *

Asynchronously updates an existing schema bundle with the specified configuration. * *

Sample code: * @@ -1945,6 +2214,7 @@ public SchemaBundle updateSchemaBundle(UpdateSchemaBundleRequest request) { * * @see UpdateSchemaBundleRequest for available options. */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture updateSchemaBundleAsync(UpdateSchemaBundleRequest request) { return ApiFutures.transform( stub.updateSchemaBundleOperationCallable() @@ -1959,7 +2229,10 @@ public SchemaBundle apply(com.google.bigtable.admin.v2.SchemaBundle schemaBundle } /** - * Gets an schema bundle with the specified schema bundle ID in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getSchemaBundle(com.google.bigtable.admin.v2.GetSchemaBundleRequest)}. + * + *

Gets an schema bundle with the specified schema bundle ID in the specified table. * *

Sample code: * @@ -1967,13 +2240,17 @@ public SchemaBundle apply(com.google.bigtable.admin.v2.SchemaBundle schemaBundle * SchemaBundle schemaBundle = client.getSchemaBundle("my-table", "my-schema-bundle"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public SchemaBundle getSchemaBundle(String tableId, String schemaBundleId) { return ApiExceptions.callAndTranslateApiException( getSchemaBundleAsync(tableId, schemaBundleId)); } /** - * Asynchronously gets an schema bundle with the specified schema bundle ID in the specified + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getSchemaBundleCallable()}. + * + *

Asynchronously gets an schema bundle with the specified schema bundle ID in the specified * table. * *

Sample code: @@ -1995,6 +2272,7 @@ public SchemaBundle getSchemaBundle(String tableId, String schemaBundleId) { * MoreExecutors.directExecutor()); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture getSchemaBundleAsync(String tableId, String schemaBundleId) { GetSchemaBundleRequest request = GetSchemaBundleRequest.newBuilder() @@ -2013,7 +2291,10 @@ public SchemaBundle apply(com.google.bigtable.admin.v2.SchemaBundle schemaBundle } /** - * Lists all schema bundle IDs in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listSchemaBundles(com.google.bigtable.admin.v2.ListSchemaBundlesRequest)}. + * + *

Lists all schema bundle IDs in the specified table. * *

Sample code: * @@ -2021,12 +2302,16 @@ public SchemaBundle apply(com.google.bigtable.admin.v2.SchemaBundle schemaBundle * List schemaBundles = client.listSchemaBundles("my-table"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List listSchemaBundles(String tableId) { return ApiExceptions.callAndTranslateApiException(listSchemaBundlesAsync(tableId)); } /** - * Asynchronously lists all schema bundle IDs in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#listSchemaBundlesPagedCallable()}. + * + *

Asynchronously lists all schema bundle IDs in the specified table. * *

Sample code: * @@ -2050,6 +2335,7 @@ public List listSchemaBundles(String tableId) { * MoreExecutors.directExecutor()); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture> listSchemaBundlesAsync(String tableId) { ListSchemaBundlesRequest request = ListSchemaBundlesRequest.newBuilder() @@ -2116,7 +2402,10 @@ public List apply(List protos } /** - * Deletes an schema bundle with the specified schema bundle ID in the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteSchemaBundle(com.google.bigtable.admin.v2.DeleteSchemaBundleRequest)}. + * + *

Deletes an schema bundle with the specified schema bundle ID in the specified table. * *

Sample code: * @@ -2124,12 +2413,16 @@ public List apply(List protos * client.deleteSchemaBundle("my-table", "my-schema-bundle"); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public void deleteSchemaBundle(String tableId, String schemaBundleId) { ApiExceptions.callAndTranslateApiException(deleteSchemaBundleAsync(tableId, schemaBundleId)); } /** - * Asynchronously deletes an schema bundle with the specified schema bundle ID in the specified + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#deleteSchemaBundleCallable()}. + * + *

Asynchronously deletes an schema bundle with the specified schema bundle ID in the specified * table. * *

Sample code: @@ -2152,6 +2445,7 @@ public void deleteSchemaBundle(String tableId, String schemaBundleId) { * ); * } */ + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture deleteSchemaBundleAsync(String tableId, String schemaBundleId) { DeleteSchemaBundleRequest request = DeleteSchemaBundleRequest.newBuilder() @@ -2202,7 +2496,10 @@ public Void apply(Empty empty) { } /** - * Gets the IAM access control policy for the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getIamPolicy(com.google.iam.v1.GetIamPolicyRequest)}. + * + *

Gets the IAM access control policy for the specified table. * *

Sample code: * @@ -2218,12 +2515,16 @@ public Void apply(Empty empty) { * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Policy getIamPolicy(String tableId) { return ApiExceptions.callAndTranslateApiException(getIamPolicyAsync(tableId)); } /** - * Asynchronously gets the IAM access control policy for the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getIamPolicyCallable()}. + * + *

Asynchronously gets the IAM access control policy for the specified table. * *

Sample code: * @@ -2250,13 +2551,17 @@ public Policy getIamPolicy(String tableId) { * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture getIamPolicyAsync(String tableId) { String tableName = NameUtil.formatTableName(projectId, instanceId, tableId); return getResourceIamPolicy(tableName); } /** - * Replaces the IAM policy associated with the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#setIamPolicy(com.google.iam.v1.SetIamPolicyRequest)}. + * + *

Replaces the IAM policy associated with the specified table. * *

Sample code: * @@ -2273,12 +2578,16 @@ public ApiFuture getIamPolicyAsync(String tableId) { * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Policy setIamPolicy(String tableId, Policy policy) { return ApiExceptions.callAndTranslateApiException(setIamPolicyAsync(tableId, policy)); } /** - * Asynchronously replaces the IAM policy associated with the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#setIamPolicyCallable()}. + * + *

Asynchronously replaces the IAM policy associated with the specified table. * *

Sample code: * @@ -2309,14 +2618,18 @@ public Policy setIamPolicy(String tableId, Policy policy) { * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture setIamPolicyAsync(String tableId, Policy policy) { String tableName = NameUtil.formatTableName(projectId, instanceId, tableId); return setResourceIamPolicy(policy, tableName); } /** - * Tests whether the caller has the given permissions for the specified table. Returns a subset of - * the specified permissions that the caller has. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest)}. + * + *

Tests whether the caller has the given permissions for the specified table. Returns a subset + * of the specified permissions that the caller has. * *

Sample code: * @@ -2333,12 +2646,16 @@ public ApiFuture setIamPolicyAsync(String tableId, Policy policy) { * permissions */ @SuppressWarnings({"WeakerAccess"}) + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List testIamPermission(String tableId, String... permissions) { return ApiExceptions.callAndTranslateApiException(testIamPermissionAsync(tableId, permissions)); } /** - * Asynchronously tests whether the caller has the given permissions for the specified table. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#testIamPermissionsCallable()}. + * + *

Asynchronously tests whether the caller has the given permissions for the specified table. * Returns a subset of the specified permissions that the caller has. * *

Sample code: @@ -2365,13 +2682,17 @@ public List testIamPermission(String tableId, String... permissions) { * permissions */ @SuppressWarnings({"WeakerAccess"}) + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture> testIamPermissionAsync(String tableId, String... permissions) { String tableName = NameUtil.formatTableName(projectId, instanceId, tableId); return testResourceIamPermissions(tableName, permissions); } /** - * Gets the IAM access control policy for the specified backup. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getIamPolicy(com.google.iam.v1.GetIamPolicyRequest)}. + * + *

Gets the IAM access control policy for the specified backup. * *

Sample code: * @@ -2387,12 +2708,16 @@ public ApiFuture> testIamPermissionAsync(String tableId, String... * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Policy getBackupIamPolicy(String clusterId, String backupId) { return ApiExceptions.callAndTranslateApiException(getBackupIamPolicyAsync(clusterId, backupId)); } /** - * Asynchronously gets the IAM access control policy for the specified backup. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getIamPolicyCallable()}. + * + *

Asynchronously gets the IAM access control policy for the specified backup. * *

Sample code: * @@ -2419,13 +2744,17 @@ public Policy getBackupIamPolicy(String clusterId, String backupId) { * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture getBackupIamPolicyAsync(String clusterId, String backupId) { String backupName = NameUtil.formatBackupName(projectId, instanceId, clusterId, backupId); return getResourceIamPolicy(backupName); } /** - * Replaces the IAM policy associated with the specified backup. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#setIamPolicy(com.google.iam.v1.SetIamPolicyRequest)}. + * + *

Replaces the IAM policy associated with the specified backup. * *

Sample code: * @@ -2442,13 +2771,17 @@ public ApiFuture getBackupIamPolicyAsync(String clusterId, String backup * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Policy setBackupIamPolicy(String clusterId, String backupId, Policy policy) { return ApiExceptions.callAndTranslateApiException( setBackupIamPolicyAsync(clusterId, backupId, policy)); } /** - * Asynchronously replaces the IAM policy associated with the specified backup. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#setIamPolicyCallable()}. + * + *

Asynchronously replaces the IAM policy associated with the specified backup. * *

Sample code: * @@ -2479,6 +2812,7 @@ public Policy setBackupIamPolicy(String clusterId, String backupId, Policy polic * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture setBackupIamPolicyAsync( String clusterId, String backupId, Policy policy) { String backupName = NameUtil.formatBackupName(projectId, instanceId, clusterId, backupId); @@ -2486,8 +2820,11 @@ public ApiFuture setBackupIamPolicyAsync( } /** - * Tests whether the caller has the given permissions for the specified backup. Returns a subset - * of the specified permissions that the caller has. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest)}. + * + *

Tests whether the caller has the given permissions for the specified backup. Returns a + * subset of the specified permissions that the caller has. * *

Sample code: * @@ -2506,6 +2843,7 @@ public ApiFuture setBackupIamPolicyAsync( * permissions */ @SuppressWarnings({"WeakerAccess"}) + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List testBackupIamPermission( String clusterId, String backupId, String... permissions) { return ApiExceptions.callAndTranslateApiException( @@ -2513,7 +2851,10 @@ public List testBackupIamPermission( } /** - * Asynchronously tests whether the caller has the given permissions for the specified backup. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#testIamPermissionsCallable()}. + * + *

Asynchronously tests whether the caller has the given permissions for the specified backup. * Returns a subset of the specified permissions that the caller has. * *

Sample code: @@ -2540,6 +2881,7 @@ public List testBackupIamPermission( * permissions */ @SuppressWarnings({"WeakerAccess"}) + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture> testBackupIamPermissionAsync( String clusterId, String backupId, String... permissions) { String backupName = NameUtil.formatBackupName(projectId, instanceId, clusterId, backupId); @@ -2547,7 +2889,10 @@ public ApiFuture> testBackupIamPermissionAsync( } /** - * Gets the IAM access control policy for the specified authorized view. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getIamPolicy(com.google.iam.v1.GetIamPolicyRequest)}. + * + *

Gets the IAM access control policy for the specified authorized view. * *

Sample code: * @@ -2563,13 +2908,17 @@ public ApiFuture> testBackupIamPermissionAsync( * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Policy getAuthorizedViewIamPolicy(String tableId, String authorizedViewId) { return ApiExceptions.callAndTranslateApiException( getAuthorizedViewIamPolicyAsync(tableId, authorizedViewId)); } /** - * Asynchronously gets the IAM access control policy for the specified authorized view. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#getIamPolicyCallable()}. + * + *

Asynchronously gets the IAM access control policy for the specified authorized view. * *

Sample code: * @@ -2596,6 +2945,7 @@ public Policy getAuthorizedViewIamPolicy(String tableId, String authorizedViewId * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture getAuthorizedViewIamPolicyAsync( String tableId, String authorizedViewId) { String authorizedViewName = @@ -2604,7 +2954,10 @@ public ApiFuture getAuthorizedViewIamPolicyAsync( } /** - * Replaces the IAM policy associated with the specified authorized view. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#setIamPolicy(com.google.iam.v1.SetIamPolicyRequest)}. + * + *

Replaces the IAM policy associated with the specified authorized view. * *

Sample code: * @@ -2621,13 +2974,17 @@ public ApiFuture getAuthorizedViewIamPolicyAsync( * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public Policy setAuthorizedViewIamPolicy(String tableId, String authorizedViewId, Policy policy) { return ApiExceptions.callAndTranslateApiException( setAuthorizedViewIamPolicyAsync(tableId, authorizedViewId, policy)); } /** - * Asynchronously replaces the IAM policy associated with the specified authorized view. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#setIamPolicyCallable()}. + * + *

Asynchronously replaces the IAM policy associated with the specified authorized view. * *

Sample code: * @@ -2658,6 +3015,7 @@ public Policy setAuthorizedViewIamPolicy(String tableId, String authorizedViewId * IAM management */ @SuppressWarnings("WeakerAccess") + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public ApiFuture setAuthorizedViewIamPolicyAsync( String tableId, String authorizedViewId, Policy policy) { String authorizedViewName = @@ -2666,8 +3024,11 @@ public ApiFuture setAuthorizedViewIamPolicyAsync( } /** - * Tests whether the caller has the given permissions for the specified authorized view. Returns a - * subset of the specified permissions that the caller has. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#testIamPermissions(com.google.iam.v1.TestIamPermissionsRequest)}. + * + *

Tests whether the caller has the given permissions for the specified authorized view. + * Returns a subset of the specified permissions that the caller has. * *

Sample code: * @@ -2686,6 +3047,7 @@ public ApiFuture setAuthorizedViewIamPolicyAsync( * permissions */ @SuppressWarnings({"WeakerAccess"}) + @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.") public List testAuthorizedViewIamPermission( String tableId, String authorizedViewId, String... permissions) { return ApiExceptions.callAndTranslateApiException( @@ -2693,14 +3055,17 @@ public List testAuthorizedViewIamPermission( } /** - * Asynchronously tests whether the caller has the given permissions for the specified authorized - * view. Returns a subset of the specified permissions that the caller has. + * This method is obsolete. For the recommended proto-based approach, please see {@link + * com.google.cloud.bigtable.admin.v2.BaseBigtableTableAdminClient#testIamPermissionsCallable()}. + * + *

Asynchronously tests whether the caller has the given permissions for the specified + * authorized view. Returns a subset of the specified permissions that the caller has. * *

Sample code: * *

{@code
-   * ApiFuture> grantedPermissionsFuture = client.testAuthorizedViewIamPermissionAsync("my-table-id", "my-authorized-view-id",
-   *   "bigtable.authorizedViews.get", "bigtable.authorizedViews.delete");
+   * ApiFuture> grantedPermissionsFuture = client.testAuthorizedViewIamPermissionAsync("my-table-id",
+   *   "my-authorized-view-id", "bigtable.authorizedViews.get", "bigtable.authorizedViews.delete");
    *
    * ApiFutures.addCallback(grantedPermissionsFuture,
    *   new ApiFutureCallback>() {
@@ -2720,6 +3085,7 @@ public List testAuthorizedViewIamPermission(
    *     permissions
    */
   @SuppressWarnings({"WeakerAccess"})
+  @ObsoleteApi("Use getBaseClient() to access the auto-generated proto-based methods instead.")
   public ApiFuture> testAuthorizedViewIamPermissionAsync(
       String tableId, String authorizedViewId, String... permissions) {
     String authorizedViewName =
diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateInstanceRequest.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateInstanceRequest.java
index 685e52d555..69c75f9011 100644
--- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateInstanceRequest.java
+++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/CreateInstanceRequest.java
@@ -118,6 +118,22 @@ public CreateInstanceRequest addLabel(@Nonnull String key, @Nonnull String value
     return this;
   }
 
+  /**
+   * Adds a tag to the instance.
+   *
+   * 

Tags are a way to organize and govern resources across Google Cloud. Unlike labels, Tags are + * standalone resources created and managed through the Resource Manager API. + * + * @see For more details + */ + @SuppressWarnings("WeakerAccess") + public CreateInstanceRequest addTag(@Nonnull String key, @Nonnull String value) { + Preconditions.checkNotNull(key, "Key can't be null"); + Preconditions.checkNotNull(value, "Value can't be null"); + builder.getInstanceBuilder().putTags(key, value); + return this; + } + /** * Adds a cluster to the instance request with manual scaling enabled. * diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/Instance.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/Instance.java index c3a0c43bca..df163b0e0d 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/Instance.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/admin/v2/models/Instance.java @@ -150,6 +150,12 @@ public String getDisplayName() { return proto.getDisplayName(); } + /** Gets the instance's tags. */ + @SuppressWarnings("WeakerAccess") + public Map getTags() { + return proto.getTagsMap(); + } + /** Gets the instance's current type. Can be DEVELOPMENT or PRODUCTION. 
*/ @SuppressWarnings("WeakerAccess") public Type getType() { diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java index cef5e58f3a..b659a02175 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClient.java @@ -54,7 +54,6 @@ import com.google.cloud.bigtable.data.v2.models.sql.PreparedStatement; import com.google.cloud.bigtable.data.v2.models.sql.ResultSet; import com.google.cloud.bigtable.data.v2.models.sql.SqlType; -import com.google.cloud.bigtable.data.v2.stub.BigtableClientContext; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import com.google.cloud.bigtable.data.v2.stub.sql.SqlServerStream; import com.google.common.util.concurrent.MoreExecutors; @@ -180,18 +179,6 @@ public static BigtableDataClient create(BigtableDataSettings settings) throws IO return new BigtableDataClient(stub); } - /** - * Constructs an instance of BigtableDataClient with the provided client context. This is used by - * {@link BigtableDataClientFactory} and the client context will not be closed unless {@link - * BigtableDataClientFactory#close()} is called. 
- */ - static BigtableDataClient createWithClientContext( - BigtableDataSettings settings, BigtableClientContext context) throws IOException { - EnhancedBigtableStub stub = - EnhancedBigtableStub.createWithClientContext(settings.getStubSettings(), context); - return new BigtableDataClient(stub); - } - @InternalApi("Visible for testing") BigtableDataClient(EnhancedBigtableStub stub) { this.stub = stub; diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java index 599dce9f31..f19726e2a3 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataClientFactory.java @@ -16,8 +16,9 @@ package com.google.cloud.bigtable.data.v2; import com.google.api.core.BetaApi; -import com.google.api.gax.rpc.ClientContext; +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName; import com.google.cloud.bigtable.data.v2.stub.BigtableClientContext; +import com.google.cloud.bigtable.data.v2.stub.ClientOperationSettings; import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub; import java.io.IOException; import javax.annotation.Nonnull; @@ -62,9 +63,8 @@ */ @BetaApi("This feature is currently experimental and can change in the future") public final class BigtableDataClientFactory implements AutoCloseable { - - private final BigtableDataSettings defaultSettings; private final BigtableClientContext sharedClientContext; + private final ClientOperationSettings perOpSettings; /** * Create a instance of this factory. 
@@ -75,15 +75,15 @@ public final class BigtableDataClientFactory implements AutoCloseable { public static BigtableDataClientFactory create(BigtableDataSettings defaultSettings) throws IOException { BigtableClientContext sharedClientContext = - EnhancedBigtableStub.createBigtableClientContext(defaultSettings.getStubSettings()); - - return new BigtableDataClientFactory(sharedClientContext, defaultSettings); + BigtableClientContext.create(defaultSettings.getStubSettings()); + ClientOperationSettings perOpSettings = defaultSettings.getStubSettings().getPerOpSettings(); + return new BigtableDataClientFactory(sharedClientContext, perOpSettings); } private BigtableDataClientFactory( - BigtableClientContext sharedClientContext, BigtableDataSettings defaultSettings) { + BigtableClientContext sharedClientContext, ClientOperationSettings perOpSettings) { this.sharedClientContext = sharedClientContext; - this.defaultSettings = defaultSettings; + this.perOpSettings = perOpSettings; } /** @@ -107,15 +107,12 @@ public void close() throws Exception { */ public BigtableDataClient createDefault() { try { - ClientContext clientContext = - sharedClientContext.getClientContext().toBuilder() - .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory( - defaultSettings.getStubSettings(), sharedClientContext.getOpenTelemetry())) - .build(); + BigtableClientContext ctx = + sharedClientContext.createChild( + sharedClientContext.getClientInfo().getInstanceName(), + sharedClientContext.getClientInfo().getAppProfileId()); - return BigtableDataClient.createWithClientContext( - defaultSettings, sharedClientContext.withClientContext(clientContext)); + return new BigtableDataClient(new EnhancedBigtableStub(perOpSettings, ctx)); } catch (IOException e) { // Should never happen because the connection has been established already throw new RuntimeException( @@ -133,17 +130,11 @@ public BigtableDataClient createDefault() { * release all resources, first close all of the created clients 
and then this factory instance. */ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) throws IOException { - BigtableDataSettings settings = - defaultSettings.toBuilder().setAppProfileId(appProfileId).build(); + BigtableClientContext ctx = + sharedClientContext.createChild( + sharedClientContext.getClientInfo().getInstanceName(), appProfileId); - ClientContext clientContext = - sharedClientContext.getClientContext().toBuilder() - .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), sharedClientContext.getOpenTelemetry())) - .build(); - return BigtableDataClient.createWithClientContext( - settings, sharedClientContext.withClientContext(clientContext)); + return new BigtableDataClient(new EnhancedBigtableStub(perOpSettings, ctx)); } /** @@ -157,22 +148,10 @@ public BigtableDataClient createForAppProfile(@Nonnull String appProfileId) thro */ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull String instanceId) throws IOException { - BigtableDataSettings settings = - defaultSettings.toBuilder() - .setProjectId(projectId) - .setInstanceId(instanceId) - .setDefaultAppProfileId() - .build(); + BigtableClientContext ctx = + sharedClientContext.createChild(InstanceName.of(projectId, instanceId), ""); - ClientContext clientContext = - sharedClientContext.getClientContext().toBuilder() - .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), sharedClientContext.getOpenTelemetry())) - .build(); - - return BigtableDataClient.createWithClientContext( - settings, sharedClientContext.withClientContext(clientContext)); + return new BigtableDataClient(new EnhancedBigtableStub(perOpSettings, ctx)); } /** @@ -187,19 +166,9 @@ public BigtableDataClient createForInstance(@Nonnull String projectId, @Nonnull public BigtableDataClient createForInstance( @Nonnull String projectId, @Nonnull String instanceId, @Nonnull String appProfileId) throws 
IOException { - BigtableDataSettings settings = - defaultSettings.toBuilder() - .setProjectId(projectId) - .setInstanceId(instanceId) - .setAppProfileId(appProfileId) - .build(); - ClientContext clientContext = - sharedClientContext.getClientContext().toBuilder() - .setTracerFactory( - EnhancedBigtableStub.createBigtableTracerFactory( - settings.getStubSettings(), sharedClientContext.getOpenTelemetry())) - .build(); - return BigtableDataClient.createWithClientContext( - settings, sharedClientContext.withClientContext(clientContext)); + BigtableClientContext ctx = + sharedClientContext.createChild(InstanceName.of(projectId, instanceId), appProfileId); + + return new BigtableDataClient(new EnhancedBigtableStub(perOpSettings, ctx)); } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java index b8a514433f..4329e98f63 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/BigtableDataSettings.java @@ -450,7 +450,6 @@ public boolean isRefreshingChannel() { */ @Deprecated public Builder setPrimingTableIds(String... 
tableIds) { - stubSettings.setPrimedTableIds(tableIds); return this; } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/RequestContext.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/RequestContext.java index fc015186aa..7058ae137c 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/RequestContext.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/RequestContext.java @@ -17,6 +17,7 @@ import com.google.api.core.InternalApi; import com.google.auto.value.AutoValue; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; import java.io.Serializable; /** @@ -33,6 +34,13 @@ @AutoValue public abstract class RequestContext implements Serializable { + public static RequestContext create(ClientInfo clientInfo) { + return create( + clientInfo.getInstanceName().getProjectId(), + clientInfo.getInstanceName().getInstanceId(), + clientInfo.getAppProfileId()); + } + /** Creates a new instance of the {@link RequestContext}. */ public static RequestContext create(String projectId, String instanceId, String appProfileId) { return new AutoValue_RequestContext(projectId, instanceId, appProfileId); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/api/InstanceName.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/api/InstanceName.java new file mode 100644 index 0000000000..01dfed2d72 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/api/InstanceName.java @@ -0,0 +1,70 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.api; + +import com.google.auto.value.AutoValue; +import com.google.common.base.Preconditions; +import com.google.common.base.Splitter; +import java.util.List; + +@AutoValue +public abstract class InstanceName { + public abstract String getProjectId(); + + public abstract String getInstanceId(); + + @Override + public final String toString() { + return String.format("projects/%s/instances/%s", getProjectId(), getInstanceId()); + } + + public static InstanceName of(String projectId, String instanceId) { + return InstanceName.builder().setProjectId(projectId).setInstanceId(instanceId).build(); + } + + public static Builder builder() { + return new AutoValue_InstanceName.Builder(); + } + + public static InstanceName parse(String name) { + List parts = Splitter.on('/').splitToList(name); + Preconditions.checkArgument(parts.size() == 4, "Invalid instance name: %s", name); + Preconditions.checkArgument( + "projects".equals(parts.get(0)), + "Invalid instance name: %s, must start with projects/", + name); + Preconditions.checkArgument( + !parts.get(1).isEmpty(), "Invalid instance name %s, must have a project id", name); + Preconditions.checkArgument( + "instances".equals(parts.get(2)), + "Invalid instance name: %s, must start with projects/$PROJECT_ID/instances/", + name); + Preconditions.checkArgument( + !parts.get(3).isEmpty(), "Invalid instance name %s, must have an instance id", name); + + return builder().setProjectId(parts.get(1)).setInstanceId(parts.get(3)).build(); + } + + 
@AutoValue.Builder + public abstract static class Builder { + public abstract Builder setProjectId(String projectId); + + public abstract Builder setInstanceId(String instanceId); + + public abstract InstanceName build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/api/TableName.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/api/TableName.java new file mode 100644 index 0000000000..159c7b0b50 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/api/TableName.java @@ -0,0 +1,86 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.api; + +import com.google.auto.value.AutoValue; +import com.google.common.base.Preconditions; +import com.google.common.base.Splitter; +import java.util.List; + +@AutoValue +public abstract class TableName { + public abstract String getProjectId(); + + public abstract String getInstanceId(); + + public abstract String getTableId(); + + public InstanceName getInstanceName() { + return InstanceName.builder() + .setProjectId(getProjectId()) + .setInstanceId(getInstanceId()) + .build(); + } + + @Override + public final String toString() { + return String.format("%s/tables/%s", getInstanceName(), getTableId()); + } + + public static Builder builder() { + return new AutoValue_TableName.Builder(); + } + + public static TableName parse(String name) { + List parts = Splitter.on('/').splitToList(name); + Preconditions.checkArgument(parts.size() == 6, "Invalid table name: %s", name); + Preconditions.checkArgument( + "projects".equals(parts.get(0)), "Invalid table name: %s, must start with projects/", name); + Preconditions.checkArgument( + !parts.get(1).isEmpty(), "Invalid table name %s, must have a project id", name); + Preconditions.checkArgument( + "instances".equals(parts.get(2)), + "Invalid table name: %s, must start with projects/$PROJECT_ID/instances/", + name); + Preconditions.checkArgument( + !parts.get(3).isEmpty(), "Invalid table name %s, must have an instance id", name); + Preconditions.checkArgument( + "tables".equals(parts.get(4)), + "Invalid table name: %s, must start with" + + " projects/$PROJECT_ID/instances/$INSTANCE_ID/tables", + name); + Preconditions.checkArgument( + !parts.get(5).isEmpty(), "Invalid table name %s, must have table id", name); + + return builder() + .setProjectId(parts.get(1)) + .setInstanceId(parts.get(3)) + .setTableId(parts.get(5)) + .build(); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setProjectId(String projectId); + + public 
abstract Builder setInstanceId(String instanceId); + + public abstract Builder setTableId(String tableId); + + public abstract TableName build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricRegistry.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricRegistry.java new file mode 100644 index 0000000000..9c0a70d30c --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricRegistry.java @@ -0,0 +1,218 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm; + +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientBatchWriteFlowControlFactor; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientBatchWriteFlowControlTargetQps; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientChannelPoolOutstandingRpcs; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientDpCompatGuage; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientPerConnectionErrorCount; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.GrpcMetric; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.MetricWrapper; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.PacemakerDelay; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableApplicationBlockingLatency; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableAttemptLatency; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableAttemptLatency2; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableClientBlockingLatency; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableConnectivityErrorCount; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableDebugTagCount; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableFirstResponseLatency; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableOperationLatency; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableRemainingDeadline; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableRetryCount; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableServerLatency; +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.metrics.Meter; +import io.opentelemetry.api.metrics.MeterProvider; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +/** + * Repository 
for all client metrics. This class has 2 audiences: + * + *

    + *
  • VRpcTracer, which reference each metric directly + *
  • Exporter, which will look up each metric by name and use the {@link MetricWrapper} + * interface to augment the {@code MonitoredResource} and {@code Metric Labels} + *
+ */ +public class MetricRegistry { + public static final String METER_NAME = "bigtable.googleapis.com/internal/client/"; + + final TableOperationLatency operationLatencyMetric; + final TableAttemptLatency attemptLatencyMetric; + final TableAttemptLatency2 attemptLatency2Metric; + final TableRetryCount retryCountMetric; + final TableFirstResponseLatency firstResponseLantencyMetric; + final TableServerLatency serverLatencyMetric; + final ClientChannelPoolOutstandingRpcs channelPoolOutstandingRpcsMetric; + final TableConnectivityErrorCount connectivityErrorCountMetric; + final ClientDpCompatGuage dpCompatGuageMetric; + final TableApplicationBlockingLatency applicationBlockingLatencyMetric; + final TableClientBlockingLatency clientBlockingLatencyMetric; + final ClientPerConnectionErrorCount perConnectionErrorCountMetric; + final TableRemainingDeadline remainingDeadlineMetric; + final ClientBatchWriteFlowControlFactor batchWriteFlowControlFactorMetric; + final ClientBatchWriteFlowControlTargetQps batchWriteFlowControlTargetQpsMetric; + + final TableDebugTagCount debugTagCountMetric; + final PacemakerDelay pacemakerDelayMetric; + + private final Map> metrics = new HashMap<>(); + private final List grpcMetricNames = new ArrayList<>(); + + public MetricRegistry() { + operationLatencyMetric = register(new TableOperationLatency()); + attemptLatencyMetric = register(new TableAttemptLatency()); + attemptLatency2Metric = register(new TableAttemptLatency2()); + retryCountMetric = register(new TableRetryCount()); + firstResponseLantencyMetric = register(new TableFirstResponseLatency()); + serverLatencyMetric = register(new TableServerLatency()); + channelPoolOutstandingRpcsMetric = register(new ClientChannelPoolOutstandingRpcs()); + connectivityErrorCountMetric = register(new TableConnectivityErrorCount()); + applicationBlockingLatencyMetric = register(new TableApplicationBlockingLatency()); + clientBlockingLatencyMetric = register(new TableClientBlockingLatency()); + 
perConnectionErrorCountMetric = register(new ClientPerConnectionErrorCount()); + dpCompatGuageMetric = register(new ClientDpCompatGuage()); + remainingDeadlineMetric = register(new TableRemainingDeadline()); + batchWriteFlowControlFactorMetric = register(new ClientBatchWriteFlowControlFactor()); + batchWriteFlowControlTargetQpsMetric = register(new ClientBatchWriteFlowControlTargetQps()); + + debugTagCountMetric = register(new TableDebugTagCount()); + pacemakerDelayMetric = register(new PacemakerDelay()); + + // From + // https://github.com/grpc/grpc-java/blob/31fdb6c2268b4b1c8ba6c995ee46c58e84a831aa/rls/src/main/java/io/grpc/rls/CachingRlsLbClient.java#L138-L165 + registerGrpcMetric( + "grpc.client.attempt.duration", + ImmutableList.of("grpc.lb.locality", "grpc.status", "grpc.method", "grpc.target")); + registerGrpcMetric( + "grpc.lb.rls.default_target_picks", + ImmutableList.of( + "grpc.target", + "grpc.lb.rls.server_target", + "grpc.lb.rls.data_plane_target", + "grpc.lb.pick_result")); + registerGrpcMetric( + "grpc.lb.rls.target_picks", + ImmutableList.of( + "grpc.target", + "grpc.lb.rls.server_target", + "grpc.lb.rls.data_plane_target", + "grpc.lb.pick_result")); + registerGrpcMetric( + "grpc.lb.rls.failed_picks", ImmutableList.of("grpc.target", "grpc.lb.rls.server_target")); + + // From + // https://github.com/grpc/grpc-java/blob/31fdb6c2268b4b1c8ba6c995ee46c58e84a831aa/xds/src/main/java/io/grpc/xds/XdsClientMetricReporterImpl.java#L67-L94 + // TODO: "grpc.xds_client.connected" + registerGrpcMetric( + "grpc.xds_client.server_failure", ImmutableList.of("grpc.target", "grpc.xds.server")); + // TODO: "grpc.xds_client.resource_updates_valid", + registerGrpcMetric( + "grpc.xds_client.resource_updates_invalid", + ImmutableList.of("grpc.target", "grpc.xds.server", "grpc.xds.resource_type")); + // TODO: "grpc.xds_client.resources" + + // From + // https://github.com/grpc/proposal/blob/86990145a7cef9e5473a132709b2556fec00c4c6/A94-subchannel-otel-metrics.md + 
registerGrpcMetric( + "grpc.subchannel.disconnections", + ImmutableList.of( + "grpc.target", "grpc.lb.backend_service", "grpc.lb.locality", "grpc.disconnect_error")); + + registerGrpcMetric( + "grpc.subchannel.connection_attempts_succeeded", + ImmutableList.of("grpc.target", "grpc.lb.backend_service", "grpc.lb.locality")); + + registerGrpcMetric( + "grpc.subchannel.connection_attempts_failed", + ImmutableList.of("grpc.target", "grpc.lb.backend_service", "grpc.lb.locality")); + + registerGrpcMetric( + "grpc.subchannel.open_connections", + ImmutableList.of( + "grpc.target", "grpc.security_level", "grpc.lb.backend_service", "grpc.lb.locality")); + } + + private void registerGrpcMetric(String name, List labels) { + grpcMetricNames.add(name); + register(new GrpcMetric(name, labels)); + } + + private > T register(T instrument) { + metrics.put(instrument.getName(), instrument); + return instrument; + } + + List getGrpcMetricNames() { + return ImmutableList.copyOf(grpcMetricNames); + } + + public MetricWrapper getMetric(String name) { + return metrics.get(name); + } + + public RecorderRegistry newRecorderRegistry(MeterProvider meterProvider) { + return new RecorderRegistry(meterProvider.get(METER_NAME)); + } + + public class RecorderRegistry { + public final TableOperationLatency.Recorder operationLatency; + public final TableAttemptLatency.Recorder attemptLatency; + public final TableAttemptLatency2.Recorder attemptLatency2; + public final TableRetryCount.Recorder retryCount; + public final TableFirstResponseLatency.Recorder firstResponseLantency; + public final TableServerLatency.Recorder serverLatency; + public final ClientChannelPoolOutstandingRpcs.Recorder channelPoolOutstandingRpcs; + public final TableConnectivityErrorCount.Recorder connectivityErrorCount; + public final ClientDpCompatGuage.Recorder dpCompatGuage; + public final TableApplicationBlockingLatency.Recorder applicationBlockingLatency; + public final TableClientBlockingLatency.Recorder 
clientBlockingLatency; + public final ClientPerConnectionErrorCount.Recorder perConnectionErrorCount; + public final TableRemainingDeadline.Recorder remainingDeadline; + public final ClientBatchWriteFlowControlTargetQps.Recorder batchWriteFlowControlTargetQps; + public final ClientBatchWriteFlowControlFactor.Recorder batchWriteFlowControlFactor; + + public final TableDebugTagCount.Recorder debugTagCount; + + public final PacemakerDelay.Recorder pacemakerDelay; + + private RecorderRegistry(Meter meter) { + operationLatency = operationLatencyMetric.newRecorder(meter); + attemptLatency = attemptLatencyMetric.newRecorder(meter); + attemptLatency2 = attemptLatency2Metric.newRecorder(meter); + retryCount = retryCountMetric.newRecorder(meter); + firstResponseLantency = firstResponseLantencyMetric.newRecorder(meter); + serverLatency = serverLatencyMetric.newRecorder(meter); + channelPoolOutstandingRpcs = channelPoolOutstandingRpcsMetric.newRecorder(meter); + connectivityErrorCount = connectivityErrorCountMetric.newRecorder(meter); + dpCompatGuage = dpCompatGuageMetric.newRecorder(meter); + applicationBlockingLatency = applicationBlockingLatencyMetric.newRecorder(meter); + clientBlockingLatency = clientBlockingLatencyMetric.newRecorder(meter); + perConnectionErrorCount = perConnectionErrorCountMetric.newRecorder(meter); + remainingDeadline = remainingDeadlineMetric.newRecorder(meter); + batchWriteFlowControlTargetQps = batchWriteFlowControlTargetQpsMetric.newRecorder(meter); + batchWriteFlowControlFactor = batchWriteFlowControlFactorMetric.newRecorder(meter); + + debugTagCount = debugTagCountMetric.newRecorder(meter); + pacemakerDelay = pacemakerDelayMetric.newRecorder(meter); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/Metrics.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/Metrics.java new file mode 100644 index 0000000000..7df665c673 --- /dev/null +++ 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/Metrics.java @@ -0,0 +1,38 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm; + +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.ChannelPoolMetricsTracer; +import io.grpc.ManagedChannelBuilder; +import java.io.Closeable; +import java.io.IOException; +import javax.annotation.Nullable; + +public interface Metrics extends Closeable { + ApiTracerFactory createTracerFactory(ClientInfo clientInfo) throws IOException; + + > T configureGrpcChannel(T channelBuilder); + + @Nullable + ChannelPoolMetricsTracer getChannelPoolMetricsTracer(); + + void start(); + + @Override + void close(); +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricsImpl.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricsImpl.java new file mode 100644 index 0000000000..f0efac7e96 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricsImpl.java @@ -0,0 +1,246 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the 
License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm; + +import com.google.api.gax.grpc.GaxGrpcProperties; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.OpencensusTracerFactory; +import com.google.auth.Credentials; +import com.google.cloud.bigtable.Version; +import com.google.cloud.bigtable.data.v2.BigtableDataSettings; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry.RecorderRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.exporter.BigtableCloudMonitoringExporter; +import com.google.cloud.bigtable.data.v2.internal.csm.exporter.BigtablePeriodicReader; +import com.google.cloud.bigtable.data.v2.internal.csm.opencensus.MetricsTracerFactory; +import com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.BuiltinMetricsTracerFactory; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.ChannelPoolMetricsTracer; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.CompositeTracerFactory; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.Pacemaker; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.grpc.ManagedChannelBuilder; +import io.grpc.opentelemetry.GrpcOpenTelemetry; +import 
io.opencensus.stats.StatsRecorder; +import io.opencensus.tags.TagKey; +import io.opencensus.tags.TagValue; +import io.opencensus.tags.Tagger; +import io.opentelemetry.api.OpenTelemetry; +import io.opentelemetry.sdk.OpenTelemetrySdk; +import io.opentelemetry.sdk.metrics.SdkMeterProvider; +import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; +import java.io.Closeable; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import javax.annotation.Nullable; + +public class MetricsImpl implements Metrics, Closeable { + private final MetricRegistry metricRegistry; + + private final ApiTracerFactory userTracerFactory; + private final @Nullable OpenTelemetrySdk internalOtel; + private final @Nullable MetricRegistry.RecorderRegistry internalRecorder; + private final @Nullable OpenTelemetry userOtel; + private final @Nullable MetricRegistry.RecorderRegistry userRecorder; + private final ScheduledExecutorService executor; + private final Tagger ocTagger; + private final StatsRecorder ocRecorder; + + @Nullable private final GrpcOpenTelemetry grpcOtel; + @Nullable private final ChannelPoolMetricsTracer channelPoolMetricsTracer; + @Nullable private final Pacemaker pacemaker; + private final List> tasks = new ArrayList<>(); + + public MetricsImpl( + MetricRegistry metricRegistry, + ClientInfo clientInfo, + ApiTracerFactory userTracerFactory, + @Nullable OpenTelemetrySdk internalOtel, + @Nullable OpenTelemetry userOtel, + Tagger ocTagger, + StatsRecorder ocRecorder, + ScheduledExecutorService executor) { + this.metricRegistry = metricRegistry; + this.userTracerFactory = Preconditions.checkNotNull(userTracerFactory); + + this.internalOtel = internalOtel; + this.userOtel = userOtel; + + this.ocTagger = ocTagger; + this.ocRecorder = ocRecorder; + + this.executor = executor; + + if (internalOtel != null) { + this.internalRecorder = 
metricRegistry.newRecorderRegistry(internalOtel.getMeterProvider()); + this.pacemaker = new Pacemaker(internalRecorder, clientInfo, "background"); + this.channelPoolMetricsTracer = new ChannelPoolMetricsTracer(internalRecorder, clientInfo); + this.grpcOtel = + GrpcOpenTelemetry.newBuilder() + .sdk(internalOtel) + .addOptionalLabel("grpc.lb.locality") + // Disable default grpc metrics + .disableAllMetrics() + // Enable specific grpc metrics + .enableMetrics(metricRegistry.getGrpcMetricNames()) + .build(); + + } else { + this.internalRecorder = null; + this.grpcOtel = null; + this.pacemaker = null; + this.channelPoolMetricsTracer = null; + } + + if (userOtel != null) { + this.userRecorder = metricRegistry.newRecorderRegistry(userOtel.getMeterProvider()); + } else { + this.userRecorder = null; + } + } + + @Override + public void close() { + for (ScheduledFuture task : tasks) { + task.cancel(false); + } + if (internalOtel != null) { + internalOtel.close(); + } + } + + @Override + public void start() { + if (channelPoolMetricsTracer != null) { + tasks.add(channelPoolMetricsTracer.start(executor)); + } + if (pacemaker != null) { + tasks.add(pacemaker.start(executor)); + } + } + + @Override + public > T configureGrpcChannel(T channelBuilder) { + if (grpcOtel == null) { + return channelBuilder; + } + grpcOtel.configureChannelBuilder(channelBuilder); + return channelBuilder; + } + + @Override + public ApiTracerFactory createTracerFactory(ClientInfo clientInfo) { + ImmutableList.Builder tracerFactories = ImmutableList.builder(); + tracerFactories + .add(createOCTracingFactory(clientInfo)) + .add(createOCMetricsFactory(clientInfo, ocTagger, ocRecorder)) + .add(userTracerFactory); + + if (internalRecorder != null) { + tracerFactories.add(createOtelMetricsFactory(internalRecorder, clientInfo)); + } + if (userRecorder != null) { + tracerFactories.add(createOtelMetricsFactory(userRecorder, clientInfo)); + } + + return new CompositeTracerFactory(tracerFactories.build()); + } + + 
@Override + @Nullable + public ChannelPoolMetricsTracer getChannelPoolMetricsTracer() { + return channelPoolMetricsTracer; + } + + public static OpenTelemetrySdk createBuiltinOtel( + MetricRegistry metricRegistry, + ClientInfo clientInfo, + @Nullable Credentials defaultCredentials, + @Nullable String metricsEndpoint, + String universeDomain, + ScheduledExecutorService executor) + throws IOException { + + Credentials credentials = + BigtableDataSettings.getMetricsCredentials() != null + ? BigtableDataSettings.getMetricsCredentials() + : defaultCredentials; + + SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder(); + + BigtableCloudMonitoringExporter exporter = + BigtableCloudMonitoringExporter.create( + metricRegistry, + EnvInfo::detect, + clientInfo, + credentials, + metricsEndpoint, + universeDomain); + + meterProvider.registerMetricReader(new BigtablePeriodicReader(exporter, executor)); + + return OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build(); + } + + private static ApiTracerFactory createOCTracingFactory(ClientInfo clientInfo) { + return new OpencensusTracerFactory( + ImmutableMap.builder() + // Annotate traces with the same tags as metrics + .put( + RpcMeasureConstants.BIGTABLE_PROJECT_ID.getName(), + clientInfo.getInstanceName().getProjectId()) + .put( + RpcMeasureConstants.BIGTABLE_INSTANCE_ID.getName(), + clientInfo.getInstanceName().getInstanceId()) + .put( + RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID.getName(), clientInfo.getAppProfileId()) + // Also annotate traces with library versions + .put("gax", GaxGrpcProperties.getGaxGrpcVersion()) + .put("grpc", GaxGrpcProperties.getGrpcVersion()) + .put("gapic", Version.VERSION) + .build()); + } + + private static ApiTracerFactory createOCMetricsFactory( + ClientInfo clientInfo, Tagger tagger, StatsRecorder stats) { + + ImmutableMap attributes = + ImmutableMap.builder() + .put( + RpcMeasureConstants.BIGTABLE_PROJECT_ID, + 
TagValue.create(clientInfo.getInstanceName().getProjectId())) + .put( + RpcMeasureConstants.BIGTABLE_INSTANCE_ID, + TagValue.create(clientInfo.getInstanceName().getInstanceId())) + .put( + RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, + TagValue.create(clientInfo.getAppProfileId())) + .build(); + return MetricsTracerFactory.create(tagger, stats, attributes); + } + + private static BuiltinMetricsTracerFactory createOtelMetricsFactory( + RecorderRegistry recorder, ClientInfo clientInfo) { + + return BuiltinMetricsTracerFactory.create(recorder, clientInfo); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/ClientInfo.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/ClientInfo.java new file mode 100644 index 0000000000..7122cb40c7 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/ClientInfo.java @@ -0,0 +1,53 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.attributes; + +import com.google.auto.value.AutoValue; +import com.google.cloud.bigtable.Version; +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName; + +/** + * A value class to capture parameters that the client was instantiated with. 
These parameters will + * be used by the Exporter to derive MonitoredResource for GrpcMetrics. + */ +@AutoValue +public abstract class ClientInfo { + /** The name and version of the client. */ + public abstract String getClientName(); + + /** A unique identifier to disambiguate TimeSeries from multiple processes on the same VM. */ + public abstract InstanceName getInstanceName(); + + public abstract String getAppProfileId(); + + public abstract Builder toBuilder(); + + public static Builder builder() { + return new AutoValue_ClientInfo.Builder().setClientName("java-bigtable/" + Version.VERSION); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setClientName(String name); + + public abstract Builder setInstanceName(InstanceName name); + + public abstract Builder setAppProfileId(String appProfileId); + + public abstract ClientInfo build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/EnvInfo.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/EnvInfo.java new file mode 100644 index 0000000000..b7afb73ee9 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/EnvInfo.java @@ -0,0 +1,189 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.attributes; + +import com.google.auto.value.AutoValue; +import com.google.cloud.opentelemetry.detection.AttributeKeys; +import com.google.cloud.opentelemetry.detection.DetectedPlatform; +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; +import com.google.common.base.Function; +import com.google.common.base.Splitter; +import com.google.common.base.Supplier; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import java.lang.management.ManagementFactory; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; +import java.util.logging.Level; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +/** + * Environment attributes, lazily extracted by the Exporter. + * + *

The information will be extracted from the GCE metadata service and environment. + */ +@AutoValue +public abstract class EnvInfo { + private static final Logger logger = Logger.getLogger(EnvInfo.class.getName()); + + private static final Map SUPPORTED_PLATFORM_MAP = + ImmutableMap.of( + GCPPlatformDetector.SupportedPlatform.GOOGLE_COMPUTE_ENGINE, "gcp_compute_engine", + GCPPlatformDetector.SupportedPlatform.GOOGLE_KUBERNETES_ENGINE, "gcp_kubernetes_engine"); + + private static final AtomicLong uidSuffix = new AtomicLong(0); + + public abstract String getUid(); + + /** The Google platform running this client. ie. gcp_compute_engine */ + public abstract String getPlatform(); + + /** The Google project that the VM belongs to. */ + public abstract String getProject(); + + /** The geographic region that the VM is located in. */ + public abstract String getRegion(); + + /** The numeric GCE vm instance id. */ + public abstract String getHostId(); + + /** The hostname of the vm or container running the client. For gke, this will be the pod name. 
*/ + public abstract String getHostName(); + + public static Builder builder() { + return new AutoValue_EnvInfo.Builder().setUid(computeUid() + "-" + uidSuffix.getAndIncrement()); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setUid(String uid); + + public abstract Builder setPlatform(String platform); + + public abstract Builder setProject(String project); + + public abstract Builder setRegion(String region); + + public abstract Builder setHostId(String hostId); + + public abstract Builder setHostName(String hostName); + + public abstract EnvInfo build(); + } + + private static String computeUid() { + final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); + // If jvm doesn't have the expected format, fallback to the local hostname + if (jvmName.indexOf('@') < 1) { + String hostname = "localhost"; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + logger.log(Level.INFO, "Unable to get the hostname.", e); + } + // Generate a random number and use the same format "random_number@hostname". 
+ return "java-" + UUID.randomUUID() + "@" + hostname; + } + return "java-" + UUID.randomUUID() + jvmName; + } + + public static EnvInfo detect() { + return detect( + GCPPlatformDetector.DEFAULT_INSTANCE.detectPlatform(), + System::getenv, + () -> { + try { + return InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + throw new RuntimeException(e); + } + }); + } + + @Nullable + static EnvInfo detect( + DetectedPlatform detectedPlatform, + Function envGetter, + Supplier hostnameSupplier) { + @Nullable + String cloud_platform = SUPPORTED_PLATFORM_MAP.get(detectedPlatform.getSupportedPlatform()); + if (cloud_platform == null) { + return EnvInfo.builder() + .setPlatform("unknown") + .setHostName(detectHostname(envGetter, hostnameSupplier)) + .setRegion("global") + .setProject("") + .setHostId("") + .build(); + } + + Map attrs = detectedPlatform.getAttributes(); + ImmutableList locationKeys = + ImmutableList.of( + AttributeKeys.GCE_CLOUD_REGION, + AttributeKeys.GCE_AVAILABILITY_ZONE, + AttributeKeys.GKE_LOCATION_TYPE_REGION, + AttributeKeys.GKE_CLUSTER_LOCATION); + + String region = + locationKeys.stream().map(attrs::get).filter(Objects::nonNull).findFirst().orElse("global"); + + // Deal with possibility of a zone. Zones are of the form us-east1-c, but we want a region + // which, which is us-east1. 
+ region = Splitter.on('-').splitToStream(region).limit(2).collect(Collectors.joining("-")); + + String hostname = attrs.get(AttributeKeys.GCE_INSTANCE_NAME); + // TODO: add support for cloud run & gae by looking at SERVERLESS_COMPUTE_NAME & GAE_MODULE_NAME + if (hostname == null) { + hostname = detectHostname(envGetter, hostnameSupplier); + } + + String hostId = Optional.ofNullable(attrs.get(AttributeKeys.GCE_INSTANCE_ID)).orElse(""); + + return builder() + .setPlatform(cloud_platform) + .setProject(detectedPlatform.getProjectId()) + .setRegion(region) + .setHostId(hostId) + .setHostName(hostname) + .build(); + } + + private static String detectHostname( + Function envGetter, Supplier hostnameSupplier) { + String hostname = envGetter.apply("HOSTNAME"); + + if (hostname == null) { + try { + hostname = hostnameSupplier.get(); + } catch (RuntimeException e) { + logger.log(Level.WARNING, "failed to detect hostname", e); + } + } + if (hostname == null) { + hostname = ""; + } + return hostname; + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/MethodInfo.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/MethodInfo.java new file mode 100644 index 0000000000..4312392afa --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/MethodInfo.java @@ -0,0 +1,47 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.attributes; + +import com.google.auto.value.AutoValue; + +/** Method specific attributes. */ +@AutoValue +public abstract class MethodInfo { + + /** The name of the method. ie "Bigtable.ReadRow" */ + public abstract String getName(); + + /** If the method is streaming (ie a scan). */ + public abstract boolean getStreaming(); + + public static MethodInfo of(String name, boolean streaming) { + return builder().setName(name).setStreaming(streaming).build(); + } + + public static Builder builder() { + return new AutoValue_MethodInfo.Builder(); + } + + @AutoValue.Builder + public abstract static class Builder { + public abstract Builder setName(String name); + + public abstract Builder setStreaming(boolean isStreaming); + + public abstract MethodInfo build(); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/Util.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/Util.java new file mode 100644 index 0000000000..221452537d --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/Util.java @@ -0,0 +1,181 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.attributes; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.ApiException; +import com.google.bigtable.v2.AuthorizedViewName; +import com.google.bigtable.v2.CheckAndMutateRowRequest; +import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest; +import com.google.bigtable.v2.MaterializedViewName; +import com.google.bigtable.v2.MutateRowRequest; +import com.google.bigtable.v2.MutateRowsRequest; +import com.google.bigtable.v2.PeerInfo; +import com.google.bigtable.v2.PeerInfo.TransportType; +import com.google.bigtable.v2.ReadChangeStreamRequest; +import com.google.bigtable.v2.ReadModifyWriteRowRequest; +import com.google.bigtable.v2.ReadRowsRequest; +import com.google.bigtable.v2.ResponseParams; +import com.google.bigtable.v2.SampleRowKeysRequest; +import com.google.bigtable.v2.TableName; +import com.google.common.annotations.VisibleForTesting; +import io.grpc.Status; +import java.util.Locale; +import java.util.Optional; +import java.util.concurrent.CancellationException; +import javax.annotation.Nullable; + +public class Util { + static final String TRANSPORT_TYPE_PREFIX = "TRANSPORT_TYPE_"; + + public static String formatTransportZone(@Nullable PeerInfo peerInfo) { + return Optional.ofNullable(peerInfo).map(PeerInfo::getApplicationFrontendZone).orElse(""); + } + + public static String formatTransportSubzone(@Nullable PeerInfo peerInfo) { + return Optional.ofNullable(peerInfo).map(PeerInfo::getApplicationFrontendSubzone).orElse(""); + } + + public static String formatTransportType(@Nullable PeerInfo peerInfo) { + return transportTypeToString( + Optional.ofNullable(peerInfo) + .map(PeerInfo::getTransportType) + .orElse(TransportType.TRANSPORT_TYPE_UNKNOWN)); + } + + public static String transportTypeToString(TransportType transportType) { + String label = transportTypeToStringWithoutFallback(transportType); + if (label != null) { + return label; + } + // In 
case the client is running with a newer version of protos + if (transportType.name().startsWith(TRANSPORT_TYPE_PREFIX)) { + return transportType + .name() + .substring(TRANSPORT_TYPE_PREFIX.length()) + .toLowerCase(Locale.ENGLISH); + } else { + return transportType.name(); + } + } + + @VisibleForTesting + static String transportTypeToStringWithoutFallback(TransportType transportType) { + if (transportType == null) { + return "null"; + } + switch (transportType) { + case TRANSPORT_TYPE_UNKNOWN: + return "unknown"; + case TRANSPORT_TYPE_EXTERNAL: + return "external"; + case TRANSPORT_TYPE_CLOUD_PATH: + return "cloudpath"; + case TRANSPORT_TYPE_DIRECT_ACCESS: + return "directpath"; + case TRANSPORT_TYPE_SESSION_UNKNOWN: + return "session_unknown"; + case TRANSPORT_TYPE_SESSION_EXTERNAL: + return "session_external"; + case TRANSPORT_TYPE_SESSION_CLOUD_PATH: + return "session_cloudpath"; + case TRANSPORT_TYPE_SESSION_DIRECT_ACCESS: + return "session_directpath"; + case UNRECOGNIZED: + return "unrecognized"; + default: + return null; + } + } + + public static String formatClusterIdMetricLabel(@Nullable ResponseParams clusterInfo) { + return Optional.ofNullable(clusterInfo) + .map(ResponseParams::getClusterId) + .filter(s -> !s.isEmpty()) + .orElse(""); + } + + public static String formatZoneIdMetricLabel(@Nullable ResponseParams clusterInfo) { + return Optional.ofNullable(clusterInfo) + .map(ResponseParams::getZoneId) + .filter(s -> !s.isEmpty()) + .orElse("global"); + } + + public static Status.Code extractStatus(@Nullable Throwable error) { + if (error == null) { + return Status.Code.OK; + } + // Handle java CancellationException as if it was a gax CancelledException + if (error instanceof CancellationException) { + return Status.Code.CANCELLED; + } + if (error instanceof ApiException) { + ApiException apiException = (ApiException) error; + if (apiException.getStatusCode() instanceof GrpcStatusCode) { + return ((GrpcStatusCode) 
apiException.getStatusCode()).getTransportCode(); + } + } + + Status s = Status.fromThrowable(error); + if (s != null) { + return s.getCode(); + } + return Status.Code.UNKNOWN; + } + + public static String extractTableId(Object request) { + String tableName = null; + String authorizedViewName = null; + String materializedViewName = null; + if (request instanceof ReadRowsRequest) { + tableName = ((ReadRowsRequest) request).getTableName(); + authorizedViewName = ((ReadRowsRequest) request).getAuthorizedViewName(); + materializedViewName = ((ReadRowsRequest) request).getMaterializedViewName(); + } else if (request instanceof MutateRowsRequest) { + tableName = ((MutateRowsRequest) request).getTableName(); + authorizedViewName = ((MutateRowsRequest) request).getAuthorizedViewName(); + } else if (request instanceof MutateRowRequest) { + tableName = ((MutateRowRequest) request).getTableName(); + authorizedViewName = ((MutateRowRequest) request).getAuthorizedViewName(); + } else if (request instanceof SampleRowKeysRequest) { + tableName = ((SampleRowKeysRequest) request).getTableName(); + authorizedViewName = ((SampleRowKeysRequest) request).getAuthorizedViewName(); + materializedViewName = ((SampleRowKeysRequest) request).getMaterializedViewName(); + } else if (request instanceof CheckAndMutateRowRequest) { + tableName = ((CheckAndMutateRowRequest) request).getTableName(); + authorizedViewName = ((CheckAndMutateRowRequest) request).getAuthorizedViewName(); + } else if (request instanceof ReadModifyWriteRowRequest) { + tableName = ((ReadModifyWriteRowRequest) request).getTableName(); + authorizedViewName = ((ReadModifyWriteRowRequest) request).getAuthorizedViewName(); + } else if (request instanceof GenerateInitialChangeStreamPartitionsRequest) { + tableName = ((GenerateInitialChangeStreamPartitionsRequest) request).getTableName(); + } else if (request instanceof ReadChangeStreamRequest) { + tableName = ((ReadChangeStreamRequest) request).getTableName(); + } + if 
(tableName != null && !tableName.isEmpty()) { + return TableName.parse(tableName).getTable(); + } + if (authorizedViewName != null && !authorizedViewName.isEmpty()) { + return AuthorizedViewName.parse(authorizedViewName).getTable(); + } + if (materializedViewName != null && !materializedViewName.isEmpty()) { + return MaterializedViewName.parse(materializedViewName).getMaterializedView(); + } + return ""; + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporter.java new file mode 100644 index 0000000000..2aa98c33ea --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporter.java @@ -0,0 +1,275 @@ +/* + * Copyright 2023 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.exporter; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.api.gax.core.NoCredentialsProvider; +import com.google.api.gax.rpc.PermissionDeniedException; +import com.google.auth.Credentials; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.monitoring.v3.MetricServiceClient; +import com.google.cloud.monitoring.v3.MetricServiceSettings; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.Iterables; +import com.google.common.util.concurrent.MoreExecutors; +import com.google.monitoring.v3.CreateTimeSeriesRequest; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeSeries; +import com.google.protobuf.Empty; +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.export.MetricExporter; +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Supplier; +import java.util.logging.Level; +import java.util.logging.Logger; +import javax.annotation.Nullable; + +public class 
BigtableCloudMonitoringExporter implements MetricExporter { + private static final Logger LOGGER = + Logger.getLogger(BigtableCloudMonitoringExporter.class.getName()); + + // This system property can be used to override the monitoring endpoint + // to a different environment. It's meant for internal testing only and + // will be removed in future versions. Use settings in EnhancedBigtableStubSettings + // to override the endpoint. + @Deprecated @Nullable + private static final String MONITORING_ENDPOINT_OVERRIDE_SYS_PROP = + System.getProperty("bigtable.test-monitoring-endpoint"); + + // This the quota limit from Cloud Monitoring. More details in + // https://cloud.google.com/monitoring/quotas#custom_metrics_quotas. + private static final int EXPORT_BATCH_SIZE_LIMIT = 200; + + private final Supplier envInfo; + private final ClientInfo clientInfo; + private final MetricRegistry metricRegistry; + private final MetricServiceClient client; + + private final AtomicReference state; + private CompletableResultCode lastExportCode; + private final AtomicBoolean exportFailureLogged = new AtomicBoolean(false); + + private enum State { + Running, + Closing, + Closed + } + + public static BigtableCloudMonitoringExporter create( + MetricRegistry metricRegistry, + Supplier envInfo, + ClientInfo clientInfo, + @Nullable Credentials credentials, + @Nullable String endpoint, + String universeDomain) + throws IOException { + + Preconditions.checkNotNull(universeDomain); + + MetricServiceSettings.Builder settingsBuilder = + MetricServiceSettings.newBuilder() + .setUniverseDomain(universeDomain) + .setCredentialsProvider( + Optional.ofNullable(credentials) + .map(FixedCredentialsProvider::create) + .orElse(NoCredentialsProvider.create())); + + if (MONITORING_ENDPOINT_OVERRIDE_SYS_PROP != null) { + LOGGER.warning( + "Setting the monitoring endpoint through system variable will be removed in future" + + " versions"); + settingsBuilder.setEndpoint(MONITORING_ENDPOINT_OVERRIDE_SYS_PROP); + 
} + + if (endpoint != null) { + settingsBuilder.setEndpoint(endpoint); + } + + Duration timeout = Duration.ofMinutes(1); + // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving + // it as not retried for now. + settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetriesDuration(timeout); + + return new BigtableCloudMonitoringExporter( + metricRegistry, envInfo, clientInfo, MetricServiceClient.create(settingsBuilder.build())); + } + + @VisibleForTesting + public BigtableCloudMonitoringExporter( + MetricRegistry metricRegistry, + Supplier envInfo, + ClientInfo clientInfo, + MetricServiceClient client) { + this.metricRegistry = metricRegistry; + this.envInfo = envInfo; + this.clientInfo = clientInfo; + this.client = client; + this.state = new AtomicReference<>(State.Running); + } + + public void close() { + client.close(); + } + + @Override + public CompletableResultCode export(Collection metricData) { + Preconditions.checkState(state.get() != State.Closed, "Exporter is closed"); + + lastExportCode = doExport(metricData); + return lastExportCode; + } + + private CompletableResultCode doExport(Collection metricData) { + Map> converted; + + try { + converted = new Converter(metricRegistry, envInfo.get(), clientInfo).convertAll(metricData); + } catch (Throwable t) { + if (exportFailureLogged.compareAndSet(false, true)) { + LOGGER.log(Level.WARNING, "Failed to compose metrics for export", t); + } + + return CompletableResultCode.ofExceptionalFailure(t); + } + + List> futures = new ArrayList<>(); + + for (Entry> e : converted.entrySet()) { + futures.addAll(exportTimeSeries(e.getKey(), e.getValue())); + } + + CompletableResultCode exportCode = new CompletableResultCode(); + + StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + + ApiFutures.addCallback( + ApiFutures.allAsList(futures), + new ApiFutureCallback>() { + @Override + public void onFailure(Throwable throwable) { + if 
(exportFailureLogged.compareAndSet(false, true)) { + String msg = "createServiceTimeSeries request failed"; + if (throwable instanceof PermissionDeniedException) { + msg += + String.format( + " Need monitoring metric writer permission on project=%s. Follow" + + " https://cloud.google.com/bigtable/docs/client-side-metrics-setup" + + " to set up permissions.", + clientInfo.getInstanceName().getProjectId()); + } + RuntimeException asyncWrapper = new RuntimeException("export failed", throwable); + asyncWrapper.setStackTrace(stackTrace); + + if (state.get() != State.Closing || state.get() != State.Closed) { + // ignore the export warning when client is shutting down + LOGGER.log(Level.WARNING, msg, asyncWrapper); + } + } + exportCode.fail(); + } + + @Override + public void onSuccess(List objects) { + exportFailureLogged.set(false); + exportCode.succeed(); + } + }, + MoreExecutors.directExecutor()); + return exportCode; + } + + private List> exportTimeSeries( + ProjectName projectName, Collection timeSeries) { + List> batchResults = new ArrayList<>(); + + for (List batch : Iterables.partition(timeSeries, EXPORT_BATCH_SIZE_LIMIT)) { + CreateTimeSeriesRequest req = + CreateTimeSeriesRequest.newBuilder() + .setName(projectName.toString()) + .addAllTimeSeries(batch) + .build(); + ApiFuture f = this.client.createServiceTimeSeriesCallable().futureCall(req); + batchResults.add(f); + } + + return batchResults; + } + + @Override + public CompletableResultCode flush() { + if (lastExportCode != null) { + return lastExportCode; + } + return CompletableResultCode.ofSuccess(); + } + + @Override + public CompletableResultCode shutdown() { + State prevState = state.getAndSet(State.Closed); + if (prevState == State.Closed) { + LOGGER.log(Level.WARNING, "shutdown is called multiple times"); + return CompletableResultCode.ofSuccess(); + } + CompletableResultCode flushResult = flush(); + CompletableResultCode shutdownResult = new CompletableResultCode(); + flushResult.whenComplete( + () -> 
{ + Throwable throwable = null; + try { + client.shutdown(); + } catch (Throwable e) { + LOGGER.log(Level.WARNING, "failed to shutdown the monitoring client", e); + throwable = e; + } + if (throwable != null) { + shutdownResult.fail(); + } else { + shutdownResult.succeed(); + } + }); + + return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult)); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return AggregationTemporality.CUMULATIVE; + } + + public void prepareForShutdown() { + state.compareAndSet(State.Running, State.Closing); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtablePeriodicReader.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtablePeriodicReader.java new file mode 100644 index 0000000000..d29dbd8702 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtablePeriodicReader.java @@ -0,0 +1,109 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.exporter; + +import io.opentelemetry.sdk.common.CompletableResultCode; +import io.opentelemetry.sdk.common.export.MemoryMode; +import io.opentelemetry.sdk.metrics.Aggregation; +import io.opentelemetry.sdk.metrics.InstrumentType; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import io.opentelemetry.sdk.metrics.export.AggregationTemporalitySelector; +import io.opentelemetry.sdk.metrics.export.CollectionRegistration; +import io.opentelemetry.sdk.metrics.export.DefaultAggregationSelector; +import io.opentelemetry.sdk.metrics.export.MetricReader; +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; +import java.io.IOException; +import java.util.concurrent.ScheduledExecutorService; + +/** + * Wrapper around a {@link PeriodicMetricReader} that will notify the exporter when it's shutting + * down. This is necessary to filter out noisy error logs on shutdown. + */ +public class BigtablePeriodicReader implements MetricReader { + private final MetricReader delegate; + private final BigtableCloudMonitoringExporter exporter; + + public BigtablePeriodicReader( + BigtableCloudMonitoringExporter exporter, ScheduledExecutorService executor) { + delegate = PeriodicMetricReader.builder(exporter).setExecutor(executor).build(); + this.exporter = exporter; + } + + @Override + public void register(CollectionRegistration registration) { + delegate.register(registration); + } + + @Override + public Aggregation getDefaultAggregation(InstrumentType instrumentType) { + return delegate.getDefaultAggregation(instrumentType); + } + + @Override + public MemoryMode getMemoryMode() { + return delegate.getMemoryMode(); + } + + @Override + public CompletableResultCode forceFlush() { + return delegate.forceFlush(); + } + + @Override + public CompletableResultCode shutdown() { + return delegate.shutdown(); + } + + @Override + public void close() throws IOException { + exporter.prepareForShutdown(); + 
delegate.close(); + } + + public static AggregationTemporalitySelector alwaysCumulative() { + return AggregationTemporalitySelector.alwaysCumulative(); + } + + public static AggregationTemporalitySelector deltaPreferred() { + return AggregationTemporalitySelector.deltaPreferred(); + } + + public static AggregationTemporalitySelector lowMemory() { + return AggregationTemporalitySelector.lowMemory(); + } + + @Override + public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { + return delegate.getAggregationTemporality(instrumentType); + } + + public static String asString(AggregationTemporalitySelector selector) { + return AggregationTemporalitySelector.asString(selector); + } + + public static DefaultAggregationSelector getDefault() { + return DefaultAggregationSelector.getDefault(); + } + + @Override + public DefaultAggregationSelector with(InstrumentType instrumentType, Aggregation aggregation) { + return delegate.with(instrumentType, aggregation); + } + + public static String asString(DefaultAggregationSelector selector) { + return DefaultAggregationSelector.asString(selector); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/Converter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/Converter.java new file mode 100644 index 0000000000..c5ec4b3332 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/Converter.java @@ -0,0 +1,217 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm.exporter; + +import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; +import static com.google.api.MetricDescriptor.MetricKind.GAUGE; +import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; +import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; +import static com.google.api.MetricDescriptor.ValueType.DOUBLE; +import static com.google.api.MetricDescriptor.ValueType.INT64; + +import com.google.api.Distribution; +import com.google.api.Distribution.BucketOptions; +import com.google.api.Distribution.BucketOptions.Explicit; +import com.google.api.Metric; +import com.google.api.MetricDescriptor.MetricKind; +import com.google.api.MetricDescriptor.ValueType; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.MetricWrapper; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.ImmutableMultimap; +import com.google.common.collect.Multimap; +import com.google.monitoring.v3.Point; +import com.google.monitoring.v3.ProjectName; +import com.google.monitoring.v3.TimeInterval; +import com.google.monitoring.v3.TimeSeries; +import com.google.monitoring.v3.TypedValue; +import com.google.protobuf.util.Timestamps; +import io.opentelemetry.sdk.metrics.data.AggregationTemporality; +import 
io.opentelemetry.sdk.metrics.data.DoublePointData; +import io.opentelemetry.sdk.metrics.data.HistogramData; +import io.opentelemetry.sdk.metrics.data.HistogramPointData; +import io.opentelemetry.sdk.metrics.data.LongPointData; +import io.opentelemetry.sdk.metrics.data.MetricData; +import io.opentelemetry.sdk.metrics.data.MetricDataType; +import io.opentelemetry.sdk.metrics.data.PointData; +import io.opentelemetry.sdk.metrics.data.SumData; +import java.util.Collection; +import java.util.Map; +import java.util.logging.Level; +import java.util.logging.Logger; + +/** + * Helper for exporting metrics from Opentelemetry to Cloud Monitoring. + * + *

Takes collection {@link MetricData} and uses the {@link MetricWrapper}s defined in {@link + * MetricRegistry} to compose both the {@link com.google.api.MonitoredResource} and {@link Point}. + */ +class Converter { + private static final Logger LOGGER = Logger.getLogger(Converter.class.getName()); + + private final MetricRegistry metricRegistry; + private final EnvInfo envInfo; + private final ClientInfo clientInfo; + + Converter(MetricRegistry metricRegistry, EnvInfo envInfo, ClientInfo clientInfo) { + this.metricRegistry = metricRegistry; + this.envInfo = envInfo; + this.clientInfo = clientInfo; + } + + Map> convertAll(Collection otelMetrics) { + ImmutableMultimap.Builder builder = ImmutableMultimap.builder(); + + for (MetricData metricData : otelMetrics) { + Multimap perProject = convertMetricData(metricData); + builder.putAll(perProject); + } + return builder.build().asMap(); + } + + private Multimap convertMetricData(MetricData metricData) { + MetricWrapper metricDef = metricRegistry.getMetric(metricData.getName()); + if (metricDef == null) { + LOGGER.log(Level.FINE, "Skipping unexpected metric: {}", metricData.getName()); + return ImmutableListMultimap.of(); + } + + ImmutableMultimap.Builder builder = ImmutableMultimap.builder(); + + for (PointData pd : metricData.getData().getPoints()) { + ProjectName projectName = + metricDef.getSchema().extractProjectName(pd.getAttributes(), envInfo, clientInfo); + + TimeSeries timeSeries = + TimeSeries.newBuilder() + .setMetricKind(convertMetricKind(metricData)) + .setValueType(convertValueType(metricData.getType())) + .setResource( + metricDef + .getSchema() + .extractMonitoredResource(pd.getAttributes(), envInfo, clientInfo)) + .setMetric( + Metric.newBuilder() + .setType(metricDef.getExternalName()) + .putAllLabels( + metricDef.extractMetricLabels(pd.getAttributes(), envInfo, clientInfo))) + .addPoints(convertPointData(metricData.getType(), pd)) + .build(); + + builder.put(projectName, timeSeries); + } + return 
builder.build(); + } + + private Point convertPointData(MetricDataType type, PointData pointData) { + TimeInterval timeInterval = + TimeInterval.newBuilder() + .setStartTime(Timestamps.fromNanos(pointData.getStartEpochNanos())) + .setEndTime(Timestamps.fromNanos(pointData.getEpochNanos())) + .build(); + + Point.Builder builder = Point.newBuilder().setInterval(timeInterval); + switch (type) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return builder + .setValue( + TypedValue.newBuilder() + .setDistributionValue(convertHistogramData((HistogramPointData) pointData)) + .build()) + .build(); + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return builder + .setValue( + TypedValue.newBuilder() + .setDoubleValue(((DoublePointData) pointData).getValue()) + .build()) + .build(); + case LONG_GAUGE: + case LONG_SUM: + return builder + .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) + .build(); + default: + LOGGER.log(Level.WARNING, "unsupported metric type %s", type); + return builder.build(); + } + } + + private static Distribution convertHistogramData(HistogramPointData pointData) { + return Distribution.newBuilder() + .setCount(pointData.getCount()) + .setMean(pointData.getCount() == 0L ? 
0.0D : pointData.getSum() / pointData.getCount()) + .setBucketOptions( + BucketOptions.newBuilder() + .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) + .addAllBucketCounts(pointData.getCounts()) + .build(); + } + + private static MetricKind convertMetricKind(MetricData metricData) { + switch (metricData.getType()) { + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return convertHistogramType(metricData.getHistogramData()); + case LONG_GAUGE: + case DOUBLE_GAUGE: + return GAUGE; + case LONG_SUM: + return convertSumDataType(metricData.getLongSumData()); + case DOUBLE_SUM: + return convertSumDataType(metricData.getDoubleSumData()); + default: + return UNRECOGNIZED; + } + } + + private static MetricKind convertHistogramType(HistogramData histogramData) { + if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static MetricKind convertSumDataType(SumData sum) { + if (!sum.isMonotonic()) { + return GAUGE; + } + if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { + return CUMULATIVE; + } + return UNRECOGNIZED; + } + + private static ValueType convertValueType(MetricDataType metricDataType) { + switch (metricDataType) { + case LONG_GAUGE: + case LONG_SUM: + return INT64; + case DOUBLE_GAUGE: + case DOUBLE_SUM: + return DOUBLE; + case HISTOGRAM: + case EXPONENTIAL_HISTOGRAM: + return DISTRIBUTION; + default: + return ValueType.UNRECOGNIZED; + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientBatchWriteFlowControlFactor.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientBatchWriteFlowControlFactor.java new file mode 100644 index 0000000000..c4c6d97118 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientBatchWriteFlowControlFactor.java @@ 
-0,0 +1,69 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleGauge; +import io.opentelemetry.api.metrics.Meter; + +public class ClientBatchWriteFlowControlFactor extends MetricWrapper { + public static final String NAME = + "bigtable.googleapis.com/internal/client/batch_write_flow_control_factor"; + + public ClientBatchWriteFlowControlFactor() { + super(ClientSchema.INSTANCE, NAME); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleGauge instrument; + + private Recorder(Meter meter) { + this.instrument = + meter + .gaugeBuilder(NAME) + .setDescription( + "The distribution of batch write flow control factors received from the server.") + .setUnit("1") + .build(); + } + + public void record( + ClientInfo clientInfo, + Status.Code code, + boolean applied, + MethodInfo methodInfo, + double factor) { + Attributes attributes = + getSchema() + 
.createResourceAttrs(clientInfo) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.APPLIED_KEY, applied) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .build(); + + instrument.set(factor, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientBatchWriteFlowControlTargetQps.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientBatchWriteFlowControlTargetQps.java new file mode 100644 index 0000000000..a15189aa4a --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientBatchWriteFlowControlTargetQps.java @@ -0,0 +1,61 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleGauge; +import io.opentelemetry.api.metrics.Meter; + +public class ClientBatchWriteFlowControlTargetQps extends MetricWrapper { + public static final String NAME = + "bigtable.googleapis.com/internal/client/batch_write_flow_control_target_qps"; + + public ClientBatchWriteFlowControlTargetQps() { + super(ClientSchema.INSTANCE, NAME); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleGauge instrument; + + private Recorder(Meter meter) { + this.instrument = + meter + .gaugeBuilder(NAME) + .setDescription( + "The current target QPS of the client under batch write flow control.") + .setUnit("1") + .build(); + } + + public void record(ClientInfo clientInfo, MethodInfo methodInfo, double qps) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .build(); + + instrument.set(qps, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientChannelPoolOutstandingRpcs.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientChannelPoolOutstandingRpcs.java new file mode 100644 index 0000000000..c5c1589c4f --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientChannelPoolOutstandingRpcs.java @@ -0,0 +1,79 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under 
the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.PeerInfo.TransportType; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema; +import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelPoolSettings.LoadBalancingStrategy; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.util.List; +import java.util.stream.Collectors; + +public class ClientChannelPoolOutstandingRpcs extends MetricWrapper { + public static final String NAME = + "bigtable.googleapis.com/internal/client/connection_pool/outstanding_rpcs"; + + private static final List BUCKETS = + Buckets.generateLinearSeq(0d, 200d, 5).stream() + .map(Double::longValue) + .collect(Collectors.toList()); + + public ClientChannelPoolOutstandingRpcs() { + super(ClientSchema.INSTANCE, NAME); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final LongHistogram instrument; + + private Recorder(Meter meter) { + this.instrument = + meter + .histogramBuilder(NAME) + .ofLongs() + 
.setExplicitBucketBoundariesAdvice(BUCKETS) + .setDescription( + "A distribution of the number of outstanding RPCs per connection in the client" + + " pool, sampled periodically.") + .setUnit("1") + .build(); + } + + public void record( + ClientInfo clientInfo, + TransportType transportType, + LoadBalancingStrategy lbPolicy, + boolean isStreaming, + long rpcCount) { + instrument.record( + rpcCount, + getSchema() + .createResourceAttrs(clientInfo) + .put(MetricLabels.TRANSPORT_TYPE, Util.transportTypeToString(transportType)) + .put(MetricLabels.CHANNEL_POOL_LB_POLICY, lbPolicy.name()) + .put(MetricLabels.STREAMING_KEY, isStreaming) + .build()); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientDpCompatGuage.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientDpCompatGuage.java new file mode 100644 index 0000000000..9746e67448 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientDpCompatGuage.java @@ -0,0 +1,73 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema; +import io.opentelemetry.api.metrics.LongGauge; +import io.opentelemetry.api.metrics.Meter; + +public class ClientDpCompatGuage extends MetricWrapper { + private static final String NAME = + "bigtable.googleapis.com/internal/client/direct_access/compatible"; + + public ClientDpCompatGuage() { + super(ClientSchema.INSTANCE, NAME); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final LongGauge instrument; + + private Recorder(Meter meter) { + this.instrument = + meter + .gaugeBuilder(NAME) + .ofLongs() + .setDescription( + "Reports 1 if the environment is eligible for DirectPath, 0 otherwise. Based on" + + " an attempt at startup.") + .setUnit("1") + .build(); + } + + // TODO: replace ipPreference with an enum + public void recordSuccess(ClientInfo clientInfo, String ipPreference) { + instrument.set( + 1, + getSchema() + .createResourceAttrs(clientInfo) + .put(MetricLabels.DP_REASON_KEY, "") + .put(MetricLabels.DP_IP_PREFERENCE_KEY, ipPreference) + .build()); + } + + // TODO: replace reason with an enum + public void recordFailure(ClientInfo clientInfo, String reason) { + instrument.set( + 1, + getSchema() + .createResourceAttrs(clientInfo) + .put(MetricLabels.DP_REASON_KEY, reason) + .put(MetricLabels.DP_IP_PREFERENCE_KEY, "") + .build()); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientPerConnectionErrorCount.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientPerConnectionErrorCount.java new file mode 100644 index 0000000000..dc07f6e0e9 --- /dev/null +++ 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/ClientPerConnectionErrorCount.java @@ -0,0 +1,111 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.util.List; +import java.util.Set; + +public class ClientPerConnectionErrorCount extends MetricWrapper { + public static final String NAME = + "bigtable.googleapis.com/internal/client/per_connection_error_count"; + + static final List BUCKETS = + ImmutableList.builder() + .add(0L) + .addAll(Buckets.generateGeometricSeq(1, 64)) + .addAll(Buckets.generateGeometricSeq(125, 1_000_000L)) + .build(); + // This metric migrated from 
gce/gke schemas to bigtable_client + // So a lot of the metric labels overlap with the resource labels. + // we need special handling since the logic in MetricWrapper assumes that there is no + // overlap. + private static final Set> METRIC_LABELS = + ImmutableSet.of( + MetricLabels.BIGTABLE_PROJECT_ID_KEY, + MetricLabels.CLIENT_UID, + MetricLabels.INSTANCE_ID_KEY, + MetricLabels.CLIENT_NAME, + MetricLabels.APP_PROFILE_KEY); + + public ClientPerConnectionErrorCount() { + super(ClientSchema.INSTANCE, NAME); + } + + // Override the default metric labels to account for backwards compatibility. + // This metric used to live under bigtable_table, and has moved to bigtable_client + // The new schema duplicates some of the metric labels. However the default implementation + // in MetricWrapper will remove all resource labels from the metric labels. + // To maintain backwards compatibility, this metric override the extractMetricLabels + // to always emit the duplicate metric labels. + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + ImmutableMap.Builder builder = ImmutableMap.builder(); + metricAttrs.forEach( + (k, v) -> { + if (METRIC_LABELS.contains(k) && v != null) { + builder.put(k.getKey(), v.toString()); + } + }); + builder.put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()); + return builder.build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final LongHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .ofLongs() + .setDescription("Distribution of counts of channels per 'error count per minute'.") + .setUnit("1") + .setExplicitBucketBoundariesAdvice(BUCKETS) + .build(); + } + + public void record(ClientInfo clientInfo, long value) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo) + .put( + MetricLabels.BIGTABLE_PROJECT_ID_KEY, 
clientInfo.getInstanceName().getProjectId()) + .put(MetricLabels.INSTANCE_ID_KEY, clientInfo.getInstanceName().getInstanceId()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .build(); + instrument.record(value, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/Constants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/Constants.java new file mode 100644 index 0000000000..3478fd2e42 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/Constants.java @@ -0,0 +1,122 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.common.AttributeKey; +import java.util.ArrayList; +import java.util.List; + +public final class Constants { + private Constants() {} + + public static final class MetricLabels { + private MetricLabels() {} + + // TODO: remove overlapping attributes + // Project & Instance overlap with resource labels because they were migrated from + // an old gce/gke schema to support per_connection_error_count metric + @Deprecated + public static final AttributeKey BIGTABLE_PROJECT_ID_KEY = + AttributeKey.stringKey("project_id"); + + @Deprecated + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); + + public static final AttributeKey TRANSPORT_TYPE = + AttributeKey.stringKey("transport_type"); + public static final AttributeKey TRANSPORT_REGION = + AttributeKey.stringKey("transport_region"); + public static final AttributeKey TRANSPORT_ZONE = + AttributeKey.stringKey("transport_zone"); + public static final AttributeKey TRANSPORT_SUBZONE = + AttributeKey.stringKey("transport_subzone"); + + public static final AttributeKey CLIENT_UID = AttributeKey.stringKey("client_uid"); + public static final AttributeKey CLIENT_NAME = AttributeKey.stringKey("client_name"); + public static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); + public static final AttributeKey STREAMING_KEY = AttributeKey.booleanKey("streaming"); + public static final AttributeKey APP_PROFILE_KEY = + AttributeKey.stringKey("app_profile"); + public static final AttributeKey DEBUG_TAG_KEY = AttributeKey.stringKey("tag"); + + public static final AttributeKey APPLIED_KEY = AttributeKey.booleanKey("applied"); + + static final AttributeKey CHANNEL_POOL_LB_POLICY = AttributeKey.stringKey("lb_policy"); + static final AttributeKey DP_REASON_KEY = AttributeKey.stringKey("reason"); + static final AttributeKey 
DP_IP_PREFERENCE_KEY = + AttributeKey.stringKey("ip_preference"); + + public static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); + + static final AttributeKey EXECUTOR_KEY = AttributeKey.stringKey("executor"); + } + + static final class Units { + private Units() {} + + static final String MILLISECOND = "ms"; + static final String MICROSECOND = "us"; + static final String COUNT = "1"; + } + + static final class Buckets { + static final List AGGREGATION_WITH_MILLIS_HISTOGRAM = + ImmutableList.builder() + // Match `bigtable.googleapis.com/frontend_server/handler_latencies` buckets + .addAll(generateLinearSeq(0, 3.0, 0.1)) + .add(4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, 50.0, 65.0, 80.0) + .add(100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, 800.0, 900.0) + .add(1000.0, 2000.0, 3000.0, 4000.0, 5000.0, 6000.0, 10000.0, 20000.0, 50000.0) + .add(100000.0, 200000.0, 500000.0, 1000000.0, 2000000.0, 5000000.0) + .build(); + + @SuppressWarnings("SameParameterValue") + static List generateLinearSeq(double start, double end, double increment) { + ImmutableList.Builder builder = ImmutableList.builder(); + for (int i = 0; true; i++) { + double next = start + (increment * i); + if (next > end) { + break; + } + builder.add(next); + } + + return builder.build(); + } + + @SuppressWarnings("SameParameterValue") + static List generateExponentialSeq(double start, int count, double factor) { + List buckets = new ArrayList<>(); + + for (int i = 0; i < count; i++) { + buckets.add(start); + start *= factor; + } + + return buckets; + } + + static List generateGeometricSeq(long startClose, long endClosed) { + ImmutableList.Builder builder = ImmutableList.builder(); + for (long i = startClose; i <= endClosed; i *= 2) { + builder.add(i); + } + return builder.build(); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/GrpcMetric.java 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/GrpcMetric.java new file mode 100644 index 0000000000..e4ddc12165 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/GrpcMetric.java @@ -0,0 +1,65 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.GrpcClientSchema; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import java.util.List; + +/** + * Base class for gRpc metrics that are exported using bigtable_client schema. + * + *

gRPC doesn't record the bigtable specific metric labels, so they must be passed to the + * exporter via a side channel. + */ +public class GrpcMetric extends MetricWrapper { + public static final String METER_SCOPE = "grpc-java"; + + private final List> metricKeys; + + public GrpcMetric(String name, List metricKeys) { + super(GrpcClientSchema.INSTANCE, name); + this.metricKeys = + metricKeys.stream().map(AttributeKey::stringKey).collect(ImmutableList.toImmutableList()); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo ignored1, ClientInfo ignored2) { + ImmutableMap.Builder attributes = ImmutableMap.builder(); + + for (AttributeKey key : metricKeys) { + String newKeyName = key.getKey().replace('.', '_'); + Object value = metricAttrs.get(key); + if (value != null) { + attributes.put(newKeyName, value.toString()); + } + } + + return attributes.build(); + } + + @Override + public String getExternalName() { + return "bigtable.googleapis.com/internal/client/" + getName().replace('.', '/'); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/MetricWrapper.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/MetricWrapper.java new file mode 100644 index 0000000000..a6c882d820 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/MetricWrapper.java @@ -0,0 +1,103 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.api.MonitoredResource; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.Schema; +import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.common.Attributes; +import java.time.Duration; +import java.util.concurrent.TimeUnit; + +/** + * Base class for all the metrics. + * + *

Each metric is composed of an OpenTelemetry instrument (ie histogram), and a set of resource + * and metric attributes. Since some of the resource attributes are dynamic, all the resource + * attributes are sent to the instrument as metric {@link Attributes}. Then during the export phase, + * a {@link MonitoredResource} and a set of metric labels are extracted from the collected + * attributes. + * + *

This base class implements the foundation of this lifecycle: + * + *

    + *
  • The instrument for recording is passed in during construction + *
  • The concrete subclass will define a metric specific typesafe record method to populate the + * metric labels for the instrument + *
  • The list of resource attribute keys are defined by a resource specific subclass and passed + * in during construction. These will be used by {@code MetricWrapper.createMonitoredResource} + * to create the monitored resource during the export phase + *
  • The remaining attributes will be added as metric labels + *
+ */ +public abstract class MetricWrapper { + private final SchemaT schema; + private final String name; + + public MetricWrapper(SchemaT schema, String name) { + this.schema = schema; + this.name = name; + } + + public SchemaT getSchema() { + return schema; + } + + /** + * Used by the Exporter to compose metric labels to be sent to Cloud Monitoring. + * + *

Extracts metric labels from metric {@link Attributes}. By default, all keys that are not + * listed in {@code resourceKeys} are extracted. However, subclasses can override this method to + * inject data from {@link EnvInfo} and {@link ClientInfo}. + */ + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + ImmutableMap.Builder builder = ImmutableMap.builder(); + metricAttrs.forEach( + (k, v) -> { + if (!getSchema().getResourceKeys().contains(k) && v != null) { + builder.put(k.getKey(), v.toString()); + } + }); + return builder.build(); + } + + /** + * Used by the Exporter to match an instance of this class to the aggregated timeseries to export. + * + *

Gets the name of the metric. This is used by the exporter to look up this metric definition + * in MetricRegistry during export. + */ + public String getName() { + return name; + } + + /** + * Used by the exporter to post process the metric name from grpc conventions to Cloud Monitoring. + */ + public String getExternalName() { + return getName(); + } + + /** Converts a duration in fractional milliseconds. */ + protected static double toMillis(Duration duration) { + return Math.round(((double) duration.toNanos()) / TimeUnit.MILLISECONDS.toNanos(1) * 100.0) + / 100.0; + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/PacemakerDelay.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/PacemakerDelay.java new file mode 100644 index 0000000000..ec081f2afd --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/PacemakerDelay.java @@ -0,0 +1,76 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema; +import com.google.common.collect.ImmutableList; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import java.util.List; + +/** + * Pacemaker delay records the delta between the pacemaker scheduled time and the actual time. When + * the delay is high, it could indicate issues with the machine that the client is running on like + * CPU saturation. + */ +public class PacemakerDelay extends MetricWrapper { + private static final String NAME = "bigtable.googleapis.com/internal/client/pacemaker_delays"; + + private static final List BUCKETS = + ImmutableList.builder() + // Up to 67,108,864, ~1 minute in microseconds + .addAll(Buckets.generateExponentialSeq(1.0, 13, 4)) + .build(); + + public PacemakerDelay() { + super(ClientSchema.INSTANCE, NAME); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "Distribution of the delay between the pacemaker firing and the pacemaker task" + + " being scheduled.") + .setUnit(Units.MICROSECOND) + .setExplicitBucketBoundariesAdvice(BUCKETS) + .build(); + } + + public void record(ClientInfo clientInfo, String executorName, Duration delta) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo) + 
.put(MetricLabels.EXECUTOR_KEY, executorName) + .build(); + instrument.record(delta.toNanos() / 1000.0, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableApplicationBlockingLatency.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableApplicationBlockingLatency.java new file mode 100644 index 0000000000..9fd5561d0f --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableApplicationBlockingLatency.java @@ -0,0 +1,86 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableApplicationBlockingLatency extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/application_latencies"; + + public TableApplicationBlockingLatency() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + this.instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "The latency of the client application consuming available response data.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void record( + ClientInfo 
clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Duration duration) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.record(toMillis(duration), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableAttemptLatency.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableAttemptLatency.java new file mode 100644 index 0000000000..e792cb8eb8 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableAttemptLatency.java @@ -0,0 +1,89 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableAttemptLatency extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/attempt_latencies"; + + public TableAttemptLatency() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription("Client observed latency per RPC attempt.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + @Nullable 
ResponseParams clusterInfo, + MethodInfo methodInfo, + Status.Code code, + Duration latency) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.STREAMING_KEY, methodInfo.getStreaming()) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.record(toMillis(latency), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableAttemptLatency2.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableAttemptLatency2.java new file mode 100644 index 0000000000..ca895e0e1b --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableAttemptLatency2.java @@ -0,0 +1,96 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.PeerInfo; +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableAttemptLatency2 extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/attempt_latencies2"; + + public TableAttemptLatency2() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription("Client observed latency per RPC attempt.") + .setUnit(Units.MILLISECOND) + 
.setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + @Nullable PeerInfo peerInfo, + @Nullable ResponseParams clusterInfo, + MethodInfo methodInfo, + Status.Code code, + Duration latency) { + + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.TRANSPORT_TYPE, Util.formatTransportType(peerInfo)) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.TRANSPORT_REGION, "") + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .put(MetricLabels.TRANSPORT_ZONE, Util.formatTransportZone(peerInfo)) + .put(MetricLabels.TRANSPORT_SUBZONE, Util.formatTransportSubzone(peerInfo)) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.STREAMING_KEY, methodInfo.getStreaming()) + .build(); + + instrument.record(toMillis(latency), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableClientBlockingLatency.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableClientBlockingLatency.java new file mode 100644 index 0000000000..7fc46c5559 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableClientBlockingLatency.java @@ -0,0 +1,87 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableClientBlockingLatency extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/throttling_latencies"; + + public TableClientBlockingLatency() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + 
public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "The latency introduced by the client queuing the RPC due to an unavailable" + + " transport or overload.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Duration duration) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.record(toMillis(duration), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableConnectivityErrorCount.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableConnectivityErrorCount.java new file mode 100644 index 0000000000..3f99f90248 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableConnectivityErrorCount.java @@ -0,0 +1,88 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import javax.annotation.Nullable; + +public class TableConnectivityErrorCount extends MetricWrapper { + public static final String NAME = + "bigtable.googleapis.com/internal/client/connectivity_error_count"; + + public TableConnectivityErrorCount() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final LongCounter instrument; + + private Recorder(Meter meter) { + instrument = + meter + .counterBuilder(NAME) + .setDescription( + "Number of requests that failed to reach the Google datacenter. 
(Requests without" + + " google response headers)") + .setUnit(Units.COUNT) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Status.Code code, + long count) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.add(count, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableDebugTagCount.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableDebugTagCount.java new file mode 100644 index 0000000000..5d9dbc8536 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableDebugTagCount.java @@ -0,0 +1,80 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import javax.annotation.Nullable; + +public class TableDebugTagCount extends MetricWrapper { + private static final String NAME = "bigtable.googleapis.com/internal/client/debug_tags"; + + public TableDebugTagCount() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final LongCounter instrument; + + private Recorder(Meter meter) { + instrument = + meter + .counterBuilder(NAME) + .setDescription("A counter of internal client events used for debugging.") + .setUnit(Units.COUNT) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + String tag, + @Nullable ResponseParams clusterInfo, + long amount) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .put(MetricLabels.CLIENT_NAME, 
clientInfo.getClientName()) + .put(MetricLabels.DEBUG_TAG_KEY, tag) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .build(); + instrument.add(amount, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableFirstResponseLatency.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableFirstResponseLatency.java new file mode 100644 index 0000000000..6ad09e7798 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableFirstResponseLatency.java @@ -0,0 +1,92 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableFirstResponseLatency extends MetricWrapper { + public static final String NAME = + "bigtable.googleapis.com/internal/client/first_response_latencies"; + + public TableFirstResponseLatency() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "Latency from operation start until the response headers were received. 
The" + + " publishing of the measurement will be delayed until the attempt response" + + " has been received.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Status.Code code, + Duration duration) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.record(toMillis(duration), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableOperationLatency.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableOperationLatency.java new file mode 100644 index 0000000000..781501100f --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableOperationLatency.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableOperationLatency extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/operation_latencies"; + + public TableOperationLatency() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "Total time until final operation success or failure, including retries and" + + " backoff.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void 
record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Status.Code code, + Duration duration) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.STREAMING_KEY, methodInfo.getStreaming()) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.record(toMillis(duration), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableRemainingDeadline.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableRemainingDeadline.java new file mode 100644 index 0000000000..314f9874c8 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableRemainingDeadline.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; + +public class TableRemainingDeadline extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/remaining_deadline"; + + public TableRemainingDeadline() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "The remaining deadline when the request is sent to grpc. 
This will either be the" + + " operation timeout, or the remaining deadline from operation timeout after" + + " retries and back offs.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + ResponseParams clusterInfo, + Status.Code code, + Duration duration) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.STATUS_KEY, code.name()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.STREAMING_KEY, methodInfo.getStreaming()) + .build(); + + instrument.record(toMillis(duration), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableRetryCount.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableRetryCount.java new file mode 100644 index 0000000000..205bf83962 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableRetryCount.java @@ -0,0 +1,84 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.LongCounter; +import io.opentelemetry.api.metrics.Meter; +import javax.annotation.Nullable; + +public class TableRetryCount extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/retry_count"; + + public TableRetryCount() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final LongCounter instrument; + + private Recorder(Meter meter) { + instrument = + meter + .counterBuilder(NAME) + .setDescription("The number of additional RPCs sent after the initial attempt.") + .setUnit(Units.COUNT) + .build(); + } + + public void record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Status.Code code, + long amount) { + 
Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + instrument.add(amount, attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableServerLatency.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableServerLatency.java new file mode 100644 index 0000000000..cafc0c245e --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/TableServerLatency.java @@ -0,0 +1,91 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Buckets; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels; +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.Units; +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema; +import com.google.common.collect.ImmutableMap; +import io.grpc.Status; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.metrics.DoubleHistogram; +import io.opentelemetry.api.metrics.Meter; +import java.time.Duration; +import javax.annotation.Nullable; + +public class TableServerLatency extends MetricWrapper { + public static final String NAME = "bigtable.googleapis.com/internal/client/server_latencies"; + + public TableServerLatency() { + super(TableSchema.INSTANCE, NAME); + } + + @Override + public ImmutableMap extractMetricLabels( + Attributes metricAttrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ImmutableMap.builder() + .putAll(super.extractMetricLabels(metricAttrs, envInfo, clientInfo)) + .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid()) + .build(); + } + + public Recorder newRecorder(Meter meter) { + return new Recorder(meter); + } + + public class Recorder { + private final DoubleHistogram instrument; + + private Recorder(Meter meter) { + instrument = + meter + .histogramBuilder(NAME) + .setDescription( + "The latency measured from the moment that the RPC entered the Google data center" + + " until the RPC was completed.") + .setUnit(Units.MILLISECOND) + .setExplicitBucketBoundariesAdvice(Buckets.AGGREGATION_WITH_MILLIS_HISTOGRAM) + .build(); + } + + 
public void record( + ClientInfo clientInfo, + String tableId, + MethodInfo methodInfo, + @Nullable ResponseParams clusterInfo, + Status.Code code, + Duration duration) { + Attributes attributes = + getSchema() + .createResourceAttrs(clientInfo, tableId, clusterInfo) + .put(MetricLabels.METHOD_KEY, methodInfo.getName()) + .put(MetricLabels.APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(MetricLabels.STREAMING_KEY, methodInfo.getStreaming()) + .put(MetricLabels.STATUS_KEY, code.name()) + .put(MetricLabels.CLIENT_NAME, clientInfo.getClientName()) + // To maintain backwards compat CLIENT_UID is set using sideband data in the exporter + .build(); + + instrument.record(toMillis(duration), attributes); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/package-info.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/package-info.java new file mode 100644 index 0000000000..e6e4fb388c --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/metrics/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.metrics; + +// Implements the metrics from bigtable_googleapis_com/metrics/aliased_metrics.gcl & +// cloud_pulse_monarch/bigtable/metrics diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracer.java similarity index 90% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracer.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracer.java index c322b75df8..921d0329ad 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracer.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus; import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; @@ -21,6 +21,9 @@ import com.google.api.gax.retrying.ServerStreamingAttemptException; import com.google.api.gax.tracing.ApiTracerFactory.OperationType; import com.google.api.gax.tracing.SpanName; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; import com.google.common.base.Stopwatch; import io.opencensus.stats.MeasureMap; import io.opencensus.stats.StatsRecorder; @@ -63,6 +66,7 @@ class MetricsTracer extends BigtableTracer { private volatile boolean reportBatchingLatency = false; private volatile long batchThrottledLatency = 0; + private MetadataExtractorInterceptor.SidebandData sidebandData; MetricsTracer( OperationType operationType, @@ -131,7 +135,7 @@ private void recordOperationCompletion(@Nullable Throwable throwable) { newTagCtxBuilder() .putLocal( RpcMeasureConstants.BIGTABLE_STATUS, - TagValue.create(Util.extractStatus(throwable))); + TagValue.create(Util.extractStatus(throwable).name())); measures.record(tagCtx.build()); } @@ -187,6 +191,14 @@ private void recordAttemptCompletion(@Nullable Throwable throwable) { RpcMeasureConstants.BIGTABLE_ATTEMPT_LATENCY, attemptTimer.elapsed(TimeUnit.MILLISECONDS)); + if (sidebandData != null && sidebandData.getGfeTiming() != null) { + measures + .put(RpcMeasureConstants.BIGTABLE_GFE_LATENCY, sidebandData.getGfeTiming().toMillis()) + .put(RpcMeasureConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT, 0L); + } else { + measures.put(RpcMeasureConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT, 1L); + } + if (reportBatchingLatency) { measures.put(RpcMeasureConstants.BIGTABLE_BATCH_THROTTLED_TIME, batchThrottledLatency); @@ -206,7 +218,7 @@ private void 
recordAttemptCompletion(@Nullable Throwable throwable) { newTagCtxBuilder() .putLocal( RpcMeasureConstants.BIGTABLE_STATUS, - TagValue.create(Util.extractStatus(throwable))); + TagValue.create(Util.extractStatus(throwable).name())); measures.record(tagCtx.build()); } @@ -226,20 +238,8 @@ public int getAttempt() { } @Override - public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - MeasureMap measures = stats.newMeasureMap(); - if (latency != null) { - measures - .put(RpcMeasureConstants.BIGTABLE_GFE_LATENCY, latency) - .put(RpcMeasureConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT, 0L); - } else { - measures.put(RpcMeasureConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT, 1L); - } - measures.record( - newTagCtxBuilder() - .putLocal( - RpcMeasureConstants.BIGTABLE_STATUS, TagValue.create(Util.extractStatus(throwable))) - .build()); + public void setSidebandData(MetadataExtractorInterceptor.SidebandData sidebandData) { + this.sidebandData = sidebandData; } @Override diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracerFactory.java similarity index 96% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerFactory.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracerFactory.java index e0c173a2be..0f557e6536 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracerFactory.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus; import com.google.api.core.InternalApi; import com.google.api.gax.tracing.ApiTracer; diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcMeasureConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/RpcMeasureConstants.java similarity index 98% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcMeasureConstants.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/RpcMeasureConstants.java index 560bb084bf..39c9bb0e99 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcMeasureConstants.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/RpcMeasureConstants.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus; import com.google.api.core.InternalApi; import io.opencensus.stats.Measure.MeasureLong; diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViewConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/RpcViewConstants.java similarity index 73% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViewConstants.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/RpcViewConstants.java index 4e21eaf785..51af4269ad 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViewConstants.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/RpcViewConstants.java @@ -13,22 +13,24 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_ATTEMPT_LATENCY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_BATCH_THROTTLED_TIME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_GFE_LATENCY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_INSTANCE_ID; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_OP; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_OP_ATTEMPT_COUNT; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_OP_LATENCY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_PROJECT_ID; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_READ_ROWS_FIRST_ROW_LATENCY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants.BIGTABLE_STATUS; +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_ATTEMPT_LATENCY; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_BATCH_THROTTLED_TIME; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT; +import static 
com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_GFE_LATENCY; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_INSTANCE_ID; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_OP; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_OP_ATTEMPT_COUNT; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_OP_LATENCY; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_PROJECT_ID; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_READ_ROWS_FIRST_ROW_LATENCY; +import static com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcMeasureConstants.BIGTABLE_STATUS; + +import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import io.opencensus.stats.Aggregation; import io.opencensus.stats.Aggregation.Count; import io.opencensus.stats.Aggregation.Distribution; @@ -37,7 +39,7 @@ import io.opencensus.stats.View; import java.util.Arrays; -class RpcViewConstants { +public class RpcViewConstants { // Aggregations private static final Aggregation COUNT = Count.create(); private static final Aggregation SUM = Sum.create(); @@ -167,4 +169,19 @@ class RpcViewConstants { AGGREGATION_WITH_MILLIS_HISTOGRAM, ImmutableList.of( BIGTABLE_INSTANCE_ID, BIGTABLE_PROJECT_ID, BIGTABLE_APP_PROFILE_ID, BIGTABLE_OP)); + + @VisibleForTesting + public static final ImmutableSet BIGTABLE_CLIENT_VIEWS_SET = + ImmutableSet.of( + RpcViewConstants.BIGTABLE_OP_LATENCY_VIEW, + RpcViewConstants.BIGTABLE_COMPLETED_OP_VIEW, + RpcViewConstants.BIGTABLE_READ_ROWS_FIRST_ROW_LATENCY_VIEW, + RpcViewConstants.BIGTABLE_ATTEMPT_LATENCY_VIEW, + 
RpcViewConstants.BIGTABLE_ATTEMPTS_PER_OP_VIEW, + RpcViewConstants.BIGTABLE_BATCH_THROTTLED_TIME_VIEW); + + public static final ImmutableSet GFE_VIEW_SET = + ImmutableSet.of( + RpcViewConstants.BIGTABLE_GFE_LATENCY_VIEW, + RpcViewConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT_VIEW); } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/ClientSchema.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/ClientSchema.java new file mode 100644 index 0000000000..5ef030539d --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/ClientSchema.java @@ -0,0 +1,77 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.schema; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.ProjectName; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; + +/** + * The attributes for this schema are partially populated during the record phase and finalized + * during the export phase with {@link EnvInfo}. 
This is necessary because resolving {@link EnvInfo} + * is slow and should not happen during client startup. + */ +public final class ClientSchema extends Schema { + // This implements the `bigtable_client` resource defined in + // bigtable_googleapis_com/metrics/resource_types.gcl + + public static final AttributeKey BIGTABLE_PROJECT_ID_KEY = + AttributeKey.stringKey("project_id"); + // Resource labels passed during recording + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); + public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); + public static final AttributeKey CLIENT_NAME = AttributeKey.stringKey("client_name"); + + // Resource labels injected during export + private static final DeferredAttr CLIENT_PROJECT = + DeferredAttr.fromEnv("client_project", EnvInfo::getProject); + private static final DeferredAttr CLIENT_REGION = + DeferredAttr.fromEnv("region", EnvInfo::getRegion); + private static final DeferredAttr CLOUD_PLATFORM = + DeferredAttr.fromEnv("cloud_platform", EnvInfo::getPlatform); + private static final DeferredAttr HOST_ID = DeferredAttr.fromEnv("host_id", EnvInfo::getHostId); + private static final DeferredAttr HOST_NAME = + DeferredAttr.fromEnv("host_name", EnvInfo::getHostName); + private static final DeferredAttr UUID = DeferredAttr.fromEnv("uuid", EnvInfo::getUid); + + // Must come after all other static members + public static final ClientSchema INSTANCE = new ClientSchema(); + + public ClientSchema() { + super( + "bigtable_client", + ImmutableList.of(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME), + ImmutableList.of(CLIENT_PROJECT, CLIENT_REGION, CLOUD_PLATFORM, HOST_ID, HOST_NAME, UUID)); + } + + @Override + public ProjectName extractProjectName(Attributes attrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ProjectName.of(clientInfo.getInstanceName().getProjectId()); + } + + public AttributesBuilder createResourceAttrs(ClientInfo 
clientInfo) { + return Attributes.builder() + .put(BIGTABLE_PROJECT_ID_KEY, clientInfo.getInstanceName().getProjectId()) + .put(INSTANCE_ID_KEY, clientInfo.getInstanceName().getInstanceId()) + .put(APP_PROFILE_KEY, clientInfo.getAppProfileId()) + .put(CLIENT_NAME, clientInfo.getClientName()); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/GrpcClientSchema.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/GrpcClientSchema.java new file mode 100644 index 0000000000..0a5b3adeb2 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/GrpcClientSchema.java @@ -0,0 +1,78 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.schema; + +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.ProjectName; +import io.opentelemetry.api.common.Attributes; + +/** + * The attributes for this schema are partially populated during the record phase and finalized + * during the export phase with {@link EnvInfo}. This is necessary because resolving {@link EnvInfo} + * is slow and should not happen during client startup. 
+ */ +public final class GrpcClientSchema extends Schema { + // Unlike the normal ClientSchema, the bigtable resource ids must be injected during export time + private static final DeferredAttr BIGTABLE_PROJECT_ID = + DeferredAttr.fromClientInfo("project_id", ci -> ci.getInstanceName().getProjectId()); + private static final DeferredAttr INSTANCE_ID = + DeferredAttr.fromClientInfo("instance", ci -> ci.getInstanceName().getInstanceId()); + private static final DeferredAttr APP_PROFILE_ID = + DeferredAttr.fromClientInfo("app_profile", ClientInfo::getAppProfileId); + private static final DeferredAttr CLIENT_NAME = + DeferredAttr.fromClientInfo("client_name", ClientInfo::getClientName); + + private static final DeferredAttr CLIENT_PROJECT = + DeferredAttr.fromEnv("client_project", EnvInfo::getProject); + private static final DeferredAttr CLIENT_REGION = + DeferredAttr.fromEnv("region", EnvInfo::getRegion); + private static final DeferredAttr CLOUD_PLATFORM = + DeferredAttr.fromEnv("cloud_platform", EnvInfo::getPlatform); + private static final DeferredAttr HOST_ID = DeferredAttr.fromEnv("host_id", EnvInfo::getHostId); + private static final DeferredAttr HOST_NAME = + DeferredAttr.fromEnv("host_name", EnvInfo::getHostName); + private static final DeferredAttr UUID = DeferredAttr.fromEnv("uuid", EnvInfo::getUid); + + // Must come after all other static members + public static final GrpcClientSchema INSTANCE = new GrpcClientSchema(); + + private GrpcClientSchema() { + super( + "bigtable_client", + ImmutableList.of(), + ImmutableList.of( + BIGTABLE_PROJECT_ID, + INSTANCE_ID, + APP_PROFILE_ID, + CLIENT_NAME, + // Same as ClientSchema + CLIENT_PROJECT, + CLIENT_REGION, + CLOUD_PLATFORM, + HOST_ID, + HOST_NAME, + UUID)); + } + + @Override + public ProjectName extractProjectName( + Attributes ignored, EnvInfo ignored2, ClientInfo clientInfo) { + return ProjectName.of(clientInfo.getInstanceName().getProjectId()); + } +} diff --git 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/Schema.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/Schema.java new file mode 100644 index 0000000000..a5d621acbc --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/Schema.java @@ -0,0 +1,100 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.schema; + +import com.google.api.MonitoredResource; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.ProjectName; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import java.util.List; +import java.util.function.BiFunction; +import java.util.function.Function; + +public abstract class Schema { + private final String name; + private final List> resourceKeys; + private final List deferredAttrs; + + Schema(String name, List> resourceKeys) { + this(name, resourceKeys, ImmutableList.of()); + } + + Schema(String name, List> resourceKeys, List deferredAttrs) { + this.name = name; + this.resourceKeys = resourceKeys; + this.deferredAttrs = deferredAttrs; + } + + public List> getResourceKeys() { + 
return resourceKeys; + } + + public abstract ProjectName extractProjectName( + Attributes attrs, EnvInfo envInfo, ClientInfo clientInfo); + + public MonitoredResource extractMonitoredResource( + Attributes attrs, EnvInfo envInfo, ClientInfo clientInfo) { + MonitoredResource.Builder builder = MonitoredResource.newBuilder().setType(name); + + for (AttributeKey key : resourceKeys) { + Object value = attrs.get(key); + if (value != null) { + builder.putLabels(key.getKey(), value.toString()); + } + } + for (DeferredAttr a : deferredAttrs) { + builder.putLabels(a.getKey().getKey(), a.getValue(envInfo, clientInfo)); + } + return builder.build(); + } + + public String getName() { + return name; + } + + static class DeferredAttr { + private final AttributeKey name; + private BiFunction extractor; + + static DeferredAttr fromEnv(String name, Function envExtractor) { + return new DeferredAttr( + AttributeKey.stringKey(name), (envInfo, ignored) -> envExtractor.apply(envInfo)); + } + + static DeferredAttr fromClientInfo(String name, Function envExtractor) { + return new DeferredAttr( + AttributeKey.stringKey(name), (ignored, clientInfo) -> envExtractor.apply(clientInfo)); + } + + private DeferredAttr( + AttributeKey name, BiFunction extractor) { + this.name = name; + this.extractor = extractor; + } + + AttributeKey getKey() { + return name; + } + + String getValue(EnvInfo envInfo, ClientInfo clientInfo) { + return extractor.apply(envInfo, clientInfo); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/TableSchema.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/TableSchema.java new file mode 100644 index 0000000000..e333837d7a --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/schema/TableSchema.java @@ -0,0 +1,65 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the 
"License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.bigtable.data.v2.internal.csm.schema; + +import com.google.bigtable.v2.ResponseParams; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util; +import com.google.common.collect.ImmutableList; +import com.google.monitoring.v3.ProjectName; +import io.opentelemetry.api.common.AttributeKey; +import io.opentelemetry.api.common.Attributes; +import io.opentelemetry.api.common.AttributesBuilder; +import javax.annotation.Nullable; + +public final class TableSchema extends Schema { + // This implements the `bigtable_client_raw` resource defined in + // bigtable_googleapis_com/metrics/resource_types.gcl + + public static final AttributeKey BIGTABLE_PROJECT_ID_KEY = + AttributeKey.stringKey("project_id"); + public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); + public static final AttributeKey TABLE_ID_KEY = AttributeKey.stringKey("table"); + public static final AttributeKey CLUSTER_ID_KEY = AttributeKey.stringKey("cluster"); + public static final AttributeKey ZONE_ID_KEY = AttributeKey.stringKey("zone"); + + // Must come after all other static members + public static final TableSchema INSTANCE = new TableSchema(); + + public TableSchema() { + super( + "bigtable_client_raw", + ImmutableList.of( + BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, 
TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY)); + } + + @Override + public ProjectName extractProjectName(Attributes attrs, EnvInfo envInfo, ClientInfo clientInfo) { + return ProjectName.of(attrs.get(BIGTABLE_PROJECT_ID_KEY)); + } + + public AttributesBuilder createResourceAttrs( + ClientInfo clientInfo, String tableId, @Nullable ResponseParams clusterInfo) { + return Attributes.builder() + .put(BIGTABLE_PROJECT_ID_KEY, clientInfo.getInstanceName().getProjectId()) + .put(INSTANCE_ID_KEY, clientInfo.getInstanceName().getInstanceId()) + .put(TABLE_ID_KEY, tableId) + .put(CLUSTER_ID_KEY, Util.formatClusterIdMetricLabel(clusterInfo)) + .put(ZONE_ID_KEY, Util.formatZoneIdMetricLabel(clusterInfo)); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableGrpcStreamTracer.java similarity index 58% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableGrpcStreamTracer.java index a364adbc46..99a184b5e3 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableGrpcStreamTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableGrpcStreamTracer.java @@ -13,12 +13,11 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracer.TransportAttrs; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; import io.grpc.ClientStreamTracer; import io.grpc.Metadata; -import io.grpc.Status; /** * Records the time a request is enqueued in a grpc channel queue. This a bridge between gRPC stream @@ -26,16 +25,9 @@ * asking gRPC to start an RPC and gRPC actually serializing that RPC. */ class BigtableGrpcStreamTracer extends ClientStreamTracer { - private static final String GRPC_LB_LOCALITY_KEY = "grpc.lb.locality"; - private static final String GRPC_LB_BACKEND_SERVICE_KEY = "grpc.lb.backend_service"; - - private final StreamInfo info; private final BigtableTracer tracer; - private volatile String backendService = null; - private volatile String locality = null; - public BigtableGrpcStreamTracer(StreamInfo info, BigtableTracer tracer) { - this.info = info; + private BigtableGrpcStreamTracer(BigtableTracer tracer) { this.tracer = tracer; } @@ -44,26 +36,6 @@ public void outboundMessageSent(int seqNo, long optionalWireSize, long optionalU tracer.grpcMessageSent(); } - @Override - public void addOptionalLabel(String key, String value) { - switch (key) { - case GRPC_LB_LOCALITY_KEY: - this.locality = value; - break; - case GRPC_LB_BACKEND_SERVICE_KEY: - this.backendService = value; - break; - } - - super.addOptionalLabel(key, value); - } - - @Override - public void streamClosed(Status status) { - tracer.setTransportAttrs(TransportAttrs.create(locality, backendService)); - super.streamClosed(status); - } - static class Factory extends ClientStreamTracer.Factory { private final BigtableTracer tracer; @@ -75,7 +47,7 @@ static class Factory extends ClientStreamTracer.Factory { @Override public ClientStreamTracer newClientStreamTracer( ClientStreamTracer.StreamInfo info, Metadata headers) { - 
return new BigtableGrpcStreamTracer(info, tracer); + return new BigtableGrpcStreamTracer(tracer); } } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableTracerStreamingCallable.java similarity index 67% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerStreamingCallable.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableTracerStreamingCallable.java index 13b832b8b1..562305798f 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerStreamingCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableTracerStreamingCallable.java @@ -13,34 +13,25 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; import com.google.api.core.InternalApi; -import com.google.api.gax.grpc.GrpcResponseMetadata; +import com.google.api.gax.grpc.GrpcCallContext; import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.StreamController; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor; import com.google.cloud.bigtable.data.v2.stub.SafeResponseObserver; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; import com.google.common.base.Preconditions; import com.google.common.base.Stopwatch; import java.util.concurrent.TimeUnit; import javax.annotation.Nonnull; /** - * This callable will - *

  • -Inject a {@link GrpcResponseMetadata} to access the headers returned by gRPC methods upon - * completion. The {@link BigtableTracer} will process metrics that were injected in the - * header/trailer and publish them to OpenCensus. If {@link GrpcResponseMetadata#getMetadata()} - * returned null, it probably means that the request has never reached GFE, and it'll increment - * the gfe_header_missing_counter in this case. - *
  • -This class will also access trailers from {@link GrpcResponseMetadata} to record zone and - * cluster ids. - *
  • -Call {@link BigtableTracer#onRequest(int)} to record the request events in a stream. - *
  • -This class will also inject a {@link BigtableGrpcStreamTracer} that'll record the time an - * RPC spent in a grpc channel queue. - *
  • This class is considered an internal implementation detail and not meant to be used by - * applications. + * This class is considered an internal implementation detail and not meant to be used by + * applications. */ @InternalApi public class BigtableTracerStreamingCallable @@ -56,40 +47,41 @@ public BigtableTracerStreamingCallable( @Override public void call( RequestT request, ResponseObserver responseObserver, ApiCallContext context) { - final GrpcResponseMetadata responseMetadata = new GrpcResponseMetadata(); + GrpcCallContext grpcCtx = (GrpcCallContext) context; + + MetadataExtractorInterceptor metadataExtractor = new MetadataExtractorInterceptor(); + grpcCtx = metadataExtractor.injectInto(grpcCtx); + // tracer should always be an instance of bigtable tracer if (context.getTracer() instanceof BigtableTracer) { BigtableTracer tracer = (BigtableTracer) context.getTracer(); + tracer.setSidebandData(metadataExtractor.getSidebandData()); + grpcCtx = + grpcCtx.withCallOptions( + grpcCtx + .getCallOptions() + .withStreamTracerFactory(new BigtableGrpcStreamTracer.Factory(tracer))); + BigtableTracerResponseObserver innerObserver = - new BigtableTracerResponseObserver<>(responseObserver, tracer, responseMetadata); + new BigtableTracerResponseObserver<>(responseObserver, tracer); if (context.getRetrySettings() != null) { tracer.setTotalTimeoutDuration(context.getRetrySettings().getTotalTimeoutDuration()); } - innerCallable.call( - request, - innerObserver, - Util.injectBigtableStreamTracer( - context, responseMetadata, (BigtableTracer) context.getTracer())); + innerCallable.call(request, innerObserver, grpcCtx); } else { - innerCallable.call(request, responseObserver, context); + innerCallable.call(request, responseObserver, grpcCtx); } } private class BigtableTracerResponseObserver extends SafeResponseObserver { - private final BigtableTracer tracer; private final ResponseObserver outerObserver; - private final GrpcResponseMetadata responseMetadata; - 
BigtableTracerResponseObserver( - ResponseObserver observer, - BigtableTracer tracer, - GrpcResponseMetadata metadata) { + BigtableTracerResponseObserver(ResponseObserver observer, BigtableTracer tracer) { super(observer); this.tracer = tracer; this.outerObserver = observer; - this.responseMetadata = metadata; } @Override @@ -107,13 +99,11 @@ protected void onResponseImpl(ResponseT response) { @Override protected void onErrorImpl(Throwable t) { - Util.recordMetricsFromMetadata(responseMetadata, tracer, t); outerObserver.onError(t); } @Override protected void onCompleteImpl() { - Util.recordMetricsFromMetadata(responseMetadata, tracer, null); outerObserver.onComplete(); } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableTracerUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableTracerUnaryCallable.java new file mode 100644 index 0000000000..443ee17345 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BigtableTracerUnaryCallable.java @@ -0,0 +1,64 @@ +/* + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; + +import com.google.api.core.ApiFuture; +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.UnaryCallable; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; +import com.google.common.base.Preconditions; +import javax.annotation.Nonnull; + +/** + * This class is considered an internal implementation detail and not meant to be used by + * applications. + */ +@InternalApi +public class BigtableTracerUnaryCallable + extends UnaryCallable { + + private final UnaryCallable innerCallable; + + public BigtableTracerUnaryCallable(@Nonnull UnaryCallable innerCallable) { + this.innerCallable = Preconditions.checkNotNull(innerCallable, "Inner callable must be set"); + } + + @Override + public ApiFuture futureCall(RequestT request, ApiCallContext context) { + MetadataExtractorInterceptor interceptor = new MetadataExtractorInterceptor(); + GrpcCallContext grpcCtx = interceptor.injectInto((GrpcCallContext) context); + + // tracer should always be an instance of BigtableTracer + if (context.getTracer() instanceof BigtableTracer) { + BigtableTracer tracer = (BigtableTracer) context.getTracer(); + tracer.setSidebandData(interceptor.getSidebandData()); + + grpcCtx = + grpcCtx.withCallOptions( + grpcCtx + .getCallOptions() + .withStreamTracerFactory(new BigtableGrpcStreamTracer.Factory(tracer))); + + if (context.getRetrySettings() != null) { + tracer.setTotalTimeoutDuration(context.getRetrySettings().getTotalTimeoutDuration()); + } + } + return innerCallable.futureCall(request, grpcCtx); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracer.java 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracer.java new file mode 100644 index 0000000000..88dd39c0dc --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracer.java @@ -0,0 +1,425 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; + +import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; +import static com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util.extractStatus; + +import com.google.api.core.ObsoleteApi; +import com.google.api.gax.retrying.ServerStreamingAttemptException; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor.SidebandData; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; +import com.google.common.base.Stopwatch; +import com.google.common.collect.Comparators; +import com.google.common.math.IntMath; +import io.grpc.Deadline; +import io.grpc.Status; 
+import java.time.Duration; +import java.util.concurrent.CancellationException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import javax.annotation.Nullable; + +/** + * A {@link BigtableTracer} that records built-in metrics and publish under the + * bigtable.googleapis.com/client namespace + */ +class BuiltinMetricsTracer extends BigtableTracer { + private static final MethodInfo READ_ROWS = + MethodInfo.builder().setName("Bigtable.ReadRows").setStreaming(true).build(); + private final MetricRegistry.RecorderRegistry recorder; + private final ClientInfo clientInfo; + private final MethodInfo methodInfo; + + // Operation level metrics + private final AtomicBoolean operationFinishedEarly = new AtomicBoolean(); + private final AtomicBoolean opFinished = new AtomicBoolean(); + private final Stopwatch operationTimer = Stopwatch.createStarted(); + private final Stopwatch firstResponsePerOpTimer = Stopwatch.createStarted(); + + // Attempt level metrics + private int attemptCount = 0; + private Stopwatch attemptTimer; + private volatile int attempt = 0; + + // Total server latency needs to be atomic because it's accessed from different threads. E.g. + // request() from user thread and attempt failed from grpc thread. We're only measuring the extra + // time application spent blocking grpc buffer, which will be operationLatency - serverLatency. + private final AtomicLong totalServerLatencyNano = new AtomicLong(0); + // Stopwatch is not thread safe so this is a workaround to check if the stopwatch changes is + // flushed to memory. 
+ private final Stopwatch serverLatencyTimer = Stopwatch.createUnstarted(); + private final Object timerLock = new Object(); + + private boolean flowControlIsDisabled = false; + + private final AtomicInteger requestLeft = new AtomicInteger(0); + + private String tableId = ""; + + private final AtomicLong totalClientBlockingTime = new AtomicLong(0); + + private final AtomicLong grpcMessageSentDelay = new AtomicLong(0); + + private Deadline operationDeadline = null; + private volatile Duration remainingDeadlineAtAttemptStart = Duration.ZERO; + + private volatile MetadataExtractorInterceptor.SidebandData sidebandData = new SidebandData(); + + BuiltinMetricsTracer( + MetricRegistry.RecorderRegistry recorder, ClientInfo clientInfo, MethodInfo methodInfo) { + + this.recorder = recorder; + this.clientInfo = clientInfo; + this.methodInfo = methodInfo; + } + + @Override + public Scope inScope() { + return new Scope() { + @Override + public void close() {} + }; + } + + @Override + public void operationFinishEarly() { + operationFinishedEarly.set(true); + attemptTimer.stop(); + operationTimer.stop(); + } + + @Override + public void operationSucceeded() { + recordOperationCompletion(null); + } + + @Override + public void operationCancelled() { + recordOperationCompletion(new CancellationException()); + } + + @Override + public void operationFailed(Throwable error) { + recordOperationCompletion(error); + } + + @Override + public void attemptStarted(int attemptNumber) { + attemptStarted(null, attemptNumber); + } + + @Override + public void attemptStarted(Object request, int attemptNumber) { + this.attempt = attemptNumber; + attemptCount++; + attemptTimer = Stopwatch.createStarted(); + if (operationDeadline != null) { + remainingDeadlineAtAttemptStart = + Duration.ofMillis(operationDeadline.timeRemaining(TimeUnit.MILLISECONDS)); + } + if (request != null) { + this.tableId = Util.extractTableId(request); + } + if (!flowControlIsDisabled) { + synchronized (timerLock) { + if 
(!serverLatencyTimer.isRunning()) { + serverLatencyTimer.start(); + } + } + } + } + + @Override + public void attemptSucceeded() { + recordAttemptCompletion(null); + } + + @Override + public void attemptCancelled() { + recordAttemptCompletion(new CancellationException()); + } + + /** + * This method is obsolete. Use {@link #attemptFailedDuration(Throwable, java.time.Duration)} + * instead. + */ + @ObsoleteApi("Use attemptFailedDuration(Throwable, java.time.Duration) instead") + @Override + public void attemptFailed(Throwable error, org.threeten.bp.Duration delay) { + attemptFailedDuration(error, toJavaTimeDuration(delay)); + } + + @Override + public void attemptFailedDuration(Throwable error, Duration delay) { + recordAttemptCompletion(error); + } + + @Override + public void attemptPermanentFailure(Throwable throwable) { + recordAttemptCompletion(throwable); + } + + @Override + public void onRequest(int requestCount) { + requestLeft.accumulateAndGet(requestCount, IntMath::saturatedAdd); + + if (operationFinishedEarly.get()) { + return; + } + + if (flowControlIsDisabled) { + // On request is only called when auto flow control is disabled. When auto flow control is + // disabled, server latency is measured between onRequest and onResponse. + synchronized (timerLock) { + if (!serverLatencyTimer.isRunning()) { + serverLatencyTimer.start(); + } + } + } + } + + @Override + public void responseReceived() { + if (operationFinishedEarly.get()) { + return; + } + + if (firstResponsePerOpTimer.isRunning()) { + firstResponsePerOpTimer.stop(); + } + // When auto flow control is enabled, server latency is measured between afterResponse and + // responseReceived. + // When auto flow control is disabled, server latency is measured between onRequest and + // responseReceived. + // When auto flow control is disabled and application requested multiple responses, server + // latency is measured between afterResponse and responseReceived. 
+ // In all the cases, we want to stop the serverLatencyTimer here. + synchronized (timerLock) { + if (serverLatencyTimer.isRunning()) { + totalServerLatencyNano.addAndGet(serverLatencyTimer.elapsed(TimeUnit.NANOSECONDS)); + serverLatencyTimer.reset(); + } + } + } + + @Override + public void afterResponse(long applicationLatency) { + if (!flowControlIsDisabled || requestLeft.decrementAndGet() > 0) { + if (operationFinishedEarly.get()) { + return; + } + // When auto flow control is enabled, request will never be called, so server latency is + // measured between after the last response is processed and before the next response is + // received. If flow control is disabled but requestLeft is greater than 0, + // also start the timer to count the time between afterResponse and responseReceived. + synchronized (timerLock) { + if (!serverLatencyTimer.isRunning()) { + serverLatencyTimer.start(); + } + } + } + } + + @Override + public int getAttempt() { + return attempt; + } + + @Override + public void setSidebandData(MetadataExtractorInterceptor.SidebandData sidebandData) { + this.sidebandData = sidebandData; + } + + @Override + public void batchRequestThrottled(long throttledTimeNanos) { + totalClientBlockingTime.addAndGet(java.time.Duration.ofNanos(throttledTimeNanos).toMillis()); + } + + @Override + public void grpcMessageSent() { + grpcMessageSentDelay.set(attemptTimer.elapsed(TimeUnit.NANOSECONDS)); + } + + @Override + public void setTotalTimeoutDuration(java.time.Duration totalTimeoutDuration) { + // This method is called by BigtableTracerStreamingCallable and + // BigtableTracerUnaryCallable which is called per attempt. We only set + // the operationDeadline on the first attempt and when totalTimeout is set. 
+ if (operationDeadline == null && !totalTimeoutDuration.isZero()) { + this.operationDeadline = + Deadline.after(totalTimeoutDuration.toMillis(), TimeUnit.MILLISECONDS); + this.remainingDeadlineAtAttemptStart = totalTimeoutDuration; + } + } + + @Override + public void disableFlowControl() { + flowControlIsDisabled = true; + } + + private void recordOperationCompletion(@Nullable Throwable throwable) { + if (operationFinishedEarly.get()) { + throwable = null; // force an ok + } + + if (!opFinished.compareAndSet(false, true)) { + return; + } + long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS); + + Status.Code code = extractStatus(throwable); + + // Only record when retry count is greater than 0 so the retry + // graph will be less confusing + if (attemptCount > 1) { + recorder.retryCount.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + code, + attemptCount - 1); + } + + recorder.operationLatency.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + code, + Duration.ofNanos(operationLatencyNano)); + + // serverLatencyTimer should already be stopped in recordAttemptCompletion + long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get(); + recorder.applicationBlockingLatency.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + Duration.ofNanos(applicationLatencyNano)); + + if (methodInfo.equals(READ_ROWS)) { + recorder.firstResponseLantency.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + code, + firstResponsePerOpTimer.elapsed()); + } + } + + private void recordAttemptCompletion(@Nullable Throwable throwable) { + if (operationFinishedEarly.get()) { + throwable = null; // force an ok + } + // If the attempt failed, the time spent in retry should be counted in application latency. + // Stop the stopwatch and decrement requestLeft. 
+ synchronized (timerLock) { + if (serverLatencyTimer.isRunning()) { + requestLeft.decrementAndGet(); + totalServerLatencyNano.addAndGet(serverLatencyTimer.elapsed(TimeUnit.NANOSECONDS)); + serverLatencyTimer.reset(); + } + } + + // Patch the throwable until it's fixed in gax. When an attempt failed, + // it'll throw a ServerStreamingAttemptException. Unwrap the exception + // so it could get processed by extractStatus + if (throwable instanceof ServerStreamingAttemptException) { + throwable = throwable.getCause(); + } + + Status.Code code = extractStatus(throwable); + + totalClientBlockingTime.addAndGet(grpcMessageSentDelay.get()); + recorder.clientBlockingLatency.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + Duration.ofNanos(totalClientBlockingTime.get())); + + recorder.attemptLatency.record( + clientInfo, + tableId, + sidebandData.getResponseParams(), + methodInfo, + code, + attemptTimer.elapsed()); + + recorder.attemptLatency2.record( + clientInfo, + tableId, + sidebandData.getPeerInfo(), + sidebandData.getResponseParams(), + methodInfo, + code, + attemptTimer.elapsed()); + + // When operationDeadline is set, it's possible that the deadline is passed by the time we send + // a new attempt. In this case we'll record 0. 
+ if (operationDeadline != null) { + recorder.remainingDeadline.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + code, + Comparators.max(remainingDeadlineAtAttemptStart, Duration.ZERO)); + } + + if (sidebandData.getGfeTiming() != null) { + recorder.serverLatency.record( + clientInfo, + tableId, + methodInfo, + sidebandData.getResponseParams(), + code, + sidebandData.getGfeTiming()); + recorder.connectivityErrorCount.record( + clientInfo, tableId, methodInfo, sidebandData.getResponseParams(), code, 0); + } else { + recorder.connectivityErrorCount.record( + clientInfo, tableId, methodInfo, sidebandData.getResponseParams(), code, 1); + } + } + + private static double convertToMs(long nanoSeconds) { + double toMs = 1e-6; + return nanoSeconds * toMs; + } + + @Override + public void setBatchWriteFlowControlTargetQps(double targetQps) { + recorder.batchWriteFlowControlTargetQps.record(clientInfo, methodInfo, targetQps); + } + + @Override + public void addBatchWriteFlowControlFactor( + double factor, @Nullable Throwable throwable, boolean applied) { + recorder.batchWriteFlowControlFactor.record( + clientInfo, extractStatus(throwable), applied, methodInfo, factor); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracerFactory.java new file mode 100644 index 0000000000..8a41fd339e --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracerFactory.java @@ -0,0 +1,56 @@ +/* + * Copyright 2022 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; + +import com.google.api.core.InternalApi; +import com.google.api.gax.tracing.ApiTracer; +import com.google.api.gax.tracing.ApiTracerFactory; +import com.google.api.gax.tracing.BaseApiTracerFactory; +import com.google.api.gax.tracing.SpanName; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo; + +/** + * {@link ApiTracerFactory} that will generate OpenTelemetry metrics by using the {@link ApiTracer} + * api. 
+ */ +@InternalApi("For internal use only") +public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory { + + private final MetricRegistry.RecorderRegistry recorder; + private final ClientInfo clientInfo; + + public static BuiltinMetricsTracerFactory create( + MetricRegistry.RecorderRegistry recorder, ClientInfo clientInfo) { + return new BuiltinMetricsTracerFactory(recorder, clientInfo); + } + + BuiltinMetricsTracerFactory(MetricRegistry.RecorderRegistry recorder, ClientInfo clientInfo) { + this.recorder = recorder; + this.clientInfo = clientInfo; + } + + @Override + public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { + MethodInfo methodInfo = + MethodInfo.builder() + .setName(spanName.toString()) + .setStreaming(operationType == OperationType.ServerStreaming) + .build(); + return new BuiltinMetricsTracer(recorder, clientInfo, methodInfo); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ChannelPoolMetricsTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/ChannelPoolMetricsTracer.java similarity index 50% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ChannelPoolMetricsTracer.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/ChannelPoolMetricsTracer.java index ea849cf8ce..0eb9242b77 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/ChannelPoolMetricsTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/ChannelPoolMetricsTracer.java @@ -13,19 +13,15 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OUTSTANDING_RPCS_PER_CHANNEL_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; import com.google.api.core.InternalApi; +import com.google.bigtable.v2.PeerInfo.TransportType; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelObserver; import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelPoolObserver; -import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.metrics.LongHistogram; -import io.opentelemetry.api.metrics.Meter; +import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelPoolSettings.LoadBalancingStrategy; import java.util.List; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -38,34 +34,19 @@ public class ChannelPoolMetricsTracer implements Runnable { private static final Logger logger = Logger.getLogger(ChannelPoolMetricsTracer.class.getName()); private static final int SAMPLING_PERIOD_SECONDS = 60; - private final LongHistogram outstandingRpcsHistogram; - private final LongHistogram perConnectionErrorCountHistogram; + private final MetricRegistry.RecorderRegistry recorder; + private final ClientInfo clientInfo; private final AtomicReference bigtableChannelInsightsProviderRef = new AtomicReference<>(); - private final AtomicReference lbPolicyRef = new AtomicReference<>("ROUND_ROBIN"); + private final AtomicReference lbPolicyRef = + new AtomicReference<>(LoadBalancingStrategy.ROUND_ROBIN); // Attributes 
for unary and streaming RPCs, built on demand in run() - public ChannelPoolMetricsTracer(OpenTelemetry openTelemetry) { - Meter meter = openTelemetry.getMeter(METER_NAME); - this.outstandingRpcsHistogram = - meter - .histogramBuilder(OUTSTANDING_RPCS_PER_CHANNEL_NAME) - .ofLongs() - .setDescription( - "A distribution of the number of outstanding RPCs per connection in the client" - + " pool, sampled periodically.") - .setUnit("1") - .build(); - - this.perConnectionErrorCountHistogram = - meter - .histogramBuilder(PER_CONNECTION_ERROR_COUNT_NAME) - .ofLongs() - .setDescription("Distribution of counts of channels per 'error count per minute'.") - .setUnit("1") - .build(); + public ChannelPoolMetricsTracer(MetricRegistry.RecorderRegistry recorder, ClientInfo clientInfo) { + this.recorder = recorder; + this.clientInfo = clientInfo; } /** @@ -77,7 +58,7 @@ public void registerChannelInsightsProvider(BigtableChannelPoolObserver channelI } /** Register the current lb policy * */ - public void registerLoadBalancingStrategy(String lbPolicy) { + public void registerLoadBalancingStrategy(LoadBalancingStrategy lbPolicy) { this.lbPolicyRef.set(lbPolicy); } @@ -100,45 +81,25 @@ public void run() { return; } - String lbPolicy = lbPolicyRef.get(); - - Attributes dpUnaryAttrs = - Attributes.builder() - .put("transport_type", "directpath") - .put("streaming", false) - .put("lb_policy", lbPolicy) - .build(); - Attributes dpStreamingAttrs = - Attributes.builder() - .put("transport_type", "directpath") - .put("streaming", true) - .put("lb_policy", lbPolicy) - .build(); - Attributes cpUnaryAttrs = - Attributes.builder() - .put("transport_type", "cloudpath") - .put("streaming", false) - .put("lb_policy", lbPolicy) - .build(); - Attributes cpStreamingAttrs = - Attributes.builder() - .put("transport_type", "cloudpath") - .put("streaming", true) - .put("lb_policy", lbPolicy) - .build(); + LoadBalancingStrategy lbPolicy = lbPolicyRef.get(); for (BigtableChannelObserver info : 
channelInsights) { - Attributes unaryAttrs = info.isAltsChannel() ? dpUnaryAttrs : cpUnaryAttrs; - Attributes streamingAttrs = info.isAltsChannel() ? dpStreamingAttrs : cpStreamingAttrs; + TransportType transportType = + info.isAltsChannel() + ? TransportType.TRANSPORT_TYPE_DIRECT_ACCESS + : TransportType.TRANSPORT_TYPE_CLOUD_PATH; long currentOutstandingUnaryRpcs = info.getOutstandingUnaryRpcs(); long currentOutstandingStreamingRpcs = info.getOutstandingStreamingRpcs(); - outstandingRpcsHistogram.record(currentOutstandingUnaryRpcs, unaryAttrs); - outstandingRpcsHistogram.record(currentOutstandingStreamingRpcs, streamingAttrs); + + recorder.channelPoolOutstandingRpcs.record( + clientInfo, transportType, lbPolicy, false, currentOutstandingUnaryRpcs); + recorder.channelPoolOutstandingRpcs.record( + clientInfo, transportType, lbPolicy, true, currentOutstandingStreamingRpcs); long errors = info.getAndResetErrorCount(); // Record errors with empty attributes. - perConnectionErrorCountHistogram.record(errors, Attributes.empty()); + recorder.perConnectionErrorCount.record(clientInfo, errors); } } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracer.java similarity index 89% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracer.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracer.java index f6d0858459..d9362acb48 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracer.java @@ -13,12 +13,14 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; import com.google.api.core.ObsoleteApi; import com.google.api.gax.tracing.ApiTracer; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; import com.google.common.collect.ImmutableList; import java.util.ArrayList; import java.util.List; @@ -197,13 +199,6 @@ public int getAttempt() { return attempt; } - @Override - public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - for (BigtableTracer tracer : bigtableTracers) { - tracer.recordGfeMetadata(latency, throwable); - } - } - @Override public void batchRequestThrottled(long throttledTimeMs) { for (BigtableTracer tracer : bigtableTracers) { @@ -212,16 +207,9 @@ public void batchRequestThrottled(long throttledTimeMs) { } @Override - public void setLocations(String zone, String cluster) { - for (BigtableTracer tracer : bigtableTracers) { - tracer.setLocations(zone, cluster); - } - } - - @Override - public void setTransportAttrs(BuiltinMetricsTracer.TransportAttrs attrs) { - for (BigtableTracer tracer : bigtableTracers) { - tracer.setTransportAttrs(attrs); + public void setSidebandData(MetadataExtractorInterceptor.SidebandData sidebandData) { + for (BigtableTracer bigtableTracer : bigtableTracers) { + bigtableTracer.setSidebandData(sidebandData); } } @@ -246,13 +234,6 @@ public void afterResponse(long applicationLatency) { } } - @Override - public void grpcChannelQueuedLatencies(long queuedTimeMs) { - for (BigtableTracer tracer : bigtableTracers) { - tracer.grpcChannelQueuedLatencies(queuedTimeMs); - } - } - @Override public void grpcMessageSent() { for (BigtableTracer tracer : bigtableTracers) { diff --git 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracerFactory.java similarity index 96% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerFactory.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracerFactory.java index 2d9256a5ea..8b2606e955 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerFactory.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracerFactory.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; import com.google.api.core.InternalApi; import com.google.api.gax.tracing.ApiTracer; diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/Pacemaker.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/Pacemaker.java new file mode 100644 index 0000000000..8a3771406a --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/Pacemaker.java @@ -0,0 +1,59 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; + +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry.RecorderRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; +import java.time.Duration; +import java.time.Instant; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; + +public class Pacemaker implements Runnable { + + static final Duration PACEMAKER_INTERVAL = Duration.ofMillis(100); + + private final RecorderRegistry registry; + private final ClientInfo clientInfo; + private final String executorName; + + private Instant prev; + + public Pacemaker(RecorderRegistry registry, ClientInfo clientInfo, String name) { + this.prev = Instant.now(); + this.registry = registry; + this.clientInfo = clientInfo; + this.executorName = name; + } + + public ScheduledFuture start(ScheduledExecutorService executor) { + return executor.scheduleAtFixedRate( + this, + Pacemaker.PACEMAKER_INTERVAL.toMillis(), + Pacemaker.PACEMAKER_INTERVAL.toMillis(), + TimeUnit.MILLISECONDS); + } + + @Override + public void run() { + Instant current = Instant.now(); + Duration delta = Duration.between(prev, current).minus(PACEMAKER_INTERVAL); + prev = current; + registry.pacemakerDelay.record( + clientInfo, executorName, delta.isNegative() ? 
Duration.ZERO : delta); + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/TracedBatcherUnaryCallable.java similarity index 93% rename from google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java rename to google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/TracedBatcherUnaryCallable.java index 44ba688d55..9b1b9764ab 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/TracedBatcherUnaryCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/TracedBatcherUnaryCallable.java @@ -13,7 +13,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package com.google.cloud.bigtable.data.v2.stub.metrics; +package com.google.cloud.bigtable.data.v2.internal.csm.tracers; import com.google.api.core.ApiFuture; import com.google.api.core.InternalApi; @@ -21,6 +21,7 @@ import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.UnaryCallable; import com.google.api.gax.tracing.ApiTracer; +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer; /** * This callable will extract total throttled time from {@link ApiCallContext} and add it to {@link diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableChannelPrimer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableChannelPrimer.java index 97c6e364c8..3b2a169910 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableChannelPrimer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableChannelPrimer.java @@ -20,9 +20,9 @@ import 
com.google.api.core.SettableApiFuture; import com.google.auth.Credentials; import com.google.bigtable.v2.BigtableGrpc; -import com.google.bigtable.v2.InstanceName; import com.google.bigtable.v2.PingAndWarmRequest; import com.google.bigtable.v2.PingAndWarmResponse; +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName; import com.google.cloud.bigtable.gaxx.grpc.ChannelPrimer; import io.grpc.CallCredentials; import io.grpc.CallOptions; @@ -80,7 +80,7 @@ static BigtableChannelPrimer create( request = PingAndWarmRequest.newBuilder() - .setName(InstanceName.format(projectId, instanceId)) + .setName(InstanceName.of(projectId, instanceId).toString()) .setAppProfileId(appProfileId) .build(); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableClientContext.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableClientContext.java index c7634bdc70..2828d67f43 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableClientContext.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/BigtableClientContext.java @@ -26,18 +26,20 @@ import com.google.api.gax.rpc.ClientContext; import com.google.auth.Credentials; import com.google.auth.oauth2.ServiceAccountJwtAccessCredentials; -import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.internal.JwtCredentialsWithAudience; -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants; -import com.google.cloud.bigtable.data.v2.stub.metrics.ChannelPoolMetricsTracer; +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry; +import com.google.cloud.bigtable.data.v2.internal.csm.Metrics; +import com.google.cloud.bigtable.data.v2.internal.csm.MetricsImpl; +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo; 
import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider; -import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; -import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; -import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider; import com.google.cloud.bigtable.gaxx.grpc.BigtableTransportChannelProvider; import com.google.cloud.bigtable.gaxx.grpc.ChannelPrimer; import io.grpc.ManagedChannelBuilder; -import io.grpc.opentelemetry.GrpcOpenTelemetry; +import io.opencensus.stats.Stats; +import io.opencensus.stats.StatsRecorder; +import io.opencensus.tags.Tagger; +import io.opencensus.tags.Tags; import io.opentelemetry.api.OpenTelemetry; import io.opentelemetry.sdk.OpenTelemetrySdk; import java.io.IOException; @@ -57,9 +59,9 @@ public class BigtableClientContext { private static final Logger logger = Logger.getLogger(BigtableClientContext.class.getName()); - @Nullable private final OpenTelemetry openTelemetry; - @Nullable private final OpenTelemetrySdk internalOpenTelemetry; - private final MetricsProvider metricsProvider; + private final boolean isChild; + private final ClientInfo clientInfo; + private final Metrics metrics; private final ClientContext clientContext; // the background executor shared for OTEL instances and monitoring client and all other // background tasks @@ -67,6 +69,18 @@ public class BigtableClientContext { public static BigtableClientContext create(EnhancedBigtableStubSettings settings) throws IOException { + return create(settings, Tags.getTagger(), Stats.getStatsRecorder()); + } + + public static BigtableClientContext create( + EnhancedBigtableStubSettings settings, Tagger ocTagger, StatsRecorder ocRecorder) + throws IOException { + ClientInfo clientInfo = + ClientInfo.builder() + .setInstanceName(InstanceName.of(settings.getProjectId(), settings.getInstanceId())) + .setAppProfileId(settings.getAppProfileId()) + .build(); + 
EnhancedBigtableStubSettings.Builder builder = settings.toBuilder(); // Set up credentials @@ -88,51 +102,51 @@ public static BigtableClientContext create(EnhancedBigtableStubSettings settings FixedExecutorProvider.create(backgroundExecutor, shouldAutoClose); builder.setBackgroundExecutorProvider(executorProvider); + MetricRegistry metricRegistry = new MetricRegistry(); // Set up OpenTelemetry - OpenTelemetry openTelemetry = null; + @Nullable OpenTelemetry userOtel = null; + if (settings.getMetricsProvider() instanceof CustomOpenTelemetryMetricsProvider) { + userOtel = + ((CustomOpenTelemetryMetricsProvider) settings.getMetricsProvider()).getOpenTelemetry(); + } + + @Nullable OpenTelemetrySdk builtinOtel = null; try { - // We don't want client side metrics to crash the client, so catch any exception when getting - // the OTEL instance and log the exception instead. - openTelemetry = - getOpenTelemetryFromMetricsProvider( - settings.getMetricsProvider(), - credentials, - settings.getMetricsEndpoint(), - universeDomain, - backgroundExecutor); + if (settings.areInternalMetricsEnabled()) { + builtinOtel = + MetricsImpl.createBuiltinOtel( + metricRegistry, + clientInfo, + credentials, + settings.getMetricsEndpoint(), + universeDomain, + backgroundExecutor); + } } catch (Throwable t) { logger.log(Level.WARNING, "Failed to get OTEL, will skip exporting client side metrics", t); } + Metrics metrics = + new MetricsImpl( + metricRegistry, + clientInfo, + settings.getTracerFactory(), + builtinOtel, + userOtel, + ocTagger, + ocRecorder, + backgroundExecutor); + // Set up channel InstantiatingGrpcChannelProvider.Builder transportProvider = builder.getTransportChannelProvider() instanceof InstantiatingGrpcChannelProvider ? 
((InstantiatingGrpcChannelProvider) builder.getTransportChannelProvider()).toBuilder() : null; - @Nullable OpenTelemetrySdk internalOtel = null; - @Nullable ChannelPoolMetricsTracer channelPoolMetricsTracer = null; - // Internal metrics are scoped to the connections, so we need a mutable transportProvider, - // otherwise there is - // no reason to build the internal OtelProvider if (transportProvider != null) { - internalOtel = - settings - .getInternalMetricsProvider() - .createOtelProvider(settings, credentials, backgroundExecutor); - if (internalOtel != null) { - channelPoolMetricsTracer = new ChannelPoolMetricsTracer(internalOtel); - - // Configure grpc metrics - configureGrpcOtel(transportProvider, internalOtel); - } - } + configureGrpcOtel(transportProvider, metrics); - if (transportProvider != null) { - // Set up cookie holder if routing cookie is enabled - if (builder.getEnableRoutingCookie()) { - setupCookieHolder(transportProvider); - } + setupCookieHolder(transportProvider); ChannelPrimer channelPrimer = NoOpChannelPrimer.create(); @@ -149,40 +163,27 @@ public static BigtableClientContext create(EnhancedBigtableStubSettings settings BigtableTransportChannelProvider btTransportProvider = BigtableTransportChannelProvider.create( - (InstantiatingGrpcChannelProvider) transportProvider.build(), + transportProvider.build(), channelPrimer, - channelPoolMetricsTracer, + metrics.getChannelPoolMetricsTracer(), backgroundExecutor); builder.setTransportChannelProvider(btTransportProvider); } ClientContext clientContext = ClientContext.create(builder.build()); - if (channelPoolMetricsTracer != null) { - channelPoolMetricsTracer.start(clientContext.getExecutor()); - } - return new BigtableClientContext( - clientContext, - openTelemetry, - internalOtel, - settings.getMetricsProvider(), - executorProvider); + metrics.start(); + try { + return new BigtableClientContext(false, clientInfo, clientContext, metrics, executorProvider); + } catch (IOException | RuntimeException 
t) { + metrics.close(); + throw t; + } } private static void configureGrpcOtel( - InstantiatingGrpcChannelProvider.Builder transportProvider, OpenTelemetrySdk otel) { - - GrpcOpenTelemetry grpcOtel = - GrpcOpenTelemetry.newBuilder() - .sdk(otel) - .addOptionalLabel("grpc.lb.locality") - // Disable default grpc metrics - .disableAllMetrics() - // Enable specific grpc metrics - .enableMetrics(BuiltinMetricsConstants.GRPC_METRICS.keySet()) - .build(); - + InstantiatingGrpcChannelProvider.Builder transportProvider, Metrics metrics) { @SuppressWarnings("rawtypes") ApiFunction oldConfigurator = transportProvider.getChannelConfigurator(); @@ -192,81 +193,64 @@ private static void configureGrpcOtel( if (oldConfigurator != null) { b = oldConfigurator.apply(b); } - grpcOtel.configureChannelBuilder(b); - return b; + return metrics.configureGrpcChannel(b); }); } private BigtableClientContext( + boolean isChild, + ClientInfo clientInfo, ClientContext clientContext, - @Nullable OpenTelemetry openTelemetry, - @Nullable OpenTelemetrySdk internalOtel, - MetricsProvider metricsProvider, - ExecutorProvider backgroundExecutorProvider) { - this.clientContext = clientContext; - this.openTelemetry = openTelemetry; - this.internalOpenTelemetry = internalOtel; - this.metricsProvider = metricsProvider; + Metrics metrics, + ExecutorProvider backgroundExecutorProvider) + throws IOException { + this.isChild = isChild; + this.clientInfo = clientInfo; + + this.metrics = metrics; this.backgroundExecutorProvider = backgroundExecutorProvider; + + this.clientContext = + clientContext.toBuilder().setTracerFactory(metrics.createTracerFactory(clientInfo)).build(); + } + + public ClientInfo getClientInfo() { + return clientInfo; } - public OpenTelemetry getOpenTelemetry() { - return this.openTelemetry; + public Metrics getMetrics() { + return metrics; } public ClientContext getClientContext() { return this.clientContext; } - public BigtableClientContext withClientContext(ClientContext clientContext) { + 
public BigtableClientContext createChild(InstanceName instanceName, String appProfileId) + throws IOException { return new BigtableClientContext( + true, + clientInfo.toBuilder().setInstanceName(instanceName).setAppProfileId(appProfileId).build(), clientContext, - openTelemetry, - internalOpenTelemetry, - metricsProvider, + metrics, backgroundExecutorProvider); } public void close() throws Exception { + if (isChild) { + return; + } + for (BackgroundResource resource : clientContext.getBackgroundResources()) { resource.close(); } - if (internalOpenTelemetry != null) { - internalOpenTelemetry.close(); - } - if (metricsProvider instanceof DefaultMetricsProvider && openTelemetry != null) { - ((OpenTelemetrySdk) openTelemetry).close(); - } + metrics.close(); + if (backgroundExecutorProvider.shouldAutoClose()) { backgroundExecutorProvider.getExecutor().shutdown(); } } - private static OpenTelemetry getOpenTelemetryFromMetricsProvider( - MetricsProvider metricsProvider, - @Nullable Credentials defaultCredentials, - @Nullable String metricsEndpoint, - String universeDomain, - ScheduledExecutorService executor) - throws IOException { - if (metricsProvider instanceof CustomOpenTelemetryMetricsProvider) { - CustomOpenTelemetryMetricsProvider customMetricsProvider = - (CustomOpenTelemetryMetricsProvider) metricsProvider; - return customMetricsProvider.getOpenTelemetry(); - } else if (metricsProvider instanceof DefaultMetricsProvider) { - Credentials credentials = - BigtableDataSettings.getMetricsCredentials() != null - ? 
BigtableDataSettings.getMetricsCredentials() - : defaultCredentials; - DefaultMetricsProvider defaultMetricsProvider = (DefaultMetricsProvider) metricsProvider; - return defaultMetricsProvider.getOpenTelemetry( - metricsEndpoint, universeDomain, credentials, executor); - } else if (metricsProvider instanceof NoopMetricsProvider) { - return null; - } - throw new IOException("Invalid MetricsProvider type " + metricsProvider); - } - private static void patchCredentials(EnhancedBigtableStubSettings.Builder settings) throws IOException { String audience = settings.getJwtAudience(); diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/ClientOperationSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/ClientOperationSettings.java new file mode 100644 index 0000000000..540eb08cc8 --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/ClientOperationSettings.java @@ -0,0 +1,406 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub; + +import com.google.api.core.InternalApi; +import com.google.api.gax.batching.BatchingSettings; +import com.google.api.gax.batching.FlowControlSettings; +import com.google.api.gax.batching.FlowController; +import com.google.api.gax.retrying.RetrySettings; +import com.google.api.gax.rpc.ServerStreamingCallSettings; +import com.google.api.gax.rpc.StatusCode; +import com.google.api.gax.rpc.UnaryCallSettings; +import com.google.bigtable.v2.PingAndWarmRequest; +import com.google.cloud.bigtable.data.v2.internal.PrepareQueryRequest; +import com.google.cloud.bigtable.data.v2.internal.PrepareResponse; +import com.google.cloud.bigtable.data.v2.internal.SqlRow; +import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; +import com.google.cloud.bigtable.data.v2.models.ConditionalRowMutation; +import com.google.cloud.bigtable.data.v2.models.KeyOffset; +import com.google.cloud.bigtable.data.v2.models.Query; +import com.google.cloud.bigtable.data.v2.models.Range; +import com.google.cloud.bigtable.data.v2.models.ReadChangeStreamQuery; +import com.google.cloud.bigtable.data.v2.models.ReadModifyWriteRow; +import com.google.cloud.bigtable.data.v2.models.Row; +import com.google.cloud.bigtable.data.v2.models.RowMutation; +import com.google.cloud.bigtable.data.v2.models.sql.BoundStatement; +import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; +import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableSet; +import java.util.List; +import java.util.Set; +import org.threeten.bp.Duration; + +@InternalApi +public class ClientOperationSettings { + private static final Set IDEMPOTENT_RETRY_CODES = + ImmutableSet.of(StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE); + + // Copy of default retrying settings in the yaml + private 
static final RetrySettings IDEMPOTENT_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setInitialRpcTimeout(Duration.ofSeconds(20)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofSeconds(20)) + .setTotalTimeout(Duration.ofMinutes(10)) + .build(); + + // Allow retrying ABORTED statuses. These will be returned by the server when the client is + // too slow to read the rows. This makes sense for the java client because retries happen + // after the row merging logic. Which means that the retry will not be invoked until the + // current buffered chunks are consumed. + private static final Set READ_ROWS_RETRY_CODES = + ImmutableSet.builder() + .addAll(IDEMPOTENT_RETRY_CODES) + .add(StatusCode.Code.ABORTED) + .build(); + + // Priming request should have a shorter timeout + private static final Duration PRIME_REQUEST_TIMEOUT = Duration.ofSeconds(30); + + private static final RetrySettings READ_ROWS_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setMaxAttempts(10) + .setInitialRpcTimeout(Duration.ofMinutes(30)) + .setRpcTimeoutMultiplier(2.0) + .setMaxRpcTimeout(Duration.ofMinutes(30)) + .setTotalTimeout(Duration.ofHours(12)) + .build(); + + private static final RetrySettings MUTATE_ROWS_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setInitialRpcTimeout(Duration.ofMinutes(1)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMinutes(1)) + .setTotalTimeout(Duration.ofMinutes(10)) + .build(); + + private static final Set GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_CODES = + ImmutableSet.builder() + .addAll(IDEMPOTENT_RETRY_CODES) + .add(StatusCode.Code.ABORTED) + .build(); + + 
private static final RetrySettings GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setMaxAttempts(10) + .setInitialRpcTimeout(Duration.ofMinutes(1)) + .setRpcTimeoutMultiplier(2.0) + .setMaxRpcTimeout(Duration.ofMinutes(10)) + .setTotalTimeout(Duration.ofMinutes(60)) + .build(); + + // Allow retrying ABORTED statuses. These will be returned by the server when the client is + // too slow to read the change stream records. This makes sense for the java client because + // retries happen after the mutation merging logic. Which means that the retry will not be + // invoked until the current buffered change stream mutations are consumed. + private static final Set READ_CHANGE_STREAM_RETRY_CODES = + ImmutableSet.builder() + .addAll(IDEMPOTENT_RETRY_CODES) + .add(StatusCode.Code.ABORTED) + .build(); + + private static final RetrySettings READ_CHANGE_STREAM_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setMaxAttempts(10) + .setInitialRpcTimeout(Duration.ofMinutes(5)) + .setRpcTimeoutMultiplier(2.0) + .setMaxRpcTimeout(Duration.ofMinutes(5)) + .setTotalTimeout(Duration.ofHours(12)) + .build(); + + // Allow retrying ABORTED statuses. These will be returned by the server when the client is + // too slow to read the responses. 
+ private static final Set EXECUTE_QUERY_RETRY_CODES = + ImmutableSet.builder() + .addAll(IDEMPOTENT_RETRY_CODES) + .add(StatusCode.Code.ABORTED) + .build(); + + // We use the same configuration as READ_ROWS + private static final RetrySettings EXECUTE_QUERY_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2.0) + .setMaxRetryDelay(Duration.ofMinutes(1)) + .setMaxAttempts(10) + .setInitialRpcTimeout(Duration.ofMinutes(30)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofMinutes(30)) + .setTotalTimeout(Duration.ofHours(12)) + .build(); + + // Similar to IDEMPOTENT but with a lower initial rpc timeout since we expect + // these calls to be quick in most circumstances + private static final RetrySettings PREPARE_QUERY_RETRY_SETTINGS = + RetrySettings.newBuilder() + .setInitialRetryDelay(Duration.ofMillis(10)) + .setRetryDelayMultiplier(2) + .setMaxRetryDelay(Duration.ofMinutes(1)) + // TODO: fix the settings: initial attempt deadline: 5s, max is 20s but multiplier is 1 + .setInitialRpcTimeout(Duration.ofSeconds(5)) + .setRpcTimeoutMultiplier(1.0) + .setMaxRpcTimeout(Duration.ofSeconds(20)) + .setTotalTimeout(Duration.ofMinutes(10)) + .build(); + + final ServerStreamingCallSettings readRowsSettings; + final UnaryCallSettings readRowSettings; + final UnaryCallSettings> sampleRowKeysSettings; + final UnaryCallSettings mutateRowSettings; + final BigtableBatchingCallSettings bulkMutateRowsSettings; + final BigtableBulkReadRowsCallSettings bulkReadRowsSettings; + final UnaryCallSettings checkAndMutateRowSettings; + final UnaryCallSettings readModifyWriteRowSettings; + final ServerStreamingCallSettings + generateInitialChangeStreamPartitionsSettings; + final ServerStreamingCallSettings + readChangeStreamSettings; + final UnaryCallSettings pingAndWarmSettings; + final ServerStreamingCallSettings executeQuerySettings; + final UnaryCallSettings prepareQuerySettings; + + 
ClientOperationSettings(Builder builder) { + // Since point reads, streaming reads, bulk reads share the same base callable that converts + // grpc errors into ApiExceptions, they must have the same retry codes. + Preconditions.checkState( + builder + .readRowSettings + .getRetryableCodes() + .equals(builder.readRowsSettings.getRetryableCodes()), + "Single ReadRow retry codes must match ReadRows retry codes"); + Preconditions.checkState( + builder + .bulkReadRowsSettings + .getRetryableCodes() + .equals(builder.readRowsSettings.getRetryableCodes()), + "Bulk ReadRow retry codes must match ReadRows retry codes"); + + // Per method settings. + readRowsSettings = builder.readRowsSettings.build(); + readRowSettings = builder.readRowSettings.build(); + sampleRowKeysSettings = builder.sampleRowKeysSettings.build(); + mutateRowSettings = builder.mutateRowSettings.build(); + bulkMutateRowsSettings = builder.bulkMutateRowsSettings.build(); + bulkReadRowsSettings = builder.bulkReadRowsSettings.build(); + checkAndMutateRowSettings = builder.checkAndMutateRowSettings.build(); + readModifyWriteRowSettings = builder.readModifyWriteRowSettings.build(); + generateInitialChangeStreamPartitionsSettings = + builder.generateInitialChangeStreamPartitionsSettings.build(); + readChangeStreamSettings = builder.readChangeStreamSettings.build(); + pingAndWarmSettings = builder.pingAndWarmSettings.build(); + executeQuerySettings = builder.executeQuerySettings.build(); + prepareQuerySettings = builder.prepareQuerySettings.build(); + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("readRowsSettings", readRowsSettings) + .add("readRowSettings", readRowSettings) + .add("sampleRowKeysSettings", sampleRowKeysSettings) + .add("mutateRowSettings", mutateRowSettings) + .add("bulkMutateRowsSettings", bulkMutateRowsSettings) + .add("bulkReadRowsSettings", bulkReadRowsSettings) + .add("checkAndMutateRowSettings", checkAndMutateRowSettings) + 
.add("readModifyWriteRowSettings", readModifyWriteRowSettings) + .add( + "generateInitialChangeStreamPartitionsSettings", + generateInitialChangeStreamPartitionsSettings) + .add("readChangeStreamSettings", readChangeStreamSettings) + .add("pingAndWarmSettings", pingAndWarmSettings) + .add("executeQuerySettings", executeQuerySettings) + .add("prepareQuerySettings", prepareQuerySettings) + .toString(); + } + + static class Builder { + ServerStreamingCallSettings.Builder readRowsSettings; + UnaryCallSettings.Builder readRowSettings; + UnaryCallSettings.Builder> sampleRowKeysSettings; + UnaryCallSettings.Builder mutateRowSettings; + BigtableBatchingCallSettings.Builder bulkMutateRowsSettings; + BigtableBulkReadRowsCallSettings.Builder bulkReadRowsSettings; + UnaryCallSettings.Builder checkAndMutateRowSettings; + UnaryCallSettings.Builder readModifyWriteRowSettings; + ServerStreamingCallSettings.Builder + generateInitialChangeStreamPartitionsSettings; + ServerStreamingCallSettings.Builder + readChangeStreamSettings; + UnaryCallSettings.Builder pingAndWarmSettings; + ServerStreamingCallSettings.Builder executeQuerySettings; + UnaryCallSettings.Builder prepareQuerySettings; + + Builder() { + BigtableStubSettings.Builder baseDefaults = BigtableStubSettings.newBuilder(); + + readRowsSettings = ServerStreamingCallSettings.newBuilder(); + + readRowsSettings + .setRetryableCodes(READ_ROWS_RETRY_CODES) + .setRetrySettings(READ_ROWS_RETRY_SETTINGS) + .setIdleTimeout(Duration.ofMinutes(5)) + .setWaitTimeout(Duration.ofMinutes(5)); + + readRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + readRowSettings + .setRetryableCodes(readRowsSettings.getRetryableCodes()) + .setRetrySettings(IDEMPOTENT_RETRY_SETTINGS); + + sampleRowKeysSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + sampleRowKeysSettings + .setRetryableCodes(IDEMPOTENT_RETRY_CODES) + .setRetrySettings( + IDEMPOTENT_RETRY_SETTINGS.toBuilder() + .setInitialRpcTimeout(Duration.ofMinutes(5)) + 
.setMaxRpcTimeout(Duration.ofMinutes(5)) + .build()); + + mutateRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + copyRetrySettings(baseDefaults.mutateRowSettings(), mutateRowSettings); + + long maxBulkMutateElementPerBatch = 100L; + long maxBulkMutateOutstandingElementCount = 20_000L; + + bulkMutateRowsSettings = + BigtableBatchingCallSettings.newBuilder(new MutateRowsBatchingDescriptor()) + .setRetryableCodes(IDEMPOTENT_RETRY_CODES) + .setRetrySettings(MUTATE_ROWS_RETRY_SETTINGS) + .setBatchingSettings( + BatchingSettings.newBuilder() + .setIsEnabled(true) + .setElementCountThreshold(maxBulkMutateElementPerBatch) + .setRequestByteThreshold(20L * 1024 * 1024) + .setDelayThreshold(Duration.ofSeconds(1)) + .setFlowControlSettings( + FlowControlSettings.newBuilder() + .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block) + .setMaxOutstandingRequestBytes(100L * 1024 * 1024) + .setMaxOutstandingElementCount(maxBulkMutateOutstandingElementCount) + .build()) + .build()); + + long maxBulkReadElementPerBatch = 100L; + long maxBulkReadRequestSizePerBatch = 400L * 1024L; + long maxBulkReadOutstandingElementCount = 20_000L; + + bulkReadRowsSettings = + BigtableBulkReadRowsCallSettings.newBuilder(new ReadRowsBatchingDescriptor()) + .setRetryableCodes(readRowsSettings.getRetryableCodes()) + .setRetrySettings(IDEMPOTENT_RETRY_SETTINGS) + .setBatchingSettings( + BatchingSettings.newBuilder() + .setElementCountThreshold(maxBulkReadElementPerBatch) + .setRequestByteThreshold(maxBulkReadRequestSizePerBatch) + .setDelayThreshold(Duration.ofSeconds(1)) + .setFlowControlSettings( + FlowControlSettings.newBuilder() + .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block) + .setMaxOutstandingElementCount(maxBulkReadOutstandingElementCount) + .build()) + .build()); + + checkAndMutateRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + copyRetrySettings(baseDefaults.checkAndMutateRowSettings(), checkAndMutateRowSettings); + + 
readModifyWriteRowSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + copyRetrySettings(baseDefaults.readModifyWriteRowSettings(), readModifyWriteRowSettings); + + generateInitialChangeStreamPartitionsSettings = ServerStreamingCallSettings.newBuilder(); + generateInitialChangeStreamPartitionsSettings + .setRetryableCodes(GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_CODES) + .setRetrySettings(GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS) + .setIdleTimeout(Duration.ofMinutes(5)) + .setWaitTimeout(Duration.ofMinutes(1)); + + readChangeStreamSettings = ServerStreamingCallSettings.newBuilder(); + readChangeStreamSettings + .setRetryableCodes(READ_CHANGE_STREAM_RETRY_CODES) + .setRetrySettings(READ_CHANGE_STREAM_RETRY_SETTINGS) + .setIdleTimeout(Duration.ofMinutes(5)) + .setWaitTimeout(Duration.ofMinutes(1)); + + pingAndWarmSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + pingAndWarmSettings.setRetrySettings( + RetrySettings.newBuilder() + .setMaxAttempts(1) + .setInitialRpcTimeout(PRIME_REQUEST_TIMEOUT) + .setMaxRpcTimeout(PRIME_REQUEST_TIMEOUT) + .setTotalTimeout(PRIME_REQUEST_TIMEOUT) + .build()); + + executeQuerySettings = ServerStreamingCallSettings.newBuilder(); + executeQuerySettings + .setRetryableCodes(EXECUTE_QUERY_RETRY_CODES) + .setRetrySettings(EXECUTE_QUERY_RETRY_SETTINGS) + .setIdleTimeout(Duration.ofMinutes(5)) + .setWaitTimeout(Duration.ofMinutes(5)); + + prepareQuerySettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); + prepareQuerySettings + .setRetryableCodes(IDEMPOTENT_RETRY_CODES) + .setRetrySettings(PREPARE_QUERY_RETRY_SETTINGS); + } + + Builder(ClientOperationSettings settings) { + readRowsSettings = settings.readRowsSettings.toBuilder(); + readRowSettings = settings.readRowSettings.toBuilder(); + sampleRowKeysSettings = settings.sampleRowKeysSettings.toBuilder(); + mutateRowSettings = settings.mutateRowSettings.toBuilder(); + bulkMutateRowsSettings = settings.bulkMutateRowsSettings.toBuilder(); + 
bulkReadRowsSettings = settings.bulkReadRowsSettings.toBuilder(); + checkAndMutateRowSettings = settings.checkAndMutateRowSettings.toBuilder(); + readModifyWriteRowSettings = settings.readModifyWriteRowSettings.toBuilder(); + generateInitialChangeStreamPartitionsSettings = + settings.generateInitialChangeStreamPartitionsSettings.toBuilder(); + readChangeStreamSettings = settings.readChangeStreamSettings.toBuilder(); + pingAndWarmSettings = settings.pingAndWarmSettings.toBuilder(); + executeQuerySettings = settings.executeQuerySettings.toBuilder(); + prepareQuerySettings = settings.prepareQuerySettings.toBuilder(); + } + + /** + * Copies settings from unary RPC to another. This is necessary when modifying request and + * response types while trying to retain retry settings. + */ + private static void copyRetrySettings( + UnaryCallSettings.Builder source, UnaryCallSettings.Builder dest) { + dest.setRetryableCodes(source.getRetryableCodes()); + dest.setRetrySettings(source.getRetrySettings()); + } + + ClientOperationSettings build() { + return new ClientOperationSettings(this); + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java index cf8b65684e..ec223c470e 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStub.java @@ -15,18 +15,12 @@ */ package com.google.cloud.bigtable.data.v2.stub; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; -import static 
com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; - import com.google.api.core.ApiFuture; import com.google.api.core.BetaApi; import com.google.api.core.InternalApi; import com.google.api.gax.batching.Batcher; import com.google.api.gax.batching.BatcherImpl; import com.google.api.gax.batching.FlowController; -import com.google.api.gax.grpc.GaxGrpcProperties; import com.google.api.gax.grpc.GrpcCallContext; import com.google.api.gax.grpc.GrpcCallSettings; import com.google.api.gax.grpc.GrpcRawCallableFactory; @@ -45,8 +39,6 @@ import com.google.api.gax.rpc.ServerStreamingCallable; import com.google.api.gax.rpc.UnaryCallSettings; import com.google.api.gax.rpc.UnaryCallable; -import com.google.api.gax.tracing.ApiTracerFactory; -import com.google.api.gax.tracing.OpencensusTracerFactory; import com.google.api.gax.tracing.SpanName; import com.google.api.gax.tracing.TracedServerStreamingCallable; import com.google.api.gax.tracing.TracedUnaryCallable; @@ -64,12 +56,14 @@ import com.google.bigtable.v2.ReadRowsResponse; import com.google.bigtable.v2.RowRange; import com.google.bigtable.v2.SampleRowKeysResponse; -import com.google.cloud.bigtable.Version; import com.google.cloud.bigtable.data.v2.internal.NameUtil; import com.google.cloud.bigtable.data.v2.internal.PrepareQueryRequest; import com.google.cloud.bigtable.data.v2.internal.PrepareResponse; import com.google.cloud.bigtable.data.v2.internal.RequestContext; import com.google.cloud.bigtable.data.v2.internal.SqlRow; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.BigtableTracerStreamingCallable; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.BigtableTracerUnaryCallable; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.TracedBatcherUnaryCallable; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.ChangeStreamMutation; import com.google.cloud.bigtable.data.v2.models.ChangeStreamRecord; 
@@ -94,15 +88,8 @@ import com.google.cloud.bigtable.data.v2.stub.changestream.GenerateInitialChangeStreamPartitionsUserCallable; import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamResumptionStrategy; import com.google.cloud.bigtable.data.v2.stub.changestream.ReadChangeStreamUserCallable; -import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerStreamingCallable; -import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracerUnaryCallable; -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTracerFactory; -import com.google.cloud.bigtable.data.v2.stub.metrics.CompositeTracerFactory; -import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsTracerFactory; -import com.google.cloud.bigtable.data.v2.stub.metrics.RpcMeasureConstants; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersServerStreamingCallable; import com.google.cloud.bigtable.data.v2.stub.metrics.StatsHeadersUnaryCallable; -import com.google.cloud.bigtable.data.v2.stub.metrics.TracedBatcherUnaryCallable; import com.google.cloud.bigtable.data.v2.stub.mutaterows.BulkMutateRowsUserFacingCallable; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsAttemptResult; import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; @@ -121,24 +108,13 @@ import com.google.cloud.bigtable.data.v2.stub.sql.MetadataErrorHandlingCallable; import com.google.cloud.bigtable.data.v2.stub.sql.PlanRefreshingCallable; import com.google.cloud.bigtable.data.v2.stub.sql.SqlRowMergingCallable; -import com.google.cloud.bigtable.gaxx.retrying.ApiResultRetryAlgorithm; import com.google.cloud.bigtable.gaxx.retrying.RetryInfoRetryAlgorithm; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Functions; import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import 
com.google.protobuf.ByteString; import io.grpc.MethodDescriptor; -import io.opencensus.stats.Stats; -import io.opencensus.stats.StatsRecorder; -import io.opencensus.tags.TagKey; -import io.opencensus.tags.TagValue; -import io.opencensus.tags.Tagger; -import io.opencensus.tags.Tags; -import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.api.common.Attributes; import java.io.IOException; import java.time.Duration; import java.util.List; @@ -165,10 +141,9 @@ public class EnhancedBigtableStub implements AutoCloseable { private static final String CLIENT_NAME = "Bigtable"; private static final long FLOW_CONTROL_ADJUSTING_INTERVAL_MS = TimeUnit.SECONDS.toMillis(20); - private final EnhancedBigtableStubSettings settings; + private final ClientOperationSettings perOpSettings; private final BigtableClientContext bigtableClientContext; - private final boolean closeClientContext; private final RequestContext requestContext; private final FlowController bulkMutationFlowController; private final DynamicFlowControlStats bulkMutationDynamicFlowControlStats; @@ -199,111 +174,17 @@ public class EnhancedBigtableStub implements AutoCloseable { public static EnhancedBigtableStub create(EnhancedBigtableStubSettings settings) throws IOException { - BigtableClientContext bigtableClientContext = createBigtableClientContext(settings); - OpenTelemetry openTelemetry = bigtableClientContext.getOpenTelemetry(); - ClientContext contextWithTracer = - bigtableClientContext.getClientContext().toBuilder() - .setTracerFactory(createBigtableTracerFactory(settings, openTelemetry)) - .build(); - bigtableClientContext = bigtableClientContext.withClientContext(contextWithTracer); - return new EnhancedBigtableStub(settings, bigtableClientContext); - } - - public static EnhancedBigtableStub createWithClientContext( - EnhancedBigtableStubSettings settings, BigtableClientContext clientContext) - throws IOException { - - return new EnhancedBigtableStub(settings, clientContext, false); - } - - 
public static BigtableClientContext createBigtableClientContext( - EnhancedBigtableStubSettings settings) throws IOException { - return BigtableClientContext.create(settings); - } - - public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings, @Nullable OpenTelemetry openTelemetry) - throws IOException { - return createBigtableTracerFactory( - settings, Tags.getTagger(), Stats.getStatsRecorder(), openTelemetry); - } - - @VisibleForTesting - public static ApiTracerFactory createBigtableTracerFactory( - EnhancedBigtableStubSettings settings, - Tagger tagger, - StatsRecorder stats, - @Nullable OpenTelemetry openTelemetry) - throws IOException { - String projectId = settings.getProjectId(); - String instanceId = settings.getInstanceId(); - String appProfileId = settings.getAppProfileId(); - - ImmutableMap attributes = - ImmutableMap.builder() - .put(RpcMeasureConstants.BIGTABLE_PROJECT_ID, TagValue.create(projectId)) - .put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID, TagValue.create(instanceId)) - .put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID, TagValue.create(appProfileId)) - .build(); - - ImmutableList.Builder tracerFactories = ImmutableList.builder(); - tracerFactories - .add( - // Add OpenCensus Tracing - new OpencensusTracerFactory( - ImmutableMap.builder() - // Annotate traces with the same tags as metrics - .put(RpcMeasureConstants.BIGTABLE_PROJECT_ID.getName(), projectId) - .put(RpcMeasureConstants.BIGTABLE_INSTANCE_ID.getName(), instanceId) - .put(RpcMeasureConstants.BIGTABLE_APP_PROFILE_ID.getName(), appProfileId) - // Also annotate traces with library versions - .put("gax", GaxGrpcProperties.getGaxGrpcVersion()) - .put("grpc", GaxGrpcProperties.getGrpcVersion()) - .put("gapic", Version.VERSION) - .build())) - // Add OpenCensus Metrics - .add(MetricsTracerFactory.create(tagger, stats, attributes)) - // Add user configured tracer - .add(settings.getTracerFactory()); - BuiltinMetricsTracerFactory 
builtinMetricsTracerFactory = - openTelemetry != null - ? BuiltinMetricsTracerFactory.create(openTelemetry, createBuiltinAttributes(settings)) - : null; - if (builtinMetricsTracerFactory != null) { - tracerFactories.add(builtinMetricsTracerFactory); - } - return new CompositeTracerFactory(tracerFactories.build()); - } - - static Attributes createBuiltinAttributes(EnhancedBigtableStubSettings settings) { - return Attributes.of( - BIGTABLE_PROJECT_ID_KEY, - settings.getProjectId(), - INSTANCE_ID_KEY, - settings.getInstanceId(), - APP_PROFILE_KEY, - settings.getAppProfileId(), - CLIENT_NAME_KEY, - "bigtable-java/" + Version.VERSION); + BigtableClientContext bigtableClientContext = BigtableClientContext.create(settings); + return new EnhancedBigtableStub(settings.getPerOpSettings(), bigtableClientContext); } public EnhancedBigtableStub( - EnhancedBigtableStubSettings settings, BigtableClientContext clientContext) { - this(settings, clientContext, true); - } - - public EnhancedBigtableStub( - EnhancedBigtableStubSettings settings, - BigtableClientContext clientContext, - boolean closeClientContext) { - this.settings = settings; + ClientOperationSettings perOpSettings, BigtableClientContext clientContext) { + this.perOpSettings = perOpSettings; this.bigtableClientContext = clientContext; - this.closeClientContext = closeClientContext; - this.requestContext = - RequestContext.create( - settings.getProjectId(), settings.getInstanceId(), settings.getAppProfileId()); + this.requestContext = RequestContext.create(clientContext.getClientInfo()); this.bulkMutationFlowController = - new FlowController(settings.bulkMutateRowsSettings().getDynamicFlowControlSettings()); + new FlowController(perOpSettings.bulkMutateRowsSettings.getDynamicFlowControlSettings()); this.bulkMutationDynamicFlowControlStats = new DynamicFlowControlStats(); readRowsCallable = createReadRowsCallable(new DefaultRowAdapter()); @@ -345,7 +226,7 @@ public EnhancedBigtableStub( @BetaApi("This surface is stable 
yet it might be removed in the future.") public ServerStreamingCallable createReadRowsRawCallable( RowAdapter rowAdapter) { - return createReadRowsBaseCallable(settings.readRowsSettings(), rowAdapter) + return createReadRowsBaseCallable(perOpSettings.readRowsSettings, rowAdapter) .withDefaultCallContext(bigtableClientContext.getClientContext().getDefaultCallContext()); } @@ -366,7 +247,7 @@ public ServerStreamingCallable createReadRowsRawCa public ServerStreamingCallable createReadRowsCallable( RowAdapter rowAdapter) { ServerStreamingCallable readRowsCallable = - createReadRowsBaseCallable(settings.readRowsSettings(), rowAdapter); + createReadRowsBaseCallable(perOpSettings.readRowsSettings, rowAdapter); ServerStreamingCallable readRowsUserCallable = new ReadRowsUserCallable<>(readRowsCallable, requestContext); @@ -382,7 +263,7 @@ public ServerStreamingCallable createReadRowsCallable( bigtableClientContext .getClientContext() .getDefaultCallContext() - .withRetrySettings(settings.readRowsSettings().getRetrySettings())); + .withRetrySettings(perOpSettings.readRowsSettings.getRetrySettings())); } /** @@ -405,8 +286,8 @@ public UnaryCallable createReadRowCallable(RowAdapter ServerStreamingCallable readRowsCallable = createReadRowsBaseCallable( ServerStreamingCallSettings.newBuilder() - .setRetryableCodes(settings.readRowSettings().getRetryableCodes()) - .setRetrySettings(settings.readRowSettings().getRetrySettings()) + .setRetryableCodes(perOpSettings.readRowSettings.getRetryableCodes()) + .setRetrySettings(perOpSettings.readRowSettings.getRetrySettings()) .setIdleTimeoutDuration(Duration.ZERO) .setWaitTimeoutDuration(Duration.ZERO) .build(), @@ -422,7 +303,7 @@ public UnaryCallable createReadRowCallable(RowAdapter readRowCallable, clientContext .getDefaultCallContext() - .withRetrySettings(settings.readRowSettings().getRetrySettings()), + .withRetrySettings(perOpSettings.readRowSettings.getRetrySettings()), clientContext.getTracerFactory(), getSpanName("ReadRow"), /* 
allowNoResponses= */ true); @@ -525,7 +406,7 @@ public ServerStreamingCallable createSkipLargeRowsCall RowAdapter rowAdapter) { ServerStreamingCallSettings readRowsSettings = - (ServerStreamingCallSettings) settings.readRowsSettings(); + (ServerStreamingCallSettings) perOpSettings.readRowsSettings; ServerStreamingCallable base = GrpcRawCallableFactory.createServerStreamingCallable( @@ -616,7 +497,7 @@ public ServerStreamingCallable createSkipLargeRowsCall private UnaryCallable> createBulkReadRowsCallable( RowAdapter rowAdapter) { ServerStreamingCallable readRowsCallable = - createReadRowsBaseCallable(settings.readRowsSettings(), rowAdapter); + createReadRowsBaseCallable(perOpSettings.readRowsSettings, rowAdapter); ServerStreamingCallable readRowsUserCallable = new ReadRowsUserCallable<>(readRowsCallable, requestContext); @@ -636,7 +517,7 @@ private UnaryCallable> createBulkReadRowsCallable( bigtableClientContext .getClientContext() .getDefaultCallContext() - .withRetrySettings(settings.readRowsSettings().getRetrySettings())); + .withRetrySettings(perOpSettings.readRowsSettings.getRetrySettings())); } /** @@ -686,7 +567,7 @@ public ApiFuture> futureCall(String s, ApiCallContext apiCallCon composeRequestParams( r.getAppProfileId(), r.getTableName(), r.getAuthorizedViewName())) .build(), - settings.sampleRowKeysSettings().getRetryableCodes()); + perOpSettings.sampleRowKeysSettings.getRetryableCodes()); UnaryCallable> spoolable = base.all(); @@ -695,10 +576,10 @@ public ApiFuture> futureCall(String s, ApiCallContext apiCallCon withStatsHeaders = new StatsHeadersUnaryCallable<>(spoolable); UnaryCallable> - withBigtableTracer = new BigtableTracerUnaryCallable<>(withStatsHeaders); + withAttemptTracer = new BigtableTracerUnaryCallable<>(withStatsHeaders); UnaryCallable> - retryable = withRetries(withBigtableTracer, settings.sampleRowKeysSettings()); + retryable = withRetries(withAttemptTracer, perOpSettings.sampleRowKeysSettings); return createUserFacingUnaryCallable( 
methodName, @@ -707,7 +588,7 @@ public ApiFuture> futureCall(String s, ApiCallContext apiCallCon bigtableClientContext .getClientContext() .getDefaultCallContext() - .withRetrySettings(settings.sampleRowKeysSettings().getRetrySettings()))); + .withRetrySettings(perOpSettings.sampleRowKeysSettings.getRetrySettings()))); } /** @@ -724,7 +605,7 @@ private UnaryCallable createMutateRowCallable() { req -> composeRequestParams( req.getAppProfileId(), req.getTableName(), req.getAuthorizedViewName()), - settings.mutateRowSettings(), + perOpSettings.mutateRowSettings, req -> req.toProto(requestContext), resp -> null); } @@ -758,12 +639,12 @@ private UnaryCallable createMutateRowsBas composeRequestParams( r.getAppProfileId(), r.getTableName(), r.getAuthorizedViewName())) .build(), - settings.bulkMutateRowsSettings().getRetryableCodes()); + perOpSettings.bulkMutateRowsSettings.getRetryableCodes()); ServerStreamingCallable callable = new StatsHeadersServerStreamingCallable<>(base); - if (settings.bulkMutateRowsSettings().isServerInitiatedFlowControlEnabled()) { + if (perOpSettings.bulkMutateRowsSettings.isServerInitiatedFlowControlEnabled()) { callable = new RateLimitingServerStreamingCallable(callable); } @@ -774,15 +655,12 @@ private UnaryCallable createMutateRowsBas ServerStreamingCallable convertException = new ConvertExceptionCallable<>(callable); - ServerStreamingCallable withBigtableTracer = + ServerStreamingCallable withAttemptTracer = new BigtableTracerStreamingCallable<>(convertException); - BasicResultRetryAlgorithm resultRetryAlgorithm; - if (settings.getEnableRetryInfo()) { - resultRetryAlgorithm = new RetryInfoRetryAlgorithm<>(); - } else { - resultRetryAlgorithm = new ApiResultRetryAlgorithm<>(); - } + BasicResultRetryAlgorithm resultRetryAlgorithm = + new RetryInfoRetryAlgorithm<>(); + MutateRowsPartialErrorRetryAlgorithm mutateRowsPartialErrorRetryAlgorithm = new MutateRowsPartialErrorRetryAlgorithm(resultRetryAlgorithm); @@ -790,32 +668,29 @@ private 
UnaryCallable createMutateRowsBas new RetryAlgorithm<>( mutateRowsPartialErrorRetryAlgorithm, new ExponentialRetryAlgorithm( - settings.bulkMutateRowsSettings().getRetrySettings(), clientContext.getClock())); + perOpSettings.bulkMutateRowsSettings.getRetrySettings(), clientContext.getClock())); RetryingExecutorWithContext retryingExecutor = new ScheduledRetryingExecutor<>(retryAlgorithm, clientContext.getExecutor()); UnaryCallable baseCallable = new MutateRowsRetryingCallable( clientContext.getDefaultCallContext(), - withBigtableTracer, + withAttemptTracer, retryingExecutor, - settings.bulkMutateRowsSettings().getRetryableCodes(), + perOpSettings.bulkMutateRowsSettings.getRetryableCodes(), retryAlgorithm); - UnaryCallable withCookie = baseCallable; - - if (settings.getEnableRoutingCookie()) { - withCookie = new CookiesUnaryCallable<>(baseCallable); - } + UnaryCallable withCookie = + new CookiesUnaryCallable<>(baseCallable); UnaryCallable flowControlCallable = null; - if (settings.bulkMutateRowsSettings().isLatencyBasedThrottlingEnabled()) { + if (perOpSettings.bulkMutateRowsSettings.isLatencyBasedThrottlingEnabled()) { flowControlCallable = new DynamicFlowControlCallable( withCookie, bulkMutationFlowController, bulkMutationDynamicFlowControlStats, - settings.bulkMutateRowsSettings().getTargetRpcLatencyMs(), + perOpSettings.bulkMutateRowsSettings.getTargetRpcLatencyMs(), FLOW_CONTROL_ADJUSTING_INTERVAL_MS); } UnaryCallable userFacing = @@ -834,7 +709,7 @@ private UnaryCallable createMutateRowsBas return traced.withDefaultCallContext( clientContext .getDefaultCallContext() - .withRetrySettings(settings.bulkMutateRowsSettings().getRetrySettings())); + .withRetrySettings(perOpSettings.bulkMutateRowsSettings.getRetrySettings())); } /** @@ -859,10 +734,10 @@ private UnaryCallable createMutateRowsBas public Batcher newMutateRowsBatcher( @Nonnull String tableId, @Nullable GrpcCallContext ctx) { return new BatcherImpl<>( - 
settings.bulkMutateRowsSettings().getBatchingDescriptor(), + perOpSettings.bulkMutateRowsSettings.getBatchingDescriptor(), bulkMutateRowsCallable, BulkMutation.create(tableId), - settings.bulkMutateRowsSettings().getBatchingSettings(), + perOpSettings.bulkMutateRowsSettings.getBatchingSettings(), bigtableClientContext.getClientContext().getExecutor(), bulkMutationFlowController, MoreObjects.firstNonNull( @@ -891,10 +766,10 @@ public Batcher newMutateRowsBatcher( public Batcher newMutateRowsBatcher( TargetId targetId, @Nullable GrpcCallContext ctx) { return new BatcherImpl<>( - settings.bulkMutateRowsSettings().getBatchingDescriptor(), + perOpSettings.bulkMutateRowsSettings.getBatchingDescriptor(), bulkMutateRowsCallable, BulkMutation.create(targetId), - settings.bulkMutateRowsSettings().getBatchingSettings(), + perOpSettings.bulkMutateRowsSettings.getBatchingSettings(), bigtableClientContext.getClientContext().getExecutor(), bulkMutationFlowController, MoreObjects.firstNonNull( @@ -920,10 +795,10 @@ public Batcher newBulkReadRowsBatcher( @Nonnull Query query, @Nullable GrpcCallContext ctx) { Preconditions.checkNotNull(query, "query cannot be null"); return new BatcherImpl<>( - settings.bulkReadRowsSettings().getBatchingDescriptor(), + perOpSettings.bulkReadRowsSettings.getBatchingDescriptor(), bulkReadRowsCallable, query, - settings.bulkReadRowsSettings().getBatchingSettings(), + perOpSettings.bulkReadRowsSettings.getBatchingSettings(), bigtableClientContext.getClientContext().getExecutor(), null, MoreObjects.firstNonNull( @@ -945,7 +820,7 @@ private UnaryCallable createCheckAndMutateRowCa req -> composeRequestParams( req.getAppProfileId(), req.getTableName(), req.getAuthorizedViewName()), - settings.checkAndMutateRowSettings(), + perOpSettings.checkAndMutateRowSettings, req -> req.toProto(requestContext), CheckAndMutateRowResponse::getPredicateMatched); } @@ -968,7 +843,7 @@ private UnaryCallable createReadModifyWriteRowCallable( req -> composeRequestParams( 
req.getAppProfileId(), req.getTableName(), req.getAuthorizedViewName()), - settings.readModifyWriteRowSettings(), + perOpSettings.readModifyWriteRowSettings, req -> req.toProto(requestContext), resp -> rowAdapter.createRowFromProto(resp.getRow())); } @@ -1002,7 +877,7 @@ private UnaryCallable createReadModifyWriteRowCallable( .setParamsExtractor( r -> composeRequestParams(r.getAppProfileId(), r.getTableName(), "")) .build(), - settings.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()); + perOpSettings.generateInitialChangeStreamPartitionsSettings.getRetryableCodes()); ServerStreamingCallable userCallable = new GenerateInitialChangeStreamPartitionsUserCallable(base, requestContext); @@ -1021,23 +896,23 @@ private UnaryCallable createReadModifyWriteRowCallable( ServerStreamingCallSettings innerSettings = ServerStreamingCallSettings.newBuilder() .setRetryableCodes( - settings.generateInitialChangeStreamPartitionsSettings().getRetryableCodes()) + perOpSettings.generateInitialChangeStreamPartitionsSettings.getRetryableCodes()) .setRetrySettings( - settings.generateInitialChangeStreamPartitionsSettings().getRetrySettings()) + perOpSettings.generateInitialChangeStreamPartitionsSettings.getRetrySettings()) .setIdleTimeout( - settings.generateInitialChangeStreamPartitionsSettings().getIdleTimeout()) + perOpSettings.generateInitialChangeStreamPartitionsSettings.getIdleTimeout()) .setWaitTimeout( - settings.generateInitialChangeStreamPartitionsSettings().getWaitTimeout()) + perOpSettings.generateInitialChangeStreamPartitionsSettings.getWaitTimeout()) .build(); ServerStreamingCallable watched = Callables.watched(convertException, innerSettings, clientContext); - ServerStreamingCallable withBigtableTracer = + ServerStreamingCallable withAttemptTracer = new BigtableTracerStreamingCallable<>(watched); ServerStreamingCallable retrying = - withRetries(withBigtableTracer, innerSettings); + withRetries(withAttemptTracer, innerSettings); SpanName span = 
getSpanName("GenerateInitialChangeStreamPartitions"); ServerStreamingCallable traced = @@ -1047,7 +922,7 @@ private UnaryCallable createReadModifyWriteRowCallable( clientContext .getDefaultCallContext() .withRetrySettings( - settings.generateInitialChangeStreamPartitionsSettings().getRetrySettings())); + perOpSettings.generateInitialChangeStreamPartitionsSettings.getRetrySettings())); } /** @@ -1076,7 +951,7 @@ private UnaryCallable createReadModifyWriteRowCallable( .setParamsExtractor( r -> composeRequestParams(r.getAppProfileId(), r.getTableName(), "")) .build(), - settings.readChangeStreamSettings().getRetryableCodes()); + perOpSettings.readChangeStreamSettings.getRetryableCodes()); ServerStreamingCallable withStatsHeaders = new StatsHeadersServerStreamingCallable<>(base); @@ -1096,20 +971,20 @@ private UnaryCallable createReadModifyWriteRowCallable( ServerStreamingCallSettings.newBuilder() .setResumptionStrategy( new ReadChangeStreamResumptionStrategy<>(changeStreamRecordAdapter)) - .setRetryableCodes(settings.readChangeStreamSettings().getRetryableCodes()) - .setRetrySettings(settings.readChangeStreamSettings().getRetrySettings()) - .setIdleTimeout(settings.readChangeStreamSettings().getIdleTimeout()) - .setWaitTimeout(settings.readChangeStreamSettings().getWaitTimeout()) + .setRetryableCodes(perOpSettings.readChangeStreamSettings.getRetryableCodes()) + .setRetrySettings(perOpSettings.readChangeStreamSettings.getRetrySettings()) + .setIdleTimeout(perOpSettings.readChangeStreamSettings.getIdleTimeout()) + .setWaitTimeout(perOpSettings.readChangeStreamSettings.getWaitTimeout()) .build(); ServerStreamingCallable watched = Callables.watched(merging, innerSettings, clientContext); - ServerStreamingCallable withBigtableTracer = + ServerStreamingCallable withAttemptTracer = new BigtableTracerStreamingCallable<>(watched); ServerStreamingCallable readChangeStreamCallable = - withRetries(withBigtableTracer, innerSettings); + withRetries(withAttemptTracer, 
innerSettings); ServerStreamingCallable readChangeStreamUserCallable = @@ -1123,7 +998,7 @@ private UnaryCallable createReadModifyWriteRowCallable( return traced.withDefaultCallContext( clientContext .getDefaultCallContext() - .withRetrySettings(settings.readChangeStreamSettings().getRetrySettings())); + .withRetrySettings(perOpSettings.readChangeStreamSettings.getRetrySettings())); } /** @@ -1160,7 +1035,7 @@ public Map extract(ExecuteQueryRequest executeQueryRequest) { } }) .build(), - settings.executeQuerySettings().getRetryableCodes()); + perOpSettings.executeQuerySettings.getRetryableCodes()); ServerStreamingCallable withStatsHeaders = new StatsHeadersServerStreamingCallable<>(base); @@ -1175,13 +1050,16 @@ public Map extract(ExecuteQueryRequest executeQueryRequest) { ServerStreamingCallable convertException = new ConvertExceptionCallable<>(withPlanRefresh); + ServerStreamingCallable withAttemptTracer = + new BigtableTracerStreamingCallable<>(convertException); + ServerStreamingCallSettings retrySettings = ServerStreamingCallSettings.newBuilder() .setResumptionStrategy(new ExecuteQueryResumptionStrategy()) - .setRetryableCodes(settings.executeQuerySettings().getRetryableCodes()) - .setRetrySettings(settings.executeQuerySettings().getRetrySettings()) - .setIdleTimeout(settings.executeQuerySettings().getIdleTimeout()) - .setWaitTimeout(settings.executeQuerySettings().getWaitTimeout()) + .setRetryableCodes(perOpSettings.executeQuerySettings.getRetryableCodes()) + .setRetrySettings(perOpSettings.executeQuerySettings.getRetrySettings()) + .setIdleTimeout(perOpSettings.executeQuerySettings.getIdleTimeout()) + .setWaitTimeout(perOpSettings.executeQuerySettings.getWaitTimeout()) .build(); // Retries need to happen before row merging, because the resumeToken is part @@ -1189,15 +1067,15 @@ public Map extract(ExecuteQueryRequest executeQueryRequest) { // attempt stream will have reset set to true, so any unyielded data from the previous // attempt will be reset properly 
ServerStreamingCallable retries = - withRetries(convertException, retrySettings); + withRetries(withAttemptTracer, retrySettings); ServerStreamingCallable merging = new SqlRowMergingCallable(retries); ServerStreamingCallSettings watchdogSettings = ServerStreamingCallSettings.newBuilder() - .setIdleTimeout(settings.executeQuerySettings().getIdleTimeout()) - .setWaitTimeout(settings.executeQuerySettings().getWaitTimeout()) + .setIdleTimeout(perOpSettings.executeQuerySettings.getIdleTimeout()) + .setWaitTimeout(perOpSettings.executeQuerySettings.getWaitTimeout()) .build(); // Watchdog needs to stay above the metadata error handling so that watchdog errors @@ -1208,26 +1086,23 @@ public Map extract(ExecuteQueryRequest executeQueryRequest) { ServerStreamingCallable passingThroughErrorsToMetadata = new MetadataErrorHandlingCallable(watched); - ServerStreamingCallable withBigtableTracer = - new BigtableTracerStreamingCallable<>(passingThroughErrorsToMetadata); - SpanName span = getSpanName("ExecuteQuery"); ServerStreamingCallable traced = new TracedServerStreamingCallable<>( - withBigtableTracer, clientContext.getTracerFactory(), span); + passingThroughErrorsToMetadata, clientContext.getTracerFactory(), span); return new ExecuteQueryCallable( traced.withDefaultCallContext( clientContext .getDefaultCallContext() - .withRetrySettings(settings.executeQuerySettings().getRetrySettings()))); + .withRetrySettings(perOpSettings.executeQuerySettings.getRetrySettings()))); } private UnaryCallable createPrepareQueryCallable() { return createUnaryCallable( BigtableGrpc.getPrepareQueryMethod(), req -> composeInstanceLevelRequestParams(req.getInstanceName(), req.getAppProfileId()), - settings.prepareQuerySettings(), + perOpSettings.prepareQuerySettings, req -> req.toProto(requestContext), PrepareResponse::fromProto); } @@ -1311,56 +1186,31 @@ ServerStreamingCallSettings convertUnaryToServerStreamingSettings( private UnaryCallable withRetries( UnaryCallable innerCallable, 
UnaryCallSettings unaryCallSettings) { - UnaryCallable retrying; - if (settings.getEnableRetryInfo()) { - retrying = - com.google.cloud.bigtable.gaxx.retrying.Callables.retrying( - innerCallable, unaryCallSettings, bigtableClientContext.getClientContext()); - } else { - retrying = - Callables.retrying( - innerCallable, unaryCallSettings, bigtableClientContext.getClientContext()); - } - if (settings.getEnableRoutingCookie()) { - return new CookiesUnaryCallable<>(retrying); - } - return retrying; + UnaryCallable retrying = + com.google.cloud.bigtable.gaxx.retrying.Callables.retrying( + innerCallable, unaryCallSettings, bigtableClientContext.getClientContext()); + return new CookiesUnaryCallable<>(retrying); } private ServerStreamingCallable withRetries( ServerStreamingCallable innerCallable, ServerStreamingCallSettings serverStreamingCallSettings) { - ServerStreamingCallable retrying; - if (settings.getEnableRetryInfo()) { - retrying = - com.google.cloud.bigtable.gaxx.retrying.Callables.retrying( - innerCallable, serverStreamingCallSettings, bigtableClientContext.getClientContext()); - } else { - retrying = - Callables.retrying( - innerCallable, serverStreamingCallSettings, bigtableClientContext.getClientContext()); - } - if (settings.getEnableRoutingCookie()) { - return new CookiesServerStreamingCallable<>(retrying); - } - return retrying; + ServerStreamingCallable retrying = + com.google.cloud.bigtable.gaxx.retrying.Callables.retrying( + innerCallable, serverStreamingCallSettings, bigtableClientContext.getClientContext()); + + return new CookiesServerStreamingCallable<>(retrying); } private ServerStreamingCallable largeRowWithRetries( ServerStreamingCallable innerCallable, ServerStreamingCallSettings serverStreamingCallSettings) { - // Retrying algorithm in retryingForLargeRows also takes RetryInfo into consideration, so we - // skip the check for settings.getEnableRetryInfo here - ServerStreamingCallable retrying; - retrying = + ServerStreamingCallable retrying = 
com.google.cloud.bigtable.gaxx.retrying.Callables.retryingForLargeRows( innerCallable, serverStreamingCallSettings, bigtableClientContext.getClientContext()); - if (settings.getEnableRoutingCookie()) { - return new CookiesServerStreamingCallable<>(retrying); - } - return retrying; + return new CookiesServerStreamingCallable<>(retrying); } // @@ -1454,12 +1304,10 @@ private SpanName getSpanName(String methodName) { @Override public void close() { - if (closeClientContext) { - try { - bigtableClientContext.close(); - } catch (Exception e) { - throw new IllegalStateException("failed to close client context", e); - } + try { + bigtableClientContext.close(); + } catch (Exception e) { + throw new IllegalStateException("failed to close client context", e); } } } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java index d1fe259ea1..1a416d51e4 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettings.java @@ -21,7 +21,6 @@ import com.google.api.gax.batching.BatchingSettings; import com.google.api.gax.batching.FlowControlSettings; import com.google.api.gax.batching.FlowController; -import com.google.api.gax.batching.FlowController.LimitExceededBehavior; import com.google.api.gax.core.GoogleCredentialsProvider; import com.google.api.gax.grpc.ChannelPoolSettings; import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; @@ -32,7 +31,6 @@ import com.google.api.gax.rpc.StubSettings; import com.google.api.gax.rpc.TransportChannelProvider; import com.google.api.gax.rpc.UnaryCallSettings; -import com.google.auth.Credentials; import com.google.bigtable.v2.FeatureFlags; import com.google.bigtable.v2.PingAndWarmRequest; import 
com.google.cloud.bigtable.Version; @@ -51,15 +49,10 @@ import com.google.cloud.bigtable.data.v2.models.sql.BoundStatement; import com.google.cloud.bigtable.data.v2.stub.metrics.DefaultMetricsProvider; import com.google.cloud.bigtable.data.v2.stub.metrics.MetricsProvider; -import com.google.cloud.bigtable.data.v2.stub.metrics.Util; -import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor; -import com.google.cloud.bigtable.data.v2.stub.readrows.ReadRowsBatchingDescriptor; import com.google.common.base.MoreObjects; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import io.opentelemetry.sdk.OpenTelemetrySdk; import java.io.ByteArrayOutputStream; import java.io.IOException; import java.nio.charset.StandardCharsets; @@ -67,9 +60,6 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.concurrent.ScheduledExecutorService; -import java.util.logging.Logger; import javax.annotation.Nonnull; import javax.annotation.Nullable; import org.threeten.bp.Duration; @@ -103,9 +93,6 @@ * } */ public class EnhancedBigtableStubSettings extends StubSettings { - private static final Logger logger = - Logger.getLogger(EnhancedBigtableStubSettings.class.getName()); - // The largest message that can be received is a 256 MB ReadRowsResponse. 
private static final int MAX_MESSAGE_SIZE = 256 * 1024 * 1024; private static final String SERVER_DEFAULT_APP_PROFILE_ID = ""; @@ -118,123 +105,6 @@ public class EnhancedBigtableStubSettings extends StubSettings IDEMPOTENT_RETRY_CODES = - ImmutableSet.of(Code.DEADLINE_EXCEEDED, Code.UNAVAILABLE); - - // Copy of default retrying settings in the yaml - private static final RetrySettings IDEMPOTENT_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setInitialRpcTimeout(Duration.ofSeconds(20)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofSeconds(20)) - .setTotalTimeout(Duration.ofMinutes(10)) - .build(); - - // Allow retrying ABORTED statuses. These will be returned by the server when the client is - // too slow to read the rows. This makes sense for the java client because retries happen - // after the row merging logic. Which means that the retry will not be invoked until the - // current buffered chunks are consumed. 
- private static final Set READ_ROWS_RETRY_CODES = - ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); - - // Priming request should have a shorter timeout - private static Duration PRIME_REQUEST_TIMEOUT = Duration.ofSeconds(30); - - private static final RetrySettings READ_ROWS_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2.0) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setMaxAttempts(10) - .setJittered(true) - .setInitialRpcTimeout(Duration.ofMinutes(30)) - .setRpcTimeoutMultiplier(2.0) - .setMaxRpcTimeout(Duration.ofMinutes(30)) - .setTotalTimeout(Duration.ofHours(12)) - .build(); - - private static final RetrySettings MUTATE_ROWS_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setInitialRpcTimeout(Duration.ofMinutes(1)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofMinutes(1)) - .setTotalTimeout(Duration.ofMinutes(10)) - .build(); - - private static final Set GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_CODES = - ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); - - private static final RetrySettings GENERATE_INITIAL_CHANGE_STREAM_PARTITIONS_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2.0) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setMaxAttempts(10) - .setJittered(true) - .setInitialRpcTimeout(Duration.ofMinutes(1)) - .setRpcTimeoutMultiplier(2.0) - .setMaxRpcTimeout(Duration.ofMinutes(10)) - .setTotalTimeout(Duration.ofMinutes(60)) - .build(); - - // Allow retrying ABORTED statuses. These will be returned by the server when the client is - // too slow to read the change stream records. This makes sense for the java client because - // retries happen after the mutation merging logic. 
Which means that the retry will not be - // invoked until the current buffered change stream mutations are consumed. - private static final Set READ_CHANGE_STREAM_RETRY_CODES = - ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); - - private static final RetrySettings READ_CHANGE_STREAM_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2.0) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setMaxAttempts(10) - .setJittered(true) - .setInitialRpcTimeout(Duration.ofMinutes(5)) - .setRpcTimeoutMultiplier(2.0) - .setMaxRpcTimeout(Duration.ofMinutes(5)) - .setTotalTimeout(Duration.ofHours(12)) - .build(); - - // Allow retrying ABORTED statuses. These will be returned by the server when the client is - // too slow to read the responses. - private static final Set EXECUTE_QUERY_RETRY_CODES = - ImmutableSet.builder().addAll(IDEMPOTENT_RETRY_CODES).add(Code.ABORTED).build(); - - // We use the same configuration as READ_ROWS - private static final RetrySettings EXECUTE_QUERY_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2.0) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setMaxAttempts(10) - .setJittered(true) - .setInitialRpcTimeout(Duration.ofMinutes(30)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofMinutes(30)) - .setTotalTimeout(Duration.ofHours(12)) - .build(); - - // Similar to IDEMPOTENT but with a lower initial rpc timeout since we expect - // these calls to be quick in most circumstances - private static final RetrySettings PREPARE_QUERY_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(10)) - .setRetryDelayMultiplier(2) - .setMaxRetryDelay(Duration.ofMinutes(1)) - .setInitialRpcTimeout(Duration.ofSeconds(5)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofSeconds(20)) - .setTotalTimeout(Duration.ofMinutes(10)) - .build(); - /** * 
Scopes that are equivalent to JWT's audience. * @@ -259,78 +129,29 @@ public class EnhancedBigtableStubSettings extends StubSettings primedTableIds; - private final boolean enableRoutingCookie; - private final boolean enableRetryInfo; - - private final ServerStreamingCallSettings readRowsSettings; - private final UnaryCallSettings readRowSettings; - private final UnaryCallSettings> sampleRowKeysSettings; - private final UnaryCallSettings mutateRowSettings; - private final BigtableBatchingCallSettings bulkMutateRowsSettings; - private final BigtableBulkReadRowsCallSettings bulkReadRowsSettings; - private final UnaryCallSettings checkAndMutateRowSettings; - private final UnaryCallSettings readModifyWriteRowSettings; - private final ServerStreamingCallSettings - generateInitialChangeStreamPartitionsSettings; - private final ServerStreamingCallSettings - readChangeStreamSettings; - private final UnaryCallSettings pingAndWarmSettings; - private final ServerStreamingCallSettings executeQuerySettings; - private final UnaryCallSettings prepareQuerySettings; + + private final ClientOperationSettings perOpSettings; private final FeatureFlags featureFlags; private final MetricsProvider metricsProvider; @Nullable private final String metricsEndpoint; - @Nonnull private final InternalMetricsProvider internalMetricsProvider; + private final boolean areInternalMetricsEnabled; private final String jwtAudience; private EnhancedBigtableStubSettings(Builder builder) { super(builder); - // Since point reads, streaming reads, bulk reads share the same base callable that converts - // grpc errors into ApiExceptions, they must have the same retry codes. 
- Preconditions.checkState( - builder - .readRowSettings - .getRetryableCodes() - .equals(builder.readRowsSettings.getRetryableCodes()), - "Single ReadRow retry codes must match ReadRows retry codes"); - Preconditions.checkState( - builder - .bulkReadRowsSettings - .getRetryableCodes() - .equals(builder.readRowsSettings.getRetryableCodes()), - "Bulk ReadRow retry codes must match ReadRows retry codes"); - projectId = builder.projectId; instanceId = builder.instanceId; appProfileId = builder.appProfileId; isRefreshingChannel = builder.isRefreshingChannel; - primedTableIds = builder.primedTableIds; - enableRoutingCookie = builder.enableRoutingCookie; - enableRetryInfo = builder.enableRetryInfo; metricsProvider = builder.metricsProvider; metricsEndpoint = builder.metricsEndpoint; - internalMetricsProvider = builder.internalMetricsProvider; + areInternalMetricsEnabled = builder.areInternalMetricsEnabled; jwtAudience = builder.jwtAudience; - // Per method settings. - readRowsSettings = builder.readRowsSettings.build(); - readRowSettings = builder.readRowSettings.build(); - sampleRowKeysSettings = builder.sampleRowKeysSettings.build(); - mutateRowSettings = builder.mutateRowSettings.build(); - bulkMutateRowsSettings = builder.bulkMutateRowsSettings.build(); - bulkReadRowsSettings = builder.bulkReadRowsSettings.build(); - checkAndMutateRowSettings = builder.checkAndMutateRowSettings.build(); - readModifyWriteRowSettings = builder.readModifyWriteRowSettings.build(); - generateInitialChangeStreamPartitionsSettings = - builder.generateInitialChangeStreamPartitionsSettings.build(); - readChangeStreamSettings = builder.readChangeStreamSettings.build(); - pingAndWarmSettings = builder.pingAndWarmSettings.build(); - executeQuerySettings = builder.executeQuerySettings.build(); - prepareQuerySettings = builder.prepareQuerySettings.build(); + perOpSettings = new ClientOperationSettings(builder.perOpSettings); featureFlags = builder.featureFlags.build(); } @@ -370,7 +191,7 @@ public 
boolean isRefreshingChannel() { */ @Deprecated public List getPrimedTableIds() { - return primedTableIds; + return ImmutableList.of(); } /** @@ -388,21 +209,19 @@ public MetricsProvider getMetricsProvider() { } /** - * Gets if routing cookie is enabled. If true, client will retry a request with extra metadata - * server sent back. + * @deprecated routing cookies are always on. */ - @BetaApi("Routing cookie is not currently stable and may change in the future") + @Deprecated public boolean getEnableRoutingCookie() { - return enableRoutingCookie; + return true; } /** - * Gets if RetryInfo is enabled. If true, client bases retry decision and back off time on server - * returned RetryInfo value. Otherwise, client uses {@link RetrySettings}. + * @deprecated RetryInfo is now always on. */ - @BetaApi("RetryInfo is not currently stable and may change in the future") + @Deprecated public boolean getEnableRetryInfo() { - return enableRetryInfo; + return true; } /** @@ -415,11 +234,12 @@ public String getMetricsEndpoint() { } public boolean areInternalMetricsEnabled() { - return internalMetricsProvider == DEFAULT_INTERNAL_OTEL_PROVIDER; + return areInternalMetricsEnabled; } - InternalMetricsProvider getInternalMetricsProvider() { - return internalMetricsProvider; + @InternalApi + public ClientOperationSettings getPerOpSettings() { + return perOpSettings; } /** Returns a builder for the default ChannelProvider for this service. */ @@ -500,7 +320,7 @@ public String getServiceName() { * */ public ServerStreamingCallSettings readRowsSettings() { - return readRowsSettings; + return perOpSettings.readRowsSettings; } /** @@ -523,7 +343,7 @@ public ServerStreamingCallSettings readRowsSettings() { * */ public UnaryCallSettings> sampleRowKeysSettings() { - return sampleRowKeysSettings; + return perOpSettings.sampleRowKeysSettings; } /** @@ -548,7 +368,7 @@ public UnaryCallSettings> sampleRowKeysSettings() { * @see RetrySettings for more explanation. 
*/ public UnaryCallSettings readRowSettings() { - return readRowSettings; + return perOpSettings.readRowSettings; } /** @@ -573,7 +393,7 @@ public UnaryCallSettings readRowSettings() { * @see RetrySettings for more explanation. */ public UnaryCallSettings mutateRowSettings() { - return mutateRowSettings; + return perOpSettings.mutateRowSettings; } /** @@ -620,7 +440,7 @@ public UnaryCallSettings mutateRowSettings() { * related configuration explanation. */ public BigtableBatchingCallSettings bulkMutateRowsSettings() { - return bulkMutateRowsSettings; + return perOpSettings.bulkMutateRowsSettings; } /** @@ -661,7 +481,7 @@ public BigtableBatchingCallSettings bulkMutateRowsSettings() { * @see BatchingSettings for batch related configuration explanation. */ public BigtableBulkReadRowsCallSettings bulkReadRowsSettings() { - return bulkReadRowsSettings; + return perOpSettings.bulkReadRowsSettings; } /** @@ -675,7 +495,7 @@ public BigtableBulkReadRowsCallSettings bulkReadRowsSettings() { * @see RetrySettings for more explanation. */ public UnaryCallSettings checkAndMutateRowSettings() { - return checkAndMutateRowSettings; + return perOpSettings.checkAndMutateRowSettings; } /** @@ -689,21 +509,21 @@ public UnaryCallSettings checkAndMutateRowSetti * @see RetrySettings for more explanation. 
*/ public UnaryCallSettings readModifyWriteRowSettings() { - return readModifyWriteRowSettings; + return perOpSettings.readModifyWriteRowSettings; } public ServerStreamingCallSettings generateInitialChangeStreamPartitionsSettings() { - return generateInitialChangeStreamPartitionsSettings; + return perOpSettings.generateInitialChangeStreamPartitionsSettings; } public ServerStreamingCallSettings readChangeStreamSettings() { - return readChangeStreamSettings; + return perOpSettings.readChangeStreamSettings; } public ServerStreamingCallSettings executeQuerySettings() { - return executeQuerySettings; + return perOpSettings.executeQuerySettings; } /** @@ -729,7 +549,7 @@ public ServerStreamingCallSettings executeQuerySettings( * @see RetrySettings for more explanation. */ public UnaryCallSettings prepareQuerySettings() { - return prepareQuerySettings; + return perOpSettings.prepareQuerySettings; } /** @@ -738,7 +558,7 @@ public UnaryCallSettings prepareQuerySetti *

    By default the retries are disabled for PingAndWarm and deadline is set to 30 seconds. */ UnaryCallSettings pingAndWarmSettings() { - return pingAndWarmSettings; + return perOpSettings.pingAndWarmSettings; } /** Returns a builder containing all the values of this settings class. */ @@ -753,34 +573,15 @@ public static class Builder extends StubSettings.Builder primedTableIds; private String jwtAudience; - private boolean enableRoutingCookie; - private boolean enableRetryInfo; - - private final ServerStreamingCallSettings.Builder readRowsSettings; - private final UnaryCallSettings.Builder readRowSettings; - private final UnaryCallSettings.Builder> sampleRowKeysSettings; - private final UnaryCallSettings.Builder mutateRowSettings; - private final BigtableBatchingCallSettings.Builder bulkMutateRowsSettings; - private final BigtableBulkReadRowsCallSettings.Builder bulkReadRowsSettings; - private final UnaryCallSettings.Builder - checkAndMutateRowSettings; - private final UnaryCallSettings.Builder readModifyWriteRowSettings; - private final ServerStreamingCallSettings.Builder - generateInitialChangeStreamPartitionsSettings; - private final ServerStreamingCallSettings.Builder - readChangeStreamSettings; - private final UnaryCallSettings.Builder pingAndWarmSettings; - private final ServerStreamingCallSettings.Builder executeQuerySettings; - private final UnaryCallSettings.Builder - prepareQuerySettings; - - private FeatureFlags.Builder featureFlags; + + private final ClientOperationSettings.Builder perOpSettings; + + private final FeatureFlags.Builder featureFlags; private MetricsProvider metricsProvider; @Nullable private String metricsEndpoint; - private InternalMetricsProvider internalMetricsProvider; + private boolean areInternalMetricsEnabled; /** * Initializes a new Builder with sane defaults for all settings. @@ -793,12 +594,9 @@ public static class Builder extends StubSettings.Builder - - /** - * Copies settings from unary RPC to another. 
This is necessary when modifying request and - * response types while trying to retain retry settings. - */ - private static void copyRetrySettings( - UnaryCallSettings.Builder source, UnaryCallSettings.Builder dest) { - dest.setRetryableCodes(source.getRetryableCodes()); - dest.setRetrySettings(source.getRetrySettings()); + featureFlags = settings.featureFlags.toBuilder(); } - // - // /** * Sets the project id of that target instance. This setting is required. All RPCs will be made @@ -1056,7 +719,6 @@ public Builder setRefreshingChannel(boolean isRefreshingChannel) { */ @Deprecated public Builder setPrimedTableIds(String... tableIds) { - this.primedTableIds = ImmutableList.copyOf(tableIds); return this; } @@ -1076,7 +738,7 @@ public boolean isRefreshingChannel() { */ @Deprecated public List getPrimedTableIds() { - return primedTableIds; + return ImmutableList.of(); } /** @@ -1140,19 +802,13 @@ public String getMetricsEndpoint() { /** Disable collection of internal metrics that help google detect issues accessing Bigtable. */ public Builder disableInternalMetrics() { - return setInternalMetricsProvider(DISABLED_INTERNAL_OTEL_PROVIDER); - } - - // For testing - @InternalApi - public Builder setInternalMetricsProvider(InternalMetricsProvider internalMetricsProvider) { - this.internalMetricsProvider = internalMetricsProvider; + this.areInternalMetricsEnabled = false; return this; } /** Checks if internal metrics are disabled */ public boolean areInternalMetricsEnabled() { - return internalMetricsProvider == DISABLED_INTERNAL_OTEL_PROVIDER; + return areInternalMetricsEnabled; } /** @@ -1172,87 +828,81 @@ String getJwtAudience() { } /** - * Sets if routing cookie is enabled. If true, client will retry a request with extra metadata - * server sent back. + * @deprecated this now a no-op as routing cookies are always on. 
*/ - @BetaApi("Routing cookie is not currently stable and may change in the future") + @Deprecated public Builder setEnableRoutingCookie(boolean enableRoutingCookie) { - this.enableRoutingCookie = enableRoutingCookie; return this; } /** - * Gets if routing cookie is enabled. If true, client will retry a request with extra metadata - * server sent back. + * @deprecated routing cookies are always on. */ - @BetaApi("Routing cookie is not currently stable and may change in the future") + @Deprecated public boolean getEnableRoutingCookie() { - return enableRoutingCookie; + return true; } /** - * Sets if RetryInfo is enabled. If true, client bases retry decision and back off time on - * server returned RetryInfo value. Otherwise, client uses {@link RetrySettings}. + * @deprecated This is a no-op, RetryInfo is always used now. */ - @BetaApi("RetryInfo is not currently stable and may change in the future") + @Deprecated public Builder setEnableRetryInfo(boolean enableRetryInfo) { - this.enableRetryInfo = enableRetryInfo; return this; } /** - * Gets if RetryInfo is enabled. If true, client bases retry decision and back off time on - * server returned RetryInfo value. Otherwise, client uses {@link RetrySettings}. + * @deprecated RetryInfo is always on. */ - @BetaApi("RetryInfo is not currently stable and may change in the future") + @Deprecated public boolean getEnableRetryInfo() { - return enableRetryInfo; + return true; } /** Returns the builder for the settings used for calls to readRows. */ public ServerStreamingCallSettings.Builder readRowsSettings() { - return readRowsSettings; + return perOpSettings.readRowsSettings; } /** Returns the builder for the settings used for point reads using readRow. */ public UnaryCallSettings.Builder readRowSettings() { - return readRowSettings; + return perOpSettings.readRowSettings; } /** Returns the builder for the settings used for calls to SampleRowKeysSettings. 
*/ public UnaryCallSettings.Builder> sampleRowKeysSettings() { - return sampleRowKeysSettings; + return perOpSettings.sampleRowKeysSettings; } /** Returns the builder for the settings used for calls to MutateRow. */ public UnaryCallSettings.Builder mutateRowSettings() { - return mutateRowSettings; + return perOpSettings.mutateRowSettings; } /** Returns the builder for the settings used for calls to MutateRows. */ public BigtableBatchingCallSettings.Builder bulkMutateRowsSettings() { - return bulkMutateRowsSettings; + return perOpSettings.bulkMutateRowsSettings; } /** Returns the builder for the settings used for calls to MutateRows. */ public BigtableBulkReadRowsCallSettings.Builder bulkReadRowsSettings() { - return bulkReadRowsSettings; + return perOpSettings.bulkReadRowsSettings; } /** Returns the builder for the settings used for calls to CheckAndMutateRow. */ public UnaryCallSettings.Builder checkAndMutateRowSettings() { - return checkAndMutateRowSettings; + return perOpSettings.checkAndMutateRowSettings; } /** Returns the builder with the settings used for calls to ReadModifyWriteRow. */ public UnaryCallSettings.Builder readModifyWriteRowSettings() { - return readModifyWriteRowSettings; + return perOpSettings.readModifyWriteRowSettings; } /** Returns the builder for the settings used for calls to ReadChangeStream. */ public ServerStreamingCallSettings.Builder readChangeStreamSettings() { - return readChangeStreamSettings; + return perOpSettings.readChangeStreamSettings; } /** @@ -1260,12 +910,12 @@ public UnaryCallSettings.Builder readModifyWriteRowSett */ public ServerStreamingCallSettings.Builder generateInitialChangeStreamPartitionsSettings() { - return generateInitialChangeStreamPartitionsSettings; + return perOpSettings.generateInitialChangeStreamPartitionsSettings; } /** Returns the builder with the settings used for calls to PingAndWarm. 
*/ public UnaryCallSettings.Builder pingAndWarmSettings() { - return pingAndWarmSettings; + return perOpSettings.pingAndWarmSettings; } /** @@ -1276,13 +926,13 @@ public UnaryCallSettings.Builder pingAndWarmSettings() */ @BetaApi public ServerStreamingCallSettings.Builder executeQuerySettings() { - return executeQuerySettings; + return perOpSettings.executeQuerySettings; } /** Returns the builder with the settings used for calls to PrepareQuery */ @BetaApi public UnaryCallSettings.Builder prepareQuerySettings() { - return prepareQuerySettings; + return perOpSettings.prepareQuerySettings; } @SuppressWarnings("unchecked") @@ -1296,8 +946,8 @@ public EnhancedBigtableStubSettings build() { featureFlags.setMutateRowsRateLimit2(true); } - featureFlags.setRoutingCookie(this.getEnableRoutingCookie()); - featureFlags.setRetryInfo(this.getEnableRetryInfo()); + featureFlags.setRoutingCookie(true); + featureFlags.setRetryInfo(true); // client_Side_metrics_enabled feature flag is only set when a user is running with a // DefaultMetricsProvider. This may cause false negatives when a user registered the // metrics on their CustomOpenTelemetryMetricsProvider. 
@@ -1338,45 +988,12 @@ public String toString() { .add("instanceId", instanceId) .add("appProfileId", appProfileId) .add("isRefreshingChannel", isRefreshingChannel) - .add("primedTableIds", primedTableIds) - .add("enableRoutingCookie", enableRoutingCookie) - .add("enableRetryInfo", enableRetryInfo) - .add("readRowsSettings", readRowsSettings) - .add("readRowSettings", readRowSettings) - .add("sampleRowKeysSettings", sampleRowKeysSettings) - .add("mutateRowSettings", mutateRowSettings) - .add("bulkMutateRowsSettings", bulkMutateRowsSettings) - .add("bulkReadRowsSettings", bulkReadRowsSettings) - .add("checkAndMutateRowSettings", checkAndMutateRowSettings) - .add("readModifyWriteRowSettings", readModifyWriteRowSettings) - .add( - "generateInitialChangeStreamPartitionsSettings", - generateInitialChangeStreamPartitionsSettings) - .add("readChangeStreamSettings", readChangeStreamSettings) - .add("pingAndWarmSettings", pingAndWarmSettings) - .add("executeQuerySettings", executeQuerySettings) - .add("prepareQuerySettings", prepareQuerySettings) + .add("perOpSettings", perOpSettings) .add("metricsProvider", metricsProvider) .add("metricsEndpoint", metricsEndpoint) - .add("areInternalMetricsEnabled", internalMetricsProvider == DEFAULT_INTERNAL_OTEL_PROVIDER) + .add("areInternalMetricsEnabled", areInternalMetricsEnabled) .add("jwtAudience", jwtAudience) .add("parent", super.toString()) .toString(); } - - @InternalApi - @FunctionalInterface - public interface InternalMetricsProvider { - @Nullable - OpenTelemetrySdk createOtelProvider( - EnhancedBigtableStubSettings userSettings, - Credentials creds, - ScheduledExecutorService executor) - throws IOException; - } - - private static final InternalMetricsProvider DEFAULT_INTERNAL_OTEL_PROVIDER = - Util::newInternalOpentelemetry; - private static final InternalMetricsProvider DISABLED_INTERNAL_OTEL_PROVIDER = - (ignored1, ignored2, ignored3) -> null; } diff --git 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/MetadataExtractorInterceptor.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/MetadataExtractorInterceptor.java new file mode 100644 index 0000000000..14ad73131f --- /dev/null +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/MetadataExtractorInterceptor.java @@ -0,0 +1,199 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package com.google.cloud.bigtable.data.v2.stub; + +import com.google.api.core.InternalApi; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.bigtable.v2.PeerInfo; +import com.google.bigtable.v2.ResponseParams; +import com.google.common.base.Strings; +import com.google.protobuf.InvalidProtocolBufferException; +import io.grpc.Attributes; +import io.grpc.CallOptions; +import io.grpc.Channel; +import io.grpc.ClientCall; +import io.grpc.ClientInterceptor; +import io.grpc.ClientInterceptors; +import io.grpc.ForwardingClientCall; +import io.grpc.ForwardingClientCallListener; +import io.grpc.Metadata; +import io.grpc.MethodDescriptor; +import io.grpc.Status; +import io.grpc.alts.AltsContextUtil; +import java.time.Duration; +import java.util.Base64; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import javax.annotation.Nullable; + +@InternalApi +public class MetadataExtractorInterceptor implements ClientInterceptor { + private final SidebandData sidebandData = new SidebandData(); + + public GrpcCallContext injectInto(GrpcCallContext ctx) { + // TODO: migrate to using .withTransportChannel + // This will require a change on gax's side to expose the underlying ManagedChannel in + // GrpcTransportChannel (its currently package private). 
+ return ctx.withChannel(ClientInterceptors.intercept(ctx.getChannel(), this)) + .withCallOptions(ctx.getCallOptions().withOption(SidebandData.KEY, sidebandData)); + } + + @Override + public ClientCall interceptCall( + MethodDescriptor methodDescriptor, CallOptions callOptions, Channel channel) { + return new ForwardingClientCall.SimpleForwardingClientCall( + channel.newCall(methodDescriptor, callOptions)) { + @Override + public void start(Listener responseListener, Metadata headers) { + sidebandData.reset(); + + super.start( + new ForwardingClientCallListener.SimpleForwardingClientCallListener( + responseListener) { + @Override + public void onHeaders(Metadata headers) { + sidebandData.onResponseHeaders(headers, getAttributes()); + super.onHeaders(headers); + } + + @Override + public void onClose(Status status, Metadata trailers) { + sidebandData.onClose(status, trailers); + super.onClose(status, trailers); + } + }, + headers); + } + }; + } + + public SidebandData getSidebandData() { + return sidebandData; + } + + public static class SidebandData { + private static final CallOptions.Key KEY = + CallOptions.Key.create("bigtable-sideband"); + + private static final Metadata.Key SERVER_TIMING_HEADER_KEY = + Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER); + private static final Pattern SERVER_TIMING_HEADER_PATTERN = + Pattern.compile(".*dur=(?\\d+)"); + private static final Metadata.Key LOCATION_METADATA_KEY = + Metadata.Key.of("x-goog-ext-425905942-bin", Metadata.BINARY_BYTE_MARSHALLER); + private static final Metadata.Key PEER_INFO_KEY = + Metadata.Key.of("bigtable-peer-info", Metadata.ASCII_STRING_MARSHALLER); + + @Nullable private volatile ResponseParams responseParams; + @Nullable private volatile PeerInfo peerInfo; + @Nullable private volatile Duration gfeTiming; + + @Nullable + public ResponseParams getResponseParams() { + return responseParams; + } + + @Nullable + public PeerInfo getPeerInfo() { + return peerInfo; + } + + @Nullable + public 
Duration getGfeTiming() { + return gfeTiming; + } + + private void reset() { + responseParams = null; + peerInfo = null; + gfeTiming = null; + } + + void onResponseHeaders(Metadata md, Attributes attributes) { + responseParams = extractResponseParams(md); + gfeTiming = extractGfeLatency(md); + peerInfo = extractPeerInfo(md, gfeTiming, attributes); + } + + void onClose(Status status, Metadata trailers) { + if (responseParams == null) { + responseParams = extractResponseParams(trailers); + } + } + + @Nullable + private static Duration extractGfeLatency(Metadata metadata) { + String serverTiming = metadata.get(SERVER_TIMING_HEADER_KEY); + if (serverTiming == null) { + return null; + } + Matcher matcher = SERVER_TIMING_HEADER_PATTERN.matcher(serverTiming); + // this should always be true + if (matcher.find()) { + return Duration.ofMillis(Long.parseLong(matcher.group("dur"))); + } + return null; + } + + @Nullable + private static PeerInfo extractPeerInfo( + Metadata metadata, Duration gfeTiming, Attributes attributes) { + String encodedStr = metadata.get(PEER_INFO_KEY); + if (Strings.isNullOrEmpty(encodedStr)) { + return null; + } + + try { + byte[] decoded = Base64.getUrlDecoder().decode(encodedStr); + PeerInfo peerInfo = PeerInfo.parseFrom(decoded); + PeerInfo.TransportType effectiveTransport = peerInfo.getTransportType(); + + // TODO: remove this once transport_type is being sent by the server + // This is a temporary workaround to detect directpath until its available from the server + if (effectiveTransport == PeerInfo.TransportType.TRANSPORT_TYPE_UNKNOWN) { + boolean isAlts = AltsContextUtil.check(attributes); + if (isAlts) { + effectiveTransport = PeerInfo.TransportType.TRANSPORT_TYPE_DIRECT_ACCESS; + } else if (gfeTiming != null) { + effectiveTransport = PeerInfo.TransportType.TRANSPORT_TYPE_CLOUD_PATH; + } + } + if (effectiveTransport != PeerInfo.TransportType.TRANSPORT_TYPE_UNKNOWN) { + peerInfo = 
peerInfo.toBuilder().setTransportType(effectiveTransport).build(); + } + return peerInfo; + } catch (Exception e) { + throw new IllegalArgumentException( + "Failed to parse " + + PEER_INFO_KEY.name() + + " from the response header value: " + + encodedStr); + } + } + + @Nullable + private static ResponseParams extractResponseParams(Metadata metadata) { + byte[] responseParams = metadata.get(LOCATION_METADATA_KEY); + if (responseParams != null) { + try { + return ResponseParams.parseFrom(responseParams); + } catch (InvalidProtocolBufferException e) { + } + } + return null; + } + } +} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java index c9f9ba06c1..4f4f788aac 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/RateLimitingServerStreamingCallable.java @@ -15,7 +15,7 @@ */ package com.google.cloud.bigtable.data.v2.stub; -import static com.google.cloud.bigtable.data.v2.stub.metrics.Util.extractStatus; +import static com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util.extractStatus; import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.DeadlineExceededException; diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java deleted file mode 100644 index 375ab17142..0000000000 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporter.java +++ /dev/null @@ -1,370 +0,0 @@ -/* - * Copyright 2023 Google LLC - * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES2_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.REMAINING_DEADLINE_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; - -import com.google.api.MonitoredResource; -import 
com.google.api.core.ApiFuture; -import com.google.api.core.ApiFutureCallback; -import com.google.api.core.ApiFutures; -import com.google.api.core.InternalApi; -import com.google.api.gax.core.CredentialsProvider; -import com.google.api.gax.core.FixedCredentialsProvider; -import com.google.api.gax.core.FixedExecutorProvider; -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.rpc.PermissionDeniedException; -import com.google.auth.Credentials; -import com.google.cloud.monitoring.v3.MetricServiceClient; -import com.google.cloud.monitoring.v3.MetricServiceSettings; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Iterables; -import com.google.common.util.concurrent.MoreExecutors; -import com.google.monitoring.v3.CreateTimeSeriesRequest; -import com.google.monitoring.v3.ProjectName; -import com.google.monitoring.v3.TimeSeries; -import com.google.protobuf.Empty; -import io.opentelemetry.sdk.common.CompletableResultCode; -import io.opentelemetry.sdk.metrics.InstrumentType; -import io.opentelemetry.sdk.metrics.data.AggregationTemporality; -import io.opentelemetry.sdk.metrics.data.MetricData; -import io.opentelemetry.sdk.metrics.export.MetricExporter; -import java.io.IOException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; -import javax.annotation.Nullable; - -/** - * Bigtable Cloud 
Monitoring OpenTelemetry Exporter. - * - *

    The exporter will look for all bigtable owned metrics under bigtable.googleapis.com - * instrumentation scope and upload it via the Google Cloud Monitoring API. - */ -@InternalApi -public final class BigtableCloudMonitoringExporter implements MetricExporter { - - private static final Logger logger = - Logger.getLogger(BigtableCloudMonitoringExporter.class.getName()); - - // This system property can be used to override the monitoring endpoint - // to a different environment. It's meant for internal testing only and - // will be removed in future versions. Use settings in EnhancedBigtableStubSettings - // to override the endpoint. - @Deprecated @Nullable - private static final String MONITORING_ENDPOINT_OVERRIDE_SYS_PROP = - System.getProperty("bigtable.test-monitoring-endpoint"); - - private static final String APPLICATION_RESOURCE_PROJECT_ID = "project_id"; - - // This the quota limit from Cloud Monitoring. More details in - // https://cloud.google.com/monitoring/quotas#custom_metrics_quotas. 
- private static final int EXPORT_BATCH_SIZE_LIMIT = 200; - - private final String exporterName; - - private final MetricServiceClient client; - - private final TimeSeriesConverter timeSeriesConverter; - - private final AtomicBoolean isShutdown = new AtomicBoolean(false); - - private CompletableResultCode lastExportCode; - - private final AtomicBoolean exportFailureLogged = new AtomicBoolean(false); - - static BigtableCloudMonitoringExporter create( - String exporterName, - @Nullable Credentials credentials, - @Nullable String endpoint, - String universeDomain, - TimeSeriesConverter converter, - @Nullable ScheduledExecutorService executorService) - throws IOException { - Preconditions.checkNotNull(universeDomain); - MetricServiceSettings.Builder settingsBuilder = MetricServiceSettings.newBuilder(); - CredentialsProvider credentialsProvider = - Optional.ofNullable(credentials) - .map(FixedCredentialsProvider::create) - .orElse(NoCredentialsProvider.create()); - settingsBuilder.setCredentialsProvider(credentialsProvider); - - settingsBuilder.setUniverseDomain(universeDomain); - - // If background executor is not null, use it for the monitoring client. This allows us to - // share the same background executor with the data client. When it's null, the monitoring - // client will create a new executor service from InstantiatingExecutorProvider. It could be - // null if someone uses a CustomOpenTelemetryMetricsProvider#setupSdkMeterProvider without - // the executor. 
- if (executorService != null) { - settingsBuilder.setBackgroundExecutorProvider(FixedExecutorProvider.create(executorService)); - } - - if (MONITORING_ENDPOINT_OVERRIDE_SYS_PROP != null) { - logger.warning( - "Setting the monitoring endpoint through system variable will be removed in future" - + " versions"); - settingsBuilder.setEndpoint(MONITORING_ENDPOINT_OVERRIDE_SYS_PROP); - } - if (endpoint != null) { - settingsBuilder.setEndpoint(endpoint); - } - - Duration timeout = Duration.ofMinutes(1); - // TODO: createServiceTimeSeries needs special handling if the request failed. Leaving - // it as not retried for now. - settingsBuilder.createServiceTimeSeriesSettings().setSimpleTimeoutNoRetriesDuration(timeout); - - return new BigtableCloudMonitoringExporter( - exporterName, MetricServiceClient.create(settingsBuilder.build()), converter); - } - - @VisibleForTesting - BigtableCloudMonitoringExporter( - String exporterName, MetricServiceClient client, TimeSeriesConverter converter) { - this.exporterName = exporterName; - this.client = client; - this.timeSeriesConverter = converter; - } - - @Override - public CompletableResultCode export(Collection metricData) { - Preconditions.checkState(!isShutdown.get(), "Exporter is shutting down"); - - lastExportCode = doExport(metricData); - return lastExportCode; - } - - /** Export metrics associated with a BigtableTable resource. 
*/ - private CompletableResultCode doExport(Collection metricData) { - Map> bigtableTimeSeries; - - try { - bigtableTimeSeries = timeSeriesConverter.convert(metricData); - } catch (Throwable t) { - logger.log( - Level.WARNING, - String.format( - "Failed to convert %s metric data to cloud monitoring timeseries.", exporterName), - t); - return CompletableResultCode.ofFailure(); - } - - // Skips exporting if there's none - if (bigtableTimeSeries.isEmpty()) { - return CompletableResultCode.ofSuccess(); - } - - CompletableResultCode exportCode = new CompletableResultCode(); - bigtableTimeSeries.forEach( - (projectName, ts) -> { - ApiFuture> future = exportTimeSeries(projectName, ts); - ApiFutures.addCallback( - future, - new ApiFutureCallback>() { - @Override - public void onFailure(Throwable throwable) { - if (exportFailureLogged.compareAndSet(false, true)) { - String msg = - String.format( - "createServiceTimeSeries request failed for %s.", exporterName); - if (throwable instanceof PermissionDeniedException) { - msg += - String.format( - " Need monitoring metric writer permission on project=%s. Follow" - + " https://cloud.google.com/bigtable/docs/client-side-metrics-setup" - + " to set up permissions.", - projectName.getProject()); - } - logger.log(Level.WARNING, msg, throwable); - } - exportCode.fail(); - } - - @Override - public void onSuccess(List emptyList) { - // When an export succeeded reset the export failure flag to false so if there's a - // transient failure it'll be logged. 
- exportFailureLogged.set(false); - exportCode.succeed(); - } - }, - MoreExecutors.directExecutor()); - }); - - return exportCode; - } - - private ApiFuture> exportTimeSeries( - ProjectName projectName, List timeSeries) { - List> batchResults = new ArrayList<>(); - - for (List batch : Iterables.partition(timeSeries, EXPORT_BATCH_SIZE_LIMIT)) { - CreateTimeSeriesRequest req = - CreateTimeSeriesRequest.newBuilder() - .setName(projectName.toString()) - .addAllTimeSeries(batch) - .build(); - ApiFuture f = this.client.createServiceTimeSeriesCallable().futureCall(req); - batchResults.add(f); - } - - return ApiFutures.allAsList(batchResults); - } - - @Override - public CompletableResultCode flush() { - if (lastExportCode != null) { - return lastExportCode; - } - return CompletableResultCode.ofSuccess(); - } - - @Override - public CompletableResultCode shutdown() { - if (!isShutdown.compareAndSet(false, true)) { - logger.log(Level.WARNING, "shutdown is called multiple times"); - return CompletableResultCode.ofSuccess(); - } - CompletableResultCode flushResult = flush(); - CompletableResultCode shutdownResult = new CompletableResultCode(); - flushResult.whenComplete( - () -> { - Throwable throwable = null; - try { - client.shutdown(); - } catch (Throwable e) { - logger.log(Level.WARNING, "failed to shutdown the monitoring client", e); - throwable = e; - } - if (throwable != null) { - shutdownResult.fail(); - } else { - shutdownResult.succeed(); - } - }); - return CompletableResultCode.ofAll(Arrays.asList(flushResult, shutdownResult)); - } - - /** - * For Google Cloud Monitoring always return CUMULATIVE to keep track of the cumulative value of a - * metric over time. 
- */ - @Override - public AggregationTemporality getAggregationTemporality(InstrumentType instrumentType) { - return AggregationTemporality.CUMULATIVE; - } - - interface TimeSeriesConverter { - Map> convert(Collection metricData); - } - - static class PublicTimeSeriesConverter implements TimeSeriesConverter { - private static final ImmutableList BIGTABLE_TABLE_METRICS = - ImmutableSet.of( - OPERATION_LATENCIES_NAME, - ATTEMPT_LATENCIES_NAME, - ATTEMPT_LATENCIES2_NAME, - SERVER_LATENCIES_NAME, - FIRST_RESPONSE_LATENCIES_NAME, - CLIENT_BLOCKING_LATENCIES_NAME, - APPLICATION_BLOCKING_LATENCIES_NAME, - RETRY_COUNT_NAME, - CONNECTIVITY_ERROR_COUNT_NAME, - REMAINING_DEADLINE_NAME) - .stream() - .map(m -> METER_NAME + m) - .collect(ImmutableList.toImmutableList()); - - private static final AtomicLong nextTaskIdSuffix = new AtomicLong(); - private final String taskId; - - PublicTimeSeriesConverter() { - this( - BigtableExporterUtils.DEFAULT_TASK_VALUE.get() - + "-" - + nextTaskIdSuffix.getAndIncrement()); - } - - PublicTimeSeriesConverter(String taskId) { - this.taskId = taskId; - } - - @Override - public Map> convert(Collection metricData) { - List relevantData = - metricData.stream() - .filter(md -> BIGTABLE_TABLE_METRICS.contains(md.getName())) - .collect(Collectors.toList()); - if (relevantData.isEmpty()) { - return ImmutableMap.of(); - } - return BigtableExporterUtils.convertToBigtableTimeSeries(relevantData, taskId); - } - } - - static class InternalTimeSeriesConverter implements TimeSeriesConverter { - private static final ImmutableList APPLICATION_METRICS = - ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME).stream() - .map(m -> METER_NAME + m) - .collect(ImmutableList.toImmutableList()); - - private final Supplier monitoredResource; - - InternalTimeSeriesConverter(Supplier monitoredResource) { - this.monitoredResource = monitoredResource; - } - - @Override - public Map> convert(Collection metricData) { - MonitoredResource monitoredResource = 
this.monitoredResource.get(); - if (monitoredResource == null) { - return ImmutableMap.of(); - } - - return ImmutableMap.of( - ProjectName.of(monitoredResource.getLabelsOrThrow(APPLICATION_RESOURCE_PROJECT_ID)), - BigtableExporterUtils.convertToApplicationResourceTimeSeries( - metricData, monitoredResource)); - } - } -} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java deleted file mode 100644 index 882365c6b4..0000000000 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableExporterUtils.java +++ /dev/null @@ -1,471 +0,0 @@ -/* - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import static com.google.api.Distribution.BucketOptions; -import static com.google.api.Distribution.BucketOptions.Explicit; -import static com.google.api.MetricDescriptor.MetricKind; -import static com.google.api.MetricDescriptor.MetricKind.CUMULATIVE; -import static com.google.api.MetricDescriptor.MetricKind.GAUGE; -import static com.google.api.MetricDescriptor.MetricKind.UNRECOGNIZED; -import static com.google.api.MetricDescriptor.ValueType; -import static com.google.api.MetricDescriptor.ValueType.DISTRIBUTION; -import static com.google.api.MetricDescriptor.ValueType.DOUBLE; -import static com.google.api.MetricDescriptor.ValueType.INT64; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.GRPC_METRICS; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INTERNAL_METRICS; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; - -import com.google.api.Distribution; -import com.google.api.Metric; -import com.google.api.MonitoredResource; -import com.google.cloud.bigtable.Version; -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings; -import com.google.cloud.opentelemetry.detection.AttributeKeys; -import com.google.cloud.opentelemetry.detection.DetectedPlatform; 
-import com.google.cloud.opentelemetry.detection.GCPPlatformDetector; -import com.google.common.base.Preconditions; -import com.google.common.base.Supplier; -import com.google.common.base.Suppliers; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.monitoring.v3.Point; -import com.google.monitoring.v3.ProjectName; -import com.google.monitoring.v3.TimeInterval; -import com.google.monitoring.v3.TimeSeries; -import com.google.monitoring.v3.TypedValue; -import com.google.protobuf.Timestamp; -import com.google.protobuf.util.Timestamps; -import io.opentelemetry.api.common.AttributeKey; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.sdk.metrics.data.AggregationTemporality; -import io.opentelemetry.sdk.metrics.data.DoublePointData; -import io.opentelemetry.sdk.metrics.data.HistogramData; -import io.opentelemetry.sdk.metrics.data.HistogramPointData; -import io.opentelemetry.sdk.metrics.data.LongPointData; -import io.opentelemetry.sdk.metrics.data.MetricData; -import io.opentelemetry.sdk.metrics.data.MetricDataType; -import io.opentelemetry.sdk.metrics.data.PointData; -import io.opentelemetry.sdk.metrics.data.SumData; -import java.lang.management.ManagementFactory; -import java.net.InetAddress; -import java.net.UnknownHostException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.stream.Collectors; -import javax.annotation.Nullable; - -/** Utils to convert OpenTelemetry types to Google Cloud Monitoring types. 
*/ -class BigtableExporterUtils { - private static final String CLIENT_NAME = "java-bigtable/" + Version.VERSION; - - private static final Logger logger = Logger.getLogger(BigtableExporterUtils.class.getName()); - - private static final String BIGTABLE_RESOURCE_TYPE = "bigtable_client_raw"; - - // These metric labels will be promoted to the bigtable_table monitored resource fields - private static final Set> BIGTABLE_PROMOTED_RESOURCE_LABELS = - ImmutableSet.of( - BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, TABLE_ID_KEY, CLUSTER_ID_KEY, ZONE_ID_KEY); - - private static final Map SUPPORTED_PLATFORM_MAP = - ImmutableMap.of( - GCPPlatformDetector.SupportedPlatform.GOOGLE_COMPUTE_ENGINE, "gcp_compute_engine", - GCPPlatformDetector.SupportedPlatform.GOOGLE_KUBERNETES_ENGINE, "gcp_kubernetes_engine"); - - private static final AtomicLong nextUuidSuffix = new AtomicLong(); - - private BigtableExporterUtils() {} - - /** - * In most cases this should look like java-${UUID}@${hostname}. The hostname will be retrieved - * from the jvm name and fallback to the local hostname. - */ - private static String defaultTaskValue = null; - - static final Supplier DEFAULT_TASK_VALUE = - Suppliers.memoize(BigtableExporterUtils::computeDefaultTaskValue); - - private static String computeDefaultTaskValue() { - if (defaultTaskValue != null) { - return defaultTaskValue; - } - // Something like '@' - final String jvmName = ManagementFactory.getRuntimeMXBean().getName(); - // If jvm doesn't have the expected format, fallback to the local hostname - if (jvmName.indexOf('@') < 1) { - String hostname = "localhost"; - try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException e) { - logger.log(Level.INFO, "Unable to get the hostname.", e); - } - // Generate a random number and use the same format "random_number@hostname". 
- return "java-" + UUID.randomUUID() + "@" + hostname; - } - return "java-" + UUID.randomUUID() + jvmName; - } - - static ProjectName getProjectName(PointData pointData) { - return ProjectName.of(pointData.getAttributes().get(BIGTABLE_PROJECT_ID_KEY)); - } - - // Returns a list of timeseries by project name - static Map> convertToBigtableTimeSeries( - Collection collection, String taskId) { - Map> allTimeSeries = new HashMap<>(); - - for (MetricData metricData : collection) { - if (!metricData.getInstrumentationScopeInfo().getName().equals(METER_NAME)) { - // Filter out metric data for instruments that are not part of the bigtable builtin metrics - continue; - } - - for (PointData pd : metricData.getData().getPoints()) { - ProjectName projectName = getProjectName(pd); - List current = - allTimeSeries.computeIfAbsent(projectName, ignored -> new ArrayList<>()); - current.add(convertPointToBigtableTimeSeries(metricData, pd, taskId)); - allTimeSeries.put(projectName, current); - } - } - - return allTimeSeries; - } - - static List convertToApplicationResourceTimeSeries( - Collection collection, MonitoredResource applicationResource) { - Preconditions.checkNotNull( - applicationResource, - "convert application metrics is called when the supported resource is not detected"); - List allTimeSeries = new ArrayList<>(); - for (MetricData metricData : collection) { - metricData.getData().getPoints().stream() - .map( - pointData -> - createInternalMetricsTimeSeries(metricData, pointData, applicationResource)) - .filter(Optional::isPresent) - .forEach(ts -> ts.ifPresent(allTimeSeries::add)); - } - return allTimeSeries; - } - - @Nullable - static MonitoredResource createInternalMonitoredResource(EnhancedBigtableStubSettings settings) { - try { - MonitoredResource monitoredResource = detectResource(settings); - logger.log(Level.FINE, "Internal metrics monitored resource: %s", monitoredResource); - return monitoredResource; - } catch (Exception e) { - logger.log( - Level.WARNING, - 
"Failed to detect resource, will skip exporting application level metrics ", - e); - return null; - } - } - - @Nullable - private static MonitoredResource detectResource(EnhancedBigtableStubSettings settings) { - GCPPlatformDetector detector = GCPPlatformDetector.DEFAULT_INSTANCE; - DetectedPlatform detectedPlatform = detector.detectPlatform(); - - @Nullable - String cloud_platform = SUPPORTED_PLATFORM_MAP.get(detectedPlatform.getSupportedPlatform()); - if (cloud_platform == null) { - return null; - } - - Map attrs = detectedPlatform.getAttributes(); - ImmutableList locationKeys = - ImmutableList.of( - AttributeKeys.GCE_CLOUD_REGION, - AttributeKeys.GCE_AVAILABILITY_ZONE, - AttributeKeys.GKE_LOCATION_TYPE_REGION, - AttributeKeys.GKE_CLUSTER_LOCATION); - - String region = - locationKeys.stream().map(attrs::get).filter(Objects::nonNull).findFirst().orElse("global"); - - // Deal with possibility of a zone. Zones are of the form us-east1-c, but we want a region - // which, which is us-east1. 
- region = Arrays.stream(region.split("-")).limit(2).collect(Collectors.joining("-")); - - String hostname = attrs.get(AttributeKeys.GCE_INSTANCE_HOSTNAME); - // if (hostname == null) { - // hostname = attrs.get(AttributeKeys.SERVERLESS_COMPUTE_NAME); - // } - // if (hostname == null) { - // hostname = attrs.get(AttributeKeys.GAE_MODULE_NAME); - // } - if (hostname == null) { - hostname = System.getenv("HOSTNAME"); - } - if (hostname == null) { - try { - hostname = InetAddress.getLocalHost().getHostName(); - } catch (UnknownHostException ignored) { - } - } - if (hostname == null) { - hostname = ""; - } - - return MonitoredResource.newBuilder() - .setType("bigtable_client") - .putLabels("project_id", settings.getProjectId()) - .putLabels("instance", settings.getInstanceId()) - .putLabels("app_profile", settings.getAppProfileId()) - .putLabels("client_project", detectedPlatform.getProjectId()) - .putLabels("region", region) - .putLabels("cloud_platform", cloud_platform) - .putLabels("host_id", attrs.get(AttributeKeys.GKE_HOST_ID)) - .putLabels("host_name", hostname) - .putLabels("client_name", CLIENT_NAME) - .putLabels("uuid", DEFAULT_TASK_VALUE.get() + "-" + nextUuidSuffix.getAndIncrement()) - .build(); - } - - private static TimeSeries convertPointToBigtableTimeSeries( - MetricData metricData, PointData pointData, String taskId) { - TimeSeries.Builder builder = - TimeSeries.newBuilder() - .setMetricKind(convertMetricKind(metricData)) - .setValueType(convertValueType(metricData.getType())); - Metric.Builder metricBuilder = Metric.newBuilder().setType(metricData.getName()); - - Attributes attributes = pointData.getAttributes(); - MonitoredResource.Builder monitoredResourceBuilder = - MonitoredResource.newBuilder().setType(BIGTABLE_RESOURCE_TYPE); - - for (AttributeKey key : attributes.asMap().keySet()) { - if (BIGTABLE_PROMOTED_RESOURCE_LABELS.contains(key)) { - monitoredResourceBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); - } else { - 
metricBuilder.putLabels(key.getKey(), String.valueOf(attributes.get(key))); - } - } - - builder.setResource(monitoredResourceBuilder.build()); - - metricBuilder.putLabels(CLIENT_UID_KEY.getKey(), taskId); - builder.setMetric(metricBuilder.build()); - - MetricKind kind = convertMetricKind(metricData); - - Timestamp endTimestamp = Timestamps.fromNanos(pointData.getEpochNanos()); - Timestamp startTimestamp; - - if (kind == GAUGE) { - // GAUGE metrics must have start_time equal to end_time. - startTimestamp = endTimestamp; - } else { - startTimestamp = Timestamps.fromNanos(pointData.getStartEpochNanos()); - } - TimeInterval timeInterval = - TimeInterval.newBuilder().setStartTime(startTimestamp).setEndTime(endTimestamp).build(); - - builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); - - return builder.build(); - } - - private static Optional createInternalMetricsTimeSeries( - MetricData metricData, PointData pointData, MonitoredResource applicationResource) { - MetricKind kind = convertMetricKind(metricData); - TimeSeries.Builder builder = - TimeSeries.newBuilder() - .setMetricKind(kind) - .setValueType(convertValueType(metricData.getType())) - .setResource(applicationResource); - - final Metric.Builder metricBuilder; - // TODO: clean this up - // Internal metrics are based on views that include the metric prefix - // gRPC metrics dont have views and are dot encoded - // To unify these: - // - the useless views should be removed - // - internal metrics should use relative metric names w/o the prefix - if (INTERNAL_METRICS.contains(metricData.getName())) { - metricBuilder = newApplicationMetricBuilder(metricData.getName(), pointData.getAttributes()); - } else if (GRPC_METRICS.containsKey(metricData.getName())) { - metricBuilder = newGrpcMetricBuilder(metricData.getName(), pointData.getAttributes()); - } else { - logger.fine("Skipping unexpected internal metric: " + metricData.getName()); - return Optional.empty(); - } - - 
builder.setMetric(metricBuilder.build()); - - Timestamp endTimestamp = Timestamps.fromNanos(pointData.getEpochNanos()); - Timestamp startTimestamp; - if (kind == GAUGE) { - startTimestamp = endTimestamp; - } else { - startTimestamp = Timestamps.fromNanos(pointData.getStartEpochNanos()); - } - TimeInterval timeInterval = - TimeInterval.newBuilder().setStartTime(startTimestamp).setEndTime(endTimestamp).build(); - - builder.addPoints(createPoint(metricData.getType(), pointData, timeInterval)); - return Optional.of(builder.build()); - } - - private static Metric.Builder newApplicationMetricBuilder( - String metricName, Attributes attributes) { - // TODO: unify handling of metric prefixes - Metric.Builder metricBuilder = Metric.newBuilder().setType(metricName); - for (Map.Entry, Object> e : attributes.asMap().entrySet()) { - metricBuilder.putLabels(e.getKey().getKey(), String.valueOf(e.getValue())); - } - return metricBuilder; - } - - private static Metric.Builder newGrpcMetricBuilder(String grpcMetricName, Attributes attributes) { - Set allowedAttrs = GRPC_METRICS.get(grpcMetricName); - - Metric.Builder metricBuilder = - Metric.newBuilder() - .setType("bigtable.googleapis.com/internal/client/" + grpcMetricName.replace('.', '/')); - for (Map.Entry, Object> e : attributes.asMap().entrySet()) { - String attrKey = e.getKey().getKey(); - Object attrValue = e.getValue(); - - // gRPC metrics are experimental and can change attribute names, to avoid incompatibility with - // the predefined - // metric schemas in stackdriver, filter out unknown keys - if (!allowedAttrs.contains(attrKey)) { - continue; - } - // translate grpc key format to be compatible with cloud monitoring: - // grpc.xds_client.server_failure -> grpc_xds_client_server_failure - String normalizedKey = attrKey.replace('.', '_'); - metricBuilder.putLabels(normalizedKey, String.valueOf(attrValue)); - } - - return metricBuilder; - } - - private static MetricKind convertMetricKind(MetricData metricData) { - switch 
(metricData.getType()) { - case HISTOGRAM: - case EXPONENTIAL_HISTOGRAM: - return convertHistogramType(metricData.getHistogramData()); - case LONG_GAUGE: - case DOUBLE_GAUGE: - return GAUGE; - case LONG_SUM: - return convertSumDataType(metricData.getLongSumData()); - case DOUBLE_SUM: - return convertSumDataType(metricData.getDoubleSumData()); - default: - return UNRECOGNIZED; - } - } - - private static MetricKind convertHistogramType(HistogramData histogramData) { - if (histogramData.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { - return CUMULATIVE; - } - return UNRECOGNIZED; - } - - private static MetricKind convertSumDataType(SumData sum) { - if (!sum.isMonotonic()) { - return GAUGE; - } - if (sum.getAggregationTemporality() == AggregationTemporality.CUMULATIVE) { - return CUMULATIVE; - } - return UNRECOGNIZED; - } - - private static ValueType convertValueType(MetricDataType metricDataType) { - switch (metricDataType) { - case LONG_GAUGE: - case LONG_SUM: - return INT64; - case DOUBLE_GAUGE: - case DOUBLE_SUM: - return DOUBLE; - case HISTOGRAM: - case EXPONENTIAL_HISTOGRAM: - return DISTRIBUTION; - default: - return ValueType.UNRECOGNIZED; - } - } - - private static Point createPoint( - MetricDataType type, PointData pointData, TimeInterval timeInterval) { - Point.Builder builder = Point.newBuilder().setInterval(timeInterval); - switch (type) { - case HISTOGRAM: - case EXPONENTIAL_HISTOGRAM: - return builder - .setValue( - TypedValue.newBuilder() - .setDistributionValue(convertHistogramData((HistogramPointData) pointData)) - .build()) - .build(); - case DOUBLE_GAUGE: - case DOUBLE_SUM: - return builder - .setValue( - TypedValue.newBuilder() - .setDoubleValue(((DoublePointData) pointData).getValue()) - .build()) - .build(); - case LONG_GAUGE: - case LONG_SUM: - return builder - .setValue(TypedValue.newBuilder().setInt64Value(((LongPointData) pointData).getValue())) - .build(); - default: - logger.log(Level.WARNING, "unsupported metric type"); 
- return builder.build(); - } - } - - private static Distribution convertHistogramData(HistogramPointData pointData) { - return Distribution.newBuilder() - .setCount(pointData.getCount()) - .setMean(pointData.getCount() == 0L ? 0.0D : pointData.getSum() / pointData.getCount()) - .setBucketOptions( - BucketOptions.newBuilder() - .setExplicitBuckets(Explicit.newBuilder().addAllBounds(pointData.getBoundaries()))) - .addAllBucketCounts(pointData.getCounts()) - .build(); - } -} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracer.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracer.java index 898d743cd9..df27fbd842 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracer.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracer.java @@ -20,6 +20,9 @@ import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.tracing.ApiTracer; import com.google.api.gax.tracing.BaseApiTracer; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.BigtableTracerStreamingCallable; +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.BigtableTracerUnaryCallable; +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor; import java.time.Duration; import javax.annotation.Nullable; @@ -27,6 +30,8 @@ * A Bigtable specific {@link ApiTracer} that includes additional contexts. This class is a base * implementation that does nothing. */ +// NOTE: this class was part of the public surface so can't move to +// com.google.cloud.bigtable.data.v2.internal.csm with the rest of the metrics. 
@BetaApi("This surface is not stable yet it might be removed in the future.") public class BigtableTracer extends BaseApiTracer { @@ -70,36 +75,12 @@ public int getAttempt() { return attempt; } - /** - * Record the latency between Google's network receives the RPC and reads back the first byte of - * the response from server-timing header. If server-timing header is missing, increment the - * missing header count. - */ - public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - // noop - } - /** Adds an annotation of the total throttled time of a batch. */ public void batchRequestThrottled(long throttledTimeMs) { // noop } - /** - * Set the Bigtable zone and cluster so metrics can be tagged with location information. This will - * be called in BuiltinMetricsTracer. - */ - public void setLocations(String zone, String cluster) { - // noop - } - - /** Set the underlying transport used to process the attempt */ - public void setTransportAttrs(BuiltinMetricsTracer.TransportAttrs attrs) {} - - @Deprecated - /** - * @deprecated {@link #grpcMessageSent()} is called instead. - */ - public void grpcChannelQueuedLatencies(long queuedTimeMs) { + public void setSidebandData(MetadataExtractorInterceptor.SidebandData sidebandData) { // noop } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerUnaryCallable.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerUnaryCallable.java deleted file mode 100644 index 37ba74bfdb..0000000000 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerUnaryCallable.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import com.google.api.core.ApiFuture; -import com.google.api.core.ApiFutureCallback; -import com.google.api.core.ApiFutures; -import com.google.api.core.InternalApi; -import com.google.api.gax.grpc.GrpcResponseMetadata; -import com.google.api.gax.rpc.ApiCallContext; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.MoreExecutors; -import javax.annotation.Nonnull; - -/** - * This callable will: - *

  • - Inject a {@link GrpcResponseMetadata} to access the headers returned by gRPC methods upon - * completion. The {@link BigtableTracer} will process metrics that were injected in the - * header/trailer and publish them to OpenCensus. If {@link GrpcResponseMetadata#getMetadata()} - * returned null, it probably means that the request has never reached GFE, and it'll increment - * the gfe_header_missing_counter in this case. - *
  • -This class will also access trailers from {@link GrpcResponseMetadata} to record zone and - * cluster ids. - *
  • -This class will also inject a {@link BigtableGrpcStreamTracer} that'll record the time an - * RPC spent in a grpc channel queue. - *
  • This class is considered an internal implementation detail and not meant to be used by - * applications. - */ -@InternalApi -public class BigtableTracerUnaryCallable - extends UnaryCallable { - - private final UnaryCallable innerCallable; - - public BigtableTracerUnaryCallable(@Nonnull UnaryCallable innerCallable) { - this.innerCallable = Preconditions.checkNotNull(innerCallable, "Inner callable must be set"); - } - - @Override - public ApiFuture futureCall(RequestT request, ApiCallContext context) { - // tracer should always be an instance of BigtableTracer - if (context.getTracer() instanceof BigtableTracer) { - BigtableTracer tracer = (BigtableTracer) context.getTracer(); - final GrpcResponseMetadata responseMetadata = new GrpcResponseMetadata(); - BigtableTracerUnaryCallback callback = - new BigtableTracerUnaryCallback( - (BigtableTracer) context.getTracer(), responseMetadata); - if (context.getRetrySettings() != null) { - tracer.setTotalTimeoutDuration(context.getRetrySettings().getTotalTimeoutDuration()); - } - ApiFuture future = - innerCallable.futureCall( - request, - Util.injectBigtableStreamTracer( - context, responseMetadata, (BigtableTracer) context.getTracer())); - ApiFutures.addCallback(future, callback, MoreExecutors.directExecutor()); - return future; - } else { - return innerCallable.futureCall(request, context); - } - } - - private class BigtableTracerUnaryCallback implements ApiFutureCallback { - - private final BigtableTracer tracer; - private final GrpcResponseMetadata responseMetadata; - - BigtableTracerUnaryCallback(BigtableTracer tracer, GrpcResponseMetadata responseMetadata) { - this.tracer = tracer; - this.responseMetadata = responseMetadata; - } - - @Override - public void onFailure(Throwable throwable) { - Util.recordMetricsFromMetadata(responseMetadata, tracer, throwable); - } - - @Override - public void onSuccess(ResponseT response) { - Util.recordMetricsFromMetadata(responseMetadata, tracer, null); - } - } -} diff --git 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java deleted file mode 100644 index 810d555de2..0000000000 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsConstants.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import com.google.api.core.InternalApi; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import io.opentelemetry.api.common.AttributeKey; -import io.opentelemetry.sdk.metrics.Aggregation; -import io.opentelemetry.sdk.metrics.InstrumentSelector; -import io.opentelemetry.sdk.metrics.InstrumentType; -import io.opentelemetry.sdk.metrics.View; -import io.opentelemetry.sdk.metrics.ViewBuilder; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; -import javax.annotation.Nullable; - -/** Defining Bigtable builit-in metrics scope, attributes, metric names and views. 
*/ -@InternalApi -public class BuiltinMetricsConstants { - - // Metric attribute keys for monitored resource - public static final AttributeKey BIGTABLE_PROJECT_ID_KEY = - AttributeKey.stringKey("project_id"); - public static final AttributeKey INSTANCE_ID_KEY = AttributeKey.stringKey("instance"); - public static final AttributeKey TABLE_ID_KEY = AttributeKey.stringKey("table"); - public static final AttributeKey CLUSTER_ID_KEY = AttributeKey.stringKey("cluster"); - public static final AttributeKey ZONE_ID_KEY = AttributeKey.stringKey("zone"); - - // Metric attribute keys for labels - // We need to access APP_PROFILE_KEY in EnhancedBigtableStubSettings and STREAMING_KEY in - // IT tests, so they're public. - public static final AttributeKey APP_PROFILE_KEY = AttributeKey.stringKey("app_profile"); - public static final AttributeKey STREAMING_KEY = AttributeKey.booleanKey("streaming"); - public static final AttributeKey CLIENT_NAME_KEY = AttributeKey.stringKey("client_name"); - static final AttributeKey METHOD_KEY = AttributeKey.stringKey("method"); - static final AttributeKey STATUS_KEY = AttributeKey.stringKey("status"); - static final AttributeKey CLIENT_UID_KEY = AttributeKey.stringKey("client_uid"); - static final AttributeKey APPLIED_KEY = AttributeKey.booleanKey("applied"); - - static final AttributeKey TRANSPORT_TYPE = AttributeKey.stringKey("transport_type"); - static final AttributeKey TRANSPORT_REGION = AttributeKey.stringKey("transport_region"); - static final AttributeKey TRANSPORT_ZONE = AttributeKey.stringKey("transport_zone"); - static final AttributeKey TRANSPORT_SUBZONE = AttributeKey.stringKey("transport_subzone"); - - // gRPC attribute keys - // Note that these attributes keys from transformed from - // A.B.C to A_B_C before exporting to Cloud Monitoring. 
- static final AttributeKey GRPC_LB_BACKEND_SERVICE_KEY = - AttributeKey.stringKey("grpc.lb.backend_service"); - static final AttributeKey GRPC_DISCONNECT_ERROR_KEY = - AttributeKey.stringKey("grpc.disconnect_error"); - static final AttributeKey GRPC_LB_LOCALITY_KEY = - AttributeKey.stringKey("grpc.lb.locality"); - static final AttributeKey GRPC_TARGET_KEY = AttributeKey.stringKey("grpc.target"); - static final AttributeKey GRPC_SECURITY_LEVEL_KEY = - AttributeKey.stringKey("grpc.security_level"); - static final AttributeKey GRPC_METHOD_KEY = AttributeKey.stringKey("grpc.method"); - static final AttributeKey GRPC_STATUS_KEY = AttributeKey.stringKey("grpc.status"); - static final AttributeKey GRPC_LB_RLS_DATA_PLANE_TARGET_KEY = - AttributeKey.stringKey("grpc.lb.rls.data_plane_target"); - static final AttributeKey GRPC_LB_PICK_RESULT_KEY = - AttributeKey.stringKey("grpc.lb.pick_result"); - static final AttributeKey GRPC_LB_RLS_SERVER_TARGET_KEY = - AttributeKey.stringKey("grpc.lb.rls.server_target"); - static final AttributeKey GRPC_XDS_SERVER_KEY = AttributeKey.stringKey("grpc.xds.server"); - static final AttributeKey GRPC_XDS_RESOURCE_TYPE_KEY = - AttributeKey.stringKey("grpc.xds.resource_type"); - - public static final String METER_NAME = "bigtable.googleapis.com/internal/client/"; - - // Metric names - public static final String OPERATION_LATENCIES_NAME = "operation_latencies"; - public static final String ATTEMPT_LATENCIES_NAME = "attempt_latencies"; - // Temporary workaround for not being able to add new labels to ATTEMPT_LATENCIES_NAME - public static final String ATTEMPT_LATENCIES2_NAME = "attempt_latencies2"; - static final String RETRY_COUNT_NAME = "retry_count"; - static final String CONNECTIVITY_ERROR_COUNT_NAME = "connectivity_error_count"; - static final String SERVER_LATENCIES_NAME = "server_latencies"; - static final String FIRST_RESPONSE_LATENCIES_NAME = "first_response_latencies"; - static final String APPLICATION_BLOCKING_LATENCIES_NAME = 
"application_latencies"; - static final String REMAINING_DEADLINE_NAME = "remaining_deadline"; - static final String CLIENT_BLOCKING_LATENCIES_NAME = "throttling_latencies"; - static final String PER_CONNECTION_ERROR_COUNT_NAME = "per_connection_error_count"; - static final String OUTSTANDING_RPCS_PER_CHANNEL_NAME = "connection_pool/outstanding_rpcs"; - static final String BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME = - "batch_write_flow_control_target_qps"; - static final String BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME = "batch_write_flow_control_factor"; - - // Start allow list of metrics that will be exported as internal - public static final Map> GRPC_METRICS = - ImmutableMap.>builder() - .put( - "grpc.client.attempt.duration", - ImmutableSet.of( - GRPC_LB_LOCALITY_KEY.getKey(), - GRPC_METHOD_KEY.getKey(), - GRPC_TARGET_KEY.getKey(), - GRPC_STATUS_KEY.getKey())) - .put( - "grpc.lb.rls.default_target_picks", - ImmutableSet.of( - GRPC_LB_RLS_DATA_PLANE_TARGET_KEY.getKey(), GRPC_LB_PICK_RESULT_KEY.getKey())) - .put( - "grpc.lb.rls.target_picks", - ImmutableSet.of( - GRPC_TARGET_KEY.getKey(), - GRPC_LB_RLS_SERVER_TARGET_KEY.getKey(), - GRPC_LB_RLS_DATA_PLANE_TARGET_KEY.getKey(), - GRPC_LB_PICK_RESULT_KEY.getKey())) - .put( - "grpc.lb.rls.failed_picks", - ImmutableSet.of(GRPC_TARGET_KEY.getKey(), GRPC_LB_RLS_SERVER_TARGET_KEY.getKey())) - // TODO: "grpc.xds_client.connected" - .put( - "grpc.xds_client.server_failure", - ImmutableSet.of(GRPC_TARGET_KEY.getKey(), GRPC_XDS_SERVER_KEY.getKey())) - // TODO: "grpc.xds_client.resource_updates_valid", - .put( - "grpc.xds_client.resource_updates_invalid", - ImmutableSet.of( - GRPC_TARGET_KEY.getKey(), - GRPC_XDS_SERVER_KEY.getKey(), - GRPC_XDS_RESOURCE_TYPE_KEY.getKey())) - // TODO: "grpc.xds_client.resources" - // gRPC subchannel metrics - .put( - "grpc.subchannel.disconnections", - ImmutableSet.of( - GRPC_LB_BACKEND_SERVICE_KEY.getKey(), - GRPC_DISCONNECT_ERROR_KEY.getKey(), - GRPC_LB_LOCALITY_KEY.getKey(), - 
GRPC_TARGET_KEY.getKey())) - .put( - "grpc.subchannel.connection_attempts_succeeded", - ImmutableSet.of( - GRPC_LB_BACKEND_SERVICE_KEY.getKey(), - GRPC_LB_LOCALITY_KEY.getKey(), - GRPC_TARGET_KEY.getKey())) - .put( - "grpc.subchannel.connection_attempts_failed", - ImmutableSet.of( - GRPC_LB_BACKEND_SERVICE_KEY.getKey(), - GRPC_LB_LOCALITY_KEY.getKey(), - GRPC_TARGET_KEY.getKey())) - .put( - "grpc.subchannel.open_connections", - ImmutableSet.of( - GRPC_LB_BACKEND_SERVICE_KEY.getKey(), - GRPC_LB_LOCALITY_KEY.getKey(), - GRPC_SECURITY_LEVEL_KEY.getKey(), - GRPC_TARGET_KEY.getKey())) - .build(); - - public static final Set INTERNAL_METRICS = - ImmutableSet.of(PER_CONNECTION_ERROR_COUNT_NAME, OUTSTANDING_RPCS_PER_CHANNEL_NAME).stream() - .map(m -> METER_NAME + m) - .collect(ImmutableSet.toImmutableSet()); - // End allow list of metrics that will be exported - - // Buckets under 100,000 are identical to buckets for server side metrics handler_latencies. - // Extending client side bucket to up to 3,200,000. 
- private static final Aggregation AGGREGATION_WITH_MILLIS_HISTOGRAM = - Aggregation.explicitBucketHistogram( - ImmutableList.of( - 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 8.0, 10.0, 13.0, 16.0, 20.0, 25.0, 30.0, 40.0, - 50.0, 65.0, 80.0, 100.0, 130.0, 160.0, 200.0, 250.0, 300.0, 400.0, 500.0, 650.0, - 800.0, 1000.0, 2000.0, 5000.0, 10000.0, 20000.0, 50000.0, 100000.0, 200000.0, - 400000.0, 800000.0, 1600000.0, 3200000.0)); // max is 53.3 minutes - - private static final Aggregation AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM = - Aggregation.explicitBucketHistogram( - ImmutableList.of( - 1.0, - 2.0, - 4.0, - 8.0, - 16.0, - 32.0, - 64.0, - 125.0, - 250.0, - 500.0, - 1_000.0, - 2_000.0, - 4_000.0, - 8_000.0, - 16_000.0, - 32_000.0, - 64_000.0, - 128_000.0, - 250_000.0, - 500_000.0, - 1_000_000.0)); - - // Buckets for outstanding RPCs per channel, max ~100 - private static final Aggregation AGGREGATION_OUTSTANDING_RPCS_HISTOGRAM = - Aggregation.explicitBucketHistogram( - ImmutableList.of( - 0.0, 5.0, 10.0, 15.0, 20.0, 25.0, 30.0, 35.0, 40.0, 45.0, 50.0, 55.0, 60.0, 65.0, - 70.0, 75.0, 80.0, 85.0, 90.0, 95.0, 100.0, 105.0, 110.0, 115.0, 120.0, 125.0, 130.0, - 135.0, 140.0, 145.0, 150.0, 155.0, 160.0, 165.0, 170.0, 175.0, 180.0, 185.0, 190.0, - 195.0, 200.0)); - private static final Aggregation AGGREGATION_BATCH_WRITE_FLOW_CONTROL_FACTOR_HISTOGRAM = - Aggregation.explicitBucketHistogram(ImmutableList.of(0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3)); - - static final Set COMMON_ATTRIBUTES = - ImmutableSet.of( - BIGTABLE_PROJECT_ID_KEY, - INSTANCE_ID_KEY, - TABLE_ID_KEY, - APP_PROFILE_KEY, - CLUSTER_ID_KEY, - ZONE_ID_KEY, - METHOD_KEY, - CLIENT_NAME_KEY); - - static void defineView( - ImmutableMap.Builder viewMap, - String id, - @Nullable Aggregation aggregation, - InstrumentType type, - String unit, - Set attributes) { - InstrumentSelector selector = - InstrumentSelector.builder() - .setName(id) - .setMeterName(METER_NAME) - .setType(type) - .setUnit(unit) - .build(); - Set 
attributesFilter = - ImmutableSet.builder() - .addAll( - COMMON_ATTRIBUTES.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) - .addAll(attributes.stream().map(AttributeKey::getKey).collect(Collectors.toSet())) - .build(); - ViewBuilder viewBuilder = - View.builder().setName(METER_NAME + id).setAttributeFilter(attributesFilter); - if (aggregation != null) { - viewBuilder.setAggregation(aggregation); - } - viewMap.put(selector, viewBuilder.build()); - } - - // uses cloud.BigtableClient schema - public static Map getInternalViews() { - ImmutableMap.Builder views = ImmutableMap.builder(); - defineView( - views, - PER_CONNECTION_ERROR_COUNT_NAME, - AGGREGATION_PER_CONNECTION_ERROR_COUNT_HISTOGRAM, - InstrumentType.HISTOGRAM, - "1", - ImmutableSet.builder() - .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY) - .build()); - defineView( - views, - OUTSTANDING_RPCS_PER_CHANNEL_NAME, - AGGREGATION_OUTSTANDING_RPCS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "1", - ImmutableSet.builder() - .add(BIGTABLE_PROJECT_ID_KEY, INSTANCE_ID_KEY, APP_PROFILE_KEY, CLIENT_NAME_KEY) - .build()); - return views.build(); - } - - public static Map getAllViews() { - ImmutableMap.Builder views = ImmutableMap.builder(); - - defineView( - views, - OPERATION_LATENCIES_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder() - .addAll(COMMON_ATTRIBUTES) - .add(STREAMING_KEY, STATUS_KEY) - .build()); - defineView( - views, - ATTEMPT_LATENCIES_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder() - .addAll(COMMON_ATTRIBUTES) - .add(STREAMING_KEY, STATUS_KEY) - .build()); - defineView( - views, - ATTEMPT_LATENCIES2_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder() - .addAll(COMMON_ATTRIBUTES) - .add( - STREAMING_KEY, - STATUS_KEY, - TRANSPORT_TYPE, - TRANSPORT_REGION, - TRANSPORT_ZONE, - TRANSPORT_SUBZONE) - 
.build()); - defineView( - views, - SERVER_LATENCIES_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); - defineView( - views, - FIRST_RESPONSE_LATENCIES_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); - defineView( - views, - APPLICATION_BLOCKING_LATENCIES_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); - defineView( - views, - CLIENT_BLOCKING_LATENCIES_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); - defineView( - views, - RETRY_COUNT_NAME, - Aggregation.sum(), - InstrumentType.COUNTER, - "1", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); - defineView( - views, - CONNECTIVITY_ERROR_COUNT_NAME, - Aggregation.sum(), - InstrumentType.COUNTER, - "1", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).add(STATUS_KEY).build()); - defineView( - views, - REMAINING_DEADLINE_NAME, - AGGREGATION_WITH_MILLIS_HISTOGRAM, - InstrumentType.HISTOGRAM, - "ms", - ImmutableSet.builder() - .addAll(COMMON_ATTRIBUTES) - .add(STREAMING_KEY, STATUS_KEY) - .build()); - defineView( - views, - BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME, - null, - InstrumentType.GAUGE, - "1", - ImmutableSet.builder().addAll(COMMON_ATTRIBUTES).build()); - defineView( - views, - BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME, - AGGREGATION_BATCH_WRITE_FLOW_CONTROL_FACTOR_HISTOGRAM, - InstrumentType.HISTOGRAM, - "1", - ImmutableSet.builder() - .addAll(COMMON_ATTRIBUTES) - .add(STATUS_KEY, APPLIED_KEY) - .build()); - return views.build(); - } -} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java 
b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java deleted file mode 100644 index e6ebad367a..0000000000 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracer.java +++ /dev/null @@ -1,528 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import static com.google.api.gax.tracing.ApiTracerFactory.OperationType; -import static com.google.api.gax.util.TimeConversionUtils.toJavaTimeDuration; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLIED_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TRANSPORT_REGION; -import static 
com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TRANSPORT_SUBZONE; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TRANSPORT_TYPE; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TRANSPORT_ZONE; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY; -import static com.google.cloud.bigtable.data.v2.stub.metrics.Util.extractStatus; - -import com.google.api.core.ObsoleteApi; -import com.google.api.gax.retrying.ServerStreamingAttemptException; -import com.google.api.gax.tracing.SpanName; -import com.google.auto.value.AutoValue; -import com.google.cloud.bigtable.Version; -import com.google.common.base.Stopwatch; -import com.google.common.base.Strings; -import com.google.common.math.IntMath; -import com.google.gson.Gson; -import com.google.gson.reflect.TypeToken; -import io.grpc.Deadline; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.metrics.DoubleGauge; -import io.opentelemetry.api.metrics.DoubleHistogram; -import io.opentelemetry.api.metrics.LongCounter; -import java.time.Duration; -import java.util.Map; -import java.util.concurrent.CancellationException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.logging.Level; -import java.util.logging.Logger; -import javax.annotation.Nullable; - -/** - * A {@link BigtableTracer} that records built-in metrics and publish under the - * bigtable.googleapis.com/client namespace - */ -class BuiltinMetricsTracer extends BigtableTracer { - @AutoValue - abstract static class TransportAttrs { - @Nullable - abstract String getLocality(); - - @Nullable - abstract String getBackendService(); - - static TransportAttrs create(@Nullable String locality, @Nullable String backendService) { - return new 
AutoValue_BuiltinMetricsTracer_TransportAttrs(locality, backendService); - } - } - - private static final Logger logger = Logger.getLogger(BuiltinMetricsTracer.class.getName()); - private static final Gson GSON = new Gson(); - private static final TypeToken> LOCALITY_TYPE = - new TypeToken>() {}; - - private static final String NAME = "java-bigtable/" + Version.VERSION; - private final OperationType operationType; - private final SpanName spanName; - - // Operation level metrics - private final AtomicBoolean operationFinishedEarly = new AtomicBoolean(); - private final AtomicBoolean opFinished = new AtomicBoolean(); - private final Stopwatch operationTimer = Stopwatch.createStarted(); - private final Stopwatch firstResponsePerOpTimer = Stopwatch.createStarted(); - - // Attempt level metrics - private int attemptCount = 0; - private Stopwatch attemptTimer; - private volatile int attempt = 0; - - // Total server latency needs to be atomic because it's accessed from different threads. E.g. - // request() from user thread and attempt failed from grpc thread. We're only measuring the extra - // time application spent blocking grpc buffer, which will be operationLatency - serverLatency. - private final AtomicLong totalServerLatencyNano = new AtomicLong(0); - // Stopwatch is not thread safe so this is a workaround to check if the stopwatch changes is - // flushed to memory. 
- private final Stopwatch serverLatencyTimer = Stopwatch.createUnstarted(); - private final Object timerLock = new Object(); - - private boolean flowControlIsDisabled = false; - - private final AtomicInteger requestLeft = new AtomicInteger(0); - - // Monitored resource labels - private String tableId = ""; - private String zone = "global"; - private String cluster = ""; - - private final AtomicLong totalClientBlockingTime = new AtomicLong(0); - - private final Attributes baseAttributes; - - private Long serverLatencies = null; - private final AtomicLong grpcMessageSentDelay = new AtomicLong(0); - - private Deadline operationDeadline = null; - private volatile long remainingDeadlineAtAttemptStart = 0; - - private TransportAttrs transportAttrs = null; - - // OpenCensus (and server) histogram buckets use [start, end), however OpenTelemetry uses (start, - // end]. To work around this, we measure all the latencies in nanoseconds and convert them - // to milliseconds and use DoubleHistogram. This should minimize the chance of a data - // point fall on the bucket boundary that causes off by one errors. 
- private final DoubleHistogram operationLatenciesHistogram; - private final DoubleHistogram attemptLatenciesHistogram; - private final DoubleHistogram attemptLatencies2Histogram; - private final DoubleHistogram serverLatenciesHistogram; - private final DoubleHistogram firstResponseLatenciesHistogram; - private final DoubleHistogram clientBlockingLatenciesHistogram; - private final DoubleHistogram applicationBlockingLatenciesHistogram; - private final DoubleHistogram remainingDeadlineHistogram; - private final LongCounter connectivityErrorCounter; - private final LongCounter retryCounter; - private final DoubleGauge batchWriteFlowControlTargetQps; - private final DoubleHistogram batchWriteFlowControlFactorHistogram; - - BuiltinMetricsTracer( - OperationType operationType, - SpanName spanName, - Attributes attributes, - DoubleHistogram operationLatenciesHistogram, - DoubleHistogram attemptLatenciesHistogram, - DoubleHistogram attemptLatencies2Histogram, - DoubleHistogram serverLatenciesHistogram, - DoubleHistogram firstResponseLatenciesHistogram, - DoubleHistogram clientBlockingLatenciesHistogram, - DoubleHistogram applicationBlockingLatenciesHistogram, - DoubleHistogram deadlineHistogram, - LongCounter connectivityErrorCounter, - LongCounter retryCounter, - DoubleGauge batchWriteFlowControlTargetQps, - DoubleHistogram batchWriteFlowControlFactorHistogram) { - this.operationType = operationType; - this.spanName = spanName; - this.baseAttributes = attributes; - - this.operationLatenciesHistogram = operationLatenciesHistogram; - this.attemptLatenciesHistogram = attemptLatenciesHistogram; - this.attemptLatencies2Histogram = attemptLatencies2Histogram; - this.serverLatenciesHistogram = serverLatenciesHistogram; - this.firstResponseLatenciesHistogram = firstResponseLatenciesHistogram; - this.clientBlockingLatenciesHistogram = clientBlockingLatenciesHistogram; - this.applicationBlockingLatenciesHistogram = applicationBlockingLatenciesHistogram; - 
this.remainingDeadlineHistogram = deadlineHistogram; - this.connectivityErrorCounter = connectivityErrorCounter; - this.retryCounter = retryCounter; - this.batchWriteFlowControlTargetQps = batchWriteFlowControlTargetQps; - this.batchWriteFlowControlFactorHistogram = batchWriteFlowControlFactorHistogram; - } - - @Override - public Scope inScope() { - return new Scope() { - @Override - public void close() {} - }; - } - - @Override - public void operationFinishEarly() { - operationFinishedEarly.set(true); - attemptTimer.stop(); - operationTimer.stop(); - } - - @Override - public void operationSucceeded() { - recordOperationCompletion(null); - } - - @Override - public void operationCancelled() { - recordOperationCompletion(new CancellationException()); - } - - @Override - public void operationFailed(Throwable error) { - recordOperationCompletion(error); - } - - @Override - public void attemptStarted(int attemptNumber) { - attemptStarted(null, attemptNumber); - } - - @Override - public void attemptStarted(Object request, int attemptNumber) { - this.attempt = attemptNumber; - attemptCount++; - attemptTimer = Stopwatch.createStarted(); - if (operationDeadline != null) { - remainingDeadlineAtAttemptStart = operationDeadline.timeRemaining(TimeUnit.MILLISECONDS); - } - if (request != null) { - this.tableId = Util.extractTableId(request); - } - if (!flowControlIsDisabled) { - synchronized (timerLock) { - if (!serverLatencyTimer.isRunning()) { - serverLatencyTimer.start(); - } - } - } - } - - @Override - public void attemptSucceeded() { - recordAttemptCompletion(null); - } - - @Override - public void attemptCancelled() { - recordAttemptCompletion(new CancellationException()); - } - - /** - * This method is obsolete. Use {@link #attemptFailedDuration(Throwable, java.time.Duration)} - * instead. 
- */ - @ObsoleteApi("Use attemptFailedDuration(Throwable, java.time.Duration) instead") - @Override - public void attemptFailed(Throwable error, org.threeten.bp.Duration delay) { - attemptFailedDuration(error, toJavaTimeDuration(delay)); - } - - @Override - public void attemptFailedDuration(Throwable error, Duration delay) { - recordAttemptCompletion(error); - } - - @Override - public void attemptPermanentFailure(Throwable throwable) { - recordAttemptCompletion(throwable); - } - - @Override - public void onRequest(int requestCount) { - requestLeft.accumulateAndGet(requestCount, IntMath::saturatedAdd); - - if (operationFinishedEarly.get()) { - return; - } - - if (flowControlIsDisabled) { - // On request is only called when auto flow control is disabled. When auto flow control is - // disabled, server latency is measured between onRequest and onResponse. - synchronized (timerLock) { - if (!serverLatencyTimer.isRunning()) { - serverLatencyTimer.start(); - } - } - } - } - - @Override - public void responseReceived() { - if (operationFinishedEarly.get()) { - return; - } - - if (firstResponsePerOpTimer.isRunning()) { - firstResponsePerOpTimer.stop(); - } - // When auto flow control is enabled, server latency is measured between afterResponse and - // responseReceived. - // When auto flow control is disabled, server latency is measured between onRequest and - // responseReceived. - // When auto flow control is disabled and application requested multiple responses, server - // latency is measured between afterResponse and responseReceived. - // In all the cases, we want to stop the serverLatencyTimer here. 
- synchronized (timerLock) { - if (serverLatencyTimer.isRunning()) { - totalServerLatencyNano.addAndGet(serverLatencyTimer.elapsed(TimeUnit.NANOSECONDS)); - serverLatencyTimer.reset(); - } - } - } - - @Override - public void afterResponse(long applicationLatency) { - if (!flowControlIsDisabled || requestLeft.decrementAndGet() > 0) { - if (operationFinishedEarly.get()) { - return; - } - // When auto flow control is enabled, request will never be called, so server latency is - // measured between after the last response is processed and before the next response is - // received. If flow control is disabled but requestLeft is greater than 0, - // also start the timer to count the time between afterResponse and responseReceived. - synchronized (timerLock) { - if (!serverLatencyTimer.isRunning()) { - serverLatencyTimer.start(); - } - } - } - } - - @Override - public int getAttempt() { - return attempt; - } - - @Override - public void recordGfeMetadata(@Nullable Long latency, @Nullable Throwable throwable) { - if (latency != null) { - serverLatencies = latency; - } - } - - @Override - public void setLocations(String zone, String cluster) { - this.zone = zone; - this.cluster = cluster; - } - - @Override - public void setTransportAttrs(TransportAttrs attrs) { - this.transportAttrs = attrs; - } - - @Override - public void batchRequestThrottled(long throttledTimeNanos) { - totalClientBlockingTime.addAndGet(java.time.Duration.ofNanos(throttledTimeNanos).toMillis()); - } - - @Override - public void grpcMessageSent() { - grpcMessageSentDelay.set(attemptTimer.elapsed(TimeUnit.NANOSECONDS)); - } - - @Override - public void setTotalTimeoutDuration(java.time.Duration totalTimeoutDuration) { - // This method is called by BigtableTracerStreamingCallable and - // BigtableTracerUnaryCallable which is called per attempt. We only set - // the operationDeadline on the first attempt and when totalTimeout is set. 
- if (operationDeadline == null && !totalTimeoutDuration.isZero()) { - this.operationDeadline = - Deadline.after(totalTimeoutDuration.toMillis(), TimeUnit.MILLISECONDS); - this.remainingDeadlineAtAttemptStart = totalTimeoutDuration.toMillis(); - } - } - - @Override - public void disableFlowControl() { - flowControlIsDisabled = true; - } - - private void recordOperationCompletion(@Nullable Throwable status) { - if (operationFinishedEarly.get()) { - status = null; // force an ok - } - - if (!opFinished.compareAndSet(false, true)) { - return; - } - long operationLatencyNano = operationTimer.elapsed(TimeUnit.NANOSECONDS); - - boolean isStreaming = operationType == OperationType.ServerStreaming; - String statusStr = extractStatus(status); - - // Publish metric data with all the attributes. The attributes get filtered in - // BuiltinMetricsConstants when we construct the views. - Attributes attributes = - baseAttributes.toBuilder() - .put(TABLE_ID_KEY, tableId) - .put(CLUSTER_ID_KEY, cluster) - .put(ZONE_ID_KEY, zone) - .put(METHOD_KEY, spanName.toString()) - .put(CLIENT_NAME_KEY, NAME) - .put(STREAMING_KEY, isStreaming) - .put(STATUS_KEY, statusStr) - .build(); - - // Only record when retry count is greater than 0 so the retry - // graph will be less confusing - if (attemptCount > 1) { - retryCounter.add(attemptCount - 1, attributes); - } - - operationLatenciesHistogram.record(convertToMs(operationLatencyNano), attributes); - - // serverLatencyTimer should already be stopped in recordAttemptCompletion - long applicationLatencyNano = operationLatencyNano - totalServerLatencyNano.get(); - applicationBlockingLatenciesHistogram.record(convertToMs(applicationLatencyNano), attributes); - - if (operationType == OperationType.ServerStreaming - && spanName.getMethodName().equals("ReadRows")) { - firstResponseLatenciesHistogram.record( - convertToMs(firstResponsePerOpTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); - } - } - - private void recordAttemptCompletion(@Nullable 
Throwable status) { - if (operationFinishedEarly.get()) { - status = null; // force an ok - } - // If the attempt failed, the time spent in retry should be counted in application latency. - // Stop the stopwatch and decrement requestLeft. - synchronized (timerLock) { - if (serverLatencyTimer.isRunning()) { - requestLeft.decrementAndGet(); - totalServerLatencyNano.addAndGet(serverLatencyTimer.elapsed(TimeUnit.NANOSECONDS)); - serverLatencyTimer.reset(); - } - } - - boolean isStreaming = operationType == OperationType.ServerStreaming; - - // Patch the status until it's fixed in gax. When an attempt failed, - // it'll throw a ServerStreamingAttemptException. Unwrap the exception - // so it could get processed by extractStatus - if (status instanceof ServerStreamingAttemptException) { - status = status.getCause(); - } - - String statusStr = extractStatus(status); - - Attributes attributes = - baseAttributes.toBuilder() - .put(TABLE_ID_KEY, tableId) - .put(CLUSTER_ID_KEY, cluster) - .put(ZONE_ID_KEY, zone) - .put(METHOD_KEY, spanName.toString()) - .put(CLIENT_NAME_KEY, NAME) - .put(STREAMING_KEY, isStreaming) - .put(STATUS_KEY, statusStr) - .build(); - - totalClientBlockingTime.addAndGet(grpcMessageSentDelay.get()); - clientBlockingLatenciesHistogram.record(convertToMs(totalClientBlockingTime.get()), attributes); - - attemptLatenciesHistogram.record( - convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), attributes); - - String transportType = "cloudpath"; - String transportRegion = ""; - String transportZone = ""; - String transportSubzone = ""; - - try { - if (transportAttrs != null && !Strings.isNullOrEmpty(transportAttrs.getLocality())) { - // only directpath has locality - transportType = "directpath"; - Map localityMap = - GSON.fromJson(transportAttrs.getLocality(), LOCALITY_TYPE); - transportRegion = localityMap.getOrDefault("region", ""); - transportZone = localityMap.getOrDefault("zone", ""); - transportSubzone = localityMap.getOrDefault("sub_zone", ""); - 
} - } catch (RuntimeException e) { - logger.log( - Level.WARNING, "Failed to parse transport locality: " + transportAttrs.getLocality(), e); - } - attemptLatencies2Histogram.record( - convertToMs(attemptTimer.elapsed(TimeUnit.NANOSECONDS)), - attributes.toBuilder() - .put(TRANSPORT_TYPE, transportType) - .put(TRANSPORT_REGION, transportRegion) - .put(TRANSPORT_ZONE, transportZone) - .put(TRANSPORT_SUBZONE, transportSubzone) - .build()); - - // When operationDeadline is set, it's possible that the deadline is passed by the time we send - // a new attempt. In this case we'll record 0. - if (operationDeadline != null) { - remainingDeadlineHistogram.record(Math.max(0, remainingDeadlineAtAttemptStart), attributes); - } - - if (serverLatencies != null) { - serverLatenciesHistogram.record(serverLatencies, attributes); - connectivityErrorCounter.add(0, attributes); - } else { - connectivityErrorCounter.add(1, attributes); - } - } - - private static double convertToMs(long nanoSeconds) { - double toMs = 1e-6; - return nanoSeconds * toMs; - } - - @Override - public void setBatchWriteFlowControlTargetQps(double targetQps) { - Attributes attributes = baseAttributes.toBuilder().put(METHOD_KEY, spanName.toString()).build(); - - batchWriteFlowControlTargetQps.set(targetQps, attributes); - } - - @Override - public void addBatchWriteFlowControlFactor( - double factor, @Nullable Throwable status, boolean applied) { - Attributes attributes = - baseAttributes.toBuilder() - .put(METHOD_KEY, spanName.toString()) - .put(STATUS_KEY, extractStatus(status)) - .put(APPLIED_KEY, applied) - .build(); - - batchWriteFlowControlFactorHistogram.record(factor, attributes); - } -} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java deleted file mode 100644 index eb8089b1c6..0000000000 --- 
a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerFactory.java +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2022 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigtable.data.v2.stub.metrics; - -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES2_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METER_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME; -import static 
com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.REMAINING_DEADLINE_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME; -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME; - -import com.google.api.core.InternalApi; -import com.google.api.gax.tracing.ApiTracer; -import com.google.api.gax.tracing.ApiTracerFactory; -import com.google.api.gax.tracing.BaseApiTracerFactory; -import com.google.api.gax.tracing.SpanName; -import io.opentelemetry.api.OpenTelemetry; -import io.opentelemetry.api.common.Attributes; -import io.opentelemetry.api.metrics.DoubleGauge; -import io.opentelemetry.api.metrics.DoubleHistogram; -import io.opentelemetry.api.metrics.LongCounter; -import io.opentelemetry.api.metrics.Meter; -import java.io.IOException; - -/** - * {@link ApiTracerFactory} that will generate OpenTelemetry metrics by using the {@link ApiTracer} - * api. - */ -@InternalApi("For internal use only") -public class BuiltinMetricsTracerFactory extends BaseApiTracerFactory { - - private final Attributes attributes; - - private static final String MILLISECOND = "ms"; - private static final String COUNT = "1"; - - private final DoubleHistogram operationLatenciesHistogram; - private final DoubleHistogram attemptLatenciesHistogram; - private final DoubleHistogram attemptLatencies2Histogram; - private final DoubleHistogram serverLatenciesHistogram; - private final DoubleHistogram firstResponseLatenciesHistogram; - private final DoubleHistogram clientBlockingLatenciesHistogram; - private final DoubleHistogram applicationBlockingLatenciesHistogram; - private final DoubleHistogram remainingDeadlineHistogram; - private final LongCounter connectivityErrorCounter; - private final LongCounter retryCounter; - private final DoubleGauge batchWriteFlowControlTargetQps; - private final DoubleHistogram batchWriteFlowControlFactorHistogram; - - public static 
BuiltinMetricsTracerFactory create( - OpenTelemetry openTelemetry, Attributes attributes) throws IOException { - return new BuiltinMetricsTracerFactory(openTelemetry, attributes); - } - - BuiltinMetricsTracerFactory(OpenTelemetry openTelemetry, Attributes attributes) { - this.attributes = attributes; - Meter meter = openTelemetry.getMeter(METER_NAME); - - operationLatenciesHistogram = - meter - .histogramBuilder(OPERATION_LATENCIES_NAME) - .setDescription( - "Total time until final operation success or failure, including retries and" - + " backoff.") - .setUnit(MILLISECOND) - .build(); - attemptLatenciesHistogram = - meter - .histogramBuilder(ATTEMPT_LATENCIES_NAME) - .setDescription("Client observed latency per RPC attempt.") - .setUnit(MILLISECOND) - .build(); - attemptLatencies2Histogram = - meter - .histogramBuilder(ATTEMPT_LATENCIES2_NAME) - .setDescription("Client observed latency per RPC attempt with transport labels.") - .setUnit(MILLISECOND) - .build(); - serverLatenciesHistogram = - meter - .histogramBuilder(SERVER_LATENCIES_NAME) - .setDescription( - "The latency measured from the moment that the RPC entered the Google data center" - + " until the RPC was completed.") - .setUnit(MILLISECOND) - .build(); - firstResponseLatenciesHistogram = - meter - .histogramBuilder(FIRST_RESPONSE_LATENCIES_NAME) - .setDescription( - "Latency from operation start until the response headers were received. The" - + " publishing of the measurement will be delayed until the attempt response" - + " has been received.") - .setUnit(MILLISECOND) - .build(); - clientBlockingLatenciesHistogram = - meter - .histogramBuilder(CLIENT_BLOCKING_LATENCIES_NAME) - .setDescription( - "The artificial latency introduced by the client to limit the number of outstanding" - + " requests. 
The publishing of the measurement will be delayed until the" - + " attempt trailers have been received.") - .setUnit(MILLISECOND) - .build(); - applicationBlockingLatenciesHistogram = - meter - .histogramBuilder(APPLICATION_BLOCKING_LATENCIES_NAME) - .setDescription( - "The latency of the client application consuming available response data.") - .setUnit(MILLISECOND) - .build(); - remainingDeadlineHistogram = - meter - .histogramBuilder(REMAINING_DEADLINE_NAME) - .setDescription( - "The remaining deadline when the request is sent to grpc. This will either be the" - + " operation timeout, or the remaining deadline from operation timeout after" - + " retries and back offs.") - .setUnit(MILLISECOND) - .build(); - connectivityErrorCounter = - meter - .counterBuilder(CONNECTIVITY_ERROR_COUNT_NAME) - .setDescription( - "Number of requests that failed to reach the Google datacenter. (Requests without" - + " google response headers") - .setUnit(COUNT) - .build(); - retryCounter = - meter - .counterBuilder(RETRY_COUNT_NAME) - .setDescription("The number of additional RPCs sent after the initial attempt.") - .setUnit(COUNT) - .build(); - batchWriteFlowControlTargetQps = - meter - .gaugeBuilder(BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME) - .setDescription("The current target QPS of the client under batch write flow control.") - .setUnit("1") - .build(); - batchWriteFlowControlFactorHistogram = - meter - .histogramBuilder(BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME) - .setDescription( - "The distribution of batch write flow control factors received from the server.") - .setUnit("1") - .build(); - } - - @Override - public ApiTracer newTracer(ApiTracer parent, SpanName spanName, OperationType operationType) { - return new BuiltinMetricsTracer( - operationType, - spanName, - attributes, - operationLatenciesHistogram, - attemptLatenciesHistogram, - attemptLatencies2Histogram, - serverLatenciesHistogram, - firstResponseLatenciesHistogram, - clientBlockingLatenciesHistogram, - 
applicationBlockingLatenciesHistogram, - remainingDeadlineHistogram, - connectivityErrorCounter, - retryCounter, - batchWriteFlowControlTargetQps, - batchWriteFlowControlFactorHistogram); - } -} diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java index 24e38c3a2c..2ec4fdfed4 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsView.java @@ -16,133 +16,58 @@ package com.google.cloud.bigtable.data.v2.stub.metrics; import com.google.auth.Credentials; -import com.google.auth.oauth2.GoogleCredentials; -import io.opentelemetry.sdk.metrics.InstrumentSelector; import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder; -import io.opentelemetry.sdk.metrics.View; -import io.opentelemetry.sdk.metrics.export.MetricExporter; -import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader; -import io.opentelemetry.sdk.metrics.export.PeriodicMetricReaderBuilder; import java.io.IOException; -import java.util.Map; import java.util.concurrent.ScheduledExecutorService; import javax.annotation.Nullable; /** - * A util class to register built-in metrics on a custom OpenTelemetry instance. This is for - * advanced usage, and is only necessary when wanting to write built-in metrics to cloud monitoring - * and custom sinks. - * - * @deprecated Use methods in {@link CustomOpenTelemetryMetricsProvider} instead. + * @deprecated this class is no longer used and is empty. It only exists because it's symbols were + * part of the public surface. 
*/ @Deprecated public class BuiltinMetricsView { - private BuiltinMetricsView() {} - /** - * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default - * credentials and default endpoint. - * - * @deprecated projectId is no longer used. Call {@link - * #registerBuiltinMetrics(SdkMeterProviderBuilder)} instead. - */ @Deprecated public static void registerBuiltinMetrics(String projectId, SdkMeterProviderBuilder builder) - throws IOException { - BuiltinMetricsView.registerBuiltinMetrics( - GoogleCredentials.getApplicationDefault(), builder, null); - } + throws IOException {} - /** - * Register built-in metrics on the {@link SdkMeterProviderBuilder} with application default - * credentials and default endpoint. - */ - public static void registerBuiltinMetrics(SdkMeterProviderBuilder builder) throws IOException { - BuiltinMetricsView.registerBuiltinMetrics( - GoogleCredentials.getApplicationDefault(), builder, null); - } + @Deprecated + public static void registerBuiltinMetrics(SdkMeterProviderBuilder builder) throws IOException {} - /** - * Register built-in metrics on the {@link SdkMeterProviderBuilder} with custom credentials and - * default endpoint. - * - * @deprecated projectId is no longer used. Call {@link #registerBuiltinMetrics(Credentials, - * SdkMeterProviderBuilder, String)} instead. - */ @Deprecated public static void registerBuiltinMetrics( String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder) - throws IOException { - BuiltinMetricsView.registerBuiltinMetrics(credentials, builder, null); - } + throws IOException {} - /** - * Register built-in metrics on the {@link SdkMeterProviderBuilder} with custom credentials and - * endpoint. - * - * @deprecated projectId is no longer used. Call {@link #registerBuiltinMetrics(Credentials, - * SdkMeterProviderBuilder, String)} instead. 
- */ @Deprecated public static void registerBuiltinMetrics( String projectId, @Nullable Credentials credentials, SdkMeterProviderBuilder builder, @Nullable String endpoint) - throws IOException { - registerBuiltinMetrics(credentials, builder, endpoint); - } + throws IOException {} - /** - * Register built-in metrics on the {@link SdkMeterProviderBuilder} with custom credentials and - * endpoint. - */ + @Deprecated public static void registerBuiltinMetrics( @Nullable Credentials credentials, SdkMeterProviderBuilder builder, @Nullable String endpoint) - throws IOException { - registerBuiltinMetricsWithUniverseDomain( - credentials, builder, endpoint, Credentials.GOOGLE_DEFAULT_UNIVERSE, null); - } + throws IOException {} - /** - * Register built-in metrics on the {@link SdkMeterProviderBuilder} with custom credentials, - * endpoint and executor service. - */ + @Deprecated public static void registerBuiltinMetrics( @Nullable Credentials credentials, SdkMeterProviderBuilder builder, @Nullable String endpoint, @Nullable ScheduledExecutorService executorService) - throws IOException { - registerBuiltinMetricsWithUniverseDomain( - credentials, builder, endpoint, Credentials.GOOGLE_DEFAULT_UNIVERSE, executorService); - } + throws IOException {} + @Deprecated static void registerBuiltinMetricsWithUniverseDomain( @Nullable Credentials credentials, SdkMeterProviderBuilder builder, @Nullable String endpoint, String universeDomain, @Nullable ScheduledExecutorService executorService) - throws IOException { - MetricExporter publicExporter = - BigtableCloudMonitoringExporter.create( - "bigtable metrics", - credentials, - endpoint, - universeDomain, - new BigtableCloudMonitoringExporter.PublicTimeSeriesConverter(), - executorService); - - for (Map.Entry entry : - BuiltinMetricsConstants.getAllViews().entrySet()) { - builder.registerView(entry.getKey(), entry.getValue()); - } - PeriodicMetricReaderBuilder readerBuilder = PeriodicMetricReader.builder(publicExporter); - if 
(executorService != null) { - readerBuilder.setExecutor(executorService); - } - builder.registerMetricReader(readerBuilder.build()); - } + throws IOException {} } diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java index c0a8ed7f36..66041e8aca 100644 --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/CustomOpenTelemetryMetricsProvider.java @@ -30,19 +30,6 @@ *
    {@code
      * SdkMeterProviderBuilder sdkMeterProvider = SdkMeterProvider.builder();
      *
    - * // Set up SdkMeterProvider for client side metrics
    - * CustomOpenTelemetryMetricsProvider.setupSdkMeterProvider(sdkMeterProvider);
    - *
    - * // register other metrics reader and views
    - * sdkMeterProvider.registerMetricReader(..);
    - * sdkMeterProvider.registerView(..);
    - *
    - * // create the OTEL instance
    - * OpenTelemetry openTelemetry = OpenTelemetrySdk
    - *     .builder()
    - *     .setMeterProvider(sdkMeterProvider.build())
    - *     .build();
    - *
      * // Override MetricsProvider in BigtableDataSettings
      * BigtableDataSettings settings = BigtableDataSettings.newBuilder()
      *   .setProjectId("my-project")
    @@ -68,43 +55,43 @@ public OpenTelemetry getOpenTelemetry() {
       }
     
       /**
    -   * Convenient method to set up SdkMeterProviderBuilder with the default credential and endpoint.
    +   * @deprecated this is no longer needed and is now a no-op
        */
    -  public static void setupSdkMeterProvider(SdkMeterProviderBuilder builder) throws IOException {
    -    setupSdkMeterProvider(builder, null, null, null);
    -  }
    +  @Deprecated
    +  public static void setupSdkMeterProvider(SdkMeterProviderBuilder builder) throws IOException {}
     
    -  /** Convenient method to set up SdkMeterProviderBuilder with a custom credential. */
    +  /**
    +   * @deprecated this is no longer needed and is now a no-op
    +   */
    +  @Deprecated
       public static void setupSdkMeterProvider(SdkMeterProviderBuilder builder, Credentials credentials)
    -      throws IOException {
    -    setupSdkMeterProvider(builder, credentials, null, null);
    -  }
    +      throws IOException {}
     
    -  /** Convenient method to set up SdkMeterProviderBuilder with a custom endpoint. */
    +  /**
    +   * @deprecated this is no longer needed and is now a no-op
    +   */
    +  @Deprecated
       public static void setupSdkMeterProvider(SdkMeterProviderBuilder builder, String endpoint)
    -      throws IOException {
    -    setupSdkMeterProvider(builder, null, endpoint, null);
    -  }
    +      throws IOException {}
     
    -  /** Convenient method to set up SdkMeterProviderBuilder with custom credentials and endpoint. */
    +  /**
    +   * @deprecated this is no longer needed and is now a no-op
    +   */
    +  @Deprecated
       public static void setupSdkMeterProvider(
           SdkMeterProviderBuilder builder, Credentials credentials, String endpoint)
    -      throws IOException {
    -    setupSdkMeterProvider(builder, credentials, endpoint, null);
    -  }
    +      throws IOException {}
     
       /**
    -   * Convenient method to set up SdkMeterProviderBuilder with custom credentials, endpoint and a
    -   * shared executor service.
    +   * @deprecated this is no longer needed and is now a no-op
        */
    +  @Deprecated
       public static void setupSdkMeterProvider(
           SdkMeterProviderBuilder builder,
           Credentials credentials,
           String endpoint,
           ScheduledExecutorService executor)
    -      throws IOException {
    -    BuiltinMetricsView.registerBuiltinMetrics(credentials, builder, endpoint, executor);
    -  }
    +      throws IOException {}
     
       @Override
       public String toString() {
    diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
    index 4a226d25d9..02cdf7c257 100644
    --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
    +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/DefaultMetricsProvider.java
    @@ -15,20 +15,10 @@
      */
     package com.google.cloud.bigtable.data.v2.stub.metrics;
     
    -import com.google.api.core.InternalApi;
    -import com.google.auth.Credentials;
    -import io.opentelemetry.api.OpenTelemetry;
    -import io.opentelemetry.sdk.OpenTelemetrySdk;
    -import io.opentelemetry.sdk.metrics.SdkMeterProvider;
    -import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
    -import java.io.IOException;
    -import java.util.concurrent.ScheduledExecutorService;
    -import javax.annotation.Nullable;
    -
     /**
      * Set {@link
      * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)},
    - * to {@link this#INSTANCE} to enable collecting and export client side metrics
    + * to {@link DefaultMetricsProvider#INSTANCE} to enable collecting and export client side metrics
      * https://cloud.google.com/bigtable/docs/client-side-metrics. This is the default setting in {@link
      * com.google.cloud.bigtable.data.v2.BigtableDataSettings}.
      */
    @@ -38,19 +28,6 @@ public final class DefaultMetricsProvider implements MetricsProvider {
     
       private DefaultMetricsProvider() {}
     
    -  @InternalApi
    -  public OpenTelemetry getOpenTelemetry(
    -      @Nullable String metricsEndpoint,
    -      String universeDomain,
    -      @Nullable Credentials credentials,
    -      ScheduledExecutorService executor)
    -      throws IOException {
    -    SdkMeterProviderBuilder meterProvider = SdkMeterProvider.builder();
    -    BuiltinMetricsView.registerBuiltinMetricsWithUniverseDomain(
    -        credentials, meterProvider, metricsEndpoint, universeDomain, executor);
    -    return OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
    -  }
    -
       @Override
       public String toString() {
         return "DefaultMetricsProvider";
    diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
    index 9a00ddb135..2ccb64a890 100644
    --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
    +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/NoopMetricsProvider.java
    @@ -20,8 +20,8 @@
     /**
      * Set {@link
      * com.google.cloud.bigtable.data.v2.BigtableDataSettings.Builder#setMetricsProvider(MetricsProvider)},
    - * to {@link this#INSTANCE} to disable collecting and export client side metrics
    - * https://cloud.google.com/bigtable/docs/client-side-metrics.
    + * to {@link NoopMetricsProvider#INSTANCE} to disable collecting and export of client side metrics.
      */
     public final class NoopMetricsProvider implements MetricsProvider {
     
    diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViews.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViews.java
    index e8902108aa..c4948a20bf 100644
    --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViews.java
    +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/RpcViews.java
    @@ -15,29 +15,15 @@
      */
     package com.google.cloud.bigtable.data.v2.stub.metrics;
     
    +import com.google.api.core.InternalApi;
    +import com.google.cloud.bigtable.data.v2.internal.csm.opencensus.RpcViewConstants;
     import com.google.common.annotations.VisibleForTesting;
    -import com.google.common.collect.ImmutableSet;
     import io.opencensus.stats.Stats;
     import io.opencensus.stats.View;
     import io.opencensus.stats.ViewManager;
     
     @Deprecated
     public class RpcViews {
    -  @VisibleForTesting
    -  private static final ImmutableSet BIGTABLE_CLIENT_VIEWS_SET =
    -      ImmutableSet.of(
    -          RpcViewConstants.BIGTABLE_OP_LATENCY_VIEW,
    -          RpcViewConstants.BIGTABLE_COMPLETED_OP_VIEW,
    -          RpcViewConstants.BIGTABLE_READ_ROWS_FIRST_ROW_LATENCY_VIEW,
    -          RpcViewConstants.BIGTABLE_ATTEMPT_LATENCY_VIEW,
    -          RpcViewConstants.BIGTABLE_ATTEMPTS_PER_OP_VIEW,
    -          RpcViewConstants.BIGTABLE_BATCH_THROTTLED_TIME_VIEW);
    -
    -  private static final ImmutableSet GFE_VIEW_SET =
    -      ImmutableSet.of(
    -          RpcViewConstants.BIGTABLE_GFE_LATENCY_VIEW,
    -          RpcViewConstants.BIGTABLE_GFE_HEADER_MISSING_COUNT_VIEW);
    -
       private static boolean gfeMetricsRegistered = false;
     
       /** Registers all Bigtable specific views. */
    @@ -55,16 +41,18 @@ public static void registerBigtableClientGfeViews() {
         registerBigtableClientGfeViews(Stats.getViewManager());
       }
     
    +  @InternalApi
       @VisibleForTesting
    -  static void registerBigtableClientViews(ViewManager viewManager) {
    -    for (View view : BIGTABLE_CLIENT_VIEWS_SET) {
    +  public static void registerBigtableClientViews(ViewManager viewManager) {
    +    for (View view : RpcViewConstants.BIGTABLE_CLIENT_VIEWS_SET) {
           viewManager.registerView(view);
         }
       }
     
    +  @InternalApi
       @VisibleForTesting
    -  static void registerBigtableClientGfeViews(ViewManager viewManager) {
    -    for (View view : GFE_VIEW_SET) {
    +  public static void registerBigtableClientGfeViews(ViewManager viewManager) {
    +    for (View view : RpcViewConstants.GFE_VIEW_SET) {
           viewManager.registerView(view);
         }
         gfeMetricsRegistered = true;
    diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/Util.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/Util.java
    index 9ba2d39c49..a5e3ebea68 100644
    --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/Util.java
    +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/data/v2/stub/metrics/Util.java
    @@ -16,55 +16,14 @@
     package com.google.cloud.bigtable.data.v2.stub.metrics;
     
     import com.google.api.core.InternalApi;
    -import com.google.api.gax.grpc.GrpcCallContext;
    -import com.google.api.gax.grpc.GrpcResponseMetadata;
     import com.google.api.gax.rpc.ApiCallContext;
    -import com.google.api.gax.rpc.ApiException;
    -import com.google.api.gax.rpc.StatusCode;
    -import com.google.api.gax.rpc.StatusCode.Code;
    -import com.google.auth.Credentials;
    -import com.google.bigtable.v2.AuthorizedViewName;
    -import com.google.bigtable.v2.CheckAndMutateRowRequest;
    -import com.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest;
    -import com.google.bigtable.v2.MaterializedViewName;
    -import com.google.bigtable.v2.MutateRowRequest;
    -import com.google.bigtable.v2.MutateRowsRequest;
    -import com.google.bigtable.v2.ReadChangeStreamRequest;
    -import com.google.bigtable.v2.ReadModifyWriteRowRequest;
    -import com.google.bigtable.v2.ReadRowsRequest;
    -import com.google.bigtable.v2.ResponseParams;
    -import com.google.bigtable.v2.SampleRowKeysRequest;
    -import com.google.bigtable.v2.TableName;
    -import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
    -import com.google.common.base.Suppliers;
     import com.google.common.collect.ImmutableMap;
    -import com.google.protobuf.InvalidProtocolBufferException;
    -import io.grpc.CallOptions;
     import io.grpc.Metadata;
    -import io.grpc.Status;
    -import io.grpc.StatusException;
    -import io.grpc.StatusRuntimeException;
    -import io.opencensus.tags.TagValue;
    -import io.opentelemetry.sdk.OpenTelemetrySdk;
    -import io.opentelemetry.sdk.metrics.InstrumentSelector;
    -import io.opentelemetry.sdk.metrics.SdkMeterProvider;
    -import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
    -import io.opentelemetry.sdk.metrics.View;
    -import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
    -import java.io.IOException;
    -import java.time.Duration;
     import java.time.Instant;
     import java.time.temporal.ChronoUnit;
     import java.util.Arrays;
     import java.util.List;
     import java.util.Map;
    -import java.util.concurrent.CancellationException;
    -import java.util.concurrent.ExecutionException;
    -import java.util.concurrent.Future;
    -import java.util.concurrent.ScheduledExecutorService;
    -import java.util.regex.Matcher;
    -import java.util.regex.Pattern;
    -import javax.annotation.Nullable;
     
     /** Utilities to help integrating with OpenCensus. */
     @InternalApi("For internal use only")
    @@ -74,94 +33,6 @@ public class Util {
       static final Metadata.Key<String> ATTEMPT_EPOCH_KEY =
           Metadata.Key.of("bigtable-client-attempt-epoch-usec", Metadata.ASCII_STRING_MARSHALLER);
     
    -  private static final Metadata.Key<String> SERVER_TIMING_HEADER_KEY =
    -      Metadata.Key.of("server-timing", Metadata.ASCII_STRING_MARSHALLER);
    -  private static final Pattern SERVER_TIMING_HEADER_PATTERN = Pattern.compile(".*dur=(?<dur>\\d+)");
    -  static final Metadata.Key<byte[]> LOCATION_METADATA_KEY =
    -      Metadata.Key.of("x-goog-ext-425905942-bin", Metadata.BINARY_BYTE_MARSHALLER);
    -
    -  /** Convert an exception into a value that can be used to create an OpenCensus tag value. */
    -  public static String extractStatus(@Nullable Throwable error) {
    -    final String statusString;
    -
    -    if (error == null) {
    -      return StatusCode.Code.OK.toString();
    -    } else if (error instanceof CancellationException) {
    -      statusString = Status.Code.CANCELLED.toString();
    -    } else if (error instanceof ApiException) {
    -      statusString = ((ApiException) error).getStatusCode().getCode().toString();
    -    } else if (error instanceof StatusRuntimeException) {
    -      statusString = ((StatusRuntimeException) error).getStatus().getCode().toString();
    -    } else if (error instanceof StatusException) {
    -      statusString = ((StatusException) error).getStatus().getCode().toString();
    -    } else {
    -      statusString = Code.UNKNOWN.toString();
    -    }
    -
    -    return statusString;
    -  }
    -
    -  /**
    -   * Await the result of the future and convert it into a value that can be used as an OpenCensus
    -   * tag value.
    -   */
    -  static TagValue extractStatusFromFuture(Future<?> future) {
    -    Throwable error = null;
    -
    -    try {
    -      future.get();
    -    } catch (InterruptedException e) {
    -      error = e;
    -      Thread.currentThread().interrupt();
    -    } catch (ExecutionException e) {
    -      error = e.getCause();
    -    } catch (RuntimeException e) {
    -      error = e;
    -    }
    -    return TagValue.create(extractStatus(error));
    -  }
    -
    -  static String extractTableId(Object request) {
    -    String tableName = null;
    -    String authorizedViewName = null;
    -    String materializedViewName = null;
    -    if (request instanceof ReadRowsRequest) {
    -      tableName = ((ReadRowsRequest) request).getTableName();
    -      authorizedViewName = ((ReadRowsRequest) request).getAuthorizedViewName();
    -      materializedViewName = ((ReadRowsRequest) request).getMaterializedViewName();
    -    } else if (request instanceof MutateRowsRequest) {
    -      tableName = ((MutateRowsRequest) request).getTableName();
    -      authorizedViewName = ((MutateRowsRequest) request).getAuthorizedViewName();
    -    } else if (request instanceof MutateRowRequest) {
    -      tableName = ((MutateRowRequest) request).getTableName();
    -      authorizedViewName = ((MutateRowRequest) request).getAuthorizedViewName();
    -    } else if (request instanceof SampleRowKeysRequest) {
    -      tableName = ((SampleRowKeysRequest) request).getTableName();
    -      authorizedViewName = ((SampleRowKeysRequest) request).getAuthorizedViewName();
    -      materializedViewName = ((SampleRowKeysRequest) request).getMaterializedViewName();
    -    } else if (request instanceof CheckAndMutateRowRequest) {
    -      tableName = ((CheckAndMutateRowRequest) request).getTableName();
    -      authorizedViewName = ((CheckAndMutateRowRequest) request).getAuthorizedViewName();
    -    } else if (request instanceof ReadModifyWriteRowRequest) {
    -      tableName = ((ReadModifyWriteRowRequest) request).getTableName();
    -      authorizedViewName = ((ReadModifyWriteRowRequest) request).getAuthorizedViewName();
    -    } else if (request instanceof GenerateInitialChangeStreamPartitionsRequest) {
    -      tableName = ((GenerateInitialChangeStreamPartitionsRequest) request).getTableName();
    -    } else if (request instanceof ReadChangeStreamRequest) {
    -      tableName = ((ReadChangeStreamRequest) request).getTableName();
    -    }
    -    if (tableName != null && !tableName.isEmpty()) {
    -      return TableName.parse(tableName).getTable();
    -    }
    -    if (authorizedViewName != null && !authorizedViewName.isEmpty()) {
    -      return AuthorizedViewName.parse(authorizedViewName).getTable();
    -    }
    -    if (materializedViewName != null && !materializedViewName.isEmpty()) {
    -      return MaterializedViewName.parse(materializedViewName).getMaterializedView();
    -    }
    -    return "";
    -  }
    -
       /**
        * Add attempt number and client timestamp from api call context to request headers. Attempt
        * number starts from 0.
    @@ -178,111 +49,4 @@ static Map<String, List<String>> createStatsHeaders(ApiCallContext apiCallContex
         }
         return headers.build();
       }
    -
    -  private static Long getGfeLatency(@Nullable Metadata metadata) {
    -    if (metadata == null) {
    -      return null;
    -    }
    -    String serverTiming = metadata.get(SERVER_TIMING_HEADER_KEY);
    -    if (serverTiming == null) {
    -      return null;
    -    }
    -    Matcher matcher = SERVER_TIMING_HEADER_PATTERN.matcher(serverTiming);
    -    // this should always be true
    -    if (matcher.find()) {
    -      long latency = Long.valueOf(matcher.group("dur"));
    -      return latency;
    -    }
    -    return null;
    -  }
    -
    -  private static ResponseParams getResponseParams(@Nullable Metadata metadata) {
    -    if (metadata == null) {
    -      return null;
    -    }
    -    byte[] responseParams = metadata.get(Util.LOCATION_METADATA_KEY);
    -    if (responseParams != null) {
    -      try {
    -        return ResponseParams.parseFrom(responseParams);
    -      } catch (InvalidProtocolBufferException e) {
    -      }
    -    }
    -    return null;
    -  }
    -
    -  static void recordMetricsFromMetadata(
    -      GrpcResponseMetadata responseMetadata, BigtableTracer tracer, Throwable throwable) {
    -    Metadata metadata = responseMetadata.getMetadata();
    -
    -    // Get the response params from the metadata. Check both headers and trailers
    -    // because in different environments the metadata could be returned in headers or trailers
    -    @Nullable ResponseParams responseParams = getResponseParams(responseMetadata.getMetadata());
    -    if (responseParams == null) {
    -      responseParams = getResponseParams(responseMetadata.getTrailingMetadata());
    -    }
    -    // Set tracer locations if response params is not null
    -    if (responseParams != null) {
    -      tracer.setLocations(responseParams.getZoneId(), responseParams.getClusterId());
    -    }
    -
    -    // server-timing metric will be added through GrpcResponseMetadata#onHeaders(Metadata),
    -    // so it's not checking trailing metadata here.
    -    @Nullable Long latency = getGfeLatency(metadata);
    -    // For direct path, we won't see GFE server-timing header. However, if we received the
    -    // location info, we know that there isn't a connectivity issue. Set the latency to
    -    // 0 so gfe missing header won't get incremented.
    -    if (responseParams != null && latency == null) {
    -      latency = 0L;
    -    }
    -    // Record gfe metrics
    -    tracer.recordGfeMetadata(latency, throwable);
    -  }
    -
    -  /**
    -   * This method bridges gRPC stream tracing to bigtable tracing by adding a {@link
    -   * io.grpc.ClientStreamTracer} to the callContext.
    -   */
    -  static GrpcCallContext injectBigtableStreamTracer(
    -      ApiCallContext context, GrpcResponseMetadata responseMetadata, BigtableTracer tracer) {
    -    if (context instanceof GrpcCallContext) {
    -      GrpcCallContext callContext = (GrpcCallContext) context;
    -      CallOptions callOptions = callContext.getCallOptions();
    -      return responseMetadata.addHandlers(
    -          callContext.withCallOptions(
    -              callOptions.withStreamTracerFactory(new BigtableGrpcStreamTracer.Factory(tracer))));
    -    } else {
    -      // context should always be an instance of GrpcCallContext. If not throw an exception
    -      // so we can see what class context is.
    -      throw new RuntimeException("Unexpected context class: " + context.getClass().getName());
    -    }
    -  }
    -
    -  public static OpenTelemetrySdk newInternalOpentelemetry(
    -      EnhancedBigtableStubSettings settings,
    -      Credentials credentials,
    -      ScheduledExecutorService executor)
    -      throws IOException {
    -    SdkMeterProviderBuilder meterProviderBuilder = SdkMeterProvider.builder();
    -
    -    for (Map.Entry<InstrumentSelector, View> e :
    -        BuiltinMetricsConstants.getInternalViews().entrySet()) {
    -      meterProviderBuilder.registerView(e.getKey(), e.getValue());
    -    }
    -
    -    meterProviderBuilder.registerMetricReader(
    -        PeriodicMetricReader.builder(
    -                BigtableCloudMonitoringExporter.create(
    -                    "application metrics",
    -                    credentials,
    -                    settings.getMetricsEndpoint(),
    -                    settings.getUniverseDomain(),
    -                    new BigtableCloudMonitoringExporter.InternalTimeSeriesConverter(
    -                        Suppliers.memoize(
    -                            () -> BigtableExporterUtils.createInternalMonitoredResource(settings))),
    -                    executor))
    -            .setExecutor(settings.getBackgroundExecutorProvider().getExecutor())
    -            .setInterval(Duration.ofMinutes(1))
    -            .build());
    -    return OpenTelemetrySdk.builder().setMeterProvider(meterProviderBuilder.build()).build();
    -  }
     }
    diff --git a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/gaxx/grpc/BigtableTransportChannelProvider.java b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/gaxx/grpc/BigtableTransportChannelProvider.java
    index e21c100c9c..a893ba8218 100644
    --- a/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/gaxx/grpc/BigtableTransportChannelProvider.java
    +++ b/google-cloud-bigtable/src/main/java/com/google/cloud/bigtable/gaxx/grpc/BigtableTransportChannelProvider.java
    @@ -23,7 +23,7 @@
     import com.google.api.gax.rpc.TransportChannel;
     import com.google.api.gax.rpc.TransportChannelProvider;
     import com.google.auth.Credentials;
    -import com.google.cloud.bigtable.data.v2.stub.metrics.ChannelPoolMetricsTracer;
    +import com.google.cloud.bigtable.data.v2.internal.csm.tracers.ChannelPoolMetricsTracer;
     import com.google.common.base.Preconditions;
     import io.grpc.ManagedChannel;
     import java.io.IOException;
    @@ -166,7 +166,7 @@ public TransportChannel getTransportChannel() throws IOException {
         if (channelPoolMetricsTracer != null) {
           channelPoolMetricsTracer.registerChannelInsightsProvider(btChannelPool::getChannelInfos);
           channelPoolMetricsTracer.registerLoadBalancingStrategy(
    -          btPoolSettings.getLoadBalancingStrategy().name());
    +          btPoolSettings.getLoadBalancingStrategy());
         }
     
         return GrpcTransportChannel.create(btChannelPool);
    diff --git a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json
    index a7f4f88e42..edfd3eed0c 100644
    --- a/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json
    +++ b/google-cloud-bigtable/src/main/resources/META-INF/native-image/com.google.cloud.bigtable.admin.v2/reflect-config.json
    @@ -2654,6 +2654,42 @@
         "allDeclaredClasses": true,
         "allPublicClasses": true
       },
    +  {
    +    "name": "com.google.bigtable.admin.v2.TieredStorageConfig",
    +    "queryAllDeclaredConstructors": true,
    +    "queryAllPublicConstructors": true,
    +    "queryAllDeclaredMethods": true,
    +    "allPublicMethods": true,
    +    "allDeclaredClasses": true,
    +    "allPublicClasses": true
    +  },
    +  {
    +    "name": "com.google.bigtable.admin.v2.TieredStorageConfig$Builder",
    +    "queryAllDeclaredConstructors": true,
    +    "queryAllPublicConstructors": true,
    +    "queryAllDeclaredMethods": true,
    +    "allPublicMethods": true,
    +    "allDeclaredClasses": true,
    +    "allPublicClasses": true
    +  },
    +  {
    +    "name": "com.google.bigtable.admin.v2.TieredStorageRule",
    +    "queryAllDeclaredConstructors": true,
    +    "queryAllPublicConstructors": true,
    +    "queryAllDeclaredMethods": true,
    +    "allPublicMethods": true,
    +    "allDeclaredClasses": true,
    +    "allPublicClasses": true
    +  },
    +  {
    +    "name": "com.google.bigtable.admin.v2.TieredStorageRule$Builder",
    +    "queryAllDeclaredConstructors": true,
    +    "queryAllPublicConstructors": true,
    +    "queryAllDeclaredMethods": true,
    +    "allPublicMethods": true,
    +    "allDeclaredClasses": true,
    +    "allPublicClasses": true
    +  },
       {
         "name": "com.google.bigtable.admin.v2.Type",
         "queryAllDeclaredConstructors": true,
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java
    index 3477fc053d..9f5a50c41e 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BaseBigtableTableAdminClientTest.java
    @@ -81,6 +81,7 @@
     import com.google.bigtable.admin.v2.SnapshotTableRequest;
     import com.google.bigtable.admin.v2.Table;
     import com.google.bigtable.admin.v2.TableName;
    +import com.google.bigtable.admin.v2.TieredStorageConfig;
     import com.google.bigtable.admin.v2.Type;
     import com.google.bigtable.admin.v2.UndeleteTableRequest;
     import com.google.bigtable.admin.v2.UpdateAuthorizedViewRequest;
    @@ -166,6 +167,7 @@ public void createTableTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         mockBigtableTableAdmin.addResponse(expectedResponse);
    @@ -216,6 +218,7 @@ public void createTableTest2() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         mockBigtableTableAdmin.addResponse(expectedResponse);
    @@ -266,6 +269,7 @@ public void createTableFromSnapshotTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -328,6 +332,7 @@ public void createTableFromSnapshotTest2() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -388,6 +393,7 @@ public void createTableFromSnapshotTest3() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -450,6 +456,7 @@ public void createTableFromSnapshotTest4() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -598,6 +605,7 @@ public void getTableTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         mockBigtableTableAdmin.addResponse(expectedResponse);
    @@ -642,6 +650,7 @@ public void getTableTest2() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         mockBigtableTableAdmin.addResponse(expectedResponse);
    @@ -686,6 +695,7 @@ public void updateTableTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -809,6 +819,7 @@ public void undeleteTableTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -861,6 +872,7 @@ public void undeleteTableTest2() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    @@ -1330,6 +1342,7 @@ public void modifyColumnFamiliesTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         mockBigtableTableAdmin.addResponse(expectedResponse);
    @@ -1378,6 +1391,7 @@ public void modifyColumnFamiliesTest2() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         mockBigtableTableAdmin.addResponse(expectedResponse);
    @@ -2534,6 +2548,7 @@ public void restoreTableTest() throws Exception {
                 .setRestoreInfo(RestoreInfo.newBuilder().build())
                 .setChangeStreamConfig(ChangeStreamConfig.newBuilder().build())
                 .setDeletionProtection(true)
    +            .setTieredStorageConfig(TieredStorageConfig.newBuilder().build())
                 .setRowKeySchema(Type.Struct.newBuilder().build())
                 .build();
         Operation resultOperation =
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClientTests.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClientTests.java
    index e89bd8fbb5..c1d5da6592 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClientTests.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/BigtableTableAdminClientTests.java
    @@ -45,6 +45,7 @@
     import com.google.bigtable.admin.v2.ListBackupsRequest;
     import com.google.bigtable.admin.v2.ListTablesRequest;
     import com.google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification;
    +import com.google.bigtable.admin.v2.OptimizeRestoredTableMetadata;
     import com.google.bigtable.admin.v2.RestoreSourceType;
     import com.google.bigtable.admin.v2.RestoreTableMetadata;
     import com.google.bigtable.admin.v2.SchemaBundleName;
    @@ -76,6 +77,7 @@
     import com.google.cloud.bigtable.admin.v2.models.CreateTableRequest;
     import com.google.cloud.bigtable.admin.v2.models.EncryptionInfo;
     import com.google.cloud.bigtable.admin.v2.models.ModifyColumnFamiliesRequest;
    +import com.google.cloud.bigtable.admin.v2.models.OptimizeRestoredTableOperationToken;
     import com.google.cloud.bigtable.admin.v2.models.RestoreTableRequest;
     import com.google.cloud.bigtable.admin.v2.models.RestoredTableResult;
     import com.google.cloud.bigtable.admin.v2.models.SchemaBundle;
    @@ -285,6 +287,10 @@ public class BigtableTableAdminClientTests {
               com.google.iam.v1.TestIamPermissionsRequest, com.google.iam.v1.TestIamPermissionsResponse>
           mockTestIamPermissionsCallable;
     
    +  @Mock
    +  private OperationCallable<String, Empty, OptimizeRestoredTableMetadata>
    +      mockOptimizeRestoredTableCallable;
    +
       @Before
       public void setUp() {
         adminClient = BigtableTableAdminClient.create(PROJECT_ID, INSTANCE_ID, mockStub);
    @@ -1682,6 +1688,59 @@ public void testWaitForConsistencyWithToken() {
         assertThat(wasCalled.get()).isTrue();
       }
     
    +  @Test
    +  public void testAwaitOptimizeRestoredTable() throws Exception {
    +    // Setup
    +    Mockito.when(mockStub.awaitOptimizeRestoredTableCallable())
    +        .thenReturn(mockOptimizeRestoredTableCallable);
    +
    +    String optimizeToken = "my-optimization-token";
    +
    +    // 1. Mock the Token
    +    OptimizeRestoredTableOperationToken mockToken =
    +        Mockito.mock(OptimizeRestoredTableOperationToken.class);
    +    Mockito.when(mockToken.getOperationName()).thenReturn(optimizeToken);
    +
    +    // 2. Mock the Result (wrapping the token)
    +    RestoredTableResult mockResult = Mockito.mock(RestoredTableResult.class);
    +    Mockito.when(mockResult.getOptimizeRestoredTableOperationToken()).thenReturn(mockToken);
    +
    +    // 3. Mock the Input Future (returning the result)
    +    ApiFuture<RestoredTableResult> mockRestoreFuture = Mockito.mock(ApiFuture.class);
    +    Mockito.when(mockRestoreFuture.get()).thenReturn(mockResult);
    +
    +    // 4. Mock the Stub's behavior (resuming the Optimize Op)
    +    OperationFuture<Empty, OptimizeRestoredTableMetadata> mockOptimizeOp =
    +        Mockito.mock(OperationFuture.class);
    +    Mockito.when(mockOptimizeRestoredTableCallable.resumeFutureCall(optimizeToken))
    +        .thenReturn(mockOptimizeOp);
    +
    +    // Execute
    +    ApiFuture<Empty> result = adminClient.awaitOptimizeRestoredTable(mockRestoreFuture);
    +
    +    // Verify
    +    assertThat(result).isEqualTo(mockOptimizeOp);
    +    Mockito.verify(mockOptimizeRestoredTableCallable).resumeFutureCall(optimizeToken);
    +  }
    +
    +  @Test
    +  public void testAwaitOptimizeRestoredTable_NoOp() throws Exception {
    +    // Setup: Result with NO optimization token (null or empty)
    +    RestoredTableResult mockResult = Mockito.mock(RestoredTableResult.class);
    +    Mockito.when(mockResult.getOptimizeRestoredTableOperationToken()).thenReturn(null);
    +
    +    // Mock the Input Future
    +    ApiFuture<RestoredTableResult> mockRestoreFuture = Mockito.mock(ApiFuture.class);
    +    Mockito.when(mockRestoreFuture.get()).thenReturn(mockResult);
    +
    +    // Execute
    +    ApiFuture<Empty> result = adminClient.awaitOptimizeRestoredTable(mockRestoreFuture);
    +
    +    // Verify: Returns immediate success (Empty) without calling the stub
    +    assertThat(result.get()).isEqualTo(Empty.getDefaultInstance());
    +    Mockito.verifyNoInteractions(mockStub);
    +  }
    +
       private <ReqT, RespT, MetaT> void mockOperationResult(
           OperationCallable<ReqT, RespT, MetaT> callable,
           ReqT request,
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateClusterRequestTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateClusterRequestTest.java
    index 566641039a..fe28948347 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateClusterRequestTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/CreateClusterRequestTest.java
    @@ -128,6 +128,7 @@ public void testOptionalFields() {
                 .setDisplayName("custom display name")
                 .addLabel("my label", "with some value")
                 .addLabel("my other label", "with some value")
    +            .addTag("tagKeys/123", "tagValues/456")
                 .setType(Instance.Type.DEVELOPMENT)
                 .addCluster("cluster1", "us-east1-c", 1, StorageType.SSD);
     
    @@ -142,6 +143,7 @@ public void testOptionalFields() {
                         .setDisplayName("custom display name")
                         .putLabels("my label", "with some value")
                         .putLabels("my other label", "with some value")
    +                    .putTags("tagKeys/123", "tagValues/456")
                         .setType(com.google.bigtable.admin.v2.Instance.Type.DEVELOPMENT))
                 .putClusters(
                     "cluster1",
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/InstanceTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/InstanceTest.java
    index 78fdf15b03..35b776fbe4 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/InstanceTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/admin/v2/models/InstanceTest.java
    @@ -36,6 +36,8 @@ public void testFromProto() {
                 .setState(com.google.bigtable.admin.v2.Instance.State.READY)
                 .putLabels("label1", "value1")
                 .putLabels("label2", "value2")
    +            .putTags("tagKeys/123", "tagValues/456")
    +            .putTags("tagKeys/234", "tagValues/567")
                 .build();
     
         Instance result = Instance.fromProto(proto);
    @@ -48,6 +50,8 @@ public void testFromProto() {
             .containsExactly(
                 "label1", "value1",
                 "label2", "value2");
    +    assertThat(result.getTags())
    +        .containsExactly("tagKeys/123", "tagValues/456", "tagKeys/234", "tagValues/567");
       }
     
       @Test
    @@ -59,6 +63,7 @@ public void testRequiresName() {
                 .setState(com.google.bigtable.admin.v2.Instance.State.READY)
                 .putLabels("label1", "value1")
                 .putLabels("label2", "value2")
    +            .putTags("tagKeys/123", "tagValues/456")
                 .build();
     
         Exception actualException = null;
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/api/InstanceNameTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/api/InstanceNameTest.java
    new file mode 100644
    index 0000000000..09778bd46e
    --- /dev/null
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/api/InstanceNameTest.java
    @@ -0,0 +1,51 @@
    +/*
    + * Copyright 2025 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package com.google.cloud.bigtable.data.v2.internal.api;
    +
    +import static com.google.common.truth.Truth.assertThat;
    +import static org.junit.jupiter.api.Assertions.*;
    +
    +import org.junit.jupiter.api.Test;
    +
    +class InstanceNameTest {
    +
    +  @Test
    +  void testParseOk() {
    +    assertThat(InstanceName.parse("projects/my-project/instances/my-instance"))
    +        .isEqualTo(
    +            InstanceName.builder().setProjectId("my-project").setInstanceId("my-instance").build());
    +  }
    +
    +  @Test
    +  void testParseFail() {
    +    assertThrows(IllegalArgumentException.class, () -> InstanceName.parse(""));
    +    assertThrows(IllegalArgumentException.class, () -> InstanceName.parse("projects/my-project"));
    +    assertThrows(
    +        IllegalArgumentException.class, () -> InstanceName.parse("projects/my-project/instances"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> InstanceName.parse("projects/my-project/instances/my-instance/extra"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> InstanceName.parse("projects//instances/my-instance"));
    +    assertThrows(
    +        IllegalArgumentException.class, () -> InstanceName.parse("projects/my-project/instances/"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> InstanceName.parse("projects/my-project/instances//"));
    +  }
    +}
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/api/TableNameTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/api/TableNameTest.java
    new file mode 100644
    index 0000000000..fd8e8310a7
    --- /dev/null
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/api/TableNameTest.java
    @@ -0,0 +1,62 @@
    +/*
    + * Copyright 2025 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package com.google.cloud.bigtable.data.v2.internal.api;
    +
    +import static com.google.common.truth.Truth.assertThat;
    +import static org.junit.jupiter.api.Assertions.assertThrows;
    +
    +import org.junit.jupiter.api.Test;
    +
    +class TableNameTest { // Unit tests for TableName.parse: round-trip and malformed-input rejection.
    +
    +  @Test
    +  void testParseOk() { // a well-formed fully-qualified name parses to its builder equivalent
    +    assertThat(TableName.parse("projects/my-project/instances/my-instance/tables/my-table"))
    +        .isEqualTo(
    +            TableName.builder()
    +                .setProjectId("my-project")
    +                .setInstanceId("my-instance")
    +                .setTableId("my-table")
    +                .build());
    +  }
    +
    +  @Test
    +  void testParseFail() { // empty, truncated, over-long, and empty-component names must all be rejected
    +    assertThrows(IllegalArgumentException.class, () -> TableName.parse(""));
    +    assertThrows(IllegalArgumentException.class, () -> TableName.parse("projects/my-project"));
    +    assertThrows(
    +        IllegalArgumentException.class, () -> TableName.parse("projects/my-project/instances"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> TableName.parse("projects/my-project/instances/my-instance"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> TableName.parse("projects/my-project/instances/my-instance/tables"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> TableName.parse("projects/my-project/instances/my-instance/tables/my-table/extra")); // trailing segment beyond table id
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> TableName.parse("projects//instances/my-instance/tables"));
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> TableName.parse("projects/my-project/instances//tables/my-table")); // empty instance component
    +    assertThrows(
    +        IllegalArgumentException.class,
    +        () -> TableName.parse("projects/my-project/instances/my-instance/tables/")); // empty table component
    +  }
    +}
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricRegistryExportTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricRegistryExportTest.java
    new file mode 100644
    index 0000000000..974ac41868
    --- /dev/null
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/MetricRegistryExportTest.java
    @@ -0,0 +1,694 @@
    +/*
    + * Copyright 2025 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package com.google.cloud.bigtable.data.v2.internal.csm;
    +
    +import static com.google.common.truth.Truth.assertThat;
    +import static com.google.common.truth.Truth.assertWithMessage;
    +import static com.google.common.truth.extensions.proto.ProtoTruth.assertThat;
    +
    +import com.google.api.Distribution;
    +import com.google.api.MonitoredResource;
    +import com.google.api.gax.core.NoCredentialsProvider;
    +import com.google.api.gax.grpc.GrpcTransportChannel;
    +import com.google.api.gax.rpc.FixedTransportChannelProvider;
    +import com.google.bigtable.v2.PeerInfo;
    +import com.google.bigtable.v2.PeerInfo.TransportType;
    +import com.google.bigtable.v2.ResponseParams;
    +import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
    +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName;
    +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry.RecorderRegistry;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.MethodInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.exporter.BigtableCloudMonitoringExporter;
    +import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelPoolSettings.LoadBalancingStrategy;
    +import com.google.cloud.monitoring.v3.MetricServiceClient;
    +import com.google.cloud.monitoring.v3.MetricServiceSettings;
    +import com.google.common.base.Function;
    +import com.google.common.collect.ImmutableMap;
    +import com.google.common.truth.Correspondence;
    +import com.google.common.truth.Truth;
    +import com.google.monitoring.v3.CreateTimeSeriesRequest;
    +import com.google.monitoring.v3.MetricServiceGrpc.MetricServiceImplBase;
    +import com.google.monitoring.v3.Point;
    +import com.google.monitoring.v3.TimeSeries;
    +import com.google.monitoring.v3.TypedValue;
    +import com.google.protobuf.Empty;
    +import io.grpc.ManagedChannel;
    +import io.grpc.ManagedChannelBuilder;
    +import io.grpc.Server;
    +import io.grpc.Status;
    +import io.grpc.Status.Code;
    +import io.grpc.stub.StreamObserver;
    +import io.opentelemetry.sdk.metrics.SdkMeterProvider;
    +import io.opentelemetry.sdk.metrics.export.PeriodicMetricReader;
    +import java.time.Duration;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.concurrent.BlockingDeque;
    +import java.util.concurrent.LinkedBlockingDeque;
    +import java.util.concurrent.TimeUnit;
    +import java.util.stream.Collectors;
    +import org.junit.jupiter.api.AfterEach;
    +import org.junit.jupiter.api.BeforeEach;
    +import org.junit.jupiter.api.Test;
    +
    +public class MetricRegistryExportTest {
    +  private static final InstanceName INSTANCE_NAME = InstanceName.of("my-project", "my-instance");
    +  private static final String appProfileId = "my-app-profile";
    +  private static final String tableId = "my-table";
    +  private static final String clusterId = "my-cluster";
    +  private static final String clusterZone = "us-east1-b";
    +
    +  private Server server;
    +  private FakeMetricService metricService;
    +  private ManagedChannel fakeServiceChannel;
    +
    +  private PeriodicMetricReader metricReader;
    +  private SdkMeterProvider meterProvider;
    +  private MetricRegistry metricRegistry;
    +  private RecorderRegistry registry;
    +
    +  private EnvInfo envInfo;
    +  private ClientInfo clientInfo =
    +      ClientInfo.builder().setInstanceName(INSTANCE_NAME).setAppProfileId(appProfileId).build();
    +  private MethodInfo methodInfo;
    +  private ResponseParams clusterInfo;
    +  private PeerInfo peerInfo;
    +
    +  private MonitoredResource expectedTableMonitoredResource;
    +  private MonitoredResource expectedClientMonitoredResource;
    +
    +  @BeforeEach
    +  void setUp() throws Exception {
    +    metricService = new FakeMetricService();
    +    server = FakeServiceBuilder.create(metricService).start();
    +
    +    envInfo =
    +        EnvInfo.builder()
    +            .setPlatform("gcp_compute_engine")
    +            .setProject("my-client-project")
    +            .setRegion("us-east1")
    +            .setHostId("123456")
    +            .setHostName("my-vm")
    +            .build();
    +
    +    fakeServiceChannel =
    +        ManagedChannelBuilder.forAddress("localhost", server.getPort()).usePlaintext().build();
    +
    +    metricRegistry = new MetricRegistry();
    +
    +    MetricServiceClient metricClient =
    +        MetricServiceClient.create(
    +            MetricServiceSettings.newBuilder()
    +                .setTransportChannelProvider(
    +                    FixedTransportChannelProvider.create(
    +                        GrpcTransportChannel.create(fakeServiceChannel)))
    +                .setCredentialsProvider(NoCredentialsProvider.create())
    +                .build());
    +    BigtableCloudMonitoringExporter exporter =
    +        new BigtableCloudMonitoringExporter(
    +            metricRegistry, () -> envInfo, clientInfo, metricClient);
    +    metricReader = PeriodicMetricReader.create(exporter);
    +    meterProvider = SdkMeterProvider.builder().registerMetricReader(metricReader).build();
    +
    +    registry = metricRegistry.newRecorderRegistry(meterProvider);
    +
    +    methodInfo = MethodInfo.builder().setName("Bigtable.ReadRow").setStreaming(false).build();
    +
    +    clusterInfo =
    +        ResponseParams.newBuilder().setZoneId(clusterZone).setClusterId(clusterId).build();
    +    peerInfo =
    +        PeerInfo.newBuilder()
    +            .setTransportType(TransportType.TRANSPORT_TYPE_SESSION_CLOUD_PATH)
    +            .setGoogleFrontendId(123)
    +            .setApplicationFrontendZone("us-east1-c")
    +            .setApplicationFrontendSubzone("ab")
    +            .build();
    +
    +    expectedTableMonitoredResource =
    +        MonitoredResource.newBuilder()
    +            .setType("bigtable_client_raw")
    +            .putLabels("project_id", clientInfo.getInstanceName().getProjectId())
    +            .putLabels("instance", clientInfo.getInstanceName().getInstanceId())
    +            .putLabels("cluster", clusterInfo.getClusterId())
    +            .putLabels("table", tableId)
    +            .putLabels("zone", clusterInfo.getZoneId())
    +            .build();
    +
    +    expectedClientMonitoredResource =
    +        MonitoredResource.newBuilder()
    +            .setType("bigtable_client")
    +            .putLabels("project_id", clientInfo.getInstanceName().getProjectId())
    +            .putLabels("instance", clientInfo.getInstanceName().getInstanceId())
    +            .putLabels("app_profile", appProfileId)
    +            .putLabels("client_project", envInfo.getProject())
    +            .putLabels("region", envInfo.getRegion())
    +            .putLabels("cloud_platform", envInfo.getPlatform())
    +            .putLabels("host_id", envInfo.getHostId())
    +            .putLabels("host_name", envInfo.getHostName())
    +            .putLabels("client_name", clientInfo.getClientName())
    +            .putLabels("uuid", envInfo.getUid())
    +            .build();
    +  }
    +
    +  @AfterEach
    +  void tearDown() {
    +    meterProvider.close();
    +    fakeServiceChannel.shutdown();
    +    server.shutdownNow();
    +  }
    +
    +  @Test
    +  void testOpLatency() {
    +    registry.operationLatency.record(
    +        clientInfo,
    +        tableId,
    +        methodInfo,
    +        clusterInfo,
    +        Status.UNAVAILABLE.getCode(),
    +        Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/operation_latencies");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName(),
    +            "streaming", Boolean.toString(methodInfo.getStreaming()));
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testAttemptLatency() {
    +    registry.attemptLatency.record(
    +        clientInfo,
    +        tableId,
    +        clusterInfo,
    +        methodInfo,
    +        Status.UNAVAILABLE.getCode(),
    +        Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/attempt_latencies");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName(),
    +            "streaming", Boolean.toString(methodInfo.getStreaming()));
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testAttemptLatency2() {
    +    registry.attemptLatency2.record(
    +        clientInfo,
    +        tableId,
    +        peerInfo,
    +        clusterInfo,
    +        methodInfo,
    +        Status.UNAVAILABLE.getCode(),
    +        Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/attempt_latencies2");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "transport_type", "session_cloudpath",
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "transport_region", "",
    +            "transport_zone", peerInfo.getApplicationFrontendZone(),
    +            "transport_subzone", peerInfo.getApplicationFrontendSubzone(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName(),
    +            "streaming", Boolean.toString(methodInfo.getStreaming()));
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testRetryCount() {
    +    registry.retryCount.record(
    +        clientInfo, tableId, methodInfo, clusterInfo, Status.UNAVAILABLE.getCode(), 1);
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/retry_count");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName());
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder().setValue(TypedValue.newBuilder().setInt64Value(1)).build());
    +  }
    +
    +  @Test
    +  void testFirstByteLatency() {
    +    registry.firstResponseLantency.record(
    +        clientInfo,
    +        tableId,
    +        methodInfo,
    +        clusterInfo,
    +        Status.UNAVAILABLE.getCode(),
    +        Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/first_response_latencies");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName());
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testServerLatencies() {
    +    registry.serverLatency.record(
    +        clientInfo,
    +        tableId,
    +        methodInfo,
    +        clusterInfo,
    +        Status.UNAVAILABLE.getCode(),
    +        Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/server_latencies");
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName(),
    +            "streaming", Boolean.toString(methodInfo.getStreaming()));
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testChannelPoolOutstandingRpcs() {
    +    registry.channelPoolOutstandingRpcs.record(
    +        clientInfo,
    +        peerInfo.getTransportType(),
    +        LoadBalancingStrategy.POWER_OF_TWO_LEAST_IN_FLIGHT,
    +        true,
    +        1);
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/connection_pool/outstanding_rpcs");
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedClientMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "transport_type", "session_cloudpath",
    +            "lb_policy", "POWER_OF_TWO_LEAST_IN_FLIGHT",
    +            "streaming", "true");
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(1)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testConnectivityErrors() {
    +    registry.connectivityErrorCount.record(
    +        clientInfo, tableId, methodInfo, clusterInfo, Status.UNAVAILABLE.getCode(), 1);
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/connectivity_error_count");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName());
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder().setValue(TypedValue.newBuilder().setInt64Value(1)).build());
    +  }
    +
    +  @Test
    +  void testDpCompatGuage() {
    +    registry.dpCompatGuage.recordFailure(clientInfo, "something");
    +    registry.dpCompatGuage.recordSuccess(clientInfo, "ipv4");
    +
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    List<TimeSeries> timeSeriesList = // restored type argument: raw List cannot be iterated with a TimeSeries loop variable below
    +        metricService.findTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/direct_access/compatible");
    +
    +    assertThat(timeSeriesList).hasSize(2);
    +    for (TimeSeries timeSeries : timeSeriesList) {
    +      Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedClientMonitoredResource);
    +    }
    +    assertThat(timeSeriesList)
    +        .comparingElementsUsing(
    +            Correspondence.transforming(
    +                (Function<TimeSeries, Map<String, String>>) // restored cast target: was the malformed "(Function>)"
    +                    input -> input.getMetric().getLabelsMap(),
    +                "metric labels"))
    +        .containsExactly(
    +            ImmutableMap.of(
    +                "reason", "",
    +                "ip_preference", "ipv4"),
    +            ImmutableMap.of(
    +                "reason", "something",
    +                "ip_preference", ""));
    +  }
    +
    +  @Test
    +  void testApplicationErrors() {
    +    registry.applicationBlockingLatency.record(
    +        clientInfo, tableId, methodInfo, clusterInfo, Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/application_latencies");
    +
    +    assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName());
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testClientBlocking() {
    +    registry.clientBlockingLatency.record(
    +        clientInfo, tableId, methodInfo, clusterInfo, Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/throttling_latencies");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName());
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testRemainingDeadline() {
    +    registry.remainingDeadline.record(
    +        clientInfo,
    +        tableId,
    +        methodInfo,
    +        clusterInfo,
    +        Status.UNAVAILABLE.getCode(),
    +        Duration.ofMillis(123));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/remaining_deadline");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedTableMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "UNAVAILABLE",
    +            "client_uid", envInfo.getUid(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId(),
    +            "method", methodInfo.getName(),
    +            "streaming", Boolean.toString(methodInfo.getStreaming()));
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(123.0)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testPerConnectionErrors() {
    +    registry.perConnectionErrorCount.record(clientInfo, 1);
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/per_connection_error_count");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedClientMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "project_id", clientInfo.getInstanceName().getProjectId(),
    +            "client_uid", envInfo.getUid(),
    +            "instance", clientInfo.getInstanceName().getInstanceId(),
    +            "client_name", clientInfo.getClientName(),
    +            "app_profile", clientInfo.getAppProfileId());
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(Distribution.newBuilder().setCount(1).setMean(1)))
    +                .build());
    +  }
    +
    +  @Test
    +  void testBatchWriteFactor() {
    +    registry.batchWriteFlowControlFactor.record(
    +        clientInfo, Code.DEADLINE_EXCEEDED, true, MethodInfo.of("Bigtable.MutateRows", false), 0.5);
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/batch_write_flow_control_factor");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedClientMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly(
    +            "status", "DEADLINE_EXCEEDED",
    +            "applied", "true",
    +            "method", "Bigtable.MutateRows");
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(TypedValue.newBuilder().setDoubleValue(0.5).build())
    +                .build());
    +  }
    +
    +  @Test
    +  void testBatchWriteQps() {
    +    registry.batchWriteFlowControlTargetQps.record(
    +        clientInfo, MethodInfo.of("Bigtable.MutateRows", false), 123);
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/batch_write_flow_control_target_qps");
    +
    +    Truth.assertThat(timeSeries.getResource()).isEqualTo(expectedClientMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsExactly("method", "Bigtable.MutateRows");
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(TypedValue.newBuilder().setDoubleValue(123.0).build())
    +                .build());
    +  }
    +
    +  @Test
    +  void testPacemaker() {
    +    registry.pacemakerDelay.record(clientInfo, "background", Duration.ofMillis(1));
    +    metricReader.forceFlush().join(1, TimeUnit.MINUTES);
    +
    +    TimeSeries timeSeries =
    +        metricService.getSingleTimeSeriesByName(
    +            "bigtable.googleapis.com/internal/client/pacemaker_delays");
    +
    +    assertThat(timeSeries.getResource()).isEqualTo(expectedClientMonitoredResource);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap()).containsExactly("executor", "background");
    +
    +    assertThat(timeSeries.getPointsList())
    +        .comparingExpectedFieldsOnly()
    +        .containsExactly(
    +            Point.newBuilder()
    +                .setValue(
    +                    TypedValue.newBuilder()
    +                        .setDistributionValue(
    +                            Distribution.newBuilder().setCount(1).setMean(1000.0)))
    +                .build());
    +  }
    +
    +  private static class FakeMetricService extends MetricServiceImplBase {
    +    final BlockingDeque<CreateTimeSeriesRequest> requests = new LinkedBlockingDeque<>();
    +
    +    @Override
    +    public void createServiceTimeSeries(
    +        CreateTimeSeriesRequest request, StreamObserver<Empty> responseObserver) {
    +      requests.add(request);
    +      responseObserver.onNext(Empty.getDefaultInstance());
    +      responseObserver.onCompleted();
    +    }
    +
    +    List<TimeSeries> findTimeSeriesByName(String name) {
    +      return requests.stream()
    +          .flatMap(r -> r.getTimeSeriesList().stream())
    +          .filter(ts -> name.equals(ts.getMetric().getType()))
    +          .collect(Collectors.toList());
    +    }
    +
    +    TimeSeries getSingleTimeSeriesByName(String name) {
    +      List<TimeSeries> timeSeriesList = findTimeSeriesByName(name);
    +      assertWithMessage("Expected to have a single TimeSeries with the name %s", name)
    +          .that(timeSeriesList)
    +          .hasSize(1);
    +
    +      return timeSeriesList.get(0);
    +    }
    +  }
    +}
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/ClientInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/ClientInfoTest.java
    new file mode 100644
    index 0000000000..e97cb1dd52
    --- /dev/null
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/ClientInfoTest.java
    @@ -0,0 +1,34 @@
    +/*
    + * Copyright 2025 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package com.google.cloud.bigtable.data.v2.internal.csm.attributes;
    +
    +import static com.google.common.truth.Truth.assertThat;
    +
    +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName;
    +import org.junit.jupiter.api.Test;
    +
    +class ClientInfoTest {
    +  @Test
    +  void testName() {
    +    ClientInfo clientInfo =
    +        ClientInfo.builder()
    +            .setInstanceName(InstanceName.of("fake-project", "fake-instance"))
    +            .setAppProfileId("fake-app-profile")
    +            .build();
    +    assertThat(clientInfo.getClientName()).containsMatch("java-bigtable/\\d+\\.\\d+\\.\\d+.*");
    +  }
    +}
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/EnvInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/EnvInfoTest.java
    new file mode 100644
    index 0000000000..8ab52111aa
    --- /dev/null
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/EnvInfoTest.java
    @@ -0,0 +1,128 @@
    +/*
    + * Copyright 2025 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package com.google.cloud.bigtable.data.v2.internal.csm.attributes;
    +
    +import static com.google.common.truth.Truth.assertThat;
    +import static org.mockito.Mockito.when;
    +
    +import com.google.cloud.opentelemetry.detection.DetectedPlatform;
    +import com.google.cloud.opentelemetry.detection.GCPPlatformDetector.SupportedPlatform;
    +import com.google.common.base.Function;
    +import com.google.common.base.Supplier;
    +import com.google.common.base.Suppliers;
    +import com.google.common.collect.ImmutableMap;
    +import java.util.Map;
    +import org.junit.jupiter.api.Test;
    +import org.junit.jupiter.api.extension.ExtendWith;
    +import org.mockito.Mock;
    +import org.mockito.junit.jupiter.MockitoExtension;
    +
    +@ExtendWith(MockitoExtension.class)
    +class EnvInfoTest {
    +  private static final Supplier<String> NULL_HOST = Suppliers.ofInstance(null);
    +
    +  @SuppressWarnings("UnnecessaryLambda")
    +  private static final Function<String, String> NULL_ENV = (ignored) -> null;
    +
    +  @Mock private DetectedPlatform detectedPlatform;
    +
    +  @Test
    +  void testUid() {
    +    when(detectedPlatform.getSupportedPlatform()).thenReturn(SupportedPlatform.UNKNOWN_PLATFORM);
    +
    +    EnvInfo info1 = EnvInfo.detect(detectedPlatform, NULL_ENV, NULL_HOST);
    +    EnvInfo info2 = EnvInfo.detect(detectedPlatform, NULL_ENV, NULL_HOST);
    +
    +    assertThat(info1.getUid()).isNotEmpty();
    +    assertThat(info2.getUid()).isNotEmpty();
    +    assertThat(info1.getUid()).isNotEqualTo(info2.getUid());
    +  }
    +
    +  @Test
    +  void testUnknown() {
    +    when(detectedPlatform.getSupportedPlatform()).thenReturn(SupportedPlatform.UNKNOWN_PLATFORM);
    +    EnvInfo envInfo = EnvInfo.detect(detectedPlatform, NULL_ENV, NULL_HOST);
    +    assertThat(envInfo.getHostName()).isEmpty();
    +    assertThat(envInfo.getHostId()).isEmpty();
    +    assertThat(envInfo.getPlatform()).isEqualTo("unknown");
    +    assertThat(envInfo.getRegion()).isEqualTo("global");
    +  }
    +
    +  @Test
    +  void testGce() {
    +    when(detectedPlatform.getSupportedPlatform())
    +        .thenReturn(SupportedPlatform.GOOGLE_COMPUTE_ENGINE);
    +    when(detectedPlatform.getProjectId()).thenReturn("my-project");
    +    when(detectedPlatform.getAttributes())
    +        .thenReturn(
    +            ImmutableMap.of(
    +                "machine_type", "n2-standard-8",
    +                "availability_zone", "us-central1-c",
    +                "instance_id", "1234567890",
    +                "instance_name", "my-vm-name",
    +                "cloud_region", "us-central1",
    +                "instance_hostname", "my-vm-name.us-central1-c.c.my-project.google.com.internal"));
    +    EnvInfo envInfo = EnvInfo.detect(detectedPlatform, NULL_ENV, NULL_HOST);
    +    assertThat(envInfo.getPlatform()).isEqualTo("gcp_compute_engine");
    +    assertThat(envInfo.getProject()).isEqualTo("my-project");
    +    assertThat(envInfo.getRegion()).isEqualTo("us-central1");
    +    assertThat(envInfo.getHostId()).isEqualTo("1234567890");
    +    assertThat(envInfo.getHostName()).isEqualTo("my-vm-name");
    +  }
    +
    +  @Test
    +  void testGke() {
    +    when(detectedPlatform.getSupportedPlatform())
    +        .thenReturn(SupportedPlatform.GOOGLE_KUBERNETES_ENGINE);
    +    when(detectedPlatform.getProjectId()).thenReturn("my-project");
    +    when(detectedPlatform.getAttributes())
    +        .thenReturn(
    +            ImmutableMap.of(
    +                "gke_cluster_name", "my-cluster",
    +                "gke_cluster_location", "us-central1",
    +                "gke_cluster_location_type", "country-region",
    +                "instance_id", "1234567890"));
    +    Map<String, String> env = ImmutableMap.of("HOSTNAME", "my-hostname");
    +
    +    EnvInfo envInfo = EnvInfo.detect(detectedPlatform, env::get, NULL_HOST);
    +    assertThat(envInfo.getPlatform()).isEqualTo("gcp_kubernetes_engine");
    +    assertThat(envInfo.getProject()).isEqualTo("my-project");
    +    assertThat(envInfo.getRegion()).isEqualTo("us-central1");
    +    assertThat(envInfo.getHostId()).isEqualTo("1234567890");
    +    assertThat(envInfo.getHostName()).isEqualTo("my-hostname");
    +  }
    +
    +  @Test
    +  void testGkeHostnameFallback() {
    +    when(detectedPlatform.getSupportedPlatform())
    +        .thenReturn(SupportedPlatform.GOOGLE_KUBERNETES_ENGINE);
    +    when(detectedPlatform.getProjectId()).thenReturn("my-project");
    +    when(detectedPlatform.getAttributes())
    +        .thenReturn(
    +            ImmutableMap.of(
    +                "gke_cluster_name", "my-cluster",
    +                "gke_cluster_location", "us-central1",
    +                "gke_cluster_location_type", "country-region",
    +                "instance_id", "1234567890"));
    +    EnvInfo envInfo = EnvInfo.detect(detectedPlatform, NULL_ENV, () -> "my-hostname");
    +    assertThat(envInfo.getPlatform()).isEqualTo("gcp_kubernetes_engine");
    +    assertThat(envInfo.getProject()).isEqualTo("my-project");
    +    assertThat(envInfo.getRegion()).isEqualTo("us-central1");
    +    assertThat(envInfo.getHostId()).isEqualTo("1234567890");
    +    assertThat(envInfo.getHostName()).isEqualTo("my-hostname");
    +  }
    +}
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/UtilTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/UtilTest.java
    similarity index 52%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/UtilTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/UtilTest.java
    index 3c0fb4e617..782b04928e 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/UtilTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/attributes/UtilTest.java
    @@ -1,5 +1,5 @@
     /*
    - * Copyright 2019 Google LLC
    + * Copyright 2025 Google LLC
      *
      * Licensed under the Apache License, Version 2.0 (the "License");
      * you may not use this file except in compliance with the License.
    @@ -13,30 +13,35 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +
    +package com.google.cloud.bigtable.data.v2.internal.csm.attributes;
     
     import static com.google.common.truth.Truth.assertThat;
    +import static com.google.common.truth.Truth.assertWithMessage;
     
     import com.google.api.gax.grpc.GrpcStatusCode;
     import com.google.api.gax.rpc.DeadlineExceededException;
    -import com.google.common.util.concurrent.Futures;
    +import com.google.bigtable.v2.PeerInfo.TransportType;
     import io.grpc.Status;
     import io.opencensus.tags.TagValue;
    -import org.junit.Test;
    -import org.junit.runner.RunWith;
    -import org.junit.runners.JUnit4;
    +import org.junit.jupiter.api.Test;
     
    -@RunWith(JUnit4.class)
    -public class UtilTest {
    +class UtilTest {
       @Test
    -  public void testOk() {
    -    TagValue tagValue = TagValue.create(Util.extractStatus((Throwable) null));
    -    assertThat(tagValue.asString()).isEqualTo("OK");
    +  void ensureAllTransportTypeHaveExpectedPrefix() {
    +    for (TransportType type : TransportType.values()) {
    +      assertWithMessage("%s should have a mapping", type)
    +          .that(Util.transportTypeToStringWithoutFallback(type))
    +          .isNotNull();
    +    }
       }
     
       @Test
    -  public void testOkFuture() {
    -    TagValue tagValue = Util.extractStatusFromFuture(Futures.immediateFuture(null));
    +  public void testOk() {
    +    TagValue tagValue =
    +        TagValue.create(
    +            com.google.cloud.bigtable.data.v2.internal.csm.attributes.Util.extractStatus(null)
    +                .name());
         assertThat(tagValue.asString()).isEqualTo("OK");
       }
     
    @@ -45,22 +50,7 @@ public void testError() {
         DeadlineExceededException error =
             new DeadlineExceededException(
                 "Deadline exceeded", null, GrpcStatusCode.of(Status.Code.DEADLINE_EXCEEDED), true);
    -    TagValue tagValue = TagValue.create(Util.extractStatus(error));
    -    assertThat(tagValue.asString()).isEqualTo("DEADLINE_EXCEEDED");
    -  }
    -
    -  @Test
    -  public void testErrorFuture() {
    -    DeadlineExceededException error =
    -        new DeadlineExceededException(
    -            "Deadline exceeded", null, GrpcStatusCode.of(Status.Code.DEADLINE_EXCEEDED), true);
    -    TagValue tagValue = Util.extractStatusFromFuture(Futures.immediateFailedFuture(error));
    +    TagValue tagValue = TagValue.create(Util.extractStatus(error).name());
         assertThat(tagValue.asString()).isEqualTo("DEADLINE_EXCEEDED");
       }
    -
    -  @Test
    -  public void testCancelledFuture() {
    -    TagValue tagValue = Util.extractStatusFromFuture(Futures.immediateCancelledFuture());
    -    assertThat(tagValue.asString()).isEqualTo("CANCELLED");
    -  }
     }
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporterTest.java
    similarity index 75%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporterTest.java
    index 0a8ad0afbd..e6c8c109fc 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableCloudMonitoringExporterTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporterTest.java
    @@ -13,16 +13,8 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    -
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APP_PROFILE_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_UID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.INSTANCE_ID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
    +package com.google.cloud.bigtable.data.v2.internal.csm.exporter;
    +
     import static com.google.common.truth.Truth.assertThat;
     import static org.mockito.ArgumentMatchers.any;
     import static org.mockito.Mockito.mock;
    @@ -31,10 +23,16 @@
     import static org.mockito.Mockito.when;
     
     import com.google.api.Distribution;
    -import com.google.api.MonitoredResource;
     import com.google.api.core.ApiFuture;
     import com.google.api.core.ApiFutures;
     import com.google.api.gax.rpc.UnaryCallable;
    +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName;
    +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema;
     import com.google.cloud.monitoring.v3.MetricServiceClient;
     import com.google.cloud.monitoring.v3.stub.MetricServiceStub;
     import com.google.common.base.Suppliers;
    @@ -90,29 +88,46 @@ public class BigtableCloudMonitoringExporterTest {
       private Resource resource;
       private InstrumentationScopeInfo scope;
     
    +  private EnvInfo envInfo =
    +      EnvInfo.builder()
    +          .setProject("client-project")
    +          .setPlatform("gce_instance")
    +          .setRegion("cleint-region")
    +          .setHostName("harold")
    +          .setHostId("1234567890")
    +          .setUid(taskId)
    +          .build();
    +  private ClientInfo clientInfo =
    +      ClientInfo.builder()
    +          .setInstanceName(InstanceName.of(projectId, instanceId))
    +          .setAppProfileId(appProfileId)
    +          .setClientName(clientName)
    +          .build();
    +
       @Before
       public void setUp() {
         fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub);
     
         exporter =
             new BigtableCloudMonitoringExporter(
    -            "bigtable metrics",
    -            fakeMetricServiceClient,
    -            new BigtableCloudMonitoringExporter.PublicTimeSeriesConverter(taskId));
    +            new MetricRegistry(),
    +            Suppliers.ofInstance(envInfo),
    +            clientInfo,
    +            fakeMetricServiceClient);
     
         attributes =
             Attributes.builder()
    -            .put(BIGTABLE_PROJECT_ID_KEY, projectId)
    -            .put(INSTANCE_ID_KEY, instanceId)
    -            .put(TABLE_ID_KEY, tableId)
    -            .put(CLUSTER_ID_KEY, cluster)
    -            .put(ZONE_ID_KEY, zone)
    -            .put(APP_PROFILE_KEY, appProfileId)
    +            .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, projectId)
    +            .put(TableSchema.INSTANCE_ID_KEY, instanceId)
    +            .put(TableSchema.TABLE_ID_KEY, tableId)
    +            .put(TableSchema.CLUSTER_ID_KEY, cluster)
    +            .put(TableSchema.ZONE_ID_KEY, zone)
    +            .put(MetricLabels.APP_PROFILE_KEY, appProfileId)
                 .build();
     
         resource = Resource.create(Attributes.empty());
     
    -    scope = InstrumentationScopeInfo.create(BuiltinMetricsConstants.METER_NAME);
    +    scope = InstrumentationScopeInfo.create(MetricRegistry.METER_NAME);
       }
     
       @After
    @@ -155,15 +170,19 @@ public void testExportingSumData() {
     
         assertThat(timeSeries.getResource().getLabelsMap())
             .containsExactly(
    -            BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
    -            INSTANCE_ID_KEY.getKey(), instanceId,
    -            TABLE_ID_KEY.getKey(), tableId,
    -            CLUSTER_ID_KEY.getKey(), cluster,
    -            ZONE_ID_KEY.getKey(), zone);
    +            TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
    +            TableSchema.INSTANCE_ID_KEY.getKey(), instanceId,
    +            TableSchema.TABLE_ID_KEY.getKey(), tableId,
    +            TableSchema.CLUSTER_ID_KEY.getKey(), cluster,
    +            TableSchema.ZONE_ID_KEY.getKey(), zone);
     
         assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
         assertThat(timeSeries.getMetric().getLabelsMap())
    -        .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
    +        .containsAtLeast(
    +            MetricLabels.APP_PROFILE_KEY.getKey(),
    +            appProfileId,
    +            MetricLabels.CLIENT_UID.getKey(),
    +            taskId);
         assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue);
         assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
             .isEqualTo(startEpoch);
    @@ -215,15 +234,19 @@ public void testExportingHistogramData() {
     
         assertThat(timeSeries.getResource().getLabelsMap())
             .containsExactly(
    -            BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
    -            INSTANCE_ID_KEY.getKey(), instanceId,
    -            TABLE_ID_KEY.getKey(), tableId,
    -            CLUSTER_ID_KEY.getKey(), cluster,
    -            ZONE_ID_KEY.getKey(), zone);
    +            TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
    +            TableSchema.INSTANCE_ID_KEY.getKey(), instanceId,
    +            TableSchema.TABLE_ID_KEY.getKey(), tableId,
    +            TableSchema.CLUSTER_ID_KEY.getKey(), cluster,
    +            TableSchema.ZONE_ID_KEY.getKey(), zone);
     
         assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
         assertThat(timeSeries.getMetric().getLabelsMap())
    -        .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
    +        .containsAtLeast(
    +            MetricLabels.APP_PROFILE_KEY.getKey(),
    +            appProfileId,
    +            MetricLabels.CLIENT_UID.getKey(),
    +            taskId);
         Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue();
         assertThat(distribution.getCount()).isEqualTo(3);
         assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
    @@ -248,12 +271,12 @@ public void testExportingSumDataInBatches() {
         for (int i = 0; i < 250; i++) {
           Attributes testAttributes =
               Attributes.builder()
    -              .put(BIGTABLE_PROJECT_ID_KEY, projectId)
    -              .put(INSTANCE_ID_KEY, instanceId)
    -              .put(TABLE_ID_KEY, tableId + i)
    -              .put(CLUSTER_ID_KEY, cluster)
    -              .put(ZONE_ID_KEY, zone)
    -              .put(APP_PROFILE_KEY, appProfileId)
    +              .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, projectId)
    +              .put(TableSchema.INSTANCE_ID_KEY, instanceId)
    +              .put(TableSchema.TABLE_ID_KEY, tableId + i)
    +              .put(TableSchema.CLUSTER_ID_KEY, cluster)
    +              .put(TableSchema.ZONE_ID_KEY, zone)
    +              .put(MetricLabels.APP_PROFILE_KEY, appProfileId)
                   .build();
           LongPointData longPointData =
               ImmutableLongPointData.create(startEpoch, endEpoch, testAttributes, i);
    @@ -289,15 +312,19 @@ public void testExportingSumDataInBatches() {
     
           assertThat(timeSeries.getResource().getLabelsMap())
               .containsExactly(
    -              BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
    -              INSTANCE_ID_KEY.getKey(), instanceId,
    -              TABLE_ID_KEY.getKey(), tableId + i,
    -              CLUSTER_ID_KEY.getKey(), cluster,
    -              ZONE_ID_KEY.getKey(), zone);
    +              TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), projectId,
    +              TableSchema.INSTANCE_ID_KEY.getKey(), instanceId,
    +              TableSchema.TABLE_ID_KEY.getKey(), tableId + i,
    +              TableSchema.CLUSTER_ID_KEY.getKey(), cluster,
    +              TableSchema.ZONE_ID_KEY.getKey(), zone);
     
           assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
           assertThat(timeSeries.getMetric().getLabelsMap())
    -          .containsAtLeast(APP_PROFILE_KEY.getKey(), appProfileId, CLIENT_UID_KEY.getKey(), taskId);
    +          .containsAtLeast(
    +              MetricLabels.APP_PROFILE_KEY.getKey(),
    +              appProfileId,
    +              MetricLabels.CLIENT_UID.getKey(),
    +              taskId);
           assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(i);
           assertThat(timeSeries.getPoints(0).getInterval().getStartTime().getNanos())
               .isEqualTo(startEpoch);
    @@ -307,26 +334,12 @@ public void testExportingSumDataInBatches() {
     
       @Test
       public void testTimeSeriesForMetricWithGceOrGkeResource() {
    -    String gceProjectId = "fake-gce-project";
         BigtableCloudMonitoringExporter exporter =
             new BigtableCloudMonitoringExporter(
    -            "application metrics",
    -            fakeMetricServiceClient,
    -            new BigtableCloudMonitoringExporter.InternalTimeSeriesConverter(
    -                Suppliers.ofInstance(
    -                    MonitoredResource.newBuilder()
    -                        .setType("bigtable_client")
    -                        .putLabels("project_id", gceProjectId)
    -                        .putLabels("instance", "resource-instance")
    -                        .putLabels("app_profile", "resource-app-profile")
    -                        .putLabels("client_project", "client-project")
    -                        .putLabels("region", "cleint-region")
    -                        .putLabels("cloud_platform", "gce_instance")
    -                        .putLabels("host_id", "1234567890")
    -                        .putLabels("host_name", "harold")
    -                        .putLabels("client_name", "java/1234")
    -                        .putLabels("uuid", "something")
    -                        .build())));
    +            new MetricRegistry(),
    +            Suppliers.ofInstance(envInfo),
    +            clientInfo,
    +            fakeMetricServiceClient);
         ArgumentCaptor<CreateTimeSeriesRequest> argumentCaptor =
             ArgumentCaptor.forClass(CreateTimeSeriesRequest.class);
     
    @@ -342,13 +355,13 @@ public void testTimeSeriesForMetricWithGceOrGkeResource() {
                 startEpoch,
                 endEpoch,
                 Attributes.of(
    -                BIGTABLE_PROJECT_ID_KEY,
    +                ClientSchema.BIGTABLE_PROJECT_ID_KEY,
                     projectId,
    -                INSTANCE_ID_KEY,
    +                ClientSchema.INSTANCE_ID_KEY,
                     instanceId,
    -                APP_PROFILE_KEY,
    +                ClientSchema.APP_PROFILE_KEY,
                     appProfileId,
    -                CLIENT_NAME_KEY,
    +                ClientSchema.CLIENT_NAME,
                     clientName),
                 3d,
                 true,
    @@ -372,7 +385,7 @@ public void testTimeSeriesForMetricWithGceOrGkeResource() {
     
         CreateTimeSeriesRequest request = argumentCaptor.getValue();
     
    -    assertThat(request.getName()).isEqualTo("projects/" + gceProjectId);
    +    assertThat(request.getName()).isEqualTo("projects/" + projectId);
         assertThat(request.getTimeSeriesList()).hasSize(1);
     
         com.google.monitoring.v3.TimeSeries timeSeries = request.getTimeSeriesList().get(0);
    @@ -380,25 +393,25 @@ public void testTimeSeriesForMetricWithGceOrGkeResource() {
         assertThat(timeSeries.getResource().getLabelsMap())
             .isEqualTo(
                 ImmutableMap.builder()
    -                .put("project_id", gceProjectId)
    -                .put("instance", "resource-instance")
    -                .put("app_profile", "resource-app-profile")
    +                .put("project_id", projectId)
    +                .put("instance", instanceId)
    +                .put("app_profile", appProfileId)
                     .put("client_project", "client-project")
                     .put("region", "cleint-region")
                     .put("cloud_platform", "gce_instance")
                     .put("host_id", "1234567890")
                     .put("host_name", "harold")
    -                .put("client_name", "java/1234")
    -                .put("uuid", "something")
    +                .put("client_name", clientName)
    +                .put("uuid", taskId)
                     .build());
     
         assertThat(timeSeries.getMetric().getLabelsMap())
             .isEqualTo(
                 ImmutableMap.builder()
    -                .put(BIGTABLE_PROJECT_ID_KEY.getKey(), projectId)
    -                .put(INSTANCE_ID_KEY.getKey(), instanceId)
    -                .put(APP_PROFILE_KEY.getKey(), appProfileId)
    -                .put(CLIENT_NAME_KEY.getKey(), clientName)
    +                .put(ClientSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), projectId)
    +                .put(ClientSchema.INSTANCE_ID_KEY.getKey(), instanceId)
    +                .put(ClientSchema.APP_PROFILE_KEY.getKey(), appProfileId)
    +                .put(ClientSchema.CLIENT_NAME.getKey(), clientName)
                     .build());
       }
     
    @@ -441,7 +454,9 @@ public void testExportingToMultipleProjects() {
             ImmutableHistogramPointData.create(
                 startEpoch,
                 endEpoch,
    -            attributes.toBuilder().put(BIGTABLE_PROJECT_ID_KEY, "another-project").build(),
    +            attributes.toBuilder()
    +                .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, "another-project")
    +                .build(),
                 50d,
                 true,
                 5d, // min
    @@ -486,26 +501,26 @@ public void testExportingToMultipleProjects() {
         assertThat(labelsMap)
             .containsExactly(
                 ImmutableMap.of(
    -                BIGTABLE_PROJECT_ID_KEY.getKey(),
    +                TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(),
                     projectId,
    -                INSTANCE_ID_KEY.getKey(),
    +                TableSchema.INSTANCE_ID_KEY.getKey(),
                     instanceId,
    -                TABLE_ID_KEY.getKey(),
    +                TableSchema.TABLE_ID_KEY.getKey(),
                     tableId,
    -                CLUSTER_ID_KEY.getKey(),
    +                TableSchema.CLUSTER_ID_KEY.getKey(),
                     cluster,
    -                ZONE_ID_KEY.getKey(),
    +                TableSchema.ZONE_ID_KEY.getKey(),
                     zone),
                 ImmutableMap.of(
    -                BIGTABLE_PROJECT_ID_KEY.getKey(),
    +                TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(),
                     "another-project",
    -                INSTANCE_ID_KEY.getKey(),
    +                TableSchema.INSTANCE_ID_KEY.getKey(),
                     instanceId,
    -                TABLE_ID_KEY.getKey(),
    +                TableSchema.TABLE_ID_KEY.getKey(),
                     tableId,
    -                CLUSTER_ID_KEY.getKey(),
    +                TableSchema.CLUSTER_ID_KEY.getKey(),
                     cluster,
    -                ZONE_ID_KEY.getKey(),
    +                TableSchema.ZONE_ID_KEY.getKey(),
                     zone));
         assertThat(counts).containsExactly(3l, 15l);
       }
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporterTest2.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporterTest2.java
    new file mode 100644
    index 0000000000..adb52ef258
    --- /dev/null
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/exporter/BigtableCloudMonitoringExporterTest2.java
    @@ -0,0 +1,560 @@
    +/*
    + * Copyright 2025 Google LLC
    + *
    + * Licensed under the Apache License, Version 2.0 (the "License");
    + * you may not use this file except in compliance with the License.
    + * You may obtain a copy of the License at
    + *
    + *     https://www.apache.org/licenses/LICENSE-2.0
    + *
    + * Unless required by applicable law or agreed to in writing, software
    + * distributed under the License is distributed on an "AS IS" BASIS,
    + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    + * See the License for the specific language governing permissions and
    + * limitations under the License.
    + */
    +
    +package com.google.cloud.bigtable.data.v2.internal.csm.exporter;
    +
    +import static com.google.common.truth.Truth.assertThat;
    +
    +import com.google.api.Distribution;
    +import com.google.api.core.ApiFuture;
    +import com.google.api.core.ApiFutures;
    +import com.google.api.gax.rpc.ApiCallContext;
    +import com.google.api.gax.rpc.UnaryCallable;
    +import com.google.bigtable.v2.TableName;
    +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName;
    +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.EnvInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.ClientSchema;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema;
    +import com.google.cloud.monitoring.v3.MetricServiceClient;
    +import com.google.cloud.monitoring.v3.stub.MetricServiceStub;
    +import com.google.common.base.Suppliers;
    +import com.google.common.collect.ImmutableList;
    +import com.google.common.collect.ImmutableMap;
    +import com.google.monitoring.v3.CreateTimeSeriesRequest;
    +import com.google.monitoring.v3.TimeSeries;
    +import com.google.protobuf.Empty;
    +import com.google.protobuf.util.Timestamps;
    +import io.opentelemetry.api.common.Attributes;
    +import io.opentelemetry.api.metrics.MeterProvider;
    +import io.opentelemetry.sdk.common.InstrumentationScopeInfo;
    +import io.opentelemetry.sdk.metrics.data.AggregationTemporality;
    +import io.opentelemetry.sdk.metrics.data.HistogramPointData;
    +import io.opentelemetry.sdk.metrics.data.LongPointData;
    +import io.opentelemetry.sdk.metrics.data.MetricData;
    +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramData;
    +import io.opentelemetry.sdk.metrics.internal.data.ImmutableHistogramPointData;
    +import io.opentelemetry.sdk.metrics.internal.data.ImmutableLongPointData;
    +import io.opentelemetry.sdk.metrics.internal.data.ImmutableMetricData;
    +import io.opentelemetry.sdk.metrics.internal.data.ImmutableSumData;
    +import io.opentelemetry.sdk.resources.Resource;
    +import java.util.ArrayList;
    +import java.util.Arrays;
    +import java.util.Collection;
    +import java.util.Collections;
    +import java.util.List;
    +import java.util.Map;
    +import java.util.concurrent.BlockingDeque;
    +import java.util.concurrent.LinkedBlockingDeque;
    +import java.util.concurrent.TimeUnit;
    +import org.junit.jupiter.api.AfterEach;
    +import org.junit.jupiter.api.BeforeEach;
    +import org.junit.jupiter.api.Test;
    +import org.junit.jupiter.api.extension.ExtendWith;
    +import org.mockito.Answers;
    +import org.mockito.Mock;
    +import org.mockito.junit.jupiter.MockitoExtension;
    +
    +@ExtendWith(MockitoExtension.class)
    +public class BigtableCloudMonitoringExporterTest2 {
    +  private static final TableName tableName =
    +      TableName.of("fake-project", "fake-instance", "fake-table");
    +  private static final String appProfileId = "default";
    +  private static final String zone = "us-east-1";
    +  private static final String cluster = "cluster-1";
    +
    +  private ClientInfo clientInfo;
    +  private EnvInfo envInfo;
    +
    +  private FakeMetricServiceStub mockMetricServiceStub;
    +  private MetricServiceClient fakeMetricServiceClient;
    +  private BigtableCloudMonitoringExporter exporter;
    +
    +  private Attributes attributes;
    +  private Resource resource;
    +  private InstrumentationScopeInfo scope;
    +
    +  @Mock(answer = Answers.RETURNS_DEEP_STUBS)
    +  private MeterProvider meterProvider;
    +
    +  @BeforeEach
    +  public void setUp() {
    +    mockMetricServiceStub = new FakeMetricServiceStub();
    +    fakeMetricServiceClient = new FakeMetricServiceClient(mockMetricServiceStub);
    +
    +    envInfo =
    +        EnvInfo.builder()
    +            .setProject("client-project")
    +            .setPlatform("gce_instance")
    +            .setRegion("cleint-region")
    +            .setHostName("harold")
    +            .setHostId("1234567890")
    +            .build();
    +
    +    clientInfo =
    +        ClientInfo.builder()
    +            .setInstanceName(InstanceName.of(tableName.getProject(), tableName.getInstance()))
    +            .setAppProfileId(appProfileId)
    +            .build();
    +
    +    MetricRegistry metricRegistry = new MetricRegistry();
    +    exporter =
    +        new BigtableCloudMonitoringExporter(
    +            metricRegistry, () -> envInfo, clientInfo, fakeMetricServiceClient);
    +
    +    attributes =
    +        Attributes.builder()
    +            .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, tableName.getProject())
    +            .put(TableSchema.INSTANCE_ID_KEY, tableName.getInstance())
    +            .put(TableSchema.TABLE_ID_KEY, tableName.getTable())
    +            .put(TableSchema.CLUSTER_ID_KEY, cluster)
    +            .put(TableSchema.ZONE_ID_KEY, zone)
    +            .put(MetricLabels.APP_PROFILE_KEY, appProfileId)
    +            .build();
    +
    +    resource = Resource.create(Attributes.empty());
    +
    +    scope = InstrumentationScopeInfo.create(MetricRegistry.METER_NAME);
    +  }
    +
    +  @AfterEach
    +  public void tearDown() {}
    +
    +  @Test
    +  public void testExportingSumData() throws InterruptedException {
    +    long fakeValue = 11L;
    +
    +    long startEpoch = 10;
    +    long endEpoch = 15;
    +    LongPointData longPointData =
    +        ImmutableLongPointData.create(startEpoch, endEpoch, attributes, fakeValue);
    +
    +    MetricData longData =
    +        ImmutableMetricData.createLongSum(
    +            resource,
    +            scope,
    +            "bigtable.googleapis.com/internal/client/retry_count",
    +            "description",
    +            "1",
    +            ImmutableSumData.create(
    +                true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData)));
    +
    +    exporter.export(Collections.singletonList(longData));
    +
    +    CreateTimeSeriesRequest request = mockMetricServiceStub.requests.poll(1, TimeUnit.MINUTES);
    +
    +    assertThat(request.getTimeSeriesList()).hasSize(1);
    +
    +    TimeSeries timeSeries = request.getTimeSeriesList().get(0);
    +
    +    assertThat(timeSeries.getResource().getLabelsMap())
    +        .containsExactly(
    +            TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), tableName.getProject(),
    +            TableSchema.INSTANCE_ID_KEY.getKey(), tableName.getInstance(),
    +            TableSchema.TABLE_ID_KEY.getKey(), tableName.getTable(),
    +            TableSchema.CLUSTER_ID_KEY.getKey(), cluster,
    +            TableSchema.ZONE_ID_KEY.getKey(), zone);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsAtLeast(
    +            MetricLabels.APP_PROFILE_KEY.getKey(),
    +            appProfileId,
    +            MetricLabels.CLIENT_UID.getKey(),
    +            envInfo.getUid());
    +    assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(fakeValue);
    +    assertThat(timeSeries.getPoints(0).getInterval().getStartTime())
    +        .isEqualTo(Timestamps.fromNanos(startEpoch));
    +    assertThat(timeSeries.getPoints(0).getInterval().getEndTime())
    +        .isEqualTo(Timestamps.fromNanos(endEpoch));
    +  }
    +
    +  @Test
    +  public void testExportingHistogramData() throws InterruptedException {
    +    long startEpoch = 10;
    +    long endEpoch = 15;
    +    HistogramPointData histogramPointData =
    +        ImmutableHistogramPointData.create(
    +            startEpoch,
    +            endEpoch,
    +            attributes,
    +            3d,
    +            true,
    +            1d, // min
    +            true,
    +            2d, // max
    +            Collections.singletonList(1.0),
    +            Arrays.asList(1L, 2L));
    +
    +    MetricData histogramData =
    +        ImmutableMetricData.createDoubleHistogram(
    +            resource,
    +            scope,
    +            "bigtable.googleapis.com/internal/client/operation_latencies",
    +            "description",
    +            "ms",
    +            ImmutableHistogramData.create(
    +                AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
    +
    +    exporter.export(Arrays.asList(histogramData));
    +
    +    CreateTimeSeriesRequest request = mockMetricServiceStub.requests.poll(1, TimeUnit.MINUTES);
    +
    +    assertThat(request.getTimeSeriesList()).hasSize(1);
    +
    +    TimeSeries timeSeries = request.getTimeSeriesList().get(0);
    +
    +    assertThat(timeSeries.getResource().getLabelsMap())
    +        .containsExactly(
    +            TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), tableName.getProject(),
    +            TableSchema.INSTANCE_ID_KEY.getKey(), tableName.getInstance(),
    +            TableSchema.TABLE_ID_KEY.getKey(), tableName.getTable(),
    +            TableSchema.CLUSTER_ID_KEY.getKey(), cluster,
    +            TableSchema.ZONE_ID_KEY.getKey(), zone);
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .containsAtLeast(
    +            MetricLabels.APP_PROFILE_KEY.getKey(),
    +            appProfileId,
    +            MetricLabels.CLIENT_UID.getKey(),
    +            this.envInfo.getUid());
    +    Distribution distribution = timeSeries.getPoints(0).getValue().getDistributionValue();
    +    assertThat(distribution.getCount()).isEqualTo(3);
    +    assertThat(timeSeries.getPoints(0).getInterval().getStartTime())
    +        .isEqualTo(Timestamps.fromNanos(startEpoch));
    +    assertThat(timeSeries.getPoints(0).getInterval().getEndTime())
    +        .isEqualTo(Timestamps.fromNanos(endEpoch));
    +  }
    +
    +  @Test
    +  public void testExportingSumDataInBatches() {
    +    long startEpoch = 10;
    +    long endEpoch = 15;
    +
    +    Collection<MetricData> toExport = new ArrayList<>();
    +    for (int i = 0; i < 250; i++) {
    +      Attributes testAttributes =
    +          Attributes.builder()
    +              .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, tableName.getProject())
    +              .put(TableSchema.INSTANCE_ID_KEY, tableName.getInstance())
    +              .put(TableSchema.TABLE_ID_KEY, tableName.getTable() + i)
    +              .put(TableSchema.CLUSTER_ID_KEY, cluster)
    +              .put(TableSchema.ZONE_ID_KEY, zone)
    +              .put(MetricLabels.APP_PROFILE_KEY, appProfileId)
    +              .build();
    +      LongPointData longPointData =
    +          ImmutableLongPointData.create(startEpoch, endEpoch, testAttributes, i);
    +
    +      MetricData longData =
    +          ImmutableMetricData.createLongSum(
    +              resource,
    +              scope,
    +              "bigtable.googleapis.com/internal/client/retry_count",
    +              "description",
    +              "1",
    +              ImmutableSumData.create(
    +                  true, AggregationTemporality.CUMULATIVE, ImmutableList.of(longPointData)));
    +      toExport.add(longData);
    +    }
    +
    +    exporter.export(toExport);
    +
    +    assertThat(mockMetricServiceStub.requests).hasSize(2);
    +    CreateTimeSeriesRequest firstRequest = mockMetricServiceStub.requests.poll();
    +    CreateTimeSeriesRequest secondRequest = mockMetricServiceStub.requests.poll();
    +
    +    assertThat(firstRequest.getTimeSeriesList()).hasSize(200);
    +    assertThat(secondRequest.getTimeSeriesList()).hasSize(50);
    +
    +    for (int i = 0; i < 250; i++) {
    +      TimeSeries timeSeries;
    +      if (i < 200) {
    +        timeSeries = firstRequest.getTimeSeriesList().get(i);
    +      } else {
    +        timeSeries = secondRequest.getTimeSeriesList().get(i - 200);
    +      }
    +
    +      assertThat(timeSeries.getResource().getLabelsMap())
    +          .containsExactly(
    +              TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(),
    +              tableName.getProject(),
    +              TableSchema.INSTANCE_ID_KEY.getKey(),
    +              tableName.getInstance(),
    +              TableSchema.TABLE_ID_KEY.getKey(),
    +              tableName.getTable() + i,
    +              TableSchema.CLUSTER_ID_KEY.getKey(),
    +              cluster,
    +              TableSchema.ZONE_ID_KEY.getKey(),
    +              zone);
    +
    +      assertThat(timeSeries.getMetric().getLabelsMap()).hasSize(2);
    +      assertThat(timeSeries.getMetric().getLabelsMap())
    +          .containsAtLeast(
    +              MetricLabels.APP_PROFILE_KEY.getKey(),
    +              appProfileId,
    +              MetricLabels.CLIENT_UID.getKey(),
    +              envInfo.getUid());
    +      assertThat(timeSeries.getPoints(0).getValue().getInt64Value()).isEqualTo(i);
    +      assertThat(timeSeries.getPoints(0).getInterval().getStartTime())
    +          .isEqualTo(Timestamps.fromNanos(startEpoch));
    +      assertThat(timeSeries.getPoints(0).getInterval().getEndTime())
    +          .isEqualTo(Timestamps.fromNanos(endEpoch));
    +    }
    +  }
    +
    +  @Test
    +  public void testTimeSeriesForMetricWithGceOrGkeResource() throws InterruptedException {
    +    String gceProjectId = "fake-gce-project";
    +    EnvInfo envInfo =
    +        EnvInfo.builder()
    +            .setPlatform("gce_instance")
    +            .setProject(gceProjectId)
    +            .setRegion("cleint-region")
    +            .setHostId("1234567890")
    +            .setHostName("harold")
    +            .build();
    +
    +    ClientInfo clientInfo =
    +        ClientInfo.builder()
    +            .setInstanceName(InstanceName.of(tableName.getProject(), tableName.getInstance()))
    +            .setAppProfileId(appProfileId)
    +            .build();
    +
    +    MetricRegistry metricRegistry = new MetricRegistry();
    +    BigtableCloudMonitoringExporter exporter =
    +        new BigtableCloudMonitoringExporter(
    +            metricRegistry, Suppliers.ofInstance(envInfo), clientInfo, fakeMetricServiceClient);
    +
    +    long startEpoch = 10;
    +    long endEpoch = 15;
    +    HistogramPointData histogramPointData =
    +        ImmutableHistogramPointData.create(
    +            startEpoch,
    +            endEpoch,
    +            Attributes.of(
    +                ClientSchema.BIGTABLE_PROJECT_ID_KEY,
    +                tableName.getProject(),
    +                ClientSchema.INSTANCE_ID_KEY,
    +                tableName.getInstance(),
    +                ClientSchema.APP_PROFILE_KEY,
    +                appProfileId,
    +                ClientSchema.CLIENT_NAME,
    +                clientInfo.getClientName()),
    +            3d,
    +            true,
    +            1d, // min
    +            true,
    +            2d, // max
    +            Arrays.asList(1.0),
    +            Arrays.asList(1L, 2L));
    +
    +    MetricData histogramData =
    +        ImmutableMetricData.createDoubleHistogram(
    +            resource,
    +            scope,
    +            "bigtable.googleapis.com/internal/client/per_connection_error_count",
    +            "description",
    +            "ms",
    +            ImmutableHistogramData.create(
    +                AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData)));
    +
    +    exporter.export(Collections.singletonList(histogramData));
    +
    +    CreateTimeSeriesRequest request = mockMetricServiceStub.requests.poll(1, TimeUnit.MINUTES);
    +
    +    assertThat(request.getName()).isEqualTo("projects/" + tableName.getProject());
    +    assertThat(request.getTimeSeriesList()).hasSize(1);
    +
    +    TimeSeries timeSeries = request.getTimeSeriesList().get(0);
    +
    +    assertThat(timeSeries.getResource().getLabelsMap())
    +        .isEqualTo(
    +            ImmutableMap.<String, String>builder()
    +                .put("project_id", tableName.getProject())
    +                .put("instance", tableName.getInstance())
    +                .put("app_profile", appProfileId)
    +                .put("client_project", gceProjectId)
    +                .put("region", "cleint-region")
    +                .put("cloud_platform", "gce_instance")
    +                .put("host_id", "1234567890")
    +                .put("host_name", "harold")
    +                .put("client_name", clientInfo.getClientName())
    +                .put("uuid", envInfo.getUid())
    +                .build());
    +
    +    assertThat(timeSeries.getMetric().getLabelsMap())
    +        .isEqualTo(
    +            ImmutableMap.<String, String>builder()
    +                .put(ClientSchema.BIGTABLE_PROJECT_ID_KEY.getKey(), tableName.getProject())
    +                .put(ClientSchema.INSTANCE_ID_KEY.getKey(), tableName.getInstance())
    +                .put(ClientSchema.APP_PROFILE_KEY.getKey(), appProfileId)
    +                .put(ClientSchema.CLIENT_NAME.getKey(), clientInfo.getClientName())
    +                .put(MetricLabels.CLIENT_UID.getKey(), envInfo.getUid())
    +                .build());
    +  }
    +
    +  @Test
    +  public void testExportingToMultipleProjects() throws InterruptedException {
    +    long startEpoch = 10;
    +    long endEpoch = 15;
    +    HistogramPointData histogramPointData1 =
    +        ImmutableHistogramPointData.create(
    +            startEpoch,
    +            endEpoch,
    +            attributes,
    +            3d,
    +            true,
    +            1d, // min
    +            true,
    +            2d, // max
    +            Arrays.asList(1.0),
    +            Arrays.asList(1L, 2L));
    +
    +    MetricData histogramData1 =
    +        ImmutableMetricData.createDoubleHistogram(
    +            resource,
    +            scope,
    +            "bigtable.googleapis.com/internal/client/operation_latencies",
    +            "description",
    +            "ms",
    +            ImmutableHistogramData.create(
    +                AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData1)));
    +
    +    HistogramPointData histogramPointData2 =
    +        ImmutableHistogramPointData.create(
    +            startEpoch,
    +            endEpoch,
    +            attributes.toBuilder()
    +                .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, "another-project")
    +                .build(),
    +            50d,
    +            true,
    +            5d, // min
    +            true,
    +            30d, // max
    +            Arrays.asList(1.0),
    +            Arrays.asList(5L, 10L));
    +
    +    MetricData histogramData2 =
    +        ImmutableMetricData.createDoubleHistogram(
    +            resource,
    +            scope,
    +            "bigtable.googleapis.com/internal/client/operation_latencies",
    +            "description",
    +            "ms",
    +            ImmutableHistogramData.create(
    +                AggregationTemporality.CUMULATIVE, ImmutableList.of(histogramPointData2)));
    +
    +    exporter.export(Arrays.asList(histogramData1, histogramData2));
    +
    +    List<CreateTimeSeriesRequest> allValues =
    +        Arrays.asList(
    +            mockMetricServiceStub.requests.poll(1, TimeUnit.MINUTES),
    +            mockMetricServiceStub.requests.poll(1, TimeUnit.MINUTES));
    +
    +    assertThat(allValues).hasSize(2);
    +
    +    List<Map<String, String>> labelsMap = new ArrayList<>();
    +    List<Long> counts = new ArrayList<>();
    +    allValues.forEach(
    +        value -> {
    +          labelsMap.add(value.getTimeSeriesList().get(0).getResource().getLabelsMap());
    +          counts.add(
    +              value
    +                  .getTimeSeriesList()
    +                  .get(0)
    +                  .getPoints(0)
    +                  .getValue()
    +                  .getDistributionValue()
    +                  .getCount());
    +        });
    +
    +    assertThat(labelsMap)
    +        .containsExactly(
    +            ImmutableMap.of(
    +                TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(),
    +                tableName.getProject(),
    +                TableSchema.INSTANCE_ID_KEY.getKey(),
    +                tableName.getInstance(),
    +                TableSchema.TABLE_ID_KEY.getKey(),
    +                tableName.getTable(),
    +                TableSchema.CLUSTER_ID_KEY.getKey(),
    +                cluster,
    +                TableSchema.ZONE_ID_KEY.getKey(),
    +                zone),
    +            ImmutableMap.of(
    +                TableSchema.BIGTABLE_PROJECT_ID_KEY.getKey(),
    +                "another-project",
    +                TableSchema.INSTANCE_ID_KEY.getKey(),
    +                tableName.getInstance(),
    +                TableSchema.TABLE_ID_KEY.getKey(),
    +                tableName.getTable(),
    +                TableSchema.CLUSTER_ID_KEY.getKey(),
    +                cluster,
    +                TableSchema.ZONE_ID_KEY.getKey(),
    +                zone));
    +    assertThat(counts).containsExactly(3l, 15l);
    +  }
    +
    +  private static class FakeMetricServiceClient extends MetricServiceClient {
    +    protected FakeMetricServiceClient(MetricServiceStub stub) {
    +      super(stub);
    +    }
    +  }
    +
    +  private static class FakeMetricServiceStub extends MetricServiceStub {
    +    private final BlockingDeque<CreateTimeSeriesRequest> requests = new LinkedBlockingDeque<>();
    +
    +    @Override
    +    public UnaryCallable<CreateTimeSeriesRequest, Empty> createServiceTimeSeriesCallable() {
    +      return new UnaryCallable<CreateTimeSeriesRequest, Empty>() {
    +        @Override
    +        public ApiFuture<Empty> futureCall(
    +            CreateTimeSeriesRequest createTimeSeriesRequest, ApiCallContext apiCallContext) {
    +            CreateTimeSeriesRequest createTimeSeriesRequest, ApiCallContext apiCallContext) {
    +          requests.add(createTimeSeriesRequest);
    +          return ApiFutures.immediateFuture(Empty.getDefaultInstance());
    +        }
    +      };
    +    }
    +
    +    @Override
    +    public void close() {}
    +
    +    @Override
    +    public void shutdown() {}
    +
    +    @Override
    +    public boolean isShutdown() {
    +      return false;
    +    }
    +
    +    @Override
    +    public boolean isTerminated() {
    +      return false;
    +    }
    +
    +    @Override
    +    public void shutdownNow() {}
    +
    +    @Override
    +    public boolean awaitTermination(long l, TimeUnit timeUnit) throws InterruptedException {
    +      return false;
    +    }
    +  }
    +}
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/BigtableTracerCallableTest.java
    similarity index 92%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/BigtableTracerCallableTest.java
    index 8c3746144f..4eec40a696 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BigtableTracerCallableTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/BigtableTracerCallableTest.java
    @@ -13,12 +13,11 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus;
     
     import static com.google.common.truth.Truth.assertThat;
     import static org.junit.Assert.fail;
     
    -import com.google.api.gax.rpc.ClientContext;
     import com.google.api.gax.rpc.ServerStream;
     import com.google.api.gax.rpc.UnavailableException;
     import com.google.bigtable.v2.BigtableGrpc.BigtableImplBase;
    @@ -46,6 +45,8 @@
     import com.google.cloud.bigtable.data.v2.models.TableId;
     import com.google.cloud.bigtable.data.v2.stub.BigtableClientContext;
     import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
    +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
    +import com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews;
     import com.google.common.collect.ImmutableMap;
     import io.grpc.ForwardingServerCall.SimpleForwardingServerCall;
     import io.grpc.Metadata;
    @@ -125,23 +126,17 @@ public void sendHeaders(Metadata headers) {
                 .setProjectId(PROJECT_ID)
                 .setInstanceId(INSTANCE_ID)
                 .setAppProfileId(APP_PROFILE_ID)
    +            // only testing opencensus
    +            .setMetricsProvider(NoopMetricsProvider.INSTANCE)
    +            .disableInternalMetrics()
                 .build();
     
    -    BigtableClientContext bigtableClientContext =
    -        EnhancedBigtableStub.createBigtableClientContext(settings.getStubSettings());
    -    ClientContext clientContext =
    -        bigtableClientContext.getClientContext().toBuilder()
    -            .setTracerFactory(
    -                EnhancedBigtableStub.createBigtableTracerFactory(
    -                    settings.getStubSettings(),
    -                    Tags.getTagger(),
    -                    localStats.getStatsRecorder(),
    -                    null))
    -            .build();
         attempts = settings.getStubSettings().readRowsSettings().getRetrySettings().getMaxAttempts();
         stub =
             new EnhancedBigtableStub(
    -            settings.getStubSettings(), bigtableClientContext.withClientContext(clientContext));
    +            settings.getStubSettings().getPerOpSettings(),
    +            BigtableClientContext.create(
    +                settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder()));
     
         // Create another server without injecting the server-timing header and another stub that
         // connects to it.
    @@ -152,23 +147,17 @@ public void sendHeaders(Metadata headers) {
                 .setProjectId(PROJECT_ID)
                 .setInstanceId(INSTANCE_ID)
                 .setAppProfileId(APP_PROFILE_ID)
    +            .setMetricsProvider(NoopMetricsProvider.INSTANCE)
    +            .disableInternalMetrics()
                 .build();
     
    -    BigtableClientContext noHeaderBigtableClientContext =
    -        EnhancedBigtableStub.createBigtableClientContext(noHeaderSettings.getStubSettings());
    -    ClientContext noHeaderClientContext =
    -        noHeaderBigtableClientContext.getClientContext().toBuilder()
    -            .setTracerFactory(
    -                EnhancedBigtableStub.createBigtableTracerFactory(
    -                    noHeaderSettings.getStubSettings(),
    -                    Tags.getTagger(),
    -                    localStats.getStatsRecorder(),
    -                    null))
    -            .build();
         noHeaderStub =
             new EnhancedBigtableStub(
    -            noHeaderSettings.getStubSettings(),
    -            noHeaderBigtableClientContext.withClientContext(noHeaderClientContext));
    +            noHeaderSettings.getStubSettings().getPerOpSettings(),
    +            BigtableClientContext.create(
    +                noHeaderSettings.getStubSettings(),
    +                Tags.getTagger(),
    +                localStats.getStatsRecorder()));
       }
     
       @After
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracerTest.java
    similarity index 96%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracerTest.java
    index 5c4161d0e3..cadd777983 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/MetricsTracerTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/MetricsTracerTest.java
    @@ -13,7 +13,7 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus;
     
     import static com.google.common.truth.Truth.assertThat;
     import static org.mockito.ArgumentMatchers.any;
    @@ -25,7 +25,6 @@
     import com.google.api.gax.batching.FlowController;
     import com.google.api.gax.grpc.GrpcCallContext;
     import com.google.api.gax.rpc.ApiCallContext;
    -import com.google.api.gax.rpc.ClientContext;
     import com.google.bigtable.v2.BigtableGrpc;
     import com.google.bigtable.v2.MutateRowsRequest;
     import com.google.bigtable.v2.MutateRowsResponse;
    @@ -40,6 +39,8 @@
     import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
     import com.google.cloud.bigtable.data.v2.stub.BigtableClientContext;
     import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
    +import com.google.cloud.bigtable.data.v2.stub.metrics.NoopMetricsProvider;
    +import com.google.cloud.bigtable.data.v2.stub.metrics.RpcViews;
     import com.google.cloud.bigtable.data.v2.stub.mutaterows.MutateRowsBatchingDescriptor;
     import com.google.common.base.Stopwatch;
     import com.google.common.collect.ImmutableMap;
    @@ -120,22 +121,17 @@ public void setUp() throws Exception {
                 .setProjectId(PROJECT_ID)
                 .setInstanceId(INSTANCE_ID)
                 .setAppProfileId(APP_PROFILE_ID)
    +            .setMetricsProvider(NoopMetricsProvider.INSTANCE)
    +            .disableInternalMetrics()
                 .build();
     
         BigtableClientContext bigtableClientContext =
    -        EnhancedBigtableStub.createBigtableClientContext(settings.getStubSettings());
    -    ClientContext clientContext =
    -        bigtableClientContext.getClientContext().toBuilder()
    -            .setTracerFactory(
    -                EnhancedBigtableStub.createBigtableTracerFactory(
    -                    settings.getStubSettings(),
    -                    Tags.getTagger(),
    -                    localStats.getStatsRecorder(),
    -                    null))
    -            .build();
    +        BigtableClientContext.create(
    +            settings.getStubSettings(), Tags.getTagger(), localStats.getStatsRecorder());
    +
         stub =
             new EnhancedBigtableStub(
    -            settings.getStubSettings(), bigtableClientContext.withClientContext(clientContext));
    +            settings.getStubSettings().getPerOpSettings(), bigtableClientContext);
       }
     
       @After
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/SimpleStatsComponent.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/SimpleStatsComponent.java
    similarity index 93%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/SimpleStatsComponent.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/SimpleStatsComponent.java
    index 99aed9c3b4..bf867989d1 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/SimpleStatsComponent.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/SimpleStatsComponent.java
    @@ -13,7 +13,7 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus;
     
     import io.opencensus.implcore.common.MillisClock;
     import io.opencensus.implcore.internal.SimpleEventQueue;
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/StatsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/StatsTestUtils.java
    similarity index 99%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/StatsTestUtils.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/StatsTestUtils.java
    index e808af8a84..db86a027fc 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/StatsTestUtils.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/opencensus/StatsTestUtils.java
    @@ -13,7 +13,7 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +package com.google.cloud.bigtable.data.v2.internal.csm.opencensus;
     
     import static com.google.common.base.Preconditions.checkNotNull;
     
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracerTest.java
    similarity index 70%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracerTest.java
    index df63ff8019..17d55870ff 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTracerTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/BuiltinMetricsTracerTest.java
    @@ -13,27 +13,8 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    -
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLICATION_BLOCKING_LATENCIES_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.APPLIED_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_BLOCKING_LATENCIES_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLIENT_NAME_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CLUSTER_ID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.CONNECTIVITY_ERROR_COUNT_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.FIRST_RESPONSE_LATENCIES_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.METHOD_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OPERATION_LATENCIES_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.REMAINING_DEADLINE_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.RETRY_COUNT_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.SERVER_LATENCIES_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STATUS_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.STREAMING_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.TABLE_ID_KEY;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.ZONE_ID_KEY;
    +package com.google.cloud.bigtable.data.v2.internal.csm.tracers;
    +
     import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedDoubleValue;
     import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getAggregatedValue;
     import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsTestUtils.getMetricData;
    @@ -64,13 +45,28 @@
     import com.google.cloud.bigtable.Version;
     import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
     import com.google.cloud.bigtable.data.v2.FakeServiceBuilder;
    +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName;
    +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientBatchWriteFlowControlFactor;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientBatchWriteFlowControlTargetQps;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableApplicationBlockingLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableAttemptLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableClientBlockingLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableConnectivityErrorCount;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableFirstResponseLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableOperationLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableRemainingDeadline;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableRetryCount;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableServerLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema;
     import com.google.cloud.bigtable.data.v2.models.AuthorizedViewId;
     import com.google.cloud.bigtable.data.v2.models.Query;
     import com.google.cloud.bigtable.data.v2.models.Row;
     import com.google.cloud.bigtable.data.v2.models.RowMutation;
     import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
     import com.google.cloud.bigtable.data.v2.models.TableId;
    -import com.google.cloud.bigtable.data.v2.stub.BigtableClientContext;
     import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStub;
     import com.google.cloud.bigtable.data.v2.stub.EnhancedBigtableStubSettings;
     import com.google.common.base.Stopwatch;
    @@ -101,10 +97,8 @@
     import io.grpc.stub.StreamObserver;
     import io.opentelemetry.api.common.Attributes;
     import io.opentelemetry.sdk.OpenTelemetrySdk;
    -import io.opentelemetry.sdk.metrics.InstrumentSelector;
     import io.opentelemetry.sdk.metrics.SdkMeterProvider;
     import io.opentelemetry.sdk.metrics.SdkMeterProviderBuilder;
    -import io.opentelemetry.sdk.metrics.View;
     import io.opentelemetry.sdk.metrics.data.HistogramPointData;
     import io.opentelemetry.sdk.metrics.data.MetricData;
     import io.opentelemetry.sdk.testing.exporter.InMemoryMetricReader;
    @@ -118,7 +112,6 @@
     import java.util.Collections;
     import java.util.Iterator;
     import java.util.List;
    -import java.util.Map;
     import java.util.concurrent.TimeUnit;
     import java.util.concurrent.atomic.AtomicBoolean;
     import java.util.concurrent.atomic.AtomicInteger;
    @@ -136,6 +129,9 @@
     
     @RunWith(JUnit4.class)
     public class BuiltinMetricsTracerTest {
    +  private static final Metadata.Key LOCATION_METADATA_KEY =
    +      Metadata.Key.of("x-goog-ext-425905942-bin", Metadata.BINARY_BYTE_MARSHALLER);
    +
       private static final String PROJECT_ID = "fake-project";
       private static final String INSTANCE_ID = "fake-instance";
       private static final String APP_PROFILE_ID = "default";
    @@ -161,7 +157,17 @@ public class BuiltinMetricsTracerTest {
     
       private int batchElementCount = 2;
     
    -  private Attributes baseAttributes;
    +  private ClientInfo clientInfo =
    +      ClientInfo.builder()
    +          .setInstanceName(InstanceName.of(PROJECT_ID, INSTANCE_ID))
    +          .setAppProfileId(APP_PROFILE_ID)
    +          .build();
    +  private Attributes expectedBaseAttributes =
    +      Attributes.builder()
    +          .put(TableSchema.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID)
    +          .put(TableSchema.INSTANCE_ID_KEY, INSTANCE_ID)
    +          .put(MetricLabels.APP_PROFILE_KEY, APP_PROFILE_ID)
    +          .build();
     
       private InMemoryMetricReader metricReader;
     
    @@ -173,24 +179,16 @@ public class BuiltinMetricsTracerTest {
       public void setUp() throws Exception {
         metricReader = InMemoryMetricReader.create();
     
    -    baseAttributes =
    -        Attributes.builder()
    -            .put(BuiltinMetricsConstants.BIGTABLE_PROJECT_ID_KEY, PROJECT_ID)
    -            .put(BuiltinMetricsConstants.INSTANCE_ID_KEY, INSTANCE_ID)
    -            .put(BuiltinMetricsConstants.APP_PROFILE_KEY, APP_PROFILE_ID)
    -            .build();
    -
         SdkMeterProviderBuilder meterProvider =
             SdkMeterProvider.builder().registerMetricReader(metricReader);
     
    -    for (Map.Entry entry :
    -        BuiltinMetricsConstants.getAllViews().entrySet()) {
    -      meterProvider.registerView(entry.getKey(), entry.getValue());
    -    }
    -
         OpenTelemetrySdk otel =
             OpenTelemetrySdk.builder().setMeterProvider(meterProvider.build()).build();
    -    BuiltinMetricsTracerFactory facotry = BuiltinMetricsTracerFactory.create(otel, baseAttributes);
    +    MetricRegistry mr = new MetricRegistry();
    +
    +    BuiltinMetricsTracerFactory facotry =
    +        new BuiltinMetricsTracerFactory(
    +            mr.newRecorderRegistry(otel.getMeterProvider()), clientInfo);
     
         // Add an interceptor to add server-timing in headers
         ServerInterceptor trailersInterceptor =
    @@ -211,7 +209,7 @@ public void sendHeaders(Metadata headers) {
                         ResponseParams params =
                             ResponseParams.newBuilder().setZoneId(ZONE).setClusterId(CLUSTER).build();
                         byte[] byteArray = params.toByteArray();
    -                    headers.put(Util.LOCATION_METADATA_KEY, byteArray);
    +                    headers.put(LOCATION_METADATA_KEY, byteArray);
     
                         super.sendHeaders(headers);
                       }
    @@ -284,8 +282,7 @@ public void sendHeaders(Metadata headers) {
               return builder.proxyDetector(delayProxyDetector).intercept(outstandingRpcCounter);
             });
         stubSettingsBuilder.setTransportChannelProvider(channelProvider.build());
    -    EnhancedBigtableStubSettings stubSettings = stubSettingsBuilder.build();
    -    stub = new EnhancedBigtableStub(stubSettings, BigtableClientContext.create(stubSettings));
    +    stub = EnhancedBigtableStub.create(stubSettingsBuilder.build());
       }
     
       @After
    @@ -301,17 +298,17 @@ public void testReadRowsOperationLatencies() {
         long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
     
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(STREAMING_KEY, true)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.STREAMING_KEY, true)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
    -    MetricData metricData = getMetricData(metricReader, OPERATION_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableOperationLatency.NAME);
     
         long value = getAggregatedValue(metricData, expectedAttributes);
         assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
    @@ -326,17 +323,17 @@ public void testReadRowsOperationLatenciesOnAuthorizedView() {
         long elapsed = stopwatch.elapsed(TimeUnit.MILLISECONDS);
     
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(STREAMING_KEY, true)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.STREAMING_KEY, true)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
    -    MetricData metricData = getMetricData(metricReader, OPERATION_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableOperationLatency.NAME);
         long value = getAggregatedValue(metricData, expectedAttributes);
         assertThat(value).isIn(Range.closed(SERVER_LATENCY, elapsed));
       }
    @@ -371,16 +368,16 @@ public void onComplete() {}
                 });
     
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, FIRST_RESPONSE_TABLE_ID)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, FIRST_RESPONSE_TABLE_ID)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
    -    MetricData metricData = getMetricData(metricReader, FIRST_RESPONSE_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableFirstResponseLatency.NAME);
     
         long value = getAggregatedValue(metricData, expectedAttributes);
         assertThat(value).isAtMost(firstResponseTimer.elapsed(TimeUnit.MILLISECONDS));
    @@ -391,39 +388,39 @@ public void testGfeMetrics() {
         Lists.newArrayList(stub.readRowsCallable().call(Query.create(TABLE)));
     
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
                 .build();
     
    -    MetricData serverLatenciesMetricData = getMetricData(metricReader, SERVER_LATENCIES_NAME);
    +    MetricData serverLatenciesMetricData = getMetricData(metricReader, TableServerLatency.NAME);
     
         long serverLatencies = getAggregatedValue(serverLatenciesMetricData, expectedAttributes);
         assertThat(serverLatencies).isEqualTo(FAKE_SERVER_TIMING);
     
         MetricData connectivityErrorCountMetricData =
    -        getMetricData(metricReader, CONNECTIVITY_ERROR_COUNT_NAME);
    +        getMetricData(metricReader, TableConnectivityErrorCount.NAME);
         Attributes expected1 =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "UNAVAILABLE")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, "global")
    -            .put(CLUSTER_ID_KEY, "")
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "UNAVAILABLE")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, "global")
    +            .put(TableSchema.CLUSTER_ID_KEY, "")
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
         Attributes expected2 =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
         verifyAttributes(connectivityErrorCountMetricData, expected1);
    @@ -469,25 +466,28 @@ public void onComplete() {
         assertThat(counter.get()).isEqualTo(fakeService.getResponseCounter().get());
     
         MetricData applicationLatency =
    -        getMetricData(metricReader, APPLICATION_BLOCKING_LATENCIES_NAME);
    +        getMetricData(metricReader, TableApplicationBlockingLatency.NAME);
     
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    +        expectedBaseAttributes.toBuilder()
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
                 .build();
         long value = getAggregatedValue(applicationLatency, expectedAttributes);
     
         assertThat(value).isAtLeast((APPLICATION_LATENCY - SLEEP_VARIABILITY) * counter.get());
     
    -    MetricData operationLatency = getMetricData(metricReader, OPERATION_LATENCIES_NAME);
    +    MetricData operationLatency = getMetricData(metricReader, TableOperationLatency.NAME);
         long operationLatencyValue =
             getAggregatedValue(
                 operationLatency,
    -            expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
    +            expectedAttributes.toBuilder()
    +                .put(MetricLabels.STATUS_KEY, "OK")
    +                .put(MetricLabels.STREAMING_KEY, true)
    +                .build());
         assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
       }
     
    @@ -504,15 +504,15 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti
         }
     
         MetricData applicationLatency =
    -        getMetricData(metricReader, APPLICATION_BLOCKING_LATENCIES_NAME);
    +        getMetricData(metricReader, TableApplicationBlockingLatency.NAME);
     
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    +        expectedBaseAttributes.toBuilder()
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
                 .build();
     
         long value = getAggregatedValue(applicationLatency, expectedAttributes);
    @@ -521,11 +521,14 @@ public void testReadRowsApplicationLatencyWithManualFlowControl() throws Excepti
         assertThat(counter).isEqualTo(fakeService.getResponseCounter().get());
         assertThat(value).isAtLeast(APPLICATION_LATENCY * (counter - 1) - SERVER_LATENCY);
     
    -    MetricData operationLatency = getMetricData(metricReader, OPERATION_LATENCIES_NAME);
    +    MetricData operationLatency = getMetricData(metricReader, TableOperationLatency.NAME);
         long operationLatencyValue =
             getAggregatedValue(
                 operationLatency,
    -            expectedAttributes.toBuilder().put(STATUS_KEY, "OK").put(STREAMING_KEY, true).build());
    +            expectedAttributes.toBuilder()
    +                .put(MetricLabels.STATUS_KEY, "OK")
    +                .put(MetricLabels.STREAMING_KEY, true)
    +                .build());
         assertThat(value).isAtMost(operationLatencyValue - SERVER_LATENCY);
       }
     
    @@ -534,15 +537,15 @@ public void testRetryCount() throws InterruptedException {
         stub.mutateRowCallable()
             .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
     
    -    MetricData metricData = getMetricData(metricReader, RETRY_COUNT_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableRetryCount.NAME);
         Attributes expectedAttributes =
    -        baseAttributes.toBuilder()
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(METHOD_KEY, "Bigtable.MutateRow")
    -            .put(STATUS_KEY, "OK")
    +        expectedBaseAttributes.toBuilder()
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRow")
    +            .put(MetricLabels.STATUS_KEY, "OK")
                 .build();
     
         long value = getAggregatedValue(metricData, expectedAttributes);
    @@ -555,28 +558,28 @@ public void testMutateRowAttemptsTagValues() throws InterruptedException {
             .call(RowMutation.create(TABLE, "random-row").setCell("cf", "q", "value"));
     
         outstandingRpcCounter.waitUntilRpcsDone();
    -    MetricData metricData = getMetricData(metricReader, ATTEMPT_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableAttemptLatency.NAME);
     
         Attributes expected1 =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "UNAVAILABLE")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, "global")
    -            .put(CLUSTER_ID_KEY, "")
    -            .put(METHOD_KEY, "Bigtable.MutateRow")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(STREAMING_KEY, false)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "UNAVAILABLE")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, "global")
    +            .put(TableSchema.CLUSTER_ID_KEY, "")
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRow")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.STREAMING_KEY, false)
                 .build();
     
         Attributes expected2 =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.MutateRow")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(STREAMING_KEY, false)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRow")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.STREAMING_KEY, false)
                 .build();
     
         verifyAttributes(metricData, expected1);
    @@ -594,17 +597,17 @@ public void testMutateRowsPartialError() throws InterruptedException {
     
         Assert.assertThrows(BatchingException.class, batcher::close);
     
    -    MetricData metricData = getMetricData(metricReader, ATTEMPT_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableAttemptLatency.NAME);
     
         Attributes expected =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.MutateRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(STREAMING_KEY, false)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.STREAMING_KEY, false)
                 .build();
     
         verifyAttributes(metricData, expected);
    @@ -622,17 +625,17 @@ public void testMutateRowsRpcError() {
     
         Assert.assertThrows(BatchingException.class, batcher::close);
     
    -    MetricData metricData = getMetricData(metricReader, ATTEMPT_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableAttemptLatency.NAME);
     
         Attributes expected =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "NOT_FOUND")
    -            .put(TABLE_ID_KEY, BAD_TABLE_ID)
    -            .put(ZONE_ID_KEY, "global")
    -            .put(CLUSTER_ID_KEY, "")
    -            .put(METHOD_KEY, "Bigtable.MutateRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(STREAMING_KEY, false)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "NOT_FOUND")
    +            .put(TableSchema.TABLE_ID_KEY, BAD_TABLE_ID)
    +            .put(TableSchema.ZONE_ID_KEY, "global")
    +            .put(TableSchema.CLUSTER_ID_KEY, "")
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.STREAMING_KEY, false)
                 .build();
     
         verifyAttributes(metricData, expected);
    @@ -642,28 +645,28 @@ public void testMutateRowsRpcError() {
       public void testReadRowsAttemptsTagValues() {
         Lists.newArrayList(stub.readRowsCallable().call(Query.create("fake-table")).iterator());
     
    -    MetricData metricData = getMetricData(metricReader, ATTEMPT_LATENCIES_NAME);
    +    MetricData metricData = getMetricData(metricReader, TableAttemptLatency.NAME);
     
         Attributes expected1 =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "UNAVAILABLE")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, "global")
    -            .put(CLUSTER_ID_KEY, "")
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(STREAMING_KEY, true)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "UNAVAILABLE")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, "global")
    +            .put(TableSchema.CLUSTER_ID_KEY, "")
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.STREAMING_KEY, true)
                 .build();
     
         Attributes expected2 =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    -            .put(STREAMING_KEY, true)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
    +            .put(MetricLabels.STREAMING_KEY, true)
                 .build();
     
         verifyAttributes(metricData, expected1);
    @@ -682,15 +685,15 @@ public void testBatchBlockingLatencies() throws InterruptedException {
     
           int expectedNumRequests = 6 / batchElementCount;
     
    -      MetricData applicationLatency = getMetricData(metricReader, CLIENT_BLOCKING_LATENCIES_NAME);
    +      MetricData applicationLatency = getMetricData(metricReader, TableClientBlockingLatency.NAME);
     
           Attributes expectedAttributes =
    -          baseAttributes.toBuilder()
    -              .put(TABLE_ID_KEY, TABLE)
    -              .put(ZONE_ID_KEY, ZONE)
    -              .put(CLUSTER_ID_KEY, CLUSTER)
    -              .put(METHOD_KEY, "Bigtable.MutateRows")
    -              .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +          expectedBaseAttributes.toBuilder()
    +              .put(TableSchema.TABLE_ID_KEY, TABLE)
    +              .put(TableSchema.ZONE_ID_KEY, ZONE)
    +              .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                   .build();
     
           long value = getAggregatedValue(applicationLatency, expectedAttributes);
    @@ -708,15 +711,15 @@ public void testQueuedOnChannelServerStreamLatencies() throws Exception {
         Duration proxyDelayPriorTest = delayProxyDetector.getCurrentDelayUsed();
         f.get();
     
    -    MetricData clientLatency = getMetricData(metricReader, CLIENT_BLOCKING_LATENCIES_NAME);
    +    MetricData clientLatency = getMetricData(metricReader, TableClientBlockingLatency.NAME);
     
         Attributes attributes =
    -        baseAttributes.toBuilder()
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
         assertThat(Duration.ofMillis(getAggregatedValue(clientLatency, attributes)))
    @@ -735,15 +738,15 @@ public void testQueuedOnChannelUnaryLatencies() throws Exception {
         f.get();
     
         outstandingRpcCounter.waitUntilRpcsDone();
    -    MetricData clientLatency = getMetricData(metricReader, CLIENT_BLOCKING_LATENCIES_NAME);
    +    MetricData clientLatency = getMetricData(metricReader, TableClientBlockingLatency.NAME);
     
         Attributes attributes =
    -        baseAttributes.toBuilder()
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(METHOD_KEY, "Bigtable.MutateRow")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRow")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
         assertThat(Duration.ofMillis(getAggregatedValue(clientLatency, attributes)))
    @@ -761,39 +764,39 @@ public void testPermanentFailure() {
         } catch (NotFoundException e) {
         }
     
    -    MetricData attemptLatency = getMetricData(metricReader, ATTEMPT_LATENCIES_NAME);
    +    MetricData attemptLatency = getMetricData(metricReader, TableAttemptLatency.NAME);
     
         Attributes expected =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "NOT_FOUND")
    -            .put(TABLE_ID_KEY, BAD_TABLE_ID)
    -            .put(CLUSTER_ID_KEY, "")
    -            .put(ZONE_ID_KEY, "global")
    -            .put(STREAMING_KEY, true)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "NOT_FOUND")
    +            .put(TableSchema.TABLE_ID_KEY, BAD_TABLE_ID)
    +            .put(TableSchema.CLUSTER_ID_KEY, "")
    +            .put(TableSchema.ZONE_ID_KEY, "global")
    +            .put(MetricLabels.STREAMING_KEY, true)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
     
         verifyAttributes(attemptLatency, expected);
     
    -    MetricData opLatency = getMetricData(metricReader, OPERATION_LATENCIES_NAME);
    +    MetricData opLatency = getMetricData(metricReader, TableOperationLatency.NAME);
         verifyAttributes(opLatency, expected);
       }
     
       @Test
       public void testRemainingDeadline() {
         stub.readRowsCallable().all().call(Query.create(TABLE));
    -    MetricData deadlineMetric = getMetricData(metricReader, REMAINING_DEADLINE_NAME);
    +    MetricData deadlineMetric = getMetricData(metricReader, TableRemainingDeadline.NAME);
     
         Attributes retryAttributes =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "UNAVAILABLE")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(ZONE_ID_KEY, "global")
    -            .put(CLUSTER_ID_KEY, "")
    -            .put(STREAMING_KEY, true)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "UNAVAILABLE")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(TableSchema.ZONE_ID_KEY, "global")
    +            .put(TableSchema.CLUSTER_ID_KEY, "")
    +            .put(MetricLabels.STREAMING_KEY, true)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
         HistogramPointData retryHistogramPointData =
             deadlineMetric.getHistogramData().getPoints().stream()
    @@ -806,14 +809,14 @@ public void testRemainingDeadline() {
         assertThat(retryRemainingDeadline).isEqualTo(9000);
     
         Attributes okAttributes =
    -        baseAttributes.toBuilder()
    -            .put(STATUS_KEY, "OK")
    -            .put(TABLE_ID_KEY, TABLE)
    -            .put(ZONE_ID_KEY, ZONE)
    -            .put(CLUSTER_ID_KEY, CLUSTER)
    -            .put(METHOD_KEY, "Bigtable.ReadRows")
    -            .put(STREAMING_KEY, true)
    -            .put(CLIENT_NAME_KEY, CLIENT_NAME)
    +        expectedBaseAttributes.toBuilder()
    +            .put(MetricLabels.STATUS_KEY, "OK")
    +            .put(TableSchema.TABLE_ID_KEY, TABLE)
    +            .put(TableSchema.ZONE_ID_KEY, ZONE)
    +            .put(TableSchema.CLUSTER_ID_KEY, CLUSTER)
    +            .put(MetricLabels.METHOD_KEY, "Bigtable.ReadRows")
    +            .put(MetricLabels.STREAMING_KEY, true)
    +            .put(MetricLabels.CLIENT_NAME, CLIENT_NAME)
                 .build();
         HistogramPointData okHistogramPointData =
             deadlineMetric.getHistogramData().getPoints().stream()
    @@ -837,19 +840,21 @@ public void testBatchWriteFlowControlTargetQpsIncreased() throws InterruptedExce
           batcher.close();
     
           MetricData targetQpsMetric =
    -          getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME);
    +          getMetricData(metricReader, ClientBatchWriteFlowControlTargetQps.NAME);
           Attributes targetQpsAttributes =
    -          baseAttributes.toBuilder().put(METHOD_KEY, "Bigtable.MutateRows").build();
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .build();
           double actual_qps = getAggregatedDoubleValue(targetQpsMetric, targetQpsAttributes);
           double expected_qps = 12;
           assertThat(expected_qps).isEqualTo(actual_qps);
     
    -      MetricData factorMetric = getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME);
    +      MetricData factorMetric = getMetricData(metricReader, ClientBatchWriteFlowControlFactor.NAME);
           Attributes factorAttributes =
    -          baseAttributes.toBuilder()
    -              .put(METHOD_KEY, "Bigtable.MutateRows")
    -              .put(APPLIED_KEY, true)
    -              .put(STATUS_KEY, "OK")
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .put(MetricLabels.APPLIED_KEY, true)
    +              .put(MetricLabels.STATUS_KEY, "OK")
                   .build();
           double actual_factor_mean = getAggregatedDoubleValue(factorMetric, factorAttributes);
           double expected_factor_mean = 1.2;
    @@ -867,19 +872,21 @@ public void testBatchWriteFlowControlTargetQpsDecreased() throws InterruptedExce
           batcher.close();
     
           MetricData targetQpsMetric =
    -          getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME);
    +          getMetricData(metricReader, ClientBatchWriteFlowControlTargetQps.NAME);
           Attributes targetQpsAttributes =
    -          baseAttributes.toBuilder().put(METHOD_KEY, "Bigtable.MutateRows").build();
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .build();
           double actual_qps = getAggregatedDoubleValue(targetQpsMetric, targetQpsAttributes);
           double expected_qps = 8.0;
           assertThat(expected_qps).isEqualTo(actual_qps);
     
    -      MetricData factorMetric = getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME);
    +      MetricData factorMetric = getMetricData(metricReader, ClientBatchWriteFlowControlFactor.NAME);
           Attributes factorAttributes =
    -          baseAttributes.toBuilder()
    -              .put(METHOD_KEY, "Bigtable.MutateRows")
    -              .put(APPLIED_KEY, true)
    -              .put(STATUS_KEY, "OK")
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .put(MetricLabels.APPLIED_KEY, true)
    +              .put(MetricLabels.STATUS_KEY, "OK")
                   .build();
           double actual_factor_mean = getAggregatedDoubleValue(factorMetric, factorAttributes);
           double expected_factor_mean = 0.8;
    @@ -897,20 +904,22 @@ public void testBatchWriteFlowControlTargetQpsCappedOnMaxFactor() throws Interru
           batcher.close();
     
           MetricData targetQpsMetric =
    -          getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME);
    +          getMetricData(metricReader, ClientBatchWriteFlowControlTargetQps.NAME);
           Attributes targetQpsAttributes =
    -          baseAttributes.toBuilder().put(METHOD_KEY, "Bigtable.MutateRows").build();
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .build();
           double actual_qps = getAggregatedDoubleValue(targetQpsMetric, targetQpsAttributes);
           // Factor is 1.8 but capped at 1.3 so updated QPS is 13.
           double expected_qps = 13;
           assertThat(expected_qps).isEqualTo(actual_qps);
     
    -      MetricData factorMetric = getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME);
    +      MetricData factorMetric = getMetricData(metricReader, ClientBatchWriteFlowControlFactor.NAME);
           Attributes factorAttributes =
    -          baseAttributes.toBuilder()
    -              .put(METHOD_KEY, "Bigtable.MutateRows")
    -              .put(APPLIED_KEY, true)
    -              .put(STATUS_KEY, "OK")
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .put(MetricLabels.APPLIED_KEY, true)
    +              .put(MetricLabels.STATUS_KEY, "OK")
                   .build();
           double actual_factor_mean = getAggregatedDoubleValue(factorMetric, factorAttributes);
           // Factor is 1.8 but capped at 1.3
    @@ -929,20 +938,22 @@ public void testBatchWriteFlowControlTargetQpsCappedOnMinFactor() throws Interru
           batcher.close();
     
           MetricData targetQpsMetric =
    -          getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME);
    +          getMetricData(metricReader, ClientBatchWriteFlowControlTargetQps.NAME);
           Attributes targetQpsAttributes =
    -          baseAttributes.toBuilder().put(METHOD_KEY, "Bigtable.MutateRows").build();
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .build();
           double actual_qps = getAggregatedDoubleValue(targetQpsMetric, targetQpsAttributes);
           // Factor is 0.5 but capped at 0.7 so updated QPS is 7.
           double expected_qps = 7;
           assertThat(expected_qps).isEqualTo(actual_qps);
     
    -      MetricData factorMetric = getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME);
    +      MetricData factorMetric = getMetricData(metricReader, ClientBatchWriteFlowControlFactor.NAME);
           Attributes factorAttributes =
    -          baseAttributes.toBuilder()
    -              .put(METHOD_KEY, "Bigtable.MutateRows")
    -              .put(APPLIED_KEY, true)
    -              .put(STATUS_KEY, "OK")
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .put(MetricLabels.APPLIED_KEY, true)
    +              .put(MetricLabels.STATUS_KEY, "OK")
                   .build();
           double actual_factor_mean = getAggregatedDoubleValue(factorMetric, factorAttributes);
           // Factor is 0.5 but capped at 0.7
    @@ -962,20 +973,22 @@ public void testBatchWriteFlowControlTargetQpsDecreasedForError() throws Interru
           batcher.close();
     
           MetricData targetQpsMetric =
    -          getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_TARGET_QPS_NAME);
    +          getMetricData(metricReader, ClientBatchWriteFlowControlTargetQps.NAME);
           Attributes targetQpsAttributes =
    -          baseAttributes.toBuilder().put(METHOD_KEY, "Bigtable.MutateRows").build();
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .build();
           double actual_qps = getAggregatedDoubleValue(targetQpsMetric, targetQpsAttributes);
           // On error, min factor is applied.
           double expected_qps = 7;
           assertThat(expected_qps).isEqualTo(actual_qps);
     
    -      MetricData factorMetric = getMetricData(metricReader, BATCH_WRITE_FLOW_CONTROL_FACTOR_NAME);
    +      MetricData factorMetric = getMetricData(metricReader, ClientBatchWriteFlowControlFactor.NAME);
           Attributes factorAttributes =
    -          baseAttributes.toBuilder()
    -              .put(METHOD_KEY, "Bigtable.MutateRows")
    -              .put(APPLIED_KEY, true)
    -              .put(STATUS_KEY, "UNAVAILABLE")
    +          expectedBaseAttributes.toBuilder()
    +              .put(MetricLabels.METHOD_KEY, "Bigtable.MutateRows")
    +              .put(MetricLabels.APPLIED_KEY, true)
    +              .put(MetricLabels.STATUS_KEY, "UNAVAILABLE")
                   .build();
           double actual_factor_mean = getAggregatedDoubleValue(factorMetric, factorAttributes);
           // On error, min factor is applied.
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ChannelPoolMetricsTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/ChannelPoolMetricsTracerTest.java
    similarity index 90%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ChannelPoolMetricsTracerTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/ChannelPoolMetricsTracerTest.java
    index 855709503e..fec4f7956a 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/ChannelPoolMetricsTracerTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/ChannelPoolMetricsTracerTest.java
    @@ -13,17 +13,21 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +package com.google.cloud.bigtable.data.v2.internal.csm.tracers;
     
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.OUTSTANDING_RPCS_PER_CHANNEL_NAME;
    -import static com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants.PER_CONNECTION_ERROR_COUNT_NAME;
     import static com.google.common.truth.Truth.assertThat;
     import static org.mockito.ArgumentMatchers.any;
     import static org.mockito.ArgumentMatchers.anyLong;
     import static org.mockito.Mockito.when;
     
    +import com.google.cloud.bigtable.data.v2.internal.api.InstanceName;
    +import com.google.cloud.bigtable.data.v2.internal.csm.MetricRegistry;
    +import com.google.cloud.bigtable.data.v2.internal.csm.attributes.ClientInfo;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientChannelPoolOutstandingRpcs;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.ClientPerConnectionErrorCount;
     import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelObserver;
     import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelPoolObserver;
    +import com.google.cloud.bigtable.gaxx.grpc.BigtableChannelPoolSettings.LoadBalancingStrategy;
     import com.google.common.collect.ImmutableList;
     import io.opentelemetry.api.OpenTelemetry;
     import io.opentelemetry.api.common.AttributeKey;
    @@ -68,12 +72,21 @@ public class ChannelPoolMetricsTracerTest {
       @Before
       public void setUp() {
         metricReader = InMemoryMetricReader.create();
    +    ClientInfo clientInfo =
    +        ClientInfo.builder()
    +            .setInstanceName(InstanceName.of("fake-project", "fake-instance"))
    +            .setAppProfileId("fake-profile")
    +            .build();
         SdkMeterProvider meterProvider =
             SdkMeterProvider.builder().registerMetricReader(metricReader).build();
         OpenTelemetry openTelemetry =
             OpenTelemetrySdk.builder().setMeterProvider(meterProvider).build();
     
    -    tracker = new ChannelPoolMetricsTracer(openTelemetry);
    +    MetricRegistry mr = new MetricRegistry();
    +
    +    tracker =
    +        new ChannelPoolMetricsTracer(
    +            mr.newRecorderRegistry(openTelemetry.getMeterProvider()), clientInfo);
     
         runnableCaptor = ArgumentCaptor.forClass(Runnable.class);
         // Configure mockScheduler to capture the runnable when tracker.start() is called
    @@ -147,7 +160,7 @@ private static Attributes getExpectedAttributes(String lbPolicy, boolean streami
       public void testSingleRun() {
         // Arrange
         tracker.registerChannelInsightsProvider(mockInsightsProvider);
    -    tracker.registerLoadBalancingStrategy("LEAST_IN_FLIGHT");
    +    tracker.registerLoadBalancingStrategy(LoadBalancingStrategy.LEAST_IN_FLIGHT);
         tracker.start(mockScheduler);
     
         // Outstanding RPCs
    @@ -169,7 +182,7 @@ public void testSingleRun() {
     
         // Assert Outstanding RPCs metric
         Optional rpcMetricDataOpt =
    -        getMetricData(metrics, OUTSTANDING_RPCS_PER_CHANNEL_NAME);
    +        getMetricData(metrics, ClientChannelPoolOutstandingRpcs.NAME);
         assertThat(rpcMetricDataOpt.isPresent()).isTrue();
         MetricData rpcMetricData = rpcMetricDataOpt.get();
         Collection rpcPoints = rpcMetricData.getHistogramData().getPoints();
    @@ -189,7 +202,7 @@ public void testSingleRun() {
     
         // Assert Error Count metric
         Optional errorMetricDataOpt =
    -        getMetricData(metrics, PER_CONNECTION_ERROR_COUNT_NAME);
    +        getMetricData(metrics, ClientPerConnectionErrorCount.NAME);
         assertThat(errorMetricDataOpt.isPresent()).isTrue();
         MetricData errorMetricData = errorMetricDataOpt.get();
         Collection errorPoints = errorMetricData.getHistogramData().getPoints();
    @@ -205,7 +218,7 @@ public void testSingleRun() {
       public void testMultipleRuns() {
         // Arrange
         tracker.registerChannelInsightsProvider(mockInsightsProvider);
    -    tracker.registerLoadBalancingStrategy("ROUND_ROBIN");
    +    tracker.registerLoadBalancingStrategy(LoadBalancingStrategy.ROUND_ROBIN);
         tracker.start(mockScheduler);
     
         // First run
    @@ -236,7 +249,7 @@ public void testMultipleRuns() {
     
         // Assert Outstanding RPCs
         Optional rpcMetricDataOpt =
    -        getMetricData(metrics, OUTSTANDING_RPCS_PER_CHANNEL_NAME);
    +        getMetricData(metrics, ClientChannelPoolOutstandingRpcs.NAME);
         assertThat(rpcMetricDataOpt.isPresent()).isTrue();
         Collection rpcPoints =
             rpcMetricDataOpt.get().getHistogramData().getPoints();
    @@ -252,7 +265,7 @@ public void testMultipleRuns() {
     
         // Assert Error Counts
         Optional errorMetricDataOpt =
    -        getMetricData(metrics, PER_CONNECTION_ERROR_COUNT_NAME);
    +        getMetricData(metrics, ClientPerConnectionErrorCount.NAME);
         assertThat(errorMetricDataOpt.isPresent()).isTrue();
         Collection errorPoints =
             errorMetricDataOpt.get().getHistogramData().getPoints();
    @@ -281,7 +294,7 @@ public void testErrorMetricsOnlyRecordedForAllChannels() {
     
         Collection metrics = metricReader.collectAllMetrics();
         Optional errorMetricDataOpt =
    -        getMetricData(metrics, PER_CONNECTION_ERROR_COUNT_NAME);
    +        getMetricData(metrics, ClientPerConnectionErrorCount.NAME);
         assertThat(errorMetricDataOpt.isPresent()).isTrue();
         Collection errorPoints =
             errorMetricDataOpt.get().getHistogramData().getPoints();
    @@ -302,7 +315,7 @@ public void testDefaultLbPolicy() {
     
         Collection metrics = metricReader.collectAllMetrics();
         Optional rpcMetricDataOpt =
    -        getMetricData(metrics, OUTSTANDING_RPCS_PER_CHANNEL_NAME);
    +        getMetricData(metrics, ClientChannelPoolOutstandingRpcs.NAME);
         assertThat(rpcMetricDataOpt.isPresent()).isTrue();
         Collection points = rpcMetricDataOpt.get().getHistogramData().getPoints();
     
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracerTest.java
    similarity index 93%
    rename from google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerTest.java
    rename to google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracerTest.java
    index 71a4728f9f..c77f3e1e50 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/CompositeTracerTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/internal/csm/tracers/CompositeTracerTest.java
    @@ -13,7 +13,7 @@
      * See the License for the specific language governing permissions and
      * limitations under the License.
      */
    -package com.google.cloud.bigtable.data.v2.stub.metrics;
    +package com.google.cloud.bigtable.data.v2.internal.csm.tracers;
     
     import static com.google.api.gax.util.TimeConversionUtils.toThreetenDuration;
     import static com.google.common.truth.Truth.assertThat;
    @@ -25,10 +25,10 @@
     import com.google.api.gax.tracing.ApiTracer;
     import com.google.api.gax.tracing.ApiTracer.Scope;
     import com.google.bigtable.v2.ReadRowsRequest;
    +import com.google.cloud.bigtable.data.v2.stub.MetadataExtractorInterceptor;
    +import com.google.cloud.bigtable.data.v2.stub.metrics.BigtableTracer;
     import com.google.cloud.bigtable.misc_utilities.MethodComparator;
     import com.google.common.collect.ImmutableList;
    -import io.grpc.Status;
    -import io.grpc.StatusRuntimeException;
     import java.lang.reflect.Method;
     import java.util.Arrays;
     import org.junit.Assert;
    @@ -241,11 +241,12 @@ public void testGetAttempt() {
       }
     
       @Test
    -  public void testRecordGfeLatency() {
    -    Throwable t = new StatusRuntimeException(Status.UNAVAILABLE);
    -    compositeTracer.recordGfeMetadata(20L, t);
    -    verify(child3, times(1)).recordGfeMetadata(20L, t);
    -    verify(child4, times(1)).recordGfeMetadata(20L, t);
    +  public void testSidebandData() {
    +    MetadataExtractorInterceptor.SidebandData sidebandData =
    +        new MetadataExtractorInterceptor.SidebandData();
    +    compositeTracer.setSidebandData(sidebandData);
    +    verify(child3, times(1)).setSidebandData(sidebandData);
    +    verify(child4, times(1)).setSidebandData(sidebandData);
       }
     
       @Test
    @@ -264,13 +265,6 @@ public void testMethodsOverride() {
             .containsAtLeastElementsIn(baseMethods);
       }
     
    -  @Test
    -  public void testRequestBlockedOnChannel() {
    -    compositeTracer.grpcChannelQueuedLatencies(5L);
    -    verify(child3, times(1)).grpcChannelQueuedLatencies(5L);
    -    verify(child4, times(1)).grpcChannelQueuedLatencies(5L);
    -  }
    -
       @Test
       public void testGrpcMessageSent() {
         compositeTracer.grpcMessageSent();
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
    index 20555520f6..b8e5df4487 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/BuiltinMetricsIT.java
    @@ -29,10 +29,10 @@
     import com.google.cloud.bigtable.admin.v2.models.Table;
     import com.google.cloud.bigtable.data.v2.BigtableDataClient;
     import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.Constants.MetricLabels;
     import com.google.cloud.bigtable.data.v2.models.Query;
     import com.google.cloud.bigtable.data.v2.models.Row;
     import com.google.cloud.bigtable.data.v2.models.RowMutation;
    -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
     import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
     import com.google.cloud.bigtable.test_helpers.env.CloudEnv;
     import com.google.cloud.bigtable.test_helpers.env.PrefixGenerator;
    @@ -339,7 +339,7 @@ private void verifyMetricsWithMetricsReader(
                   .putAll(ts.getMetric().getLabelsMap())
                   .build();
           AttributesBuilder attributesBuilder = Attributes.builder();
    -      String streamingKey = BuiltinMetricsConstants.STREAMING_KEY.getKey();
    +      String streamingKey = MetricLabels.STREAMING_KEY.getKey();
           attributesMap.forEach(
               (k, v) -> {
                 if (!k.equals(streamingKey)) {
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
    index 56f6bfa476..5e56d36e72 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/MetricsITUtils.java
    @@ -15,7 +15,7 @@
      */
     package com.google.cloud.bigtable.data.v2.it;
     
    -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema;
     import com.google.common.truth.Correspondence;
     import io.opentelemetry.sdk.metrics.data.MetricData;
     import io.opentelemetry.sdk.metrics.data.PointData;
    @@ -27,11 +27,11 @@ public class MetricsITUtils {
     
       static final Correspondence POINT_DATA_CLUSTER_ID_CONTAINS =
           Correspondence.from(
    -          (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY).contains(s),
    +          (pd, s) -> pd.getAttributes().get(TableSchema.CLUSTER_ID_KEY).contains(s),
               "contains attributes");
     
       static final Correspondence POINT_DATA_ZONE_ID_CONTAINS =
           Correspondence.from(
    -          (pd, s) -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY).contains(s),
    +          (pd, s) -> pd.getAttributes().get(TableSchema.ZONE_ID_KEY).contains(s),
               "contains attributes");
     }
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/ReadIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/ReadIT.java
    index 95ed16817e..ce45b0fdf1 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/ReadIT.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/ReadIT.java
    @@ -73,6 +73,7 @@
     
     @RunWith(JUnit4.class)
     public class ReadIT {
    +
       private String prefix;
     
       @ClassRule public static TestEnvRule testEnvRule = new TestEnvRule();
    @@ -460,7 +461,7 @@ public void rangeQueries() {
       }
     
       @Test
    -  public void rangeQueriesOnAuthorizedView() {
    +  public void rangeQueriesOnAuthorizedView() throws InterruptedException {
         assume()
             .withMessage("AuthorizedView is not supported on Emulator")
             .that(testEnvRule.env())
    @@ -761,6 +762,7 @@ public void onSuccess(Row result) {
       }
     
       static class AccumulatingObserver implements ResponseObserver {
    +
         final List responses = Lists.newArrayList();
         final SettableApiFuture completionFuture = SettableApiFuture.create();
     
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
    index 1c9245ba39..03d9c156c3 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/StreamingMetricsMetadataIT.java
    @@ -26,9 +26,10 @@
     import com.google.cloud.bigtable.admin.v2.models.Cluster;
     import com.google.cloud.bigtable.data.v2.BigtableDataClient;
     import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableOperationLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema;
     import com.google.cloud.bigtable.data.v2.models.Query;
     import com.google.cloud.bigtable.data.v2.models.Row;
    -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
     import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
     import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
     import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
    @@ -105,23 +106,23 @@ public void testSuccess() throws Exception {
         Collection allMetricData = metricReader.collectAllMetrics();
         List metrics =
             metricReader.collectAllMetrics().stream()
    -            .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
    +            .filter(m -> m.getName().contains(TableOperationLatency.NAME))
                 .collect(Collectors.toList());
     
         assertThat(allMetricData)
             .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
    -        .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
    +        .contains(TableOperationLatency.NAME);
         assertThat(metrics).hasSize(1);
     
         MetricData metricData = metrics.get(0);
         List pointData = new ArrayList<>(metricData.getData().getPoints());
         List clusterAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.CLUSTER_ID_KEY))
                 .collect(Collectors.toList());
         List zoneAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.ZONE_ID_KEY))
                 .collect(Collectors.toList());
     
         assertThat(pointData)
    @@ -146,23 +147,23 @@ public void testFailure() {
         Collection allMetricData = metricReader.collectAllMetrics();
         List metrics =
             metricReader.collectAllMetrics().stream()
    -            .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
    +            .filter(m -> m.getName().contains(TableOperationLatency.NAME))
                 .collect(Collectors.toList());
     
         assertThat(allMetricData)
             .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
    -        .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
    +        .contains(TableOperationLatency.NAME);
         assertThat(metrics).hasSize(1);
     
         MetricData metricData = metrics.get(0);
         List pointData = new ArrayList<>(metricData.getData().getPoints());
         List clusterAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.CLUSTER_ID_KEY))
                 .collect(Collectors.toList());
         List zoneAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.ZONE_ID_KEY))
                 .collect(Collectors.toList());
     
         assertThat(pointData)
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
    index 0196614299..50ff7ea6ad 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/it/UnaryMetricsMetadataIT.java
    @@ -26,8 +26,10 @@
     import com.google.cloud.bigtable.admin.v2.models.Cluster;
     import com.google.cloud.bigtable.data.v2.BigtableDataClient;
     import com.google.cloud.bigtable.data.v2.BigtableDataSettings;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableAttemptLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.metrics.TableOperationLatency;
    +import com.google.cloud.bigtable.data.v2.internal.csm.schema.TableSchema;
     import com.google.cloud.bigtable.data.v2.models.RowMutation;
    -import com.google.cloud.bigtable.data.v2.stub.metrics.BuiltinMetricsConstants;
     import com.google.cloud.bigtable.data.v2.stub.metrics.CustomOpenTelemetryMetricsProvider;
     import com.google.cloud.bigtable.test_helpers.env.EmulatorEnv;
     import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
    @@ -110,23 +112,23 @@ public void testSuccess() throws Exception {
         Collection allMetricData = metricReader.collectAllMetrics();
         List metrics =
             allMetricData.stream()
    -            .filter(m -> m.getName().contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME))
    +            .filter(m -> m.getName().contains(TableOperationLatency.NAME))
                 .collect(Collectors.toList());
     
         assertThat(allMetricData)
             .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
    -        .contains(BuiltinMetricsConstants.OPERATION_LATENCIES_NAME);
    +        .contains(TableOperationLatency.NAME);
         assertThat(metrics).hasSize(1);
     
         MetricData metricData = metrics.get(0);
         List pointData = new ArrayList<>(metricData.getData().getPoints());
         List clusterAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.CLUSTER_ID_KEY))
                 .collect(Collectors.toList());
         List zoneAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.ZONE_ID_KEY))
                 .collect(Collectors.toList());
     
         assertThat(pointData)
    @@ -163,10 +165,7 @@ public void testFailure() throws Exception {
         Collection allMetricData = metricReader.collectAllMetrics();
         MetricData metricData = null;
         for (MetricData md : allMetricData) {
    -      if (md.getName()
    -          .equals(
    -              BuiltinMetricsConstants.METER_NAME
    -                  + BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME)) {
    +      if (md.getName().equals(TableAttemptLatency.NAME)) {
             metricData = md;
             break;
           }
    @@ -174,7 +173,7 @@ public void testFailure() throws Exception {
     
         assertThat(allMetricData)
             .comparingElementsUsing(METRIC_DATA_NAME_CONTAINS)
    -        .contains(BuiltinMetricsConstants.ATTEMPT_LATENCIES_NAME);
    +        .contains(TableAttemptLatency.NAME);
         assertThat(metricData).isNotNull();
     
         List pointData = new ArrayList<>(metricData.getData().getPoints());
    @@ -185,11 +184,11 @@ public void testFailure() throws Exception {
         assertThat(pointData).comparingElementsUsing(POINT_DATA_ZONE_ID_CONTAINS).contains("global");
         List clusterAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.CLUSTER_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.CLUSTER_ID_KEY))
                 .collect(Collectors.toList());
         List zoneAttributes =
             pointData.stream()
    -            .map(pd -> pd.getAttributes().get(BuiltinMetricsConstants.ZONE_ID_KEY))
    +            .map(pd -> pd.getAttributes().get(TableSchema.ZONE_ID_KEY))
                 .collect(Collectors.toList());
     
         assertThat(clusterAttributes).contains("");
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/CookiesHolderTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/CookiesHolderTest.java
    index bf02ce447a..648cff4809 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/CookiesHolderTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/CookiesHolderTest.java
    @@ -69,7 +69,6 @@
     import io.grpc.Status;
     import io.grpc.StatusRuntimeException;
     import io.grpc.stub.StreamObserver;
    -import java.io.IOException;
     import java.util.ArrayList;
     import java.util.Collections;
     import java.util.HashMap;
    @@ -673,58 +672,6 @@ public void testCookieSetWithBigtableClientFactory() throws Exception {
         }
       }
     
    -  @Test
    -  public void testDisableRoutingCookie() throws IOException {
    -    // This test disables routing cookie in the client settings and ensures that none of the routing
    -    // cookie
    -    // is added.
    -    settings.stubSettings().setEnableRoutingCookie(false);
    -    try (BigtableDataClient client = BigtableDataClient.create(settings.build())) {
    -      @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -      ArrayList ignored = Lists.newArrayList(client.readRows(Query.create("fake-table")));
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      client.mutateRow(RowMutation.create("fake-table", "key").setCell("cf", "q", "v"));
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      client.bulkMutateRows(
    -          BulkMutation.create("fake-table")
    -              .add(RowMutationEntry.create("key").setCell("cf", "q", "v")));
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      client.sampleRowKeys("fake-table");
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      client.checkAndMutateRow(
    -          ConditionalRowMutation.create("fake-table", "key")
    -              .then(Mutation.create().setCell("cf", "q", "v")));
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      client.readModifyWriteRow(
    -          ReadModifyWriteRow.create("fake-table", "key").append("cf", "q", "v"));
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -      ArrayList ignored2 =
    -          Lists.newArrayList(client.generateInitialChangeStreamPartitions("fake-table"));
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -      fakeService.count.set(0);
    -
    -      for (ChangeStreamRecord record :
    -          client.readChangeStream(ReadChangeStreamQuery.create("fake-table"))) {}
    -
    -      assertThat(fakeService.count.get()).isEqualTo(2);
    -
    -      assertThat(methods).isEmpty();
    -    }
    -  }
    -
       static class FakeService extends BigtableGrpc.BigtableImplBase {
     
         private volatile boolean returnCookie = true;
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
    index aecad0cc12..ad0de696a3 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubSettingsTest.java
    @@ -94,8 +94,6 @@ public void settingsAreNotLostTest() {
                 .setCredentialsProvider(credentialsProvider)
                 .setStreamWatchdogProvider(watchdogProvider)
                 .setStreamWatchdogCheckInterval(watchdogInterval)
    -            .setEnableRoutingCookie(enableRoutingCookie)
    -            .setEnableRetryInfo(enableRetryInfo)
                 .setMetricsEndpoint(metricsEndpoint);
     
         verifyBuilder(
    @@ -160,8 +158,6 @@ private void verifyBuilder(
         assertThat(builder.getCredentialsProvider()).isEqualTo(credentialsProvider);
         assertThat(builder.getStreamWatchdogProvider()).isSameInstanceAs(watchdogProvider);
         assertThat(builder.getStreamWatchdogCheckInterval()).isEqualTo(watchdogInterval);
    -    assertThat(builder.getEnableRoutingCookie()).isEqualTo(enableRoutingCookie);
    -    assertThat(builder.getEnableRetryInfo()).isEqualTo(enableRetryInfo);
         assertThat(builder.getMetricsEndpoint()).isEqualTo(metricsEndpoint);
       }
     
    @@ -186,8 +182,6 @@ private void verifySettings(
         assertThat(settings.getCredentialsProvider()).isEqualTo(credentialsProvider);
         assertThat(settings.getStreamWatchdogProvider()).isSameInstanceAs(watchdogProvider);
         assertThat(settings.getStreamWatchdogCheckInterval()).isEqualTo(watchdogInterval);
    -    assertThat(settings.getEnableRoutingCookie()).isEqualTo(enableRoutingCookie);
    -    assertThat(settings.getEnableRetryInfo()).isEqualTo(enableRetryInfo);
         assertThat(settings.getMetricsEndpoint()).isEqualTo(metricsEndpoint);
       }
     
    @@ -920,94 +914,12 @@ public void isRefreshingChannelFalseValueTest() {
         assertThat(builder.build().toBuilder().isRefreshingChannel()).isFalse();
       }
     
    -  @Test
    -  public void routingCookieIsEnabled() throws IOException {
    -    String dummyProjectId = "my-project";
    -    String dummyInstanceId = "my-instance";
    -    CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class);
    -    Mockito.when(credentialsProvider.getCredentials()).thenReturn(new FakeCredentials());
    -    EnhancedBigtableStubSettings.Builder builder =
    -        EnhancedBigtableStubSettings.newBuilder()
    -            .setProjectId(dummyProjectId)
    -            .setInstanceId(dummyInstanceId)
    -            .setCredentialsProvider(credentialsProvider);
    -
    -    assertThat(builder.getEnableRoutingCookie()).isTrue();
    -    assertThat(builder.build().getEnableRoutingCookie()).isTrue();
    -    assertThat(builder.build().toBuilder().getEnableRoutingCookie()).isTrue();
    -  }
    -
    -  @Test
    -  public void enableRetryInfoDefaultValueTest() throws IOException {
    -    String dummyProjectId = "my-project";
    -    String dummyInstanceId = "my-instance";
    -    CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class);
    -    Mockito.when(credentialsProvider.getCredentials()).thenReturn(new FakeCredentials());
    -    EnhancedBigtableStubSettings.Builder builder =
    -        EnhancedBigtableStubSettings.newBuilder()
    -            .setProjectId(dummyProjectId)
    -            .setInstanceId(dummyInstanceId)
    -            .setCredentialsProvider(credentialsProvider);
    -    assertThat(builder.getEnableRetryInfo()).isTrue();
    -    assertThat(builder.build().getEnableRetryInfo()).isTrue();
    -    assertThat(builder.build().toBuilder().getEnableRetryInfo()).isTrue();
    -  }
    -
    -  @Test
    -  public void routingCookieFalseValueSet() throws IOException {
    -    String dummyProjectId = "my-project";
    -    String dummyInstanceId = "my-instance";
    -    CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class);
    -    Mockito.when(credentialsProvider.getCredentials()).thenReturn(new FakeCredentials());
    -    EnhancedBigtableStubSettings.Builder builder =
    -        EnhancedBigtableStubSettings.newBuilder()
    -            .setProjectId(dummyProjectId)
    -            .setInstanceId(dummyInstanceId)
    -            .setEnableRoutingCookie(false)
    -            .setCredentialsProvider(credentialsProvider);
    -    assertThat(builder.getEnableRoutingCookie()).isFalse();
    -    assertThat(builder.build().getEnableRoutingCookie()).isFalse();
    -    assertThat(builder.build().toBuilder().getEnableRoutingCookie()).isFalse();
    -  }
    -
    -  @Test
    -  public void enableRetryInfoFalseValueTest() throws IOException {
    -    String dummyProjectId = "my-project";
    -    String dummyInstanceId = "my-instance";
    -    CredentialsProvider credentialsProvider = Mockito.mock(CredentialsProvider.class);
    -    Mockito.when(credentialsProvider.getCredentials()).thenReturn(new FakeCredentials());
    -    EnhancedBigtableStubSettings.Builder builder =
    -        EnhancedBigtableStubSettings.newBuilder()
    -            .setProjectId(dummyProjectId)
    -            .setInstanceId(dummyInstanceId)
    -            .setEnableRetryInfo(false)
    -            .setCredentialsProvider(credentialsProvider);
    -    assertThat(builder.getEnableRetryInfo()).isFalse();
    -    assertThat(builder.build().getEnableRetryInfo()).isFalse();
    -    assertThat(builder.build().toBuilder().getEnableRetryInfo()).isFalse();
    -  }
    -
       static final String[] SETTINGS_LIST = {
         "projectId",
         "instanceId",
         "appProfileId",
         "isRefreshingChannel",
    -    "primedTableIds",
    -    "enableRoutingCookie",
    -    "enableRetryInfo",
    -    "readRowsSettings",
    -    "readRowSettings",
    -    "sampleRowKeysSettings",
    -    "mutateRowSettings",
    -    "bulkMutateRowsSettings",
    -    "bulkReadRowsSettings",
    -    "checkAndMutateRowSettings",
    -    "readModifyWriteRowSettings",
    -    "generateInitialChangeStreamPartitionsSettings",
    -    "readChangeStreamSettings",
    -    "pingAndWarmSettings",
    -    "executeQuerySettings",
    -    "prepareQuerySettings",
    +    "perOpSettings",
         "metricsProvider",
         "metricsEndpoint",
         "areInternalMetricsEnabled",
    @@ -1025,17 +937,12 @@ public void testToString() {
                 .build();
     
         checkToString(defaultSettings);
    -    assertThat(defaultSettings.toString()).contains("primedTableIds=[]");
     
         EnhancedBigtableStubSettings settings =
    -        defaultSettings.toBuilder()
    -            .setPrimedTableIds("2", "12", "85", "06")
    -            .setEndpoint("example.com:1234")
    -            .build();
    +        defaultSettings.toBuilder().setEndpoint("example.com:1234").build();
     
         checkToString(settings);
         assertThat(settings.toString()).contains("endpoint=example.com:1234");
    -    assertThat(settings.toString()).contains("primedTableIds=[2, 12, 85, 06]");
     
         int nonStaticFields = 0;
         for (Field field : EnhancedBigtableStubSettings.class.getDeclaredFields()) {
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubTest.java
    index fbafe50f47..1531506a11 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/EnhancedBigtableStubTest.java
    @@ -698,10 +698,7 @@ public void testBulkMutationFlowControllerConfigured() throws Exception {
       public void testCallContextPropagatedInMutationBatcher()
           throws IOException, InterruptedException, ExecutionException {
         EnhancedBigtableStubSettings settings =
    -        defaultSettings.toBuilder()
    -            .setRefreshingChannel(true)
    -            .setPrimedTableIds("table1", "table2")
    -            .build();
    +        defaultSettings.toBuilder().setRefreshingChannel(true).build();
     
         try (EnhancedBigtableStub stub = EnhancedBigtableStub.create(settings)) {
           // clear the previous contexts
    @@ -728,10 +725,7 @@ public void testCallContextPropagatedInMutationBatcher()
       public void testCallContextPropagatedInReadBatcher()
           throws IOException, InterruptedException, ExecutionException {
         EnhancedBigtableStubSettings settings =
    -        defaultSettings.toBuilder()
    -            .setRefreshingChannel(true)
    -            .setPrimedTableIds("table1", "table2")
    -            .build();
    +        defaultSettings.toBuilder().setRefreshingChannel(true).build();
     
         try (EnhancedBigtableStub stub = EnhancedBigtableStub.create(settings)) {
           // clear the previous contexts
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
    index ea4b46a713..c206eb20a6 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/RetryInfoTest.java
    @@ -243,29 +243,11 @@ public void testReadRowNonRetryableErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(() -> client.readRow("table", "row"), false);
       }
     
    -  @Test
    -  public void testReadRowDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyRetryInfoCanBeDisabled(() -> newClient.readRow("table", "row"));
    -    }
    -  }
    -
       @Test
       public void testReadRowServerNotReturningRetryInfo() {
         verifyNoRetryInfo(() -> client.readRow("table", "row"), true);
       }
     
    -  @Test
    -  public void testReadRowServerNotReturningRetryInfoClientDisabledHandling() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(() -> newClient.readRow("table", "row"), true);
    -    }
    -  }
    -
       @Test
       public void testReadRowsNonRetraybleErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(
    @@ -276,19 +258,6 @@ public void testReadRowsNonRetraybleErrorWithRetryInfo() {
             false);
       }
     
    -  @Test
    -  public void testReadRowsDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyRetryInfoCanBeDisabled(
    -          () -> {
    -            @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -            ArrayList ignored = Lists.newArrayList(newClient.readRows(Query.create("table")));
    -          });
    -    }
    -  }
    -
       @Test
       public void testReadRowsServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
    @@ -299,20 +268,6 @@ public void testReadRowsServerNotReturningRetryInfo() {
             true);
       }
     
    -  @Test
    -  public void testReadRowsServerNotReturningRetryInfoClientDisabledHandling() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () -> {
    -            @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -            ArrayList ignored = Lists.newArrayList(newClient.readRows(Query.create("table")));
    -          },
    -          true);
    -    }
    -  }
    -
       @Test
       public void testMutateRowsNonRetryableErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(
    @@ -323,19 +278,6 @@ public void testMutateRowsNonRetryableErrorWithRetryInfo() {
             false);
       }
     
    -  @Test
    -  public void testMutateRowsDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyRetryInfoCanBeDisabled(
    -          () ->
    -              newClient.bulkMutateRows(
    -                  BulkMutation.create("fake-table")
    -                      .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))));
    -    }
    -  }
    -
       @Test
       public void testMutateRowsServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
    @@ -346,101 +288,28 @@ public void testMutateRowsServerNotReturningRetryInfo() {
             true);
       }
     
    -  @Test
    -  public void testMutateRowsServerNotReturningRetryInfoClientDisabledHandling() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () ->
    -              newClient.bulkMutateRows(
    -                  BulkMutation.create("fake-table")
    -                      .add(RowMutationEntry.create("row-key-1").setCell("cf", "q", "v"))),
    -          true);
    -    }
    -  }
    -
       @Test
       public void testMutateRowNonRetryableErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(
             () -> client.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")), false);
       }
     
    -  @Test
    -  public void testMutateRowDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -
    -      verifyRetryInfoCanBeDisabled(
    -          () -> newClient.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")));
    -    }
    -  }
    -
       @Test
       public void testMutateRowServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
             () -> client.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")), true);
       }
     
    -  @Test
    -  public void testMutateRowServerNotReturningRetryInfoClientDisabledHandling() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () -> newClient.mutateRow(RowMutation.create("table", "key").setCell("cf", "q", "v")),
    -          true);
    -    }
    -  }
    -
       @Test
       public void testSampleRowKeysNonRetryableErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(() -> client.sampleRowKeys("table"), false);
       }
     
    -  @Test
    -  public void testSampleRowKeysDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyRetryInfoCanBeDisabled(() -> newClient.sampleRowKeys("table"));
    -    }
    -  }
    -
       @Test
       public void testSampleRowKeysServerNotReturningRetryInfo() {
         verifyNoRetryInfo(() -> client.sampleRowKeys("table"), true);
       }
     
    -  @Test
    -  public void testSampleRowKeysServerNotReturningRetryInfoClientDisabledHandling()
    -      throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(() -> newClient.sampleRowKeys("table"), true);
    -    }
    -  }
    -
    -  @Test
    -  public void testCheckAndMutateDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient client = BigtableDataClient.create(settings.build())) {
    -      ApiException exception = enqueueNonRetryableExceptionWithDelay(defaultDelay);
    -      try {
    -        client.checkAndMutateRow(
    -            ConditionalRowMutation.create("table", "key")
    -                .condition(Filters.FILTERS.value().regex("old-value"))
    -                .then(Mutation.create().setCell("cf", "q", "v")));
    -      } catch (ApiException e) {
    -        assertThat(e.getStatusCode()).isEqualTo(exception.getStatusCode());
    -      }
    -      assertThat(attemptCounter.get()).isEqualTo(1);
    -    }
    -  }
    -
       @Test
       public void testCheckAndMutateServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
    @@ -452,37 +321,6 @@ public void testCheckAndMutateServerNotReturningRetryInfo() {
             false);
       }
     
    -  @Test
    -  public void testCheckAndMutateServerNotReturningRetryInfoClientDisabledHandling()
    -      throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () ->
    -              newClient.checkAndMutateRow(
    -                  ConditionalRowMutation.create("table", "key")
    -                      .condition(Filters.FILTERS.value().regex("old-value"))
    -                      .then(Mutation.create().setCell("cf", "q", "v"))),
    -          false);
    -    }
    -  }
    -
    -  @Test
    -  public void testReadModifyWriteDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient client = BigtableDataClient.create(settings.build())) {
    -      ApiException exception = enqueueNonRetryableExceptionWithDelay(defaultDelay);
    -      try {
    -        client.readModifyWriteRow(ReadModifyWriteRow.create("table", "row").append("cf", "q", "v"));
    -      } catch (ApiException e) {
    -        assertThat(e.getStatusCode()).isEqualTo(exception.getStatusCode());
    -      }
    -      assertThat(attemptCounter.get()).isEqualTo(1);
    -    }
    -  }
    -
       @Test
       public void testReadModifyWriteServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
    @@ -492,19 +330,6 @@ public void testReadModifyWriteServerNotReturningRetryInfo() {
             false);
       }
     
    -  @Test
    -  public void testReadModifyWriteNotReturningRetryInfoClientDisabledHandling() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () ->
    -              newClient.readModifyWriteRow(
    -                  ReadModifyWriteRow.create("table", "row").append("cf", "q", "v")),
    -          false);
    -    }
    -  }
    -
       @Test
       public void testReadChangeStreamNonRetryableErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(
    @@ -516,21 +341,6 @@ public void testReadChangeStreamNonRetryableErrorWithRetryInfo() {
             false);
       }
     
    -  @Test
    -  public void testReadChangeStreamDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyRetryInfoCanBeDisabled(
    -          () -> {
    -            @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -            ArrayList ignored =
    -                Lists.newArrayList(
    -                    newClient.readChangeStream(ReadChangeStreamQuery.create("table")));
    -          });
    -    }
    -  }
    -
       @Test
       public void testReadChangeStreamServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
    @@ -542,23 +352,6 @@ public void testReadChangeStreamServerNotReturningRetryInfo() {
             true);
       }
     
    -  @Test
    -  public void testReadChangeStreamNotReturningRetryInfoClientDisabledHandling() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () -> {
    -            @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -            ArrayList ignored =
    -                Lists.newArrayList(
    -                    newClient.readChangeStream(ReadChangeStreamQuery.create("table")));
    -          },
    -          true,
    -          com.google.protobuf.Duration.newBuilder().setSeconds(5).setNanos(0).build());
    -    }
    -  }
    -
       @Test
       public void testGenerateInitialChangeStreamPartitionNonRetryableError() {
         verifyRetryInfoIsUsed(
    @@ -570,20 +363,6 @@ public void testGenerateInitialChangeStreamPartitionNonRetryableError() {
             false);
       }
     
    -  @Test
    -  public void testGenerateInitialChangeStreamPartitionDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyRetryInfoCanBeDisabled(
    -          () -> {
    -            @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -            ArrayList ignored =
    -                Lists.newArrayList(newClient.generateInitialChangeStreamPartitions("table"));
    -          });
    -    }
    -  }
    -
       @Test
       public void testGenerateInitialChangeStreamServerNotReturningRetryInfo() {
         verifyNoRetryInfo(
    @@ -595,55 +374,17 @@ public void testGenerateInitialChangeStreamServerNotReturningRetryInfo() {
             true);
       }
     
    -  @Test
    -  public void testGenerateInitialChangeStreamServerNotReturningRetryInfoClientDisabledHandling()
    -      throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () -> {
    -            @SuppressWarnings("MismatchedQueryAndUpdateOfCollection")
    -            ArrayList ignored =
    -                Lists.newArrayList(newClient.generateInitialChangeStreamPartitions("table"));
    -          },
    -          true);
    -    }
    -  }
    -
       @Test
       public void testPrepareQueryNonRetryableErrorWithRetryInfo() {
         verifyRetryInfoIsUsed(
             () -> client.prepareStatement("SELECT * FROM table", new HashMap<>()), false);
       }
     
    -  @Test
    -  public void testPrepareQueryDisableRetryInfo() throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -
    -      verifyRetryInfoCanBeDisabled(
    -          () -> newClient.prepareStatement("SELECT * FROM table", new HashMap<>()));
    -    }
    -  }
    -
       @Test
       public void testPrepareQueryServerNotReturningRetryInfo() {
         verifyNoRetryInfo(() -> client.prepareStatement("SELECT * FROM table", new HashMap<>()), true);
       }
     
    -  @Test
    -  public void testPrepareQueryServerNotReturningRetryInfoClientDisabledHandling()
    -      throws IOException {
    -    settings.stubSettings().setEnableRetryInfo(false);
    -
    -    try (BigtableDataClient newClient = BigtableDataClient.create(settings.build())) {
    -      verifyNoRetryInfo(
    -          () -> newClient.prepareStatement("SELECT * FROM table", new HashMap<>()), true);
    -    }
    -  }
    -
       // Test the case where server returns retry info and client enables handling of retry info
       private void verifyRetryInfoIsUsed(Runnable runnable, boolean retryableError) {
         if (retryableError) {
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/SkipTrailersTest.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/SkipTrailersTest.java
    index 9759f798c4..5dee789c19 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/SkipTrailersTest.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/SkipTrailersTest.java
    @@ -95,6 +95,7 @@ public void setUp() throws Exception {
         server = FakeServiceBuilder.create(hackedService).start();
     
         when(tracerFactory.newTracer(Mockito.any(), Mockito.any(), Mockito.any())).thenReturn(tracer);
    +    when(tracerFactory.withContext(Mockito.any())).thenReturn(tracerFactory);
     
         BigtableDataSettings.Builder clientBuilder =
             BigtableDataSettings.newBuilderForEmulator(server.getPort())
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
    index 32453efd7f..8eee324317 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/data/v2/stub/metrics/BuiltinMetricsTestUtils.java
    @@ -42,7 +42,7 @@ public class BuiltinMetricsTestUtils {
       private BuiltinMetricsTestUtils() {}
     
       public static MetricData getMetricData(InMemoryMetricReader reader, String metricName) {
    -    String fullMetricName = BuiltinMetricsConstants.METER_NAME + metricName;
    +    String fullMetricName = metricName;
         Collection allMetricData = Collections.emptyList();
     
         // Fetch the MetricData with retries
    diff --git a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/misc_utilities/AuthorizedViewTestHelper.java b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/misc_utilities/AuthorizedViewTestHelper.java
    index 83c40403f8..c501f80e5d 100644
    --- a/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/misc_utilities/AuthorizedViewTestHelper.java
    +++ b/google-cloud-bigtable/src/test/java/com/google/cloud/bigtable/misc_utilities/AuthorizedViewTestHelper.java
    @@ -16,18 +16,26 @@
     
     package com.google.cloud.bigtable.misc_utilities;
     
    +import com.google.api.gax.rpc.UnavailableException;
     import com.google.cloud.bigtable.admin.v2.models.AuthorizedView;
     import com.google.cloud.bigtable.admin.v2.models.CreateAuthorizedViewRequest;
     import com.google.cloud.bigtable.admin.v2.models.FamilySubsets;
     import com.google.cloud.bigtable.admin.v2.models.SubsetView;
     import com.google.cloud.bigtable.test_helpers.env.TestEnvRule;
     import java.util.UUID;
    +import java.util.concurrent.TimeUnit;
    +import java.util.logging.Level;
    +import java.util.logging.Logger;
     
     public class AuthorizedViewTestHelper {
    +
       public static String AUTHORIZED_VIEW_ROW_PREFIX = "row#";
       public static String AUTHORIZED_VIEW_COLUMN_QUALIFIER = "qualifier";
     
    -  public static AuthorizedView createTestAuthorizedView(TestEnvRule testEnvRule) {
    +  private static final Logger logger = Logger.getLogger(AuthorizedViewTestHelper.class.getName());
    +
    +  public static AuthorizedView createTestAuthorizedView(TestEnvRule testEnvRule)
    +      throws InterruptedException {
         String tableId = testEnvRule.env().getTableId();
         String authorizedViewId = UUID.randomUUID().toString();
         CreateAuthorizedViewRequest request =
    @@ -40,6 +48,27 @@ public static AuthorizedView createTestAuthorizedView(TestEnvRule testEnvRule) {
                             FamilySubsets.create()
                                 .addQualifierPrefix(AUTHORIZED_VIEW_COLUMN_QUALIFIER)))
                 .setDeletionProtection(false);
    -    return testEnvRule.env().getTableAdminClient().createAuthorizedView(request);
    +    int retryCount = 0;
    +    int maxRetries = 10;
    +    while (true) {
    +      try {
    +        return testEnvRule.env().getTableAdminClient().createAuthorizedView(request);
    +      } catch (UnavailableException e) {
    +        if (++retryCount == maxRetries) {
    +          throw e;
    +        }
    +        logger.log(
    +            Level.INFO,
    +            "Retrying createAuthorizedView "
    +                + authorizedViewId
    +                + " in  table "
    +                + tableId
    +                + ", retryCount: "
    +                + retryCount);
    +        // Exponential backoff delay starting at 100ms.
    +        double expSleep = 100 * Math.pow(2, retryCount);
    +        Thread.sleep((long) Math.min(expSleep, TimeUnit.MINUTES.toMillis(1)));
    +      }
    +    }
       }
     }
    diff --git a/grpc-google-cloud-bigtable-admin-v2/pom.xml b/grpc-google-cloud-bigtable-admin-v2/pom.xml
    index e6528b9d17..45cbd4bab9 100644
    --- a/grpc-google-cloud-bigtable-admin-v2/pom.xml
    +++ b/grpc-google-cloud-bigtable-admin-v2/pom.xml
    @@ -4,13 +4,13 @@
       4.0.0
       com.google.api.grpc
       grpc-google-cloud-bigtable-admin-v2
    -  2.73.1
    +  2.74.0
       grpc-google-cloud-bigtable-admin-v2
       GRPC library for grpc-google-cloud-bigtable-admin-v2
       
         com.google.cloud
         google-cloud-bigtable-parent
    -    2.73.1
    +    2.74.0
       
     
       
    @@ -18,14 +18,14 @@
           
             com.google.cloud
             google-cloud-bigtable-deps-bom
    -        2.73.1
    +        2.74.0
             pom
             import
           
           
             com.google.cloud
             google-cloud-bigtable-bom
    -        2.73.1
    +        2.74.0
             pom
             import
           
    diff --git a/grpc-google-cloud-bigtable-v2/pom.xml b/grpc-google-cloud-bigtable-v2/pom.xml
    index 319769faa2..49e7cd5c9b 100644
    --- a/grpc-google-cloud-bigtable-v2/pom.xml
    +++ b/grpc-google-cloud-bigtable-v2/pom.xml
    @@ -4,13 +4,13 @@
       4.0.0
       com.google.api.grpc
       grpc-google-cloud-bigtable-v2
    -  2.73.1
    +  2.74.0
       grpc-google-cloud-bigtable-v2
       GRPC library for grpc-google-cloud-bigtable-v2
       
         com.google.cloud
         google-cloud-bigtable-parent
    -    2.73.1
    +    2.74.0
       
     
       
    @@ -18,14 +18,14 @@
           
             com.google.cloud
             google-cloud-bigtable-deps-bom
    -        2.73.1
    +        2.74.0
             pom
             import
           
           
             com.google.cloud
             google-cloud-bigtable-bom
    -        2.73.1
    +        2.74.0
             pom
             import
           
    diff --git a/pom.xml b/pom.xml
    index 1b7a650e7e..e2c82a119e 100644
    --- a/pom.xml
    +++ b/pom.xml
    @@ -4,7 +4,7 @@
     
         google-cloud-bigtable-parent
         pom
    -    2.73.1
    +    2.74.0
         Google Cloud Bigtable Parent
         https://github.com/googleapis/java-bigtable
         
    @@ -14,7 +14,7 @@
         
             com.google.cloud
             sdk-platform-java-config
    -        3.56.1
    +        3.57.0
             
         
     
    @@ -156,27 +156,27 @@
                 
                     com.google.api.grpc
                     proto-google-cloud-bigtable-v2
    -                2.73.1
    +                2.74.0
                 
                 
                     com.google.api.grpc
                     proto-google-cloud-bigtable-admin-v2
    -                2.73.1
    +                2.74.0
                 
                 
                     com.google.api.grpc
                     grpc-google-cloud-bigtable-v2
    -                2.73.1
    +                2.74.0
                 
                 
                     com.google.api.grpc
                     grpc-google-cloud-bigtable-admin-v2
    -                2.73.1
    +                2.74.0
                 
                 
                     com.google.cloud
                     google-cloud-bigtable
    -                2.73.1
    +                2.74.0
                 
                 
                 
    diff --git a/proto-google-cloud-bigtable-admin-v2/pom.xml b/proto-google-cloud-bigtable-admin-v2/pom.xml
    index 4b160499ae..be650c0e6a 100644
    --- a/proto-google-cloud-bigtable-admin-v2/pom.xml
    +++ b/proto-google-cloud-bigtable-admin-v2/pom.xml
    @@ -4,13 +4,13 @@
       4.0.0
       com.google.api.grpc
       proto-google-cloud-bigtable-admin-v2
    -  2.73.1
    +  2.74.0
       proto-google-cloud-bigtable-admin-v2
       PROTO library for proto-google-cloud-bigtable-admin-v2
       
         com.google.cloud
         google-cloud-bigtable-parent
    -    2.73.1
    +    2.74.0
       
     
       
    @@ -18,14 +18,14 @@
           
             com.google.cloud
             google-cloud-bigtable-deps-bom
    -        2.73.1
    +        2.74.0
             pom
             import
           
           
             com.google.cloud
             google-cloud-bigtable-bom
    -        2.73.1
    +        2.74.0
             pom
             import
           
    diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Table.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Table.java
    index e305d9af2b..caed2b904e 100644
    --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Table.java
    +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/Table.java
    @@ -3829,6 +3829,78 @@ public com.google.bigtable.admin.v2.Table.AutomatedBackupPolicy getAutomatedBack
         return com.google.bigtable.admin.v2.Table.AutomatedBackupPolicy.getDefaultInstance();
       }
     
    +  public static final int TIERED_STORAGE_CONFIG_FIELD_NUMBER = 14;
    +  private com.google.bigtable.admin.v2.TieredStorageConfig tieredStorageConfig_;
    +
    +  /**
    +   *
    +   *
    +   * 
    +   * Rules to specify what data is stored in each storage tier.
    +   * Different tiers store data differently, providing different trade-offs
    +   * between cost and performance. Different parts of a table can be stored
    +   * separately on different tiers.
    +   * If a config is specified, tiered storage is enabled for this table.
    +   * Otherwise, tiered storage is disabled.
    +   * Only SSD instances can configure tiered storage.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + * + * @return Whether the tieredStorageConfig field is set. + */ + @java.lang.Override + public boolean hasTieredStorageConfig() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
    +   * Rules to specify what data is stored in each storage tier.
    +   * Different tiers store data differently, providing different trade-offs
    +   * between cost and performance. Different parts of a table can be stored
    +   * separately on different tiers.
    +   * If a config is specified, tiered storage is enabled for this table.
    +   * Otherwise, tiered storage is disabled.
    +   * Only SSD instances can configure tiered storage.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + * + * @return The tieredStorageConfig. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageConfig getTieredStorageConfig() { + return tieredStorageConfig_ == null + ? com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance() + : tieredStorageConfig_; + } + + /** + * + * + *
    +   * Rules to specify what data is stored in each storage tier.
    +   * Different tiers store data differently, providing different trade-offs
    +   * between cost and performance. Different parts of a table can be stored
    +   * separately on different tiers.
    +   * If a config is specified, tiered storage is enabled for this table.
    +   * Otherwise, tiered storage is disabled.
    +   * Only SSD instances can configure tiered storage.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder + getTieredStorageConfigOrBuilder() { + return tieredStorageConfig_ == null + ? com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance() + : tieredStorageConfig_; + } + public static final int ROW_KEY_SCHEMA_FIELD_NUMBER = 15; private com.google.bigtable.admin.v2.Type.Struct rowKeySchema_; @@ -3900,7 +3972,7 @@ public com.google.bigtable.admin.v2.Table.AutomatedBackupPolicy getAutomatedBack */ @java.lang.Override public boolean hasRowKeySchema() { - return ((bitField0_ & 0x00000004) != 0); + return ((bitField0_ & 0x00000008) != 0); } /** @@ -4087,6 +4159,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io 13, (com.google.bigtable.admin.v2.Table.AutomatedBackupPolicy) automatedBackupConfig_); } if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(14, getTieredStorageConfig()); + } + if (((bitField0_ & 0x00000008) != 0)) { output.writeMessage(15, getRowKeySchema()); } getUnknownFields().writeTo(output); @@ -4145,6 +4220,10 @@ public int getSerializedSize() { (com.google.bigtable.admin.v2.Table.AutomatedBackupPolicy) automatedBackupConfig_); } if (((bitField0_ & 0x00000004) != 0)) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize(14, getTieredStorageConfig()); + } + if (((bitField0_ & 0x00000008) != 0)) { size += com.google.protobuf.CodedOutputStream.computeMessageSize(15, getRowKeySchema()); } size += getUnknownFields().getSerializedSize(); @@ -4175,6 +4254,10 @@ public boolean equals(final java.lang.Object obj) { if (!getChangeStreamConfig().equals(other.getChangeStreamConfig())) return false; } if (getDeletionProtection() != other.getDeletionProtection()) return false; + if (hasTieredStorageConfig() != other.hasTieredStorageConfig()) return false; + if (hasTieredStorageConfig()) { + if 
(!getTieredStorageConfig().equals(other.getTieredStorageConfig())) return false; + } if (hasRowKeySchema() != other.hasRowKeySchema()) return false; if (hasRowKeySchema()) { if (!getRowKeySchema().equals(other.getRowKeySchema())) return false; @@ -4220,6 +4303,10 @@ public int hashCode() { } hash = (37 * hash) + DELETION_PROTECTION_FIELD_NUMBER; hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getDeletionProtection()); + if (hasTieredStorageConfig()) { + hash = (37 * hash) + TIERED_STORAGE_CONFIG_FIELD_NUMBER; + hash = (53 * hash) + getTieredStorageConfig().hashCode(); + } if (hasRowKeySchema()) { hash = (37 * hash) + ROW_KEY_SCHEMA_FIELD_NUMBER; hash = (53 * hash) + getRowKeySchema().hashCode(); @@ -4401,6 +4488,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { internalGetRestoreInfoFieldBuilder(); internalGetChangeStreamConfigFieldBuilder(); + internalGetTieredStorageConfigFieldBuilder(); internalGetRowKeySchemaFieldBuilder(); } } @@ -4427,6 +4515,11 @@ public Builder clear() { if (automatedBackupPolicyBuilder_ != null) { automatedBackupPolicyBuilder_.clear(); } + tieredStorageConfig_ = null; + if (tieredStorageConfigBuilder_ != null) { + tieredStorageConfigBuilder_.dispose(); + tieredStorageConfigBuilder_ = null; + } rowKeySchema_ = null; if (rowKeySchemaBuilder_ != null) { rowKeySchemaBuilder_.dispose(); @@ -4501,9 +4594,16 @@ private void buildPartial0(com.google.bigtable.admin.v2.Table result) { result.deletionProtection_ = deletionProtection_; } if (((from_bitField0_ & 0x00000100) != 0)) { + result.tieredStorageConfig_ = + tieredStorageConfigBuilder_ == null + ? tieredStorageConfig_ + : tieredStorageConfigBuilder_.build(); + to_bitField0_ |= 0x00000004; + } + if (((from_bitField0_ & 0x00000200) != 0)) { result.rowKeySchema_ = rowKeySchemaBuilder_ == null ? 
rowKeySchema_ : rowKeySchemaBuilder_.build(); - to_bitField0_ |= 0x00000004; + to_bitField0_ |= 0x00000008; } result.bitField0_ |= to_bitField0_; } @@ -4549,6 +4649,9 @@ public Builder mergeFrom(com.google.bigtable.admin.v2.Table other) { if (other.getDeletionProtection() != false) { setDeletionProtection(other.getDeletionProtection()); } + if (other.hasTieredStorageConfig()) { + mergeTieredStorageConfig(other.getTieredStorageConfig()); + } if (other.hasRowKeySchema()) { mergeRowKeySchema(other.getRowKeySchema()); } @@ -4656,11 +4759,18 @@ public Builder mergeFrom( automatedBackupConfigCase_ = 13; break; } // case 106 + case 114: + { + input.readMessage( + internalGetTieredStorageConfigFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000100; + break; + } // case 114 case 122: { input.readMessage( internalGetRowKeySchemaFieldBuilder().getBuilder(), extensionRegistry); - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; break; } // case 122 default: @@ -6248,6 +6358,259 @@ public Builder clearAutomatedBackupPolicy() { return automatedBackupPolicyBuilder_; } + private com.google.bigtable.admin.v2.TieredStorageConfig tieredStorageConfig_; + private com.google.protobuf.SingleFieldBuilder< + com.google.bigtable.admin.v2.TieredStorageConfig, + com.google.bigtable.admin.v2.TieredStorageConfig.Builder, + com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder> + tieredStorageConfigBuilder_; + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + * + * @return Whether the tieredStorageConfig field is set. + */ + public boolean hasTieredStorageConfig() { + return ((bitField0_ & 0x00000100) != 0); + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + * + * @return The tieredStorageConfig. + */ + public com.google.bigtable.admin.v2.TieredStorageConfig getTieredStorageConfig() { + if (tieredStorageConfigBuilder_ == null) { + return tieredStorageConfig_ == null + ? com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance() + : tieredStorageConfig_; + } else { + return tieredStorageConfigBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + public Builder setTieredStorageConfig(com.google.bigtable.admin.v2.TieredStorageConfig value) { + if (tieredStorageConfigBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + tieredStorageConfig_ = value; + } else { + tieredStorageConfigBuilder_.setMessage(value); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + public Builder setTieredStorageConfig( + com.google.bigtable.admin.v2.TieredStorageConfig.Builder builderForValue) { + if (tieredStorageConfigBuilder_ == null) { + tieredStorageConfig_ = builderForValue.build(); + } else { + tieredStorageConfigBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000100; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + public Builder mergeTieredStorageConfig( + com.google.bigtable.admin.v2.TieredStorageConfig value) { + if (tieredStorageConfigBuilder_ == null) { + if (((bitField0_ & 0x00000100) != 0) + && tieredStorageConfig_ != null + && tieredStorageConfig_ + != com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance()) { + getTieredStorageConfigBuilder().mergeFrom(value); + } else { + tieredStorageConfig_ = value; + } + } else { + tieredStorageConfigBuilder_.mergeFrom(value); + } + if (tieredStorageConfig_ != null) { + bitField0_ |= 0x00000100; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + public Builder clearTieredStorageConfig() { + bitField0_ = (bitField0_ & ~0x00000100); + tieredStorageConfig_ = null; + if (tieredStorageConfigBuilder_ != null) { + tieredStorageConfigBuilder_.dispose(); + tieredStorageConfigBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + public com.google.bigtable.admin.v2.TieredStorageConfig.Builder + getTieredStorageConfigBuilder() { + bitField0_ |= 0x00000100; + onChanged(); + return internalGetTieredStorageConfigFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + public com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder + getTieredStorageConfigOrBuilder() { + if (tieredStorageConfigBuilder_ != null) { + return tieredStorageConfigBuilder_.getMessageOrBuilder(); + } else { + return tieredStorageConfig_ == null + ? com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance() + : tieredStorageConfig_; + } + } + + /** + * + * + *
    +     * Rules to specify what data is stored in each storage tier.
    +     * Different tiers store data differently, providing different trade-offs
    +     * between cost and performance. Different parts of a table can be stored
    +     * separately on different tiers.
    +     * If a config is specified, tiered storage is enabled for this table.
    +     * Otherwise, tiered storage is disabled.
    +     * Only SSD instances can configure tiered storage.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.bigtable.admin.v2.TieredStorageConfig, + com.google.bigtable.admin.v2.TieredStorageConfig.Builder, + com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder> + internalGetTieredStorageConfigFieldBuilder() { + if (tieredStorageConfigBuilder_ == null) { + tieredStorageConfigBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.bigtable.admin.v2.TieredStorageConfig, + com.google.bigtable.admin.v2.TieredStorageConfig.Builder, + com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder>( + getTieredStorageConfig(), getParentForChildren(), isClean()); + tieredStorageConfig_ = null; + } + return tieredStorageConfigBuilder_; + } + private com.google.bigtable.admin.v2.Type.Struct rowKeySchema_; private com.google.protobuf.SingleFieldBuilder< com.google.bigtable.admin.v2.Type.Struct, @@ -6322,7 +6685,7 @@ public Builder clearAutomatedBackupPolicy() { * @return Whether the rowKeySchema field is set. 
*/ public boolean hasRowKeySchema() { - return ((bitField0_ & 0x00000100) != 0); + return ((bitField0_ & 0x00000200) != 0); } /** @@ -6474,7 +6837,7 @@ public Builder setRowKeySchema(com.google.bigtable.admin.v2.Type.Struct value) { } else { rowKeySchemaBuilder_.setMessage(value); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -6550,7 +6913,7 @@ public Builder setRowKeySchema( } else { rowKeySchemaBuilder_.setMessage(builderForValue.build()); } - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return this; } @@ -6621,7 +6984,7 @@ public Builder setRowKeySchema( */ public Builder mergeRowKeySchema(com.google.bigtable.admin.v2.Type.Struct value) { if (rowKeySchemaBuilder_ == null) { - if (((bitField0_ & 0x00000100) != 0) + if (((bitField0_ & 0x00000200) != 0) && rowKeySchema_ != null && rowKeySchema_ != com.google.bigtable.admin.v2.Type.Struct.getDefaultInstance()) { getRowKeySchemaBuilder().mergeFrom(value); @@ -6632,7 +6995,7 @@ public Builder mergeRowKeySchema(com.google.bigtable.admin.v2.Type.Struct value) rowKeySchemaBuilder_.mergeFrom(value); } if (rowKeySchema_ != null) { - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); } return this; @@ -6703,7 +7066,7 @@ public Builder mergeRowKeySchema(com.google.bigtable.admin.v2.Type.Struct value) * .google.bigtable.admin.v2.Type.Struct row_key_schema = 15; */ public Builder clearRowKeySchema() { - bitField0_ = (bitField0_ & ~0x00000100); + bitField0_ = (bitField0_ & ~0x00000200); rowKeySchema_ = null; if (rowKeySchemaBuilder_ != null) { rowKeySchemaBuilder_.dispose(); @@ -6778,7 +7141,7 @@ public Builder clearRowKeySchema() { * .google.bigtable.admin.v2.Type.Struct row_key_schema = 15; */ public com.google.bigtable.admin.v2.Type.Struct.Builder getRowKeySchemaBuilder() { - bitField0_ |= 0x00000100; + bitField0_ |= 0x00000200; onChanged(); return internalGetRowKeySchemaFieldBuilder().getBuilder(); } diff --git 
a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableOrBuilder.java index 722205040e..f82f216bfa 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableOrBuilder.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableOrBuilder.java @@ -408,6 +408,61 @@ com.google.bigtable.admin.v2.ColumnFamily getColumnFamiliesOrDefault( com.google.bigtable.admin.v2.Table.AutomatedBackupPolicyOrBuilder getAutomatedBackupPolicyOrBuilder(); + /** + * + * + *
    +   * Rules to specify what data is stored in each storage tier.
    +   * Different tiers store data differently, providing different trade-offs
    +   * between cost and performance. Different parts of a table can be stored
    +   * separately on different tiers.
    +   * If a config is specified, tiered storage is enabled for this table.
    +   * Otherwise, tiered storage is disabled.
    +   * Only SSD instances can configure tiered storage.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + * + * @return Whether the tieredStorageConfig field is set. + */ + boolean hasTieredStorageConfig(); + + /** + * + * + *
    +   * Rules to specify what data is stored in each storage tier.
    +   * Different tiers store data differently, providing different trade-offs
    +   * between cost and performance. Different parts of a table can be stored
    +   * separately on different tiers.
    +   * If a config is specified, tiered storage is enabled for this table.
    +   * Otherwise, tiered storage is disabled.
    +   * Only SSD instances can configure tiered storage.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + * + * @return The tieredStorageConfig. + */ + com.google.bigtable.admin.v2.TieredStorageConfig getTieredStorageConfig(); + + /** + * + * + *
    +   * Rules to specify what data is stored in each storage tier.
    +   * Different tiers store data differently, providing different trade-offs
    +   * between cost and performance. Different parts of a table can be stored
    +   * separately on different tiers.
    +   * If a config is specified, tiered storage is enabled for this table.
    +   * Otherwise, tiered storage is disabled.
    +   * Only SSD instances can configure tiered storage.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageConfig tiered_storage_config = 14; + */ + com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder getTieredStorageConfigOrBuilder(); + /** * * diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java index 30caaee768..c013ea2ed5 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TableProto.java @@ -116,6 +116,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r internal_static_google_bigtable_admin_v2_BackupInfo_descriptor; static final com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_google_bigtable_admin_v2_BackupInfo_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_admin_v2_TieredStorageConfig_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_bigtable_admin_v2_TieredStorageConfig_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_bigtable_admin_v2_TieredStorageRule_descriptor; + static final com.google.protobuf.GeneratedMessage.FieldAccessorTable + internal_static_google_bigtable_admin_v2_TieredStorageRule_fieldAccessorTable; static final com.google.protobuf.Descriptors.Descriptor internal_static_google_bigtable_admin_v2_ProtoSchema_descriptor; static final com.google.protobuf.GeneratedMessage.FieldAccessorTable @@ -145,7 +153,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\013backup_info\030\002 \001(\0132$.google.bigtable.admin.v2.BackupInfoH\000B\r\n" + "\013source_info\"I\n" + "\022ChangeStreamConfig\0223\n" - + "\020retention_period\030\001 
\001(\0132\031.google.protobuf.Duration\"\225\014\n" + + "\020retention_period\030\001 \001(\0132\031.google.protobuf.Duration\"\343\014\n" + "\005Table\022\014\n" + "\004name\030\001 \001(\t\022O\n" + "\016cluster_states\030\002" @@ -160,12 +168,14 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + " \001(\0132,.google.bigtable.admin.v2.ChangeStreamConfig\022\033\n" + "\023deletion_protection\030\t \001(\010\022X\n" + "\027automated_backup_policy\030\r" - + " \001(\01325.google.bigtable.admin.v2.Table.AutomatedBackupPolicyH\000\022=\n" + + " \001(\01325.google.bigtable.admin.v2.Table.AutomatedBackupPolicyH\000\022L\n" + + "\025tiered_storage_config\030\016 \001(\013" + + "2-.google.bigtable.admin.v2.TieredStorageConfig\022=\n" + "\016row_key_schema\030\017" + " \001(\0132%.google.bigtable.admin.v2.Type.Struct\032\306\002\n" + "\014ClusterState\022]\n" - + "\021replication_state\030\001 \001(\0162=.g" - + "oogle.bigtable.admin.v2.Table.ClusterState.ReplicationStateB\003\340A\003\022F\n" + + "\021replication_state\030\001 \001(\0162=.goo" + + "gle.bigtable.admin.v2.Table.ClusterState.ReplicationStateB\003\340A\003\022F\n" + "\017encryption_info\030\002" + " \003(\0132(.google.bigtable.admin.v2.EncryptionInfoB\003\340A\003\"\216\001\n" + "\020ReplicationState\022\023\n" @@ -196,13 +206,13 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\020REPLICATION_VIEW\020\003\022\023\n" + "\017ENCRYPTION_VIEW\020\005\022\010\n" + "\004FULL\020\004:_\352A\\\n" - + "\"bigtableadmin.googleapis.com/Tab" - + "le\0226projects/{project}/instances/{instance}/tables/{table}B\031\n" + + "\"bigtableadmin.googleapis.com/Table" + + "\0226projects/{project}/instances/{instance}/tables/{table}B\031\n" + "\027automated_backup_config\"\343\005\n" + "\016AuthorizedView\022\021\n" + "\004name\030\001 \001(\tB\003\340A\010\022J\n" - + "\013subset_view\030\002 \001(\01323.google.bigtab" - + 
"le.admin.v2.AuthorizedView.SubsetViewH\000\022\014\n" + + "\013subset_view\030\002" + + " \001(\01323.google.bigtable.admin.v2.AuthorizedView.SubsetViewH\000\022\014\n" + "\004etag\030\003 \001(\t\022\033\n" + "\023deletion_protection\030\004 \001(\010\032?\n\r" + "FamilySubsets\022\022\n\n" @@ -210,8 +220,8 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\022qualifier_prefixes\030\002 \003(\014\032\360\001\n\n" + "SubsetView\022\024\n" + "\014row_prefixes\030\001 \003(\014\022^\n" - + "\016family_subsets\030\002 \003(\0132F.google.bigtable.admin.v2.Auth" - + "orizedView.SubsetView.FamilySubsetsEntry\032l\n" + + "\016family_subsets\030\002 \003(\0132F.google.bigtable.admin.v2.Author" + + "izedView.SubsetView.FamilySubsetsEntry\032l\n" + "\022FamilySubsetsEntry\022\013\n" + "\003key\030\001 \001(\t\022E\n" + "\005value\030\002" @@ -221,9 +231,9 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\tNAME_ONLY\020\001\022\t\n" + "\005BASIC\020\002\022\010\n" + "\004FULL\020\003:\254\001\352A\250\001\n" - + "+bigtableadmin.googleapis.com/AuthorizedView\022Xprojects/{project}/instances/{ins" - + "tance}/tables/{table}/authorizedViews/{a" - + "uthorized_view}*\017authorizedViews2\016authorizedViewB\021\n" + + "+bigtableadmin.googleapis.com/AuthorizedView\022Xprojects/{project}/instances/{insta" + + "nce}/tables/{table}/authorizedViews/{aut" + + "horized_view}*\017authorizedViews2\016authorizedViewB\021\n" + "\017authorized_view\"u\n" + "\014ColumnFamily\0221\n" + "\007gc_rule\030\001 \001(\0132 .google.bigtable.admin.v2.GcRule\0222\n\n" @@ -256,15 +266,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\017data_size_bytes\030\003 \001(\003B\003\340A\003\0224\n" + "\013create_time\030\004 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022/\n" + "\013delete_time\030\005 \001(\0132\032.google.protobuf.Timestamp\022<\n" - + "\005state\030\006 \001(\0162(.goog" - 
+ "le.bigtable.admin.v2.Snapshot.StateB\003\340A\003\022\023\n" + + "\005state\030\006 \001(\0162(.google" + + ".bigtable.admin.v2.Snapshot.StateB\003\340A\003\022\023\n" + "\013description\030\007 \001(\t\"5\n" + "\005State\022\023\n" + "\017STATE_NOT_KNOWN\020\000\022\t\n" + "\005READY\020\001\022\014\n" + "\010CREATING\020\002:{\352Ax\n" - + "%bigtableadmin.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance" - + "}/clusters/{cluster}/snapshots/{snapshot}\"\371\005\n" + + "%bigtableadmin.googleapis.com/Snapshot\022Oprojects/{project}/instances/{instance}/" + + "clusters/{cluster}/snapshots/{snapshot}\"\371\005\n" + "\006Backup\022\014\n" + "\004name\030\001 \001(\t\022\034\n" + "\014source_table\030\002 \001(\tB\006\340A\005\340A\002\022\032\n\r" @@ -276,10 +286,10 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\010end_time\030\005" + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\027\n\n" + "size_bytes\030\006 \001(\003B\003\340A\003\022:\n" - + "\005state\030\007 \001" - + "(\0162&.google.bigtable.admin.v2.Backup.StateB\003\340A\003\022F\n" - + "\017encryption_info\030\t \001(\0132(.googl" - + "e.bigtable.admin.v2.EncryptionInfoB\003\340A\003\022@\n" + + "\005state\030\007 \001(\016" + + "2&.google.bigtable.admin.v2.Backup.StateB\003\340A\003\022F\n" + + "\017encryption_info\030\t" + + " \001(\0132(.google.bigtable.admin.v2.EncryptionInfoB\003\340A\003\022@\n" + "\013backup_type\030\013 \001(\0162+.google.bigtable.admin.v2.Backup.BackupType\0228\n" + "\024hot_to_standard_time\030\014 \001(\0132\032.google.protobuf.Timestamp\"7\n" + "\005State\022\025\n" @@ -290,15 +300,21 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\027BACKUP_TYPE_UNSPECIFIED\020\000\022\014\n" + "\010STANDARD\020\001\022\007\n" + "\003HOT\020\002:u\352Ar\n" - + "#bigtableadmin.googleapis.com/Backup\022Kprojects/{project}/instances/{" - + 
"instance}/clusters/{cluster}/backups/{backup}\"\300\001\n\n" + + "#bigtableadmin.googleapis.com/Backup\022Kprojects/{project}/instances/{in" + + "stance}/clusters/{cluster}/backups/{backup}\"\300\001\n\n" + "BackupInfo\022\023\n" + "\006backup\030\001 \001(\tB\003\340A\003\0223\n\n" + "start_time\030\002 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\0221\n" + "\010end_time\030\003 \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\031\n" + "\014source_table\030\004 \001(\tB\003\340A\003\022\032\n\r" + "source_backup\030\n" - + " \001(\tB\003\340A\003\"-\n" + + " \001(\tB\003\340A\003\"]\n" + + "\023TieredStorageConfig\022F\n" + + "\021infrequent_access\030\001" + + " \001(\0132+.google.bigtable.admin.v2.TieredStorageRule\"W\n" + + "\021TieredStorageRule\022:\n" + + "\025include_if_older_than\030\001 \001(\0132\031.google.protobuf.DurationH\000B\006\n" + + "\004rule\"-\n" + "\013ProtoSchema\022\036\n" + "\021proto_descriptors\030\002 \001(\014B\003\340A\002\"\240\002\n" + "\014SchemaBundle\022\021\n" @@ -306,21 +322,21 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "\014proto_schema\030\002" + " \001(\0132%.google.bigtable.admin.v2.ProtoSchemaH\000\022\021\n" + "\004etag\030\003 \001(\tB\003\340A\001:\242\001\352A\236\001\n" - + ")bigtableadmin.googleapis.com/SchemaBundle\022Tprojects/{project}/inst" - + "ances/{instance}/tables/{table}/schemaBundles/{schema_bundle}*\r" + + ")bigtableadmin.googleapis.com/SchemaBundle\022Tp" + + "rojects/{project}/instances/{instance}/tables/{table}/schemaBundles/{schema_bundle}*\r" + "schemaBundles2\014schemaBundleB\006\n" + "\004type*D\n" + "\021RestoreSourceType\022#\n" + "\037RESTORE_SOURCE_TYPE_UNSPECIFIED\020\000\022\n\n" + "\006BACKUP\020\001B\367\002\n" + "\034com.google.bigtable.admin.v2B\n" - + "TableProtoP\001Z8cloud.google.com/go/big" - + "table/admin/apiv2/adminpb;adminpb\252\002\036Goog" - + "le.Cloud.Bigtable.Admin.V2\312\002\036Google\\Clou" - + 
"d\\Bigtable\\Admin\\V2\352\002\"Google::Cloud::Bigtable::Admin::V2\352A\246\001\n" - + "(cloudkms.googleapis.com/CryptoKeyVersion\022zprojects/{projec" - + "t}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVer" - + "sions/{crypto_key_version}b\006proto3" + + "TableProtoP\001Z8cloud.google.com/go/bigtable/admin/apiv2/" + + "adminpb;adminpb\252\002\036Google.Cloud.Bigtable." + + "Admin.V2\312\002\036Google\\Cloud\\Bigtable\\Admin\\V" + + "2\352\002\"Google::Cloud::Bigtable::Admin::V2\352A\246\001\n" + + "(cloudkms.googleapis.com/CryptoKeyVersion\022zprojects/{project}/locations/{loca" + + "tion}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_" + + "version}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -362,6 +378,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ChangeStreamConfig", "DeletionProtection", "AutomatedBackupPolicy", + "TieredStorageConfig", "RowKeySchema", "AutomatedBackupConfig", }); @@ -508,8 +525,24 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "Backup", "StartTime", "EndTime", "SourceTable", "SourceBackup", }); - internal_static_google_bigtable_admin_v2_ProtoSchema_descriptor = + internal_static_google_bigtable_admin_v2_TieredStorageConfig_descriptor = getDescriptor().getMessageType(10); + internal_static_google_bigtable_admin_v2_TieredStorageConfig_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + internal_static_google_bigtable_admin_v2_TieredStorageConfig_descriptor, + new java.lang.String[] { + "InfrequentAccess", + }); + internal_static_google_bigtable_admin_v2_TieredStorageRule_descriptor = + getDescriptor().getMessageType(11); + internal_static_google_bigtable_admin_v2_TieredStorageRule_fieldAccessorTable = + new com.google.protobuf.GeneratedMessage.FieldAccessorTable( + 
internal_static_google_bigtable_admin_v2_TieredStorageRule_descriptor, + new java.lang.String[] { + "IncludeIfOlderThan", "Rule", + }); + internal_static_google_bigtable_admin_v2_ProtoSchema_descriptor = + getDescriptor().getMessageType(12); internal_static_google_bigtable_admin_v2_ProtoSchema_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_google_bigtable_admin_v2_ProtoSchema_descriptor, @@ -517,7 +550,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "ProtoDescriptors", }); internal_static_google_bigtable_admin_v2_SchemaBundle_descriptor = - getDescriptor().getMessageType(11); + getDescriptor().getMessageType(13); internal_static_google_bigtable_admin_v2_SchemaBundle_fieldAccessorTable = new com.google.protobuf.GeneratedMessage.FieldAccessorTable( internal_static_google_bigtable_admin_v2_SchemaBundle_descriptor, diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageConfig.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageConfig.java new file mode 100644 index 0000000000..e54e3440fb --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageConfig.java @@ -0,0 +1,721 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/bigtable/admin/v2/table.proto +// Protobuf Java Version: 4.33.2 + +package com.google.bigtable.admin.v2; + +/** + * + * + *
    + * Config for tiered storage.
    + * A valid config must have a valid TieredStorageRule. Otherwise the whole
    + * TieredStorageConfig must be unset.
    + * By default all data is stored in the SSD tier (only SSD instances can
    + * configure tiered storage).
    + * 
    + * + * Protobuf type {@code google.bigtable.admin.v2.TieredStorageConfig} + */ +@com.google.protobuf.Generated +public final class TieredStorageConfig extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.TieredStorageConfig) + TieredStorageConfigOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TieredStorageConfig"); + } + + // Use TieredStorageConfig.newBuilder() to construct. + private TieredStorageConfig(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TieredStorageConfig() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.TieredStorageConfig.class, + com.google.bigtable.admin.v2.TieredStorageConfig.Builder.class); + } + + private int bitField0_; + public static final int INFREQUENT_ACCESS_FIELD_NUMBER = 1; + private com.google.bigtable.admin.v2.TieredStorageRule infrequentAccess_; + + /** + * + * + *
    +   * Rule to specify what data is stored in the infrequent access(IA) tier.
    +   * The IA tier allows storing more data per node with reduced performance.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + * + * @return Whether the infrequentAccess field is set. + */ + @java.lang.Override + public boolean hasInfrequentAccess() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +   * Rule to specify what data is stored in the infrequent access(IA) tier.
    +   * The IA tier allows storing more data per node with reduced performance.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + * + * @return The infrequentAccess. + */ + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageRule getInfrequentAccess() { + return infrequentAccess_ == null + ? com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance() + : infrequentAccess_; + } + + /** + * + * + *
    +   * Rule to specify what data is stored in the infrequent access(IA) tier.
    +   * The IA tier allows storing more data per node with reduced performance.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder getInfrequentAccessOrBuilder() { + return infrequentAccess_ == null + ? com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance() + : infrequentAccess_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(1, getInfrequentAccess()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getInfrequentAccess()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.bigtable.admin.v2.TieredStorageConfig)) { + return super.equals(obj); + } + com.google.bigtable.admin.v2.TieredStorageConfig other = + (com.google.bigtable.admin.v2.TieredStorageConfig) obj; + + if (hasInfrequentAccess() != other.hasInfrequentAccess()) return false; + if (hasInfrequentAccess()) { + if (!getInfrequentAccess().equals(other.getInfrequentAccess())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; 
+ hash = (19 * hash) + getDescriptor().hashCode(); + if (hasInfrequentAccess()) { + hash = (37 * hash) + INFREQUENT_ACCESS_FIELD_NUMBER; + hash = (53 * hash) + getInfrequentAccess().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public 
static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.bigtable.admin.v2.TieredStorageConfig prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Config for tiered storage.
    +   * A valid config must have a valid TieredStorageRule. Otherwise the whole
    +   * TieredStorageConfig must be unset.
    +   * By default all data is stored in the SSD tier (only SSD instances can
    +   * configure tiered storage).
    +   * 
    + * + * Protobuf type {@code google.bigtable.admin.v2.TieredStorageConfig} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.TieredStorageConfig) + com.google.bigtable.admin.v2.TieredStorageConfigOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageConfig_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageConfig_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.TieredStorageConfig.class, + com.google.bigtable.admin.v2.TieredStorageConfig.Builder.class); + } + + // Construct using com.google.bigtable.admin.v2.TieredStorageConfig.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { + internalGetInfrequentAccessFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + infrequentAccess_ = null; + if (infrequentAccessBuilder_ != null) { + infrequentAccessBuilder_.dispose(); + infrequentAccessBuilder_ = null; + } + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageConfig_descriptor; + } + + @java.lang.Override + public 
com.google.bigtable.admin.v2.TieredStorageConfig getDefaultInstanceForType() { + return com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance(); + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageConfig build() { + com.google.bigtable.admin.v2.TieredStorageConfig result = buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageConfig buildPartial() { + com.google.bigtable.admin.v2.TieredStorageConfig result = + new com.google.bigtable.admin.v2.TieredStorageConfig(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0(com.google.bigtable.admin.v2.TieredStorageConfig result) { + int from_bitField0_ = bitField0_; + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.infrequentAccess_ = + infrequentAccessBuilder_ == null ? 
infrequentAccess_ : infrequentAccessBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.bigtable.admin.v2.TieredStorageConfig) { + return mergeFrom((com.google.bigtable.admin.v2.TieredStorageConfig) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.bigtable.admin.v2.TieredStorageConfig other) { + if (other == com.google.bigtable.admin.v2.TieredStorageConfig.getDefaultInstance()) + return this; + if (other.hasInfrequentAccess()) { + mergeInfrequentAccess(other.getInfrequentAccess()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetInfrequentAccessFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000001; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private com.google.bigtable.admin.v2.TieredStorageRule infrequentAccess_; + private com.google.protobuf.SingleFieldBuilder< + 
com.google.bigtable.admin.v2.TieredStorageRule, + com.google.bigtable.admin.v2.TieredStorageRule.Builder, + com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder> + infrequentAccessBuilder_; + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + * + * @return Whether the infrequentAccess field is set. + */ + public boolean hasInfrequentAccess() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + * + * @return The infrequentAccess. + */ + public com.google.bigtable.admin.v2.TieredStorageRule getInfrequentAccess() { + if (infrequentAccessBuilder_ == null) { + return infrequentAccess_ == null + ? com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance() + : infrequentAccess_; + } else { + return infrequentAccessBuilder_.getMessage(); + } + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + public Builder setInfrequentAccess(com.google.bigtable.admin.v2.TieredStorageRule value) { + if (infrequentAccessBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + infrequentAccess_ = value; + } else { + infrequentAccessBuilder_.setMessage(value); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + public Builder setInfrequentAccess( + com.google.bigtable.admin.v2.TieredStorageRule.Builder builderForValue) { + if (infrequentAccessBuilder_ == null) { + infrequentAccess_ = builderForValue.build(); + } else { + infrequentAccessBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + public Builder mergeInfrequentAccess(com.google.bigtable.admin.v2.TieredStorageRule value) { + if (infrequentAccessBuilder_ == null) { + if (((bitField0_ & 0x00000001) != 0) + && infrequentAccess_ != null + && infrequentAccess_ + != com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance()) { + getInfrequentAccessBuilder().mergeFrom(value); + } else { + infrequentAccess_ = value; + } + } else { + infrequentAccessBuilder_.mergeFrom(value); + } + if (infrequentAccess_ != null) { + bitField0_ |= 0x00000001; + onChanged(); + } + return this; + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + public Builder clearInfrequentAccess() { + bitField0_ = (bitField0_ & ~0x00000001); + infrequentAccess_ = null; + if (infrequentAccessBuilder_ != null) { + infrequentAccessBuilder_.dispose(); + infrequentAccessBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + public com.google.bigtable.admin.v2.TieredStorageRule.Builder getInfrequentAccessBuilder() { + bitField0_ |= 0x00000001; + onChanged(); + return internalGetInfrequentAccessFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + public com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder getInfrequentAccessOrBuilder() { + if (infrequentAccessBuilder_ != null) { + return infrequentAccessBuilder_.getMessageOrBuilder(); + } else { + return infrequentAccess_ == null + ? com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance() + : infrequentAccess_; + } + } + + /** + * + * + *
    +     * Rule to specify what data is stored in the infrequent access(IA) tier.
    +     * The IA tier allows storing more data per node with reduced performance.
    +     * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.bigtable.admin.v2.TieredStorageRule, + com.google.bigtable.admin.v2.TieredStorageRule.Builder, + com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder> + internalGetInfrequentAccessFieldBuilder() { + if (infrequentAccessBuilder_ == null) { + infrequentAccessBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.bigtable.admin.v2.TieredStorageRule, + com.google.bigtable.admin.v2.TieredStorageRule.Builder, + com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder>( + getInfrequentAccess(), getParentForChildren(), isClean()); + infrequentAccess_ = null; + } + return infrequentAccessBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.TieredStorageConfig) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.TieredStorageConfig) + private static final com.google.bigtable.admin.v2.TieredStorageConfig DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.TieredStorageConfig(); + } + + public static com.google.bigtable.admin.v2.TieredStorageConfig getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TieredStorageConfig parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch 
(java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageConfig getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageConfigOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageConfigOrBuilder.java new file mode 100644 index 0000000000..380f53080d --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageConfigOrBuilder.java @@ -0,0 +1,68 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/bigtable/admin/v2/table.proto +// Protobuf Java Version: 4.33.2 + +package com.google.bigtable.admin.v2; + +@com.google.protobuf.Generated +public interface TieredStorageConfigOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.TieredStorageConfig) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Rule to specify what data is stored in the infrequent access(IA) tier.
    +   * The IA tier allows storing more data per node with reduced performance.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + * + * @return Whether the infrequentAccess field is set. + */ + boolean hasInfrequentAccess(); + + /** + * + * + *
    +   * Rule to specify what data is stored in the infrequent access(IA) tier.
    +   * The IA tier allows storing more data per node with reduced performance.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + * + * @return The infrequentAccess. + */ + com.google.bigtable.admin.v2.TieredStorageRule getInfrequentAccess(); + + /** + * + * + *
    +   * Rule to specify what data is stored in the infrequent access(IA) tier.
    +   * The IA tier allows storing more data per node with reduced performance.
    +   * 
    + * + * .google.bigtable.admin.v2.TieredStorageRule infrequent_access = 1; + */ + com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder getInfrequentAccessOrBuilder(); +} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageRule.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageRule.java new file mode 100644 index 0000000000..2a78390f88 --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageRule.java @@ -0,0 +1,801 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// NO CHECKED-IN PROTOBUF GENCODE +// source: google/bigtable/admin/v2/table.proto +// Protobuf Java Version: 4.33.2 + +package com.google.bigtable.admin.v2; + +/** + * + * + *
    + * Rule to specify what data is stored in a storage tier.
    + * 
    + * + * Protobuf type {@code google.bigtable.admin.v2.TieredStorageRule} + */ +@com.google.protobuf.Generated +public final class TieredStorageRule extends com.google.protobuf.GeneratedMessage + implements + // @@protoc_insertion_point(message_implements:google.bigtable.admin.v2.TieredStorageRule) + TieredStorageRuleOrBuilder { + private static final long serialVersionUID = 0L; + + static { + com.google.protobuf.RuntimeVersion.validateProtobufGencodeVersion( + com.google.protobuf.RuntimeVersion.RuntimeDomain.PUBLIC, + /* major= */ 4, + /* minor= */ 33, + /* patch= */ 2, + /* suffix= */ "", + "TieredStorageRule"); + } + + // Use TieredStorageRule.newBuilder() to construct. + private TieredStorageRule(com.google.protobuf.GeneratedMessage.Builder builder) { + super(builder); + } + + private TieredStorageRule() {} + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageRule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageRule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.TieredStorageRule.class, + com.google.bigtable.admin.v2.TieredStorageRule.Builder.class); + } + + private int ruleCase_ = 0; + + @SuppressWarnings("serial") + private java.lang.Object rule_; + + public enum RuleCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + INCLUDE_IF_OLDER_THAN(1), + RULE_NOT_SET(0); + private final int value; + + private RuleCase(int value) { + this.value = value; + } + + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. 
+ */ + @java.lang.Deprecated + public static RuleCase valueOf(int value) { + return forNumber(value); + } + + public static RuleCase forNumber(int value) { + switch (value) { + case 1: + return INCLUDE_IF_OLDER_THAN; + case 0: + return RULE_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public RuleCase getRuleCase() { + return RuleCase.forNumber(ruleCase_); + } + + public static final int INCLUDE_IF_OLDER_THAN_FIELD_NUMBER = 1; + + /** + * + * + *
    +   * Include cells older than the given age.
    +   * For the infrequent access tier, this value must be at least 30 days.
    +   * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + * + * @return Whether the includeIfOlderThan field is set. + */ + @java.lang.Override + public boolean hasIncludeIfOlderThan() { + return ruleCase_ == 1; + } + + /** + * + * + *
    +   * Include cells older than the given age.
    +   * For the infrequent access tier, this value must be at least 30 days.
    +   * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + * + * @return The includeIfOlderThan. + */ + @java.lang.Override + public com.google.protobuf.Duration getIncludeIfOlderThan() { + if (ruleCase_ == 1) { + return (com.google.protobuf.Duration) rule_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + /** + * + * + *
    +   * Include cells older than the given age.
    +   * For the infrequent access tier, this value must be at least 30 days.
    +   * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getIncludeIfOlderThanOrBuilder() { + if (ruleCase_ == 1) { + return (com.google.protobuf.Duration) rule_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (ruleCase_ == 1) { + output.writeMessage(1, (com.google.protobuf.Duration) rule_); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (ruleCase_ == 1) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 1, (com.google.protobuf.Duration) rule_); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj instanceof com.google.bigtable.admin.v2.TieredStorageRule)) { + return super.equals(obj); + } + com.google.bigtable.admin.v2.TieredStorageRule other = + (com.google.bigtable.admin.v2.TieredStorageRule) obj; + + if (!getRuleCase().equals(other.getRuleCase())) return false; + switch (ruleCase_) { + case 1: + if (!getIncludeIfOlderThan().equals(other.getIncludeIfOlderThan())) return false; + break; + case 0: + default: + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + 
hash = (19 * hash) + getDescriptor().hashCode(); + switch (ruleCase_) { + case 1: + hash = (37 * hash) + INCLUDE_IF_OLDER_THAN_FIELD_NUMBER; + hash = (53 * hash) + getIncludeIfOlderThan().hashCode(); + break; + case 0: + default: + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom(java.nio.ByteBuffer data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom(byte[] data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom(java.io.InputStream input) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); 
+ } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseDelimitedFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException(PARSER, input); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessage.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder(com.google.bigtable.admin.v2.TieredStorageRule prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
    +   * Rule to specify what data is stored in a storage tier.
    +   * 
    + * + * Protobuf type {@code google.bigtable.admin.v2.TieredStorageRule} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessage.Builder + implements + // @@protoc_insertion_point(builder_implements:google.bigtable.admin.v2.TieredStorageRule) + com.google.bigtable.admin.v2.TieredStorageRuleOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageRule_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageRule_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.bigtable.admin.v2.TieredStorageRule.class, + com.google.bigtable.admin.v2.TieredStorageRule.Builder.class); + } + + // Construct using com.google.bigtable.admin.v2.TieredStorageRule.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + if (includeIfOlderThanBuilder_ != null) { + includeIfOlderThanBuilder_.clear(); + } + ruleCase_ = 0; + rule_ = null; + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.bigtable.admin.v2.TableProto + .internal_static_google_bigtable_admin_v2_TieredStorageRule_descriptor; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageRule getDefaultInstanceForType() { + return com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance(); + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageRule build() { + com.google.bigtable.admin.v2.TieredStorageRule result = buildPartial(); + if 
(!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageRule buildPartial() { + com.google.bigtable.admin.v2.TieredStorageRule result = + new com.google.bigtable.admin.v2.TieredStorageRule(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + buildPartialOneofs(result); + onBuilt(); + return result; + } + + private void buildPartial0(com.google.bigtable.admin.v2.TieredStorageRule result) { + int from_bitField0_ = bitField0_; + } + + private void buildPartialOneofs(com.google.bigtable.admin.v2.TieredStorageRule result) { + result.ruleCase_ = ruleCase_; + result.rule_ = this.rule_; + if (ruleCase_ == 1 && includeIfOlderThanBuilder_ != null) { + result.rule_ = includeIfOlderThanBuilder_.build(); + } + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other instanceof com.google.bigtable.admin.v2.TieredStorageRule) { + return mergeFrom((com.google.bigtable.admin.v2.TieredStorageRule) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom(com.google.bigtable.admin.v2.TieredStorageRule other) { + if (other == com.google.bigtable.admin.v2.TieredStorageRule.getDefaultInstance()) return this; + switch (other.getRuleCase()) { + case INCLUDE_IF_OLDER_THAN: + { + mergeIncludeIfOlderThan(other.getIncludeIfOlderThan()); + break; + } + case RULE_NOT_SET: + { + break; + } + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + 
int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + input.readMessage( + internalGetIncludeIfOlderThanFieldBuilder().getBuilder(), extensionRegistry); + ruleCase_ = 1; + break; + } // case 10 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int ruleCase_ = 0; + private java.lang.Object rule_; + + public RuleCase getRuleCase() { + return RuleCase.forNumber(ruleCase_); + } + + public Builder clearRule() { + ruleCase_ = 0; + rule_ = null; + onChanged(); + return this; + } + + private int bitField0_; + + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + includeIfOlderThanBuilder_; + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + * + * @return Whether the includeIfOlderThan field is set. + */ + @java.lang.Override + public boolean hasIncludeIfOlderThan() { + return ruleCase_ == 1; + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + * + * @return The includeIfOlderThan. + */ + @java.lang.Override + public com.google.protobuf.Duration getIncludeIfOlderThan() { + if (includeIfOlderThanBuilder_ == null) { + if (ruleCase_ == 1) { + return (com.google.protobuf.Duration) rule_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } else { + if (ruleCase_ == 1) { + return includeIfOlderThanBuilder_.getMessage(); + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + public Builder setIncludeIfOlderThan(com.google.protobuf.Duration value) { + if (includeIfOlderThanBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + rule_ = value; + onChanged(); + } else { + includeIfOlderThanBuilder_.setMessage(value); + } + ruleCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + public Builder setIncludeIfOlderThan(com.google.protobuf.Duration.Builder builderForValue) { + if (includeIfOlderThanBuilder_ == null) { + rule_ = builderForValue.build(); + onChanged(); + } else { + includeIfOlderThanBuilder_.setMessage(builderForValue.build()); + } + ruleCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + public Builder mergeIncludeIfOlderThan(com.google.protobuf.Duration value) { + if (includeIfOlderThanBuilder_ == null) { + if (ruleCase_ == 1 && rule_ != com.google.protobuf.Duration.getDefaultInstance()) { + rule_ = + com.google.protobuf.Duration.newBuilder((com.google.protobuf.Duration) rule_) + .mergeFrom(value) + .buildPartial(); + } else { + rule_ = value; + } + onChanged(); + } else { + if (ruleCase_ == 1) { + includeIfOlderThanBuilder_.mergeFrom(value); + } else { + includeIfOlderThanBuilder_.setMessage(value); + } + } + ruleCase_ = 1; + return this; + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + public Builder clearIncludeIfOlderThan() { + if (includeIfOlderThanBuilder_ == null) { + if (ruleCase_ == 1) { + ruleCase_ = 0; + rule_ = null; + onChanged(); + } + } else { + if (ruleCase_ == 1) { + ruleCase_ = 0; + rule_ = null; + } + includeIfOlderThanBuilder_.clear(); + } + return this; + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + public com.google.protobuf.Duration.Builder getIncludeIfOlderThanBuilder() { + return internalGetIncludeIfOlderThanFieldBuilder().getBuilder(); + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + @java.lang.Override + public com.google.protobuf.DurationOrBuilder getIncludeIfOlderThanOrBuilder() { + if ((ruleCase_ == 1) && (includeIfOlderThanBuilder_ != null)) { + return includeIfOlderThanBuilder_.getMessageOrBuilder(); + } else { + if (ruleCase_ == 1) { + return (com.google.protobuf.Duration) rule_; + } + return com.google.protobuf.Duration.getDefaultInstance(); + } + } + + /** + * + * + *
    +     * Include cells older than the given age.
    +     * For the infrequent access tier, this value must be at least 30 days.
    +     * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + private com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder> + internalGetIncludeIfOlderThanFieldBuilder() { + if (includeIfOlderThanBuilder_ == null) { + if (!(ruleCase_ == 1)) { + rule_ = com.google.protobuf.Duration.getDefaultInstance(); + } + includeIfOlderThanBuilder_ = + new com.google.protobuf.SingleFieldBuilder< + com.google.protobuf.Duration, + com.google.protobuf.Duration.Builder, + com.google.protobuf.DurationOrBuilder>( + (com.google.protobuf.Duration) rule_, getParentForChildren(), isClean()); + rule_ = null; + } + ruleCase_ = 1; + onChanged(); + return includeIfOlderThanBuilder_; + } + + // @@protoc_insertion_point(builder_scope:google.bigtable.admin.v2.TieredStorageRule) + } + + // @@protoc_insertion_point(class_scope:google.bigtable.admin.v2.TieredStorageRule) + private static final com.google.bigtable.admin.v2.TieredStorageRule DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = new com.google.bigtable.admin.v2.TieredStorageRule(); + } + + public static com.google.bigtable.admin.v2.TieredStorageRule getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public TieredStorageRule parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch 
(java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.bigtable.admin.v2.TieredStorageRule getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageRuleOrBuilder.java b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageRuleOrBuilder.java new file mode 100644 index 0000000000..642ec6aac2 --- /dev/null +++ b/proto-google-cloud-bigtable-admin-v2/src/main/java/com/google/bigtable/admin/v2/TieredStorageRuleOrBuilder.java @@ -0,0 +1,70 @@ +/* + * Copyright 2026 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! 
+// NO CHECKED-IN PROTOBUF GENCODE +// source: google/bigtable/admin/v2/table.proto +// Protobuf Java Version: 4.33.2 + +package com.google.bigtable.admin.v2; + +@com.google.protobuf.Generated +public interface TieredStorageRuleOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.bigtable.admin.v2.TieredStorageRule) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
    +   * Include cells older than the given age.
    +   * For the infrequent access tier, this value must be at least 30 days.
    +   * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + * + * @return Whether the includeIfOlderThan field is set. + */ + boolean hasIncludeIfOlderThan(); + + /** + * + * + *
    +   * Include cells older than the given age.
    +   * For the infrequent access tier, this value must be at least 30 days.
    +   * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + * + * @return The includeIfOlderThan. + */ + com.google.protobuf.Duration getIncludeIfOlderThan(); + + /** + * + * + *
    +   * Include cells older than the given age.
    +   * For the infrequent access tier, this value must be at least 30 days.
    +   * 
    + * + * .google.protobuf.Duration include_if_older_than = 1; + */ + com.google.protobuf.DurationOrBuilder getIncludeIfOlderThanOrBuilder(); + + com.google.bigtable.admin.v2.TieredStorageRule.RuleCase getRuleCase(); +} diff --git a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto index 68913d057a..4ce692f860 100644 --- a/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto +++ b/proto-google-cloud-bigtable-admin-v2/src/main/proto/google/bigtable/admin/v2/table.proto @@ -1,4 +1,4 @@ -// Copyright 2025 Google LLC +// Copyright 2026 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -205,6 +205,15 @@ message Table { AutomatedBackupPolicy automated_backup_policy = 13; } + // Rules to specify what data is stored in each storage tier. + // Different tiers store data differently, providing different trade-offs + // between cost and performance. Different parts of a table can be stored + // separately on different tiers. + // If a config is specified, tiered storage is enabled for this table. + // Otherwise, tiered storage is disabled. + // Only SSD instances can configure tiered storage. + TieredStorageConfig tiered_storage_config = 14; + // The row key schema for this table. The schema is used to decode the raw row // key bytes into a structured format. The order of field declarations in this // schema is important, as it reflects how the raw row key bytes are @@ -638,6 +647,27 @@ enum RestoreSourceType { BACKUP = 1; } +// Config for tiered storage. +// A valid config must have a valid TieredStorageRule. Otherwise the whole +// TieredStorageConfig must be unset. +// By default all data is stored in the SSD tier (only SSD instances can +// configure tiered storage). 
+message TieredStorageConfig { + // Rule to specify what data is stored in the infrequent access(IA) tier. + // The IA tier allows storing more data per node with reduced performance. + TieredStorageRule infrequent_access = 1; +} + +// Rule to specify what data is stored in a storage tier. +message TieredStorageRule { + // Rules to specify what data is stored in this tier. + oneof rule { + // Include cells older than the given age. + // For the infrequent access tier, this value must be at least 30 days. + google.protobuf.Duration include_if_older_than = 1; + } +} + // Represents a protobuf schema. message ProtoSchema { // Required. Contains a protobuf-serialized diff --git a/proto-google-cloud-bigtable-v2/pom.xml b/proto-google-cloud-bigtable-v2/pom.xml index 0d1da46f9b..664b040aba 100644 --- a/proto-google-cloud-bigtable-v2/pom.xml +++ b/proto-google-cloud-bigtable-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigtable-v2 - 2.73.1 + 2.74.0 proto-google-cloud-bigtable-v2 PROTO library for proto-google-cloud-bigtable-v2 com.google.cloud google-cloud-bigtable-parent - 2.73.1 + 2.74.0 @@ -18,14 +18,14 @@ com.google.cloud google-cloud-bigtable-deps-bom - 2.73.1 + 2.74.0 pom import com.google.cloud google-cloud-bigtable-bom - 2.73.1 + 2.74.0 pom import diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 6e1e0caad6..437edfc514 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -28,7 +28,7 @@ com.google.cloud google-cloud-bigtable - 2.73.1 + 2.74.0 diff --git a/test-proxy/pom.xml b/test-proxy/pom.xml index d5110580fc..9a2ab8b8e7 100644 --- a/test-proxy/pom.xml +++ b/test-proxy/pom.xml @@ -12,11 +12,11 @@ google-cloud-bigtable-parent com.google.cloud - 2.73.1 + 2.74.0 - 2.73.1 + 2.74.0 diff --git a/versions.txt b/versions.txt index 40fcb9fecd..53c64e0201 100644 --- a/versions.txt +++ b/versions.txt @@ -1,10 +1,10 @@ # Format: # module:released-version:current-version -google-cloud-bigtable:2.73.1:2.73.1 
-grpc-google-cloud-bigtable-admin-v2:2.73.1:2.73.1 -grpc-google-cloud-bigtable-v2:2.73.1:2.73.1 -proto-google-cloud-bigtable-admin-v2:2.73.1:2.73.1 -proto-google-cloud-bigtable-v2:2.73.1:2.73.1 -google-cloud-bigtable-emulator:0.210.1:0.210.1 -google-cloud-bigtable-emulator-core:0.210.1:0.210.1 +google-cloud-bigtable:2.74.0:2.74.0 +grpc-google-cloud-bigtable-admin-v2:2.74.0:2.74.0 +grpc-google-cloud-bigtable-v2:2.74.0:2.74.0 +proto-google-cloud-bigtable-admin-v2:2.74.0:2.74.0 +proto-google-cloud-bigtable-v2:2.74.0:2.74.0 +google-cloud-bigtable-emulator:0.211.0:0.211.0 +google-cloud-bigtable-emulator-core:0.211.0:0.211.0