From 3087f6d6df7d8028c85388f73dd32cc3082b1e5e Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 6 Aug 2025 13:57:30 -0400 Subject: [PATCH 01/16] chore(main): release 2.55.1-SNAPSHOT (#3233) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- gapic-google-cloud-storage-v2/pom.xml | 4 ++-- google-cloud-storage-bom/pom.xml | 16 ++++++++-------- google-cloud-storage-control/pom.xml | 4 ++-- google-cloud-storage/pom.xml | 4 ++-- grpc-google-cloud-storage-control-v2/pom.xml | 4 ++-- grpc-google-cloud-storage-v2/pom.xml | 4 ++-- pom.xml | 16 ++++++++-------- proto-google-cloud-storage-control-v2/pom.xml | 4 ++-- proto-google-cloud-storage-v2/pom.xml | 4 ++-- samples/snapshot/pom.xml | 6 +++--- storage-shared-benchmarking/pom.xml | 4 ++-- versions.txt | 14 +++++++------- 12 files changed, 42 insertions(+), 42 deletions(-) diff --git a/gapic-google-cloud-storage-v2/pom.xml b/gapic-google-cloud-storage-v2/pom.xml index 0d97691561..f85d90ef24 100644 --- a/gapic-google-cloud-storage-v2/pom.xml +++ b/gapic-google-cloud-storage-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc gapic-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT gapic-google-cloud-storage-v2 GRPC library for gapic-google-cloud-storage-v2 com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT diff --git a/google-cloud-storage-bom/pom.xml b/google-cloud-storage-bom/pom.xml index 87397bf767..2eac19d87e 100644 --- a/google-cloud-storage-bom/pom.xml +++ b/google-cloud-storage-bom/pom.xml @@ -19,7 +19,7 @@ 4.0.0 com.google.cloud google-cloud-storage-bom - 2.55.0 + 2.55.1-SNAPSHOT pom com.google.cloud @@ -69,37 +69,37 @@ com.google.cloud google-cloud-storage - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc gapic-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc 
proto-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.cloud google-cloud-storage-control - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-storage-control-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc proto-google-cloud-storage-control-v2 - 2.55.0 + 2.55.1-SNAPSHOT diff --git a/google-cloud-storage-control/pom.xml b/google-cloud-storage-control/pom.xml index f004cfe0af..0517d81d37 100644 --- a/google-cloud-storage-control/pom.xml +++ b/google-cloud-storage-control/pom.xml @@ -5,13 +5,13 @@ 4.0.0 com.google.cloud google-cloud-storage-control - 2.55.0 + 2.55.1-SNAPSHOT google-cloud-storage-control GRPC library for google-cloud-storage-control com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT diff --git a/google-cloud-storage/pom.xml b/google-cloud-storage/pom.xml index 1f3decd8ab..2ef1cd0848 100644 --- a/google-cloud-storage/pom.xml +++ b/google-cloud-storage/pom.xml @@ -2,7 +2,7 @@ 4.0.0 google-cloud-storage - 2.55.0 + 2.55.1-SNAPSHOT jar Google Cloud Storage https://github.com/googleapis/java-storage @@ -12,7 +12,7 @@ com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT google-cloud-storage diff --git a/grpc-google-cloud-storage-control-v2/pom.xml b/grpc-google-cloud-storage-control-v2/pom.xml index 9cd67488c5..3c132d31e7 100644 --- a/grpc-google-cloud-storage-control-v2/pom.xml +++ b/grpc-google-cloud-storage-control-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-storage-control-v2 - 2.55.0 + 2.55.1-SNAPSHOT grpc-google-cloud-storage-control-v2 GRPC library for google-cloud-storage com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT diff --git a/grpc-google-cloud-storage-v2/pom.xml b/grpc-google-cloud-storage-v2/pom.xml index e9bc40708c..eebe6e2f0e 100644 --- a/grpc-google-cloud-storage-v2/pom.xml +++ b/grpc-google-cloud-storage-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT 
grpc-google-cloud-storage-v2 GRPC library for grpc-google-cloud-storage-v2 com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT diff --git a/pom.xml b/pom.xml index 927ad55dab..1a7cdd3375 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-storage-parent pom - 2.55.0 + 2.55.1-SNAPSHOT Storage Parent https://github.com/googleapis/java-storage @@ -82,7 +82,7 @@ com.google.cloud google-cloud-storage - 2.55.0 + 2.55.1-SNAPSHOT com.google.apis @@ -104,32 +104,32 @@ com.google.api.grpc proto-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc gapic-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc grpc-google-cloud-storage-control-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.api.grpc proto-google-cloud-storage-control-v2 - 2.55.0 + 2.55.1-SNAPSHOT com.google.cloud google-cloud-storage-control - 2.55.0 + 2.55.1-SNAPSHOT com.google.cloud diff --git a/proto-google-cloud-storage-control-v2/pom.xml b/proto-google-cloud-storage-control-v2/pom.xml index cdb5d15e40..4f630ef121 100644 --- a/proto-google-cloud-storage-control-v2/pom.xml +++ b/proto-google-cloud-storage-control-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-storage-control-v2 - 2.55.0 + 2.55.1-SNAPSHOT proto-google-cloud-storage-control-v2 Proto library for proto-google-cloud-storage-control-v2 com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT diff --git a/proto-google-cloud-storage-v2/pom.xml b/proto-google-cloud-storage-v2/pom.xml index b44026cba8..9b898f142d 100644 --- a/proto-google-cloud-storage-v2/pom.xml +++ b/proto-google-cloud-storage-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-storage-v2 - 2.55.0 + 2.55.1-SNAPSHOT proto-google-cloud-storage-v2 PROTO library for proto-google-cloud-storage-v2 com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT diff 
--git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 6b34767574..a05980d226 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -28,12 +28,12 @@ com.google.cloud google-cloud-storage - 2.55.0 + 2.55.1-SNAPSHOT com.google.cloud google-cloud-storage-control - 2.55.0 + 2.55.1-SNAPSHOT compile @@ -70,7 +70,7 @@ com.google.cloud google-cloud-storage - 2.55.0 + 2.55.1-SNAPSHOT tests test diff --git a/storage-shared-benchmarking/pom.xml b/storage-shared-benchmarking/pom.xml index 786194b1b6..30105dbe71 100644 --- a/storage-shared-benchmarking/pom.xml +++ b/storage-shared-benchmarking/pom.xml @@ -10,7 +10,7 @@ com.google.cloud google-cloud-storage-parent - 2.55.0 + 2.55.1-SNAPSHOT @@ -31,7 +31,7 @@ com.google.cloud google-cloud-storage - 2.55.0 + 2.55.1-SNAPSHOT tests diff --git a/versions.txt b/versions.txt index f9508c10b7..a05bde7968 100644 --- a/versions.txt +++ b/versions.txt @@ -1,10 +1,10 @@ # Format: # module:released-version:current-version -google-cloud-storage:2.55.0:2.55.0 -gapic-google-cloud-storage-v2:2.55.0:2.55.0 -grpc-google-cloud-storage-v2:2.55.0:2.55.0 -proto-google-cloud-storage-v2:2.55.0:2.55.0 -google-cloud-storage-control:2.55.0:2.55.0 -proto-google-cloud-storage-control-v2:2.55.0:2.55.0 -grpc-google-cloud-storage-control-v2:2.55.0:2.55.0 +google-cloud-storage:2.55.0:2.55.1-SNAPSHOT +gapic-google-cloud-storage-v2:2.55.0:2.55.1-SNAPSHOT +grpc-google-cloud-storage-v2:2.55.0:2.55.1-SNAPSHOT +proto-google-cloud-storage-v2:2.55.0:2.55.1-SNAPSHOT +google-cloud-storage-control:2.55.0:2.55.1-SNAPSHOT +proto-google-cloud-storage-control-v2:2.55.0:2.55.1-SNAPSHOT +grpc-google-cloud-storage-control-v2:2.55.0:2.55.1-SNAPSHOT From 7bb4e67af67a80ade9e416a736307bf257f0a1e5 Mon Sep 17 00:00:00 2001 From: cloud-java-bot <122572305+cloud-java-bot@users.noreply.github.com> Date: Wed, 6 Aug 2025 15:56:44 -0400 Subject: [PATCH 02/16] chore: Update generation configuration at Wed Aug 6 02:42:05 UTC 2025 (#3232) * chore: Update generation 
configuration at Wed Aug 6 02:42:05 UTC 2025 * chore: generate libraries at Wed Aug 6 02:42:36 UTC 2025 --- generation_config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/generation_config.yaml b/generation_config.yaml index eb49ac610b..b9b47f9fd8 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,5 +1,5 @@ gapic_generator_version: 2.61.0 -googleapis_commitish: 8c74a4f9ad52cfd7a7d1f6427fa0a0511377a395 +googleapis_commitish: 09f2446e4a30d6d79dff93e14bbc992b855ebaaf libraries_bom_version: 26.65.0 libraries: - api_shortname: storage From d7c93535eb5f21b3fa6549ee679f856a049a532e Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 6 Aug 2025 21:57:25 +0200 Subject: [PATCH 03/16] chore(deps): update storage release dependencies to v2.55.0 (#3234) --- samples/install-without-bom/pom.xml | 6 +++--- samples/snippets/pom.xml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 8b4dcc4463..57dbe8703b 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -30,12 +30,12 @@ com.google.cloud google-cloud-storage - 2.54.0 + 2.55.0 com.google.cloud google-cloud-storage-control - 2.54.0 + 2.55.0 @@ -78,7 +78,7 @@ com.google.cloud google-cloud-storage - 2.54.0 + 2.55.0 tests test diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index b6d37a23a4..0e6d423c63 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -99,7 +99,7 @@ com.google.cloud google-cloud-storage - 2.54.0 + 2.55.0 tests test From 115d27d20ad2de91b429b89accacecd83f782016 Mon Sep 17 00:00:00 2001 From: cloud-java-bot <122572305+cloud-java-bot@users.noreply.github.com> Date: Mon, 11 Aug 2025 12:44:28 -0400 Subject: [PATCH 04/16] chore: Update generation configuration at Mon Aug 11 02:40:32 UTC 2025 (#3237) * chore: Update generation configuration at Thu Aug 7 02:41:43 UTC 2025 * chore: generate libraries at 
Thu Aug 7 02:42:13 UTC 2025 * chore: Update generation configuration at Fri Aug 8 02:40:54 UTC 2025 * chore: Update generation configuration at Sat Aug 9 02:33:16 UTC 2025 * chore: Update generation configuration at Sun Aug 10 02:43:55 UTC 2025 * chore: Update generation configuration at Mon Aug 11 02:40:32 UTC 2025 --- README.md | 4 ++-- generation_config.yaml | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 05340f4a40..dc0b2f14c7 100644 --- a/README.md +++ b/README.md @@ -46,12 +46,12 @@ If you are using Maven without the BOM, add this to your dependencies: com.google.cloud google-cloud-storage - 2.54.0 + 2.55.0 com.google.cloud google-cloud-storage-control - 2.54.0 + 2.55.0 ``` diff --git a/generation_config.yaml b/generation_config.yaml index b9b47f9fd8..e2d03dcfbc 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,5 +1,5 @@ gapic_generator_version: 2.61.0 -googleapis_commitish: 09f2446e4a30d6d79dff93e14bbc992b855ebaaf +googleapis_commitish: f4902d9b4168b4d1b162f5c5bcf149086579fa26 libraries_bom_version: 26.65.0 libraries: - api_shortname: storage From 33f024b1ae094bf3e3605e1a835cb55eb5c9e750 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 11 Aug 2025 21:45:49 +0200 Subject: [PATCH 05/16] deps: update actions/checkout action to v5 (#3239) --- .github/workflows/ci.yaml | 14 +++++++------- .github/workflows/hermetic_library_generation.yaml | 2 +- .github/workflows/renovate_config_check.yaml | 2 +- .github/workflows/samples.yaml | 2 +- .github/workflows/unmanaged_dependency_check.yaml | 2 +- .github/workflows/update_generation_config.yaml | 2 +- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 10acf46ac7..3306bc7d75 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -27,7 +27,7 @@ jobs: matrix: java: [11, 17, 21, 24] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: 
actions/setup-java@v4 with: distribution: temurin @@ -41,7 +41,7 @@ jobs: name: "units (8)" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: java-version: 8 @@ -63,7 +63,7 @@ jobs: steps: - name: Support longpaths run: git config --system core.longpaths true - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: distribution: temurin @@ -78,7 +78,7 @@ jobs: matrix: java: [17] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: distribution: temurin @@ -88,7 +88,7 @@ jobs: javadoc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: distribution: temurin @@ -100,7 +100,7 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: distribution: temurin @@ -112,7 +112,7 @@ jobs: clirr: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: distribution: temurin diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml index 519c2f8bfa..e9ee43494d 100644 --- a/.github/workflows/hermetic_library_generation.yaml +++ b/.github/workflows/hermetic_library_generation.yaml @@ -38,7 +38,7 @@ jobs: else echo "SHOULD_RUN=true" >> $GITHUB_ENV fi - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 if: env.SHOULD_RUN == 'true' with: fetch-depth: 0 diff --git a/.github/workflows/renovate_config_check.yaml b/.github/workflows/renovate_config_check.yaml index 47b9e87c98..e32cf4a37a 100644 --- a/.github/workflows/renovate_config_check.yaml +++ b/.github/workflows/renovate_config_check.yaml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v4 + uses: actions/checkout@v5 - name: Set up Node.js uses: 
actions/setup-node@v4 diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml index 186fd8bcfc..f259a4e1e9 100644 --- a/.github/workflows/samples.yaml +++ b/.github/workflows/samples.yaml @@ -20,7 +20,7 @@ jobs: checkstyle: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: actions/setup-java@v4 with: distribution: temurin diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml index d7ae36c028..0ff66ee253 100644 --- a/.github/workflows/unmanaged_dependency_check.yaml +++ b/.github/workflows/unmanaged_dependency_check.yaml @@ -5,7 +5,7 @@ jobs: unmanaged_dependency_check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v5 - uses: actions/setup-java@v3 with: distribution: temurin diff --git a/.github/workflows/update_generation_config.yaml b/.github/workflows/update_generation_config.yaml index a7e14bb483..59e39834dd 100644 --- a/.github/workflows/update_generation_config.yaml +++ b/.github/workflows/update_generation_config.yaml @@ -26,7 +26,7 @@ jobs: # the branch into which the pull request is merged base_branch: main steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} From 8f63f53389fd3e7f6d42c3c666534a67556e0646 Mon Sep 17 00:00:00 2001 From: cloud-java-bot <122572305+cloud-java-bot@users.noreply.github.com> Date: Tue, 19 Aug 2025 11:15:39 -0400 Subject: [PATCH 06/16] chore: Update generation configuration at Sat Aug 16 02:30:38 UTC 2025 (#3243) * chore: Update generation configuration at Tue Aug 12 02:31:47 UTC 2025 * chore: generate libraries at Tue Aug 12 02:32:20 UTC 2025 * chore: Update generation configuration at Wed Aug 13 02:32:37 UTC 2025 * chore: Update generation configuration at Thu Aug 14 02:35:24 UTC 2025 * chore: Update generation configuration at Fri Aug 15 02:34:26 UTC 2025 * chore: Update generation 
configuration at Sat Aug 16 02:30:38 UTC 2025 * chore: generate libraries at Sat Aug 16 02:31:07 UTC 2025 --- .github/workflows/ci.yaml | 14 +++++++------- .github/workflows/renovate_config_check.yaml | 2 +- .github/workflows/samples.yaml | 2 +- .github/workflows/update_generation_config.yaml | 2 +- README.md | 2 +- generation_config.yaml | 4 ++-- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 3306bc7d75..10acf46ac7 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -27,7 +27,7 @@ jobs: matrix: java: [11, 17, 21, 24] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin @@ -41,7 +41,7 @@ jobs: name: "units (8)" runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: java-version: 8 @@ -63,7 +63,7 @@ jobs: steps: - name: Support longpaths run: git config --system core.longpaths true - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin @@ -78,7 +78,7 @@ jobs: matrix: java: [17] steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin @@ -88,7 +88,7 @@ jobs: javadoc: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin @@ -100,7 +100,7 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin @@ -112,7 +112,7 @@ jobs: clirr: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin diff --git a/.github/workflows/renovate_config_check.yaml b/.github/workflows/renovate_config_check.yaml index e32cf4a37a..47b9e87c98 
100644 --- a/.github/workflows/renovate_config_check.yaml +++ b/.github/workflows/renovate_config_check.yaml @@ -11,7 +11,7 @@ jobs: steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 - name: Set up Node.js uses: actions/setup-node@v4 diff --git a/.github/workflows/samples.yaml b/.github/workflows/samples.yaml index f259a4e1e9..186fd8bcfc 100644 --- a/.github/workflows/samples.yaml +++ b/.github/workflows/samples.yaml @@ -20,7 +20,7 @@ jobs: checkstyle: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - uses: actions/setup-java@v4 with: distribution: temurin diff --git a/.github/workflows/update_generation_config.yaml b/.github/workflows/update_generation_config.yaml index 59e39834dd..a7e14bb483 100644 --- a/.github/workflows/update_generation_config.yaml +++ b/.github/workflows/update_generation_config.yaml @@ -26,7 +26,7 @@ jobs: # the branch into which the pull request is merged base_branch: main steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 with: fetch-depth: 0 token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} diff --git a/README.md b/README.md index dc0b2f14c7..eda2cb563b 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.65.0') +implementation platform('com.google.cloud:libraries-bom:26.66.0') implementation 'com.google.cloud:google-cloud-storage' ``` diff --git a/generation_config.yaml b/generation_config.yaml index e2d03dcfbc..0f721bdad1 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,6 +1,6 @@ gapic_generator_version: 2.61.0 -googleapis_commitish: f4902d9b4168b4d1b162f5c5bcf149086579fa26 -libraries_bom_version: 26.65.0 +googleapis_commitish: d869249c5d3da60af39dc7968140468ef7d1fdd6 +libraries_bom_version: 26.66.0 libraries: - 
api_shortname: storage name_pretty: Cloud Storage From e91a7dc99b7ab9b5cff7a55efaa3ca47a3cf4659 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 19 Aug 2025 17:24:08 +0200 Subject: [PATCH 07/16] chore(deps): update dependency com.google.cloud:libraries-bom to v26.66.0 (#3244) --- samples/snippets/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index 0e6d423c63..76b952cfa8 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -31,7 +31,7 @@ com.google.cloud libraries-bom - 26.65.0 + 26.66.0 pom import From c13ce95f91b4d2b44a289cdd0d89784a029e4ab2 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 19 Aug 2025 17:44:23 +0200 Subject: [PATCH 08/16] test(deps): update cross product test dependencies (#3238) --- google-cloud-storage/pom.xml | 4 ++-- samples/install-without-bom/pom.xml | 2 +- samples/snapshot/pom.xml | 2 +- samples/snippets/pom.xml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/google-cloud-storage/pom.xml b/google-cloud-storage/pom.xml index 2ef1cd0848..a58682f862 100644 --- a/google-cloud-storage/pom.xml +++ b/google-cloud-storage/pom.xml @@ -239,14 +239,14 @@ com.google.api.grpc proto-google-cloud-kms-v1 - 0.164.0 + 0.166.0 test com.google.cloud google-cloud-kms - 2.73.0 + 2.75.0 test diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index 57dbe8703b..5c455491e0 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -72,7 +72,7 @@ com.google.cloud google-cloud-kms - 2.73.0 + 2.75.0 test diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index a05980d226..9e263e88a2 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -64,7 +64,7 @@ com.google.cloud google-cloud-kms - 2.73.0 + 2.75.0 test diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index 76b952cfa8..965ca72c09 100644 --- a/samples/snippets/pom.xml 
+++ b/samples/snippets/pom.xml @@ -82,7 +82,7 @@ com.google.cloud google-cloud-kms - 2.73.0 + 2.75.0 test From 0782e62fc9534e3cecfaaa4d78b58904ecf699d6 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Wed, 20 Aug 2025 19:30:10 +0200 Subject: [PATCH 09/16] deps: update dependency com.google.cloud:sdk-platform-java-config to v3.52.0 (#3250) --- .github/workflows/unmanaged_dependency_check.yaml | 2 +- google-cloud-storage-bom/pom.xml | 2 +- pom.xml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/unmanaged_dependency_check.yaml b/.github/workflows/unmanaged_dependency_check.yaml index 0ff66ee253..c9d370b95d 100644 --- a/.github/workflows/unmanaged_dependency_check.yaml +++ b/.github/workflows/unmanaged_dependency_check.yaml @@ -17,6 +17,6 @@ jobs: # repository .kokoro/build.sh - name: Unmanaged dependency check - uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.51.0 + uses: googleapis/sdk-platform-java/java-shared-dependencies/unmanaged-dependency-check@google-cloud-shared-dependencies/v3.52.0 with: bom-path: google-cloud-storage-bom/pom.xml diff --git a/google-cloud-storage-bom/pom.xml b/google-cloud-storage-bom/pom.xml index 2eac19d87e..a6f26c1311 100644 --- a/google-cloud-storage-bom/pom.xml +++ b/google-cloud-storage-bom/pom.xml @@ -24,7 +24,7 @@ com.google.cloud sdk-platform-java-config - 3.51.0 + 3.52.0 diff --git a/pom.xml b/pom.xml index 1a7cdd3375..8a93606d1a 100644 --- a/pom.xml +++ b/pom.xml @@ -14,7 +14,7 @@ com.google.cloud sdk-platform-java-config - 3.51.0 + 3.52.0 From 5240d03d12c05aaf95e3d51b0748c1a478c21633 Mon Sep 17 00:00:00 2001 From: cloud-java-bot <122572305+cloud-java-bot@users.noreply.github.com> Date: Wed, 20 Aug 2025 15:00:45 -0400 Subject: [PATCH 10/16] chore: Update generation configuration at Wed Aug 20 02:29:59 UTC 2025 (#3253) * chore: Update generation configuration at Wed Aug 20 02:29:59 UTC 2025 * chore: generate libraries 
at Wed Aug 20 02:30:32 UTC 2025 --- .../hermetic_library_generation.yaml | 2 +- .kokoro/presubmit/graalvm-native-a.cfg | 2 +- .kokoro/presubmit/graalvm-native-b.cfg | 2 +- .kokoro/presubmit/graalvm-native-c.cfg | 2 +- README.md | 2 +- .../storage/v2/stub/GrpcStorageStub.java | 24 +++++++++++++++++++ generation_config.yaml | 4 ++-- .../v2/stub/GrpcStorageControlStub.java | 23 ++++++++++++++++++ 8 files changed, 54 insertions(+), 7 deletions(-) diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml index e9ee43494d..33eb75a8a0 100644 --- a/.github/workflows/hermetic_library_generation.yaml +++ b/.github/workflows/hermetic_library_generation.yaml @@ -43,7 +43,7 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} - - uses: googleapis/sdk-platform-java/.github/scripts@v2.61.0 + - uses: googleapis/sdk-platform-java/.github/scripts@v2.62.0 if: env.SHOULD_RUN == 'true' with: base_ref: ${{ github.base_ref }} diff --git a/.kokoro/presubmit/graalvm-native-a.cfg b/.kokoro/presubmit/graalvm-native-a.cfg index 783727ef01..5816d61073 100644 --- a/.kokoro/presubmit/graalvm-native-a.cfg +++ b/.kokoro/presubmit/graalvm-native-a.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.51.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.52.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-b.cfg b/.kokoro/presubmit/graalvm-native-b.cfg index 83c7afee07..7986fd6731 100644 --- a/.kokoro/presubmit/graalvm-native-b.cfg +++ b/.kokoro/presubmit/graalvm-native-b.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.51.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.52.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-c.cfg b/.kokoro/presubmit/graalvm-native-c.cfg index 3a9bbf8c3a..acecfce1bf 100644 --- a/.kokoro/presubmit/graalvm-native-c.cfg +++ b/.kokoro/presubmit/graalvm-native-c.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.51.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.52.0" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/README.md b/README.md index eda2cb563b..a01d688302 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: com.google.cloud libraries-bom - 26.65.0 + 26.66.0 pom import diff --git a/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java b/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java index bd0747d9b6..060ffb20d3 100644 --- a/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java +++ b/gapic-google-cloud-storage-v2/src/main/java/com/google/storage/v2/stub/GrpcStorageStub.java @@ -90,6 +90,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/DeleteBucket") .setRequestMarshaller(ProtoUtils.marshaller(DeleteBucketRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final 
MethodDescriptor getBucketMethodDescriptor = @@ -98,6 +99,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/GetBucket") .setRequestMarshaller(ProtoUtils.marshaller(GetBucketRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor createBucketMethodDescriptor = @@ -106,6 +108,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/CreateBucket") .setRequestMarshaller(ProtoUtils.marshaller(CreateBucketRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -116,6 +119,7 @@ public class GrpcStorageStub extends StorageStub { .setRequestMarshaller(ProtoUtils.marshaller(ListBucketsRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(ListBucketsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -126,6 +130,7 @@ public class GrpcStorageStub extends StorageStub { .setRequestMarshaller( ProtoUtils.marshaller(LockBucketRetentionPolicyRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor getIamPolicyMethodDescriptor = @@ -134,6 +139,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/GetIamPolicy") .setRequestMarshaller(ProtoUtils.marshaller(GetIamPolicyRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor setIamPolicyMethodDescriptor = @@ -142,6 +148,7 @@ public class GrpcStorageStub extends StorageStub { 
.setFullMethodName("google.storage.v2.Storage/SetIamPolicy") .setRequestMarshaller(ProtoUtils.marshaller(SetIamPolicyRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Policy.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -153,6 +160,7 @@ public class GrpcStorageStub extends StorageStub { ProtoUtils.marshaller(TestIamPermissionsRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(TestIamPermissionsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor updateBucketMethodDescriptor = @@ -161,6 +169,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/UpdateBucket") .setRequestMarshaller(ProtoUtils.marshaller(UpdateBucketRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Bucket.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -171,6 +180,7 @@ public class GrpcStorageStub extends StorageStub { .setRequestMarshaller( ProtoUtils.marshaller(ComposeObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor deleteObjectMethodDescriptor = @@ -179,6 +189,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/DeleteObject") .setRequestMarshaller(ProtoUtils.marshaller(DeleteObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -189,6 +200,7 @@ public class GrpcStorageStub extends StorageStub { .setRequestMarshaller( ProtoUtils.marshaller(RestoreObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + 
.setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -200,6 +212,7 @@ public class GrpcStorageStub extends StorageStub { ProtoUtils.marshaller(CancelResumableWriteRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(CancelResumableWriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor getObjectMethodDescriptor = @@ -208,6 +221,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/GetObject") .setRequestMarshaller(ProtoUtils.marshaller(GetObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -217,6 +231,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/ReadObject") .setRequestMarshaller(ProtoUtils.marshaller(ReadObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(ReadObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -228,6 +243,7 @@ public class GrpcStorageStub extends StorageStub { ProtoUtils.marshaller(BidiReadObjectRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(BidiReadObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor updateObjectMethodDescriptor = @@ -236,6 +252,7 @@ public class GrpcStorageStub extends StorageStub { .setFullMethodName("google.storage.v2.Storage/UpdateObject") .setRequestMarshaller(ProtoUtils.marshaller(UpdateObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -246,6 +263,7 @@ public class GrpcStorageStub extends StorageStub { 
.setRequestMarshaller(ProtoUtils.marshaller(WriteObjectRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(WriteObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -257,6 +275,7 @@ public class GrpcStorageStub extends StorageStub { ProtoUtils.marshaller(BidiWriteObjectRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(BidiWriteObjectResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -267,6 +286,7 @@ public class GrpcStorageStub extends StorageStub { .setRequestMarshaller(ProtoUtils.marshaller(ListObjectsRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(ListObjectsResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -277,6 +297,7 @@ public class GrpcStorageStub extends StorageStub { .setRequestMarshaller( ProtoUtils.marshaller(RewriteObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(RewriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -288,6 +309,7 @@ public class GrpcStorageStub extends StorageStub { ProtoUtils.marshaller(StartResumableWriteRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(StartResumableWriteResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -299,6 +321,7 @@ public class GrpcStorageStub extends StorageStub { ProtoUtils.marshaller(QueryWriteStatusRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(QueryWriteStatusResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor moveObjectMethodDescriptor = @@ -307,6 +330,7 @@ public class GrpcStorageStub extends StorageStub { 
.setFullMethodName("google.storage.v2.Storage/MoveObject") .setRequestMarshaller(ProtoUtils.marshaller(MoveObjectRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Object.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private final UnaryCallable deleteBucketCallable; diff --git a/generation_config.yaml b/generation_config.yaml index 0f721bdad1..a063b9170d 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,5 +1,5 @@ -gapic_generator_version: 2.61.0 -googleapis_commitish: d869249c5d3da60af39dc7968140468ef7d1fdd6 +gapic_generator_version: 2.62.0 +googleapis_commitish: 3b2a2ae91db23a9c879b2b725d6a5de6bd64a800 libraries_bom_version: 26.66.0 libraries: - api_shortname: storage diff --git a/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java b/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java index 78876be707..19f8d1916a 100644 --- a/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java +++ b/google-cloud-storage-control/src/main/java/com/google/storage/control/v2/stub/GrpcStorageControlStub.java @@ -88,6 +88,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setFullMethodName("google.storage.control.v2.StorageControl/CreateFolder") .setRequestMarshaller(ProtoUtils.marshaller(CreateFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Folder.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor deleteFolderMethodDescriptor = @@ -96,6 +97,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setFullMethodName("google.storage.control.v2.StorageControl/DeleteFolder") .setRequestMarshaller(ProtoUtils.marshaller(DeleteFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + 
.setSampledToLocalTracing(true) .build(); private static final MethodDescriptor getFolderMethodDescriptor = @@ -104,6 +106,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setFullMethodName("google.storage.control.v2.StorageControl/GetFolder") .setRequestMarshaller(ProtoUtils.marshaller(GetFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Folder.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -114,6 +117,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller(ProtoUtils.marshaller(ListFoldersRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(ListFoldersResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -123,6 +127,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setFullMethodName("google.storage.control.v2.StorageControl/RenameFolder") .setRequestMarshaller(ProtoUtils.marshaller(RenameFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -133,6 +138,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(GetStorageLayoutRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(StorageLayout.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -143,6 +149,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(CreateManagedFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(ManagedFolder.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -153,6 +160,7 @@ public class GrpcStorageControlStub 
extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(DeleteManagedFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Empty.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -163,6 +171,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(GetManagedFolderRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(ManagedFolder.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -174,6 +183,7 @@ public class GrpcStorageControlStub extends StorageControlStub { ProtoUtils.marshaller(ListManagedFoldersRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(ListManagedFoldersResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -184,6 +194,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(CreateAnywhereCacheRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -194,6 +205,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(UpdateAnywhereCacheRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(Operation.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -204,6 +216,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(DisableAnywhereCacheRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -214,6 +227,7 @@ 
public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(PauseAnywhereCacheRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -224,6 +238,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(ResumeAnywhereCacheRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -234,6 +249,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(GetAnywhereCacheRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(AnywhereCache.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -245,6 +261,7 @@ public class GrpcStorageControlStub extends StorageControlStub { ProtoUtils.marshaller(ListAnywhereCachesRequest.getDefaultInstance())) .setResponseMarshaller( ProtoUtils.marshaller(ListAnywhereCachesResponse.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -256,6 +273,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(GetProjectIntelligenceConfigRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -268,6 +286,7 @@ public class GrpcStorageControlStub extends StorageControlStub { ProtoUtils.marshaller( UpdateProjectIntelligenceConfigRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) 
.build(); private static final MethodDescriptor @@ -279,6 +298,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(GetFolderIntelligenceConfigRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor @@ -290,6 +310,7 @@ public class GrpcStorageControlStub extends StorageControlStub { .setRequestMarshaller( ProtoUtils.marshaller(UpdateFolderIntelligenceConfigRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor< @@ -304,6 +325,7 @@ public class GrpcStorageControlStub extends StorageControlStub { ProtoUtils.marshaller( GetOrganizationIntelligenceConfigRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private static final MethodDescriptor< @@ -318,6 +340,7 @@ public class GrpcStorageControlStub extends StorageControlStub { ProtoUtils.marshaller( UpdateOrganizationIntelligenceConfigRequest.getDefaultInstance())) .setResponseMarshaller(ProtoUtils.marshaller(IntelligenceConfig.getDefaultInstance())) + .setSampledToLocalTracing(true) .build(); private final UnaryCallable createFolderCallable; From 7bd73d3104f5c47299f5a9c8d68dec82933eeda5 Mon Sep 17 00:00:00 2001 From: BenWhitehead Date: Wed, 20 Aug 2025 17:08:46 -0400 Subject: [PATCH 11/16] chore: merge train for nonblocking-appendable/** branches (#3231) ## Description feat: *breaking behavior* rewrite Storage.blobAppendableUpload to be non-blocking and have improved throughput (#3231) Rewrite internals of BlobAppendableUpload to provide non-blocking write calls, and it take advantage of grpc async message handling. 
When `AppendableUploadWriteableByteChannel#write(ByteBuffer)` is called, an attempt will be made to enqueue the bytes in the outbound queue to GCS. If there is only enough room to partially consume the bytes provided in the `ByteBuffer` the write call will return early specifying the number of bytes actually consumed. As acknowledgements come in from GCS, enqueued messages will be evicted freeing space in the outbound queue. Thereby allowing more bytes to be consumed and enqueued. Given appendable objects are still in private preview I can't quote any metrics here, however preliminary benchmarking of several million objects across a range of sizes shows across-the-board throughput improvements. Because the channel's write call is now non-blocking, if you want to block your application until the full buffer is consumed some new helper methods have been added in StorageChannelUtils to provide blocking behavior. A new method `MinFlushSizeFlushPolicy#withMaxPendingBytes(long)` has been added to allow limiting the number of pending outbound bytes. The default value is 16MiB, but can be configured lower if necessary. ## Release Notes BEGIN_COMMIT_OVERRIDE BEGIN_NESTED_COMMIT feat: *breaking behavior* rewrite Storage.blobAppendableUpload to be non-blocking and have improved throughput (#3231) END_NESTED_COMMIT BEGIN_NESTED_COMMIT feat: add StorageChannelUtils to provide helper methods to perform blocking read/write to/from non-blocking channels (#3231) END_NESTED_COMMIT BEGIN_NESTED_COMMIT feat: add MinFlushSizeFlushPolicy#withMaxPendingBytes(long) (#3231) END_NESTED_COMMIT BEGIN_NESTED_COMMIT fix: update BlobAppendableUploadConfig and FlushPolicy.MinFlushSizeFlushPolicy to default to 4MiB minFlushSize and 16MiB maxPendingBytes (#3249) END_NESTED_COMMIT BEGIN_NESTED_COMMIT fix: make FlushPolicy${Min,Max}FlushSizeFlushPolicy constructors private (#3217) END_NESTED_COMMIT END_COMMIT_OVERRIDE ## Sub PRs This PR is made up of the following PRs, in sequence 1. #3217 2. 
#3218 3. #3219 4. #3220 5. #3221 6. #3222 7. #3223 8. #3224 9. #3225 10. #3226 11. #3227 12. #3228 13. #3229 14. #3230 15. #3235 16. #3236 17. #3241 18. #3242 19. #3246 20. #3248 21. #3249 22. #3252 --- .../clirr-ignored-differences.xml | 23 + ...pendableUnbufferedWritableByteChannel.java | 195 ++ .../storage/BidiBlobWriteSessionConfig.java | 3 +- .../cloud/storage/BidiResumableWrite.java | 70 - .../google/cloud/storage/BidiUploadState.java | 1110 ++++++++ .../storage/BidiUploadStreamingStream.java | 604 +++++ .../cloud/storage/BlobAppendableUpload.java | 43 +- .../storage/BlobAppendableUploadConfig.java | 198 +- .../storage/BlobAppendableUploadImpl.java | 15 +- .../com/google/cloud/storage/Buffers.java | 17 +- .../cloud/storage/ByteSizeConstants.java | 1 + .../google/cloud/storage/ChannelSession.java | 11 +- .../DefaultBufferedWritableByteChannel.java | 25 +- .../com/google/cloud/storage/FlushPolicy.java | 75 +- ...ufferedAppendableWriteableByteChannel.java | 783 ------ ...apicBidiUnbufferedWritableByteChannel.java | 18 +- ...BidiWritableByteChannelSessionBuilder.java | 115 - ...edChunkedResumableWritableByteChannel.java | 24 +- ...icUnbufferedDirectWritableByteChannel.java | 8 +- ...zeOnCloseResumableWritableByteChannel.java | 12 +- .../google/cloud/storage/GrpcStorageImpl.java | 39 +- .../storage/JsonResumableSessionPutTask.java | 32 +- .../JsonResumableSessionQueryTask.java | 17 +- .../MinFlushBufferedWritableByteChannel.java | 57 +- .../storage/ObjectReadSessionStream.java | 2 +- .../cloud/storage/OtelStorageDecorator.java | 17 +- .../google/cloud/storage/RetryContext.java | 2 +- .../cloud/storage/RewindableContent.java | 13 + .../cloud/storage/StorageChannelUtils.java | 79 + .../cloud/storage/StorageDataClient.java | 4 +- .../cloud/storage/StorageV2ProtoUtils.java | 135 +- ...enario.java => UploadFailureScenario.java} | 34 +- ...BidiUploadStreamingStreamPropertyTest.java | 459 ++++ .../google/cloud/storage/BidiUploadTest.java | 2264 +++++++++++++++++ 
.../cloud/storage/BidiUploadTestUtils.java | 125 + .../com/google/cloud/storage/BuffersTest.java | 88 - ...efaultBufferedWritableByteChannelTest.java | 212 ++ ...apicUnbufferedReadableByteChannelTest.java | 15 +- .../storage/ITAppendableUploadFakeTest.java | 1502 ++++------- .../cloud/storage/ITAppendableUploadTest.java | 300 ++- ...ableUnbufferedWritableByteChannelTest.java | 90 + .../storage/ITObjectReadSessionFakeTest.java | 6 +- .../storage/ITObjectReadSessionTest.java | 29 +- ...nFlushBufferedWritableByteChannelTest.java | 141 + .../PackagePrivateMethodWorkarounds.java | 5 + .../cloud/storage/RetryContextTest.java | 4 +- .../storage/StorageChannelUtilsTest.java | 270 ++ .../com/google/cloud/storage/TestUtils.java | 27 + ...st.java => UploadFailureScenarioTest.java} | 19 +- .../storage/it/ChecksummedTestContent.java | 27 + .../GrpcPlainRequestLoggingInterceptor.java | 106 +- ...rpcPlainRequestLoggingInterceptorTest.java | 6 +- .../storage/it/runner/registry/TestBench.java | 14 + .../src/test/resources/logback.xml | 1 + 54 files changed, 6906 insertions(+), 2585 deletions(-) create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java delete mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedAppendableWriteableByteChannel.java create mode 100644 google-cloud-storage/src/main/java/com/google/cloud/storage/StorageChannelUtils.java rename google-cloud-storage/src/main/java/com/google/cloud/storage/{ResumableSessionFailureScenario.java => UploadFailureScenario.java} (93%) create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java create mode 100644 
google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java create mode 100644 google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java rename google-cloud-storage/src/test/java/com/google/cloud/storage/{ResumableSessionFailureScenarioTest.java => UploadFailureScenarioTest.java} (94%) diff --git a/google-cloud-storage/clirr-ignored-differences.xml b/google-cloud-storage/clirr-ignored-differences.xml index 0af49c35a7..bdc578c4c1 100644 --- a/google-cloud-storage/clirr-ignored-differences.xml +++ b/google-cloud-storage/clirr-ignored-differences.xml @@ -161,4 +161,27 @@ com.google.cloud.storage.BucketInfo$Builder setGoogleManagedEncryptionEnforcementConfig(com.google.cloud.storage.BucketInfo$GoogleManagedEncryptionEnforcementConfig) + + + 7004 + com/google/cloud/storage/FlushPolicy$MinFlushSizeFlushPolicy + FlushPolicy$MinFlushSizeFlushPolicy(int) + + + 7009 + com/google/cloud/storage/FlushPolicy$MinFlushSizeFlushPolicy + FlushPolicy$MinFlushSizeFlushPolicy(int) + + + 7009 + com/google/cloud/storage/FlushPolicy$MaxFlushSizeFlushPolicy + FlushPolicy$MaxFlushSizeFlushPolicy(int) + + + + 7012 + com/google/cloud/storage/BlobAppendableUpload$AppendableUploadWriteableByteChannel + int write(java.nio.ByteBuffer) + + diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java new file mode 100644 index 0000000000..7f105d758b --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java @@ -0,0 +1,195 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed 
under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +final class BidiAppendableUnbufferedWritableByteChannel implements UnbufferedWritableByteChannel { + + private final BidiUploadStreamingStream stream; + private final ChunkSegmenter chunkSegmenter; + + private boolean open; + private long writeOffset; + private volatile boolean nextWriteShouldFinalize; + private boolean writeCalledAtLeastOnce; + + /** If write throws an error, don't attempt to finalize things when {@link #close()} is called. 
*/ + private boolean writeThrewError; + + BidiAppendableUnbufferedWritableByteChannel( + BidiUploadStreamingStream stream, ChunkSegmenter chunkSegmenter, long writeOffset) { + this.stream = stream; + this.chunkSegmenter = chunkSegmenter; + this.open = true; + this.writeOffset = writeOffset; + this.nextWriteShouldFinalize = false; + this.writeThrewError = false; + } + + @Override + public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + return internalWrite(srcs, srcsOffset, srcsLength); + } + + @Override + public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { + long totalRemaining = Buffers.totalRemaining(srcs, offset, length); + // internalWrite is non-blocking, but close is blocking. + // loop here to ensure all the bytes we need flush are enqueued before we transition to trying + // to close. + long written = 0; + do { + written += internalWrite(srcs, offset, length); + } while (written < totalRemaining); + close(); + return written; + } + + @Override + public boolean isOpen() { + return open; + } + + @Override + public void close() throws IOException { + if (!open) { + return; + } + try { + if (writeThrewError) { + return; + } + + if (!writeCalledAtLeastOnce) { + stream.flush(); + } + if (nextWriteShouldFinalize) { + //noinspection StatementWithEmptyBody + while (!stream.finishWrite(writeOffset)) {} + } else { + //noinspection StatementWithEmptyBody + while (!stream.closeStream(writeOffset)) {} + } + + awaitResultFuture(); + } finally { + stream.sendClose(); + open = false; + } + } + + public void nextWriteShouldFinalize() { + this.nextWriteShouldFinalize = true; + } + + private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { + if (!open) { + throw new ClosedChannelException(); + } + // error early. 
if the result future is already failed, await it to throw the error + if (stream.getResultFuture().isDone()) { + awaitResultFuture(); + return 0; + } + writeCalledAtLeastOnce = true; + + long availableCapacity = stream.availableCapacity(); + if (availableCapacity <= 0) { + return 0; + } + RewindableContent rewindableContent = RewindableContent.of(srcs, srcsOffset, srcsLength); + long totalBufferRemaining = rewindableContent.getLength(); + + ChunkSegment[] data = chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength, true); + if (data.length == 0) { + return 0; + } + // we consumed some bytes from srcs, flag our content as dirty since we aren't writing + // those bytes to implicitly flag as dirty. + rewindableContent.flagDirty(); + + long bytesConsumed = 0; + for (int i = 0, len = data.length, lastIdx = len - 1; i < len; i++) { + ChunkSegment datum = data[i]; + int size = datum.getB().size(); + boolean appended; + if (i < lastIdx) { + appended = stream.append(datum); + } else if (i == lastIdx && nextWriteShouldFinalize) { + appended = stream.appendAndFinalize(datum); + } else { + appended = stream.appendAndFlush(datum); + } + if (appended) { + bytesConsumed += size; + writeOffset += size; + } else { + // if we weren't able to trigger a flush by reaching the end of the array and calling + // appendAndFlush, explicitly call flush here so that some progress can be made. + // we prefer appendAndFlush so a separate message is not needed, but an extra message + // in order to make progress and free buffer space is better than ending up in a live-lock. 
+ stream.flush(); + break; + } + } + + if (bytesConsumed != totalBufferRemaining) { + rewindableContent.rewindTo(bytesConsumed); + } + + return bytesConsumed; + } + + private void awaitResultFuture() throws IOException { + try { + stream.getResultFuture().get(10_717, TimeUnit.MILLISECONDS); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + InterruptedIOException ioe = new InterruptedIOException(); + ioe.initCause(e); + writeThrewError = true; + throw ioe; + } catch (ExecutionException e) { + BaseServiceException coalesce = StorageException.coalesce(e.getCause()); + String message = coalesce.getMessage(); + String ioExceptionMessage = message; + // if the failure is an upload scenario we detect client side, it's message will be + // verbose. To avoid duplication, select the first line only for the io exception + int firstNewLineIndex = message != null ? message.indexOf('\n') : -1; + if (firstNewLineIndex > -1) { + ioExceptionMessage = message.substring(0, firstNewLineIndex); + } + IOException ioException = new IOException(ioExceptionMessage, coalesce); + // ioException.addSuppressed(new AsyncStorageTaskException()); + writeThrewError = true; + throw ioException; + } catch (TimeoutException e) { + writeThrewError = true; + throw new IOException(e); + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java index b0e5ce639d..5cd7a8a650 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiBlobWriteSessionConfig.java @@ -111,7 +111,8 @@ public WritableByteChannelSession writeSession( GrpcStorageImpl grpc = (GrpcStorageImpl) s; GrpcCallContext grpcCallContext = opts.grpcMetadataMapper().apply(GrpcCallContext.createDefault()); - BidiWriteObjectRequest req = 
grpc.getBidiWriteObjectRequest(info, opts); + BidiWriteObjectRequest req = + grpc.getBidiWriteObjectRequest(info, opts, false); ApiFuture startResumableWrite = grpc.startResumableWrite(grpcCallContext, req, opts); diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java index 18e7cfff96..0f5a378f80 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiResumableWrite.java @@ -94,73 +94,3 @@ static BidiResumableWrite identity(BidiResumableWrite w) { return w; } } - -final class BidiAppendableWrite implements BidiWriteObjectRequestBuilderFactory { - - private final BidiWriteObjectRequest req; - - public BidiAppendableWrite(BidiWriteObjectRequest req) { - this(req, false); - } - - public BidiAppendableWrite(BidiWriteObjectRequest req, boolean takeOver) { - if (takeOver) { - this.req = req; - } else { - req = - req.toBuilder() - .setWriteObjectSpec(req.getWriteObjectSpec().toBuilder().setAppendable(true).build()) - .build(); - this.req = req; - } - } - - public BidiWriteObjectRequest getReq() { - return req; - } - - @Override - public BidiWriteObjectRequest.Builder newBuilder() { - return req.toBuilder(); - } - - @Override - public @Nullable String bucketName() { - if (req.hasWriteObjectSpec() && req.getWriteObjectSpec().hasResource()) { - return req.getWriteObjectSpec().getResource().getBucket(); - } else if (req.hasAppendObjectSpec()) { - return req.getAppendObjectSpec().getBucket(); - } - return null; - } - - @Override - public String toString() { - return "BidiAppendableWrite{" + "req=" + fmtProto(req) + '}'; - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - if (!(o instanceof BidiAppendableWrite)) { - return false; - } - BidiAppendableWrite BidiAppendableWrite = (BidiAppendableWrite) o; 
- return Objects.equals(req, BidiAppendableWrite.getReq()); - } - - @Override - public int hashCode() { - return Objects.hash(req); - } - - /** - * Helper function which is more specific than {@link Function#identity()}. Constraining the input - * and output to be exactly {@link BidiAppendableWrite}. - */ - static BidiAppendableWrite identity(BidiAppendableWrite w) { - return w; - } -} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java new file mode 100644 index 0000000000..151ff402cb --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java @@ -0,0 +1,1110 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.cloud.storage.Utils.ifNonNull; +import static com.google.common.base.Preconditions.checkArgument; +import static com.google.common.base.Preconditions.checkState; +import static java.util.Objects.requireNonNull; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.ImmutableSet; +import com.google.protobuf.ByteString; +import com.google.protobuf.Descriptors.OneofDescriptor; +import com.google.storage.v2.AppendObjectSpec; +import com.google.storage.v2.BidiWriteHandle; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.WriteObjectSpec; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.Marker; +import org.slf4j.MarkerFactory; + +@SuppressWarnings("LoggingSimilarMessage") +abstract class BidiUploadState { + private 
static final Logger LOGGER = LoggerFactory.getLogger(BidiUploadState.class); + private static final Marker TRACE_ENTER = MarkerFactory.getMarker("enter"); + private static final Marker TRACE_EXIT = MarkerFactory.getMarker("exit"); + + static final OneofDescriptor FIRST_MESSAGE_DESCRIPTOR = + BidiWriteObjectRequest.getDescriptor().getOneofs().stream() + .filter(d -> "first_message".equalsIgnoreCase(d.getName())) + .findFirst() + .orElseThrow( + () -> new IllegalStateException("BidiWriteObject.first_message oneof not found")); + + // seal this class to extension + private BidiUploadState() {} + + @VisibleForTesting + BidiUploadState(String testName) { + // some runtime enforcement that this constructor is only called from a test + // if we had java9+ we could seal this all the way without this hack + StackTraceElement[] stackTrace = Thread.currentThread().getStackTrace(); + boolean isJunitTest = + Arrays.stream(stackTrace).anyMatch(ste -> ste.getClassName().startsWith("org.junit")); + + checkState(isJunitTest, "not a junit test", testName); + } + + protected final StorageException err( + UploadFailureScenario scenario, BidiWriteObjectResponse response) { + BidiWriteObjectRequest t = peekLast(); + GrpcCallContext ctx = enqueueFirstMessageAndGetGrpcCallContext(); + return scenario.toStorageException(Utils.nullSafeList(t), response, ctx, null); + } + + @Nullable Crc32cLengthKnown getCumulativeCrc32c() { + return unimplemented(); + } + + long getTotalSentBytes() { + return unimplemented(); + } + + long getConfirmedBytes() { + return unimplemented(); + } + + long availableCapacity() { + return unimplemented(); + } + + boolean offer(ChunkSegmenter.@NonNull ChunkSegment data) { + return unimplemented(); + } + + boolean finalFlush(long totalLength) { + return unimplemented(); + } + + boolean offer(@NonNull BidiWriteObjectRequest e) { + return unimplemented(); + } + + void updateStateFromResponse(BidiWriteObjectResponse response) { + unimplemented(); + } + + @NonNull 
GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return unimplemented(); + } + + void sendVia(Consumer consumer) { + unimplemented(); + } + + void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError redirect) { + unimplemented(); + } + + void terminalError() { + unimplemented(); + } + + void pendingRetry() { + unimplemented(); + } + + void retrying() { + unimplemented(); + } + + @Nullable BidiWriteObjectRequest peekLast() { + return unimplemented(); + } + + boolean isFinalizing() { + return unimplemented(); + } + + ApiFuture beginReconciliation() { + return unimplemented(); + } + + static AppendableUploadState appendableNew( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + checkArgument( + initial.hasWriteObjectSpec(), "provided initial request did not contain a WriteObjectSpec"); + WriteObjectSpec spec = initial.getWriteObjectSpec(); + return new NewAppendableUploadState( + initial, spec, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + + static AppendableUploadState appendableTakeover( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + checkArgument( + initial.hasAppendObjectSpec(), + "provided initial request did not contain a AppendableObjectSpec"); + AppendObjectSpec spec = initial.getAppendObjectSpec(); + return new TakeoverAppendableUploadState( + initial, spec, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + + private static ImmutableMap> makeHeadersMap( + Stream xGoogRequestParamsEntries) { + return ImmutableMap.of( + "x-goog-request-params", + ImmutableList.of( + xGoogRequestParamsEntries.filter(Objects::nonNull).collect(Collectors.joining("&")))); + } + + /** + * Create a single BidiWriteObjectRequest consisting of the same semantic meaning as if doing + * first then second. 
+ * + * @throws IllegalArgumentException if both first and second have checksummedData + */ + static BidiWriteObjectRequest concatenate( + BidiWriteObjectRequest first, BidiWriteObjectRequest second) { + checkArgument( + !(first.hasChecksummedData() && second.hasChecksummedData()), + "attempting to merge two requests that both specify checksummed_data"); + BidiWriteObjectRequest.Builder b = first.toBuilder().mergeFrom(second); + long lwo = first.getWriteOffset(); + long rwo = second.getWriteOffset(); + if (first.hasChecksummedData()) { + int size = first.getChecksummedData().getContent().size(); + checkArgument( + lwo + size == rwo, + "(leftWriteOffset + size == rightWriteOffset) (%s + %s == %s)", + lwo, + size, + rwo); + b.setWriteOffset(lwo); + } else { + b.setWriteOffset(rwo); + } + + // finish_write implies flush & state_lookup. dedupe to avoid an extra incremental message + if (second.getFinishWrite() && (first.getFlush() || first.getStateLookup())) { + b.clearFlush().clearStateLookup(); + } + return b.build(); + } + + @Nullable StorageException onResponse(BidiWriteObjectResponse response) { + return unimplemented(); + } + + State getState() { + return unimplemented(); + } + + @VisibleForTesting + @Nullable BidiWriteObjectRequest peekFirst() { + return unimplemented(); + } + + SettableApiFuture getResultFuture() { + return unimplemented(); + } + + void awaitState(State... state) throws InterruptedException { + unimplemented(); + } + + public void awaitTakeoverStateReconciliation(Runnable restart) { + unimplemented(); + } + + enum State { + INITIALIZING, + TAKEOVER, + RUNNING, + PENDING_RETRY, + RETRYING, + TERMINAL_SUCCESS, + TERMINAL_ERROR; + + private static final State[] allNonTerminal = + new State[] {INITIALIZING, TAKEOVER, RUNNING, PENDING_RETRY, RETRYING}; + + boolean in(State... 
states) { + for (State state : states) { + if (state == this) { + return true; + } + } + return false; + } + } + + private static T unimplemented() { + throw new IllegalStateException("not implemented"); + } + + abstract static class BaseUploadState extends BidiUploadState { + + protected final BidiWriteObjectRequest initial; + protected final Supplier baseCallContext; + protected final ReentrantLock lock; + protected final Condition stateUpdated; + + /** The maximum number of bytes allowed to be enqueued in {@link #queue} across all messages. */ + protected final long maxBytes; + + protected final ArrayList queue; + protected final SettableApiFuture resultFuture; + + /** The total number of bytes currently enqueued in {@link #queue} */ + private long enqueuedBytes; + + /** A value in the range of {@code -1 <= lastSentRequest && lastSentRequest < queue.size()} */ + @VisibleForTesting int lastSentRequestIndex; + + /** The minimum offset of bytes for those pending messages. */ + protected long minByteOffset; + + /** + * The number of bytes that have been "sent". This might also be named something like + * cumulativeWriteOffset. + */ + protected long totalSentBytes; + + protected @Nullable Crc32cLengthKnown cumulativeCrc32c; + + /** + * Initially {@code -1} to signify the upload does not exist at all in the server, when the + * server responds successfully this will be updated to a value >= 0. 
+ */ + protected long confirmedBytes; + + protected long generation; + protected @Nullable BidiWriteHandle writeHandle; + protected @Nullable String routingToken; + protected @NonNull State state; + protected @MonotonicNonNull BidiWriteObjectResponse lastResponseWithResource; + protected @Nullable State stateToReturnToAfterRetry; + protected boolean finalFlushSignaled; + protected boolean finalFlushSent; + protected boolean finishWriteSignaled; + protected boolean finishWriteSent; + protected @MonotonicNonNull OpenArguments lastOpenArguments; + protected @Nullable SettableApiFuture pendingReconciliation; + + private BaseUploadState( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c, + State startingState) { + this.initial = initial; + this.baseCallContext = baseCallContext; + this.resultFuture = resultFuture; + this.cumulativeCrc32c = initialCrc32c; + this.maxBytes = maxBytes; + this.queue = new ArrayList<>(); + this.enqueuedBytes = 0; + this.lock = new ReentrantLock(); + this.stateUpdated = lock.newCondition(); + this.lastSentRequestIndex = -1; + this.minByteOffset = 0; + this.totalSentBytes = 0; + this.confirmedBytes = -1; + this.state = startingState; + } + + @Override + final State getState() { + lock.lock(); + try { + return state; + } finally { + lock.unlock(); + } + } + + @Override + final @Nullable Crc32cLengthKnown getCumulativeCrc32c() { + lock.lock(); + try { + return cumulativeCrc32c; + } finally { + lock.unlock(); + } + } + + @Override + final long getTotalSentBytes() { + lock.lock(); + try { + return totalSentBytes; + } finally { + lock.unlock(); + } + } + + @Override + final long getConfirmedBytes() { + lock.lock(); + try { + return confirmedBytes; + } finally { + lock.unlock(); + } + } + + @Override + final long availableCapacity() { + lock.lock(); + try { + return maxBytes - enqueuedBytes; + } finally { + lock.unlock(); + } + } + + @Override + 
final boolean offer(ChunkSegmenter.@NonNull ChunkSegment datum) { + lock.lock(); + try { + requireNonNull(datum, "data must be non null"); + validateCurrentStateIsOneOf(State.allNonTerminal); + checkNotFinalizing(); + ByteString b = datum.getB(); + long availableCapacity = availableCapacity(); + int size = b.size(); + if (size <= availableCapacity) { + Crc32cLengthKnown crc32c = datum.getCrc32c(); + ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); + if (crc32c != null) { + checksummedData.setCrc32C(crc32c.getValue()); + } + ChecksummedData built = checksummedData.build(); + boolean offered = + internalOffer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(totalSentBytes) + .setChecksummedData(built) + .build()); + if (offered) { + cumulativeCrc32c = crc32cConcat(crc32c); + } + return offered; + } + return false; + } finally { + lock.unlock(); + } + } + + @Override + public boolean finalFlush(long totalLength) { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + checkNotFinalizing(); + checkArgument( + totalLength == totalSentBytes, + "(totalLength == totalSentBytes) (%s == %s)", + totalLength, + totalSentBytes); + + BidiWriteObjectRequest flush = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(totalLength) + .setFlush(true) + .setStateLookup(true) + .build(); + + BidiWriteObjectRequest currentLast = peekLast(); + boolean equals = flush.equals(currentLast); + if (equals && finalFlushSignaled) { + return true; + } else if (equals && lastSentRequestIndex == queue.size() - 1) { + finalFlushSignaled = true; + finalFlushSent = true; + return true; + } + + boolean offered = internalOffer(flush); + if (offered) { + finalFlushSignaled = true; + } + return offered; + } finally { + lock.unlock(); + } + } + + @Override + final boolean offer(@NonNull BidiWriteObjectRequest e) { + lock.lock(); + try { + requireNonNull(e, "e must be non null"); + validateCurrentStateIsOneOf(State.allNonTerminal); + if 
(e.hasChecksummedData()) { + checkNotFinalizing(); + } + int size = e.getChecksummedData().getContent().size(); + long availableCapacity = availableCapacity(); + if (size > availableCapacity) { + return false; + } + + checkArgument( + e.hasOneof(FIRST_MESSAGE_DESCRIPTOR) || e.getWriteOffset() == totalSentBytes, + "(write_offset == totalSentBytes) (%s == %s)", + e.getWriteOffset(), + totalSentBytes); + return internalOffer(e); + } finally { + lock.unlock(); + } + } + + @Override + final void updateStateFromResponse(BidiWriteObjectResponse response) { + lock.lock(); + try { + long persistedSize = -1; + if (response.hasPersistedSize()) { + persistedSize = response.getPersistedSize(); + } else if (response.hasResource()) { + persistedSize = response.getResource().getSize(); + lastResponseWithResource = response; + generation = lastResponseWithResource.getResource().getGeneration(); + } + checkState(persistedSize > -1, "persistedSize > -1 (%s > -1)", persistedSize); + checkArgument( + persistedSize >= confirmedBytes, + "(persistedSize >= confirmedBytes) (%s >= %s)", + response, + confirmedBytes); + validateCurrentStateIsOneOf( + State.INITIALIZING, State.TAKEOVER, State.RUNNING, State.RETRYING); + routingToken = null; + // todo: test more permutations where this might be true + // 1. retry, object not yet created + if (state == State.INITIALIZING) { + confirmedBytes = persistedSize; + totalSentBytes = Math.max(totalSentBytes, persistedSize); + } + if (state == State.INITIALIZING || state == State.RETRYING) { + transitionTo( + stateToReturnToAfterRetry != null ? 
stateToReturnToAfterRetry : State.RUNNING); + } + + boolean signalTerminalSuccess = false; + BidiWriteObjectRequest peek; + while ((peek = peekFirst()) != null) { + if (peek.hasChecksummedData()) { + int size = peek.getChecksummedData().getContent().size(); + long endOffset = peek.getWriteOffset() + size; + if (endOffset <= persistedSize) { + poll(); + confirmedBytes = endOffset; + enqueuedBytes -= size; + minByteOffset = peek.getWriteOffset(); + } else { + break; + } + } else if (peek.hasOneof(FIRST_MESSAGE_DESCRIPTOR)) { + poll(); + } else if (peek.getFlush()) { + if (finalFlushSent && persistedSize == totalSentBytes) { + confirmedBytes = persistedSize; + signalTerminalSuccess = true; + poll(); + } else if (persistedSize >= peek.getWriteOffset()) { + confirmedBytes = persistedSize; + poll(); + } else { + break; + } + } else if (peek.getFinishWrite()) { + checkState( + enqueuedBytes == 0, + "attempting to evict finish_write: true while bytes are still enqueued"); + if (response.hasResource() && persistedSize == totalSentBytes) { + confirmedBytes = persistedSize; + if (response.getResource().hasFinalizeTime()) { + signalTerminalSuccess = true; + poll(); + } else { + break; + } + } else { + break; + } + } else { + //noinspection DataFlowIssue + checkState(false, "peek = {%s}, response = {%s}", fmtProto(peek), fmtProto(response)); + } + } + + if (pendingReconciliation != null) { + pendingReconciliation.set(null); + pendingReconciliation = null; + } + + if (signalTerminalSuccess && lastResponseWithResource != null) { + BidiWriteObjectResponse.Builder b = lastResponseWithResource.toBuilder(); + b.getResourceBuilder().setSize(confirmedBytes); + b.getResourceBuilder().getChecksumsBuilder().clearMd5Hash().clearCrc32C(); + if (cumulativeCrc32c != null) { + b.getResourceBuilder().getChecksumsBuilder().setCrc32C(cumulativeCrc32c.getValue()); + } + BidiWriteObjectResponse updated = b.build(); + resultFuture.set(updated); + terminalSuccess(); + } else if 
(signalTerminalSuccess) { + checkState(false, "signalTerminalSuccess without prior resource response"); + } + } finally { + lock.unlock(); + } + } + + @Override + final void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError redirect) { + lock.lock(); + try { + validateCurrentStateIsOneOf( + State.INITIALIZING, State.RUNNING, State.PENDING_RETRY, State.RETRYING); + if (redirect.hasWriteHandle()) { + this.writeHandle = redirect.getWriteHandle(); + } + if (redirect.hasRoutingToken()) { + routingToken = redirect.getRoutingToken(); + } + if (redirect.hasGeneration()) { + if (generation > 0) { + checkState( + generation == redirect.getGeneration(), + "Generation changed: (generation == redirect.getGeneration()) (%s == %s)", + generation, + redirect.getGeneration()); + } + generation = redirect.getGeneration(); + } + } finally { + lock.unlock(); + } + } + + @Override + final void terminalError() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + transitionTo(State.TERMINAL_ERROR); + if (pendingReconciliation != null) { + pendingReconciliation.cancel(true); + } + stateUpdated.signalAll(); + } finally { + lock.unlock(); + } + } + + private void terminalSuccess() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + transitionTo(State.TERMINAL_SUCCESS); + stateUpdated.signalAll(); + } finally { + lock.unlock(); + } + } + + @Override + final void pendingRetry() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + stateToReturnToAfterRetry = state; + transitionTo(State.PENDING_RETRY); + } finally { + lock.unlock(); + } + } + + @Override + final void retrying() { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.PENDING_RETRY, State.INITIALIZING, State.TAKEOVER); + transitionTo(State.RETRYING); + lastSentRequestIndex = -1; + finishWriteSent = false; + } finally { + lock.unlock(); + } + } + + @Override + final boolean isFinalizing() { + lock.lock(); + try { + return 
finishWriteSignaled && finishWriteSent; + } finally { + lock.unlock(); + } + } + + @Override + ApiFuture beginReconciliation() { + lock.lock(); + try { + if (pendingReconciliation == null) { + pendingReconciliation = SettableApiFuture.create(); + } + return pendingReconciliation; + } finally { + lock.unlock(); + } + } + + @Override + final void sendVia(Consumer consumer) { + lock.lock(); + try { + validateCurrentStateIsOneOf( + State.INITIALIZING, State.RUNNING, State.RETRYING, State.TAKEOVER); + BidiWriteObjectRequest prev = null; + int i = lastSentRequestIndex + 1; + for (; i < queue.size(); i++) { + BidiWriteObjectRequest m = queue.get(i); + lastSentRequestIndex = i; + if (state == State.RETRYING) { + prev = m; + break; // if retrying only send the first message + } + + if (prev != null) { + // never compact bytes, purely for simplicity’s sake. ByteString won't copy when + // concatenating two values together, but there is a limit on how many bytes can be in + // an + // individual message, and it's much easier to not have to worry about all of that here. + // We're mainly wanting to ensure things like flush/finish are packed into the last data + // message, and the first data message is included with the initial request if no state + // reconciliation needs to take place. 
+ if (prev.hasChecksummedData() && m.hasChecksummedData()) { + consumer.accept(prev); + prev = m; + } else { + prev = concatenate(prev, m); + } + } else { + prev = m; + } + } + if (prev != null) { + if (prev.getFinishWrite()) { + finishWriteSent = true; + } else if (prev.getFlush() && prev.getStateLookup() && finalFlushSignaled) { + finalFlushSent = true; + } + consumer.accept(prev); + } + } finally { + lock.unlock(); + } + } + + private void prepend(BidiWriteObjectRequest e) { + queue.add(0, e); + enqueuedBytes = enqueuedBytes + e.getChecksummedData().getContent().size(); + } + + private void append(BidiWriteObjectRequest e) { + queue.add(e); + enqueuedBytes = enqueuedBytes + e.getChecksummedData().getContent().size(); + } + + @Override + final @Nullable BidiWriteObjectRequest peekLast() { + lock.lock(); + try { + int index = queue.size() - 1; + if (index < 0) { + return null; + } + return queue.get(index); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + @Override + final @Nullable BidiWriteObjectRequest peekFirst() { + lock.lock(); + try { + if (queue.isEmpty()) { + return null; + } + return queue.get(0); + } finally { + lock.unlock(); + } + } + + private void poll() { + BidiWriteObjectRequest remove = queue.remove(0); + if (remove != null) { + lastSentRequestIndex = Math.max(lastSentRequestIndex - 1, -1); + } + } + + protected final void transitionTo(State state) { + this.state = state; + stateUpdated.signalAll(); + } + + protected final void validateCurrentStateIsOneOf(State... allowed) { + checkState( + state.in(allowed), + "state mismatch. 
expected one of %s but is %s", + Arrays.toString(allowed), + state); + } + + private void checkNotFinalizing() { + checkState( + !finishWriteSignaled, + "Attempting to append bytes even though finalization has previously been signaled."); + } + + protected final boolean internalOffer(BidiWriteObjectRequest e) { + Consumer add = this::append; + if (e.hasOneof(FIRST_MESSAGE_DESCRIPTOR)) { + if (!queue.isEmpty() && queue.get(0).hasOneof(FIRST_MESSAGE_DESCRIPTOR)) { + poll(); // dequeue the existing first message + } + add = this::prepend; + } + if (e.getFinishWrite()) { + finishWriteSignaled = true; + } + + if (e.hasChecksummedData() && !finishWriteSignaled) { + ChecksummedData checksummedData = e.getChecksummedData(); + int size = checksummedData.getContent().size(); + if (size <= availableCapacity()) { + totalSentBytes += size; + add.accept(e); + return true; + } + return false; + } else { + add.accept(e); + return true; + } + } + + @Nullable + private Crc32cLengthKnown crc32cConcat(@Nullable Crc32cLengthKnown rhs) { + if (cumulativeCrc32c == null) { + return null; + } + requireNonNull(rhs, "rhs must be non null"); + return cumulativeCrc32c.concat(rhs); + } + + @Override + public SettableApiFuture getResultFuture() { + return resultFuture; + } + + @Override + void awaitState(State... 
anyOf) throws InterruptedException { + lock.lock(); + try { + ImmutableSet states = ImmutableSet.copyOf(anyOf); + while (!states.contains(this.state) && !stateUpdated.await(5, TimeUnit.MILLISECONDS)) { + if (resultFuture.isDone()) { + return; + } + } + } finally { + lock.unlock(); + } + } + + @Override + public void awaitTakeoverStateReconciliation(Runnable restart) { + try { + pendingRetry(); + restart.run(); + awaitState(State.RUNNING); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw StorageException.coalesce(e); + } + } + } + + abstract static class AppendableUploadState extends BaseUploadState { + + private AppendableUploadState( + BidiWriteObjectRequest initial, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c, + State startingState) { + super(initial, baseCallContext, maxBytes, resultFuture, initialCrc32c, startingState); + } + + protected abstract String getBucket(); + + protected abstract BidiWriteObjectRequest.Builder getBuilder(); + + @Override + public @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + super.lock.lock(); + try { + if (!state.in(State.INITIALIZING, State.RETRYING, State.TAKEOVER)) { + return lastOpenArguments.getCtx(); + } + ImmutableMap> xGoogRequestParams = + makeHeadersMap( + Stream.of( + "bucket=" + this.getBucket(), + "appendable=true", + routingToken != null ? 
"routing_token=" + routingToken : null)); + GrpcCallContext context = baseCallContext.get().withExtraHeaders(xGoogRequestParams); + + BidiWriteObjectRequest.Builder b = this.getBuilder(); + if (state == State.RETRYING) { + b.setStateLookup(true); + } + BidiWriteObjectRequest req = b.build(); + OpenArguments openArguments = new OpenArguments(req, context); + internalOffer(req); + lastOpenArguments = openArguments; + return openArguments.getCtx(); + } finally { + super.lock.unlock(); + } + } + + @Override + @Nullable StorageException onResponse(BidiWriteObjectResponse response) { + lock.lock(); + try { + validateCurrentStateIsOneOf(State.allNonTerminal); + + if (response.hasWriteHandle()) { + this.writeHandle = response.getWriteHandle(); + } + + boolean incremental = !response.hasResource(); + long persistedSize = -1; + if (response.hasPersistedSize()) { + persistedSize = response.getPersistedSize(); + } else if (response.hasResource()) { + persistedSize = response.getResource().getSize(); + } + checkState(persistedSize > -1, "persistedSize > -1 (%s > -1)", persistedSize); + if (state == State.TAKEOVER || stateToReturnToAfterRetry == State.TAKEOVER) { + totalSentBytes = persistedSize; + confirmedBytes = persistedSize; + if (response.hasResource() + && response.getResource().hasChecksums() + && response.getResource().getChecksums().hasCrc32C()) { + cumulativeCrc32c = + Crc32cValue.of(response.getResource().getChecksums().getCrc32C(), persistedSize); + } + updateStateFromResponse(response); + transitionTo(State.RUNNING); + return null; + } + + long totalSentBytes = getTotalSentBytes(); + long minWriteOffset = minByteOffset; + boolean finalizing = isFinalizing(); + + if (!finalizing && incremental) { + if (persistedSize == totalSentBytes) { + updateStateFromResponse(response); + } else if (persistedSize < totalSentBytes) { + updateStateFromResponse(response); + } else { + return err(UploadFailureScenario.SCENARIO_7, response); + } + } else if (finalizing && 
!incremental) { + if (persistedSize == totalSentBytes) { + updateStateFromResponse(response); + } else if (persistedSize < totalSentBytes) { + if (persistedSize > minWriteOffset) { + updateStateFromResponse(response); + } else if (lastResponseWithResource != null) { + return err(UploadFailureScenario.SCENARIO_4_1, response); + } + } else { + return err(UploadFailureScenario.SCENARIO_4_2, response); + } + } else if (!finalizing /* && !incremental*/) { + // generally the first response from the server + if (persistedSize <= totalSentBytes) { + updateStateFromResponse(response); + } else { + return err(UploadFailureScenario.SCENARIO_7, response); + } + } else /* (finalizing && incremental) */ { + // might happen if a `flush: true, state_lookup: true, finish_write: true` + if (persistedSize == totalSentBytes) { + updateStateFromResponse(response); + } else if (persistedSize < totalSentBytes) { + if (persistedSize > minWriteOffset) { + updateStateFromResponse(response); + } else if (lastResponseWithResource != null) { + return err(UploadFailureScenario.SCENARIO_3, response); + } + } else { + return err(UploadFailureScenario.SCENARIO_2, response); + } + } + + return null; + } finally { + lock.unlock(); + } + } + } + + static final class NewAppendableUploadState extends AppendableUploadState { + private final WriteObjectSpec spec; + + private NewAppendableUploadState( + BidiWriteObjectRequest initial, + WriteObjectSpec spec, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + super(initial, baseCallContext, maxBytes, resultFuture, initialCrc32c, State.INITIALIZING); + this.spec = spec; + } + + @Override + protected String getBucket() { + return spec.getResource().getBucket(); + } + + @Override + protected BidiWriteObjectRequest.Builder getBuilder() { + BidiWriteObjectRequest.Builder b = BidiWriteObjectRequest.newBuilder(); + if (confirmedBytes >= 0) { + checkState(generation > 0, "generation > 
0"); + + AppendObjectSpec.Builder aosb = + AppendObjectSpec.newBuilder() + .setBucket(spec.getResource().getBucket()) + .setObject(spec.getResource().getName()) + .setGeneration(generation); + if (spec.hasIfMetagenerationMatch()) { + aosb.setIfMetagenerationMatch(spec.getIfMetagenerationMatch()); + } + if (spec.hasIfMetagenerationNotMatch()) { + aosb.setIfMetagenerationNotMatch(spec.getIfMetagenerationMatch()); + } + ifNonNull(routingToken, aosb::setRoutingToken); + ifNonNull(writeHandle, aosb::setWriteHandle); + b.setAppendObjectSpec(aosb); + } else { + b.setWriteObjectSpec(spec); + } + return b; + } + } + + static final class TakeoverAppendableUploadState extends AppendableUploadState { + private final AppendObjectSpec spec; + + private TakeoverAppendableUploadState( + BidiWriteObjectRequest initial, + AppendObjectSpec spec, + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + super(initial, baseCallContext, maxBytes, resultFuture, initialCrc32c, State.TAKEOVER); + this.spec = spec; + } + + @Override + protected String getBucket() { + return spec.getBucket(); + } + + @Override + protected BidiWriteObjectRequest.Builder getBuilder() { + AppendObjectSpec.Builder aosb = spec.toBuilder(); + ifNonNull(routingToken, aosb::setRoutingToken); + ifNonNull(writeHandle, aosb::setWriteHandle); + return BidiWriteObjectRequest.newBuilder().setAppendObjectSpec(aosb); + } + } + + static final class OpenArguments { + + private final BidiWriteObjectRequest req; + private final GrpcCallContext ctx; + + private OpenArguments(BidiWriteObjectRequest req, GrpcCallContext ctx) { + this.req = req; + this.ctx = ctx; + } + + public BidiWriteObjectRequest getReq() { + return req; + } + + public GrpcCallContext getCtx() { + return ctx; + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java new file mode 100644 index 0000000000..3cdfb76e2e --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java @@ -0,0 +1,604 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.common.base.Preconditions.checkState; + +import com.google.api.core.ApiFuture; +import com.google.api.core.ApiFutureCallback; +import com.google.api.core.ApiFutures; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.BaseServiceException; +import com.google.cloud.storage.BidiUploadState.State; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.RetryContext.OnFailure; +import com.google.cloud.storage.RetryContext.OnSuccess; +import com.google.common.annotations.VisibleForTesting; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ObjectChecksums; +import 
java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; +import org.checkerframework.checker.nullness.qual.EnsuresNonNull; +import org.checkerframework.checker.nullness.qual.MonotonicNonNull; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.checkerframework.checker.nullness.qual.RequiresNonNull; + +/** + * A class that helps tie together a {@link BidiUploadState}, {@link RetryContext} and underlying + * gRPC bidi stream. + * + *

This class helps transparently handle retries in the event an error is observed, and will + * handle redirect(s) if they occur, all without the need for the caller of this class to know about + * those things and the state need to worry about how retries will happen. + */ +final class BidiUploadStreamingStream { + + private final BidiUploadState state; + private final BidiStreamingCallable write; + // private final UnaryCallable get; + private final ScheduledExecutorService executor; + private final RetryContext retryContext; + private final OnSuccess onSuccess; + private final OnFailure onFailure; + private final ReentrantLock lock; + private final int maxRedirectsAllowed; + private final AtomicInteger redirectCounter; + + private volatile @Nullable StreamTuple stream; + private volatile @Nullable ApiFuture pendingReconciliation; + + BidiUploadStreamingStream( + BidiUploadState state, + ScheduledExecutorService executor, + BidiStreamingCallable write, + int maxRedirectsAllowed, + RetryContext retryContext) { + this.state = state; + this.executor = executor; + this.write = write; + this.lock = new ReentrantLock(); + this.retryContext = new StreamRetryContextDecorator(retryContext, lock, this::reset); + this.onSuccess = this::restart; + this.onFailure = + t -> { + SettableApiFuture resultFuture = state.getResultFuture(); + if (!resultFuture.isDone()) { + this.state.terminalError(); + BaseServiceException coalesced = StorageException.coalesce(t); + resultFuture.setException(coalesced); + } + }; + this.maxRedirectsAllowed = maxRedirectsAllowed; + this.redirectCounter = new AtomicInteger(); + } + + public ApiFuture getResultFuture() { + return state.getResultFuture(); + } + + public boolean append(ChunkSegmenter.@NonNull ChunkSegment data) { + lock.lock(); + try { + boolean offered = state.offer(data); + if (offered) { + internalSend(); + } + return offered; + } finally { + lock.unlock(); + } + } + + public boolean appendAndFlush(ChunkSegmenter.@NonNull ChunkSegment 
data) { + lock.lock(); + try { + boolean offered = state.offer(data); + if (offered) { + flush(); + } + return offered; + } finally { + lock.unlock(); + } + } + + public boolean appendAndFinalize(ChunkSegmenter.@NonNull ChunkSegment data) { + lock.lock(); + try { + boolean offered = state.offer(data); + if (offered) { + finishWrite(state.getTotalSentBytes()); + } + return offered; + } finally { + lock.unlock(); + } + } + + public void flush() { + lock.lock(); + try { + BidiWriteObjectRequest flush = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.getTotalSentBytes()) + .setFlush(true) + .setStateLookup(true) + .build(); + // if our flush is already enqueued, simply tick to make sure things are sent + if (flush.equals(state.peekLast())) { + internalSend(); + return; + } + boolean offered = state.offer(flush); + if (offered) { + internalSend(); + } + } finally { + lock.unlock(); + } + } + + public boolean finishWrite(long length) { + lock.lock(); + try { + // if we're already finalizing, ack rather than enqueueing again + if (state.isFinalizing() && state.getTotalSentBytes() == length) { + return true; + } + + BidiWriteObjectRequest.Builder b = + BidiWriteObjectRequest.newBuilder().setWriteOffset(length).setFinishWrite(true); + Crc32cLengthKnown cumulativeCrc32c = state.getCumulativeCrc32c(); + if (cumulativeCrc32c != null) { + b.setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(cumulativeCrc32c.getValue()).build()); + } + BidiWriteObjectRequest msg = b.build(); + boolean offer = state.offer(msg); + if (offer) { + internalSend(); + } + return offer; + } finally { + lock.unlock(); + } + } + + public boolean closeStream(long length) { + lock.lock(); + try { + + boolean offer = state.finalFlush(length); + if (offer) { + internalSend(); + } + return offer; + } finally { + lock.unlock(); + } + } + + public void sendClose() { + lock.lock(); + try { + StreamTuple tmp = getStream(); + if (tmp != null) { + tmp.closeSend(); + } + } finally { + 
lock.unlock(); + } + } + + public void awaitTakeoverStateReconciliation() { + state.awaitTakeoverStateReconciliation(this::restart); + } + + /** + * It is possible for this value to change after reading, however it is guaranteed that the amount + * of available capacity will only ever increase. + * + *

The only way this value is impacted by a background thread is if buffer space is released. + * Buffer consumption can only happen from the same thread that would invoke this method. + */ + long availableCapacity() { + return state.availableCapacity(); + } + + /** expected to be called from a background thread provided by {@link #executor}. */ + @VisibleForTesting + void restart() { + lock.lock(); + try { + checkState(stream == null, "attempting to restart stream when stream is already active"); + state.retrying(); + ApiFuture reconciliation = state.beginReconciliation(); + // read the current volatile value + ApiFuture tmpPendingReconciliation = pendingReconciliation; + StreamTuple tmp = initStreamTuple(); + state.sendVia(tmp); + // Intentionally using reference equality. + // Only register the callback if we haven't previously registered it. + // We want to avoid any error/cancellation on a long-running reconciliation being registered + // in retry context multiple times. + // Unfortunately, ApiFuture doesn't provide "isCallbackRegistered" so we need to track this + // ourselves. + if (reconciliation != tmpPendingReconciliation) { + ApiFutures.addCallback( + reconciliation, + new ApiFutureCallback() { + @Override + public void onFailure(Throwable t) { + lock.lock(); + try { + BidiUploadStreamingStream.this.pendingReconciliation = null; + } finally { + lock.unlock(); + } + retryContext.recordError(t, onSuccess, onFailure); + } + + @Override + public void onSuccess(Void result) { + lock.lock(); + try { + BidiUploadStreamingStream.this.pendingReconciliation = null; + } finally { + lock.unlock(); + } + // when the reconciliation completes, trigger sending the rest of the messages + // that might be in the queue. + // re-get the stream so that if a retry is in progress we don't attempt to send + // to a stream that was broken after reconciliation. 
+ StreamTuple tmp = getStream(); + if (tmp != null) { + state.sendVia(tmp); + } + } + }, + executor); + pendingReconciliation = reconciliation; + } + stream = tmp; + } catch (Throwable t) { + retryContext.recordError(t, onSuccess, onFailure); + } finally { + lock.unlock(); + } + } + + @VisibleForTesting + void reset() { + lock.lock(); + try { + StreamTuple tmp = stream; + if (tmp != null) { + tmp.in.flagTombstoned(); + tmp.closeSend(); + stream = null; + state.pendingRetry(); + } + } catch (Throwable t) { + // if any exception is thrown, catch it and funnel it into retryContext so that it is surfaced + // to the application. + retryContext.recordError(t, onSuccess, onFailure); + // Then throw it to prevent the current thread from running any following steps. Not ideal, + // but this can execute on a background thread that the application will never see. + // throw t; + } finally { + lock.unlock(); + } + } + + private @Nullable StreamTuple getStream() { + if (stream == null && state.getState() == State.INITIALIZING) { + stream = initStreamTuple(); + } + return stream; + } + + private StreamTuple initStreamTuple() { + GrpcCallContext grpcCallContext = state.enqueueFirstMessageAndGetGrpcCallContext(); + StreamingResponseObserver streamResponseObserver = + new StreamingResponseObserver(state, retryContext, onSuccess, onFailure); + RedirectHandlingResponseObserver responseObserver = + new RedirectHandlingResponseObserver( + state, + streamResponseObserver, + redirectCounter, + maxRedirectsAllowed, + this::reset, + () -> executor.execute(this::restart)); + ClientStream clientStream = + write.splitCall(responseObserver, grpcCallContext); + GracefulOutboundStream out = new GracefulOutboundStream(clientStream); + + return new StreamTuple(out, responseObserver); + } + + private void internalSend() { + StreamTuple tmp = getStream(); + if (tmp != null) { + state.sendVia(tmp); + } + } + + private static final class StreamTuple implements Consumer { + private final ClientStream 
out; + private final RedirectHandlingResponseObserver in; + + StreamTuple(ClientStream out, RedirectHandlingResponseObserver in) { + this.out = out; + this.in = in; + } + + @Override + public void accept(BidiWriteObjectRequest bidiWriteObjectRequest) { + out.send(bidiWriteObjectRequest); + } + + public void closeSend() { + in.flagTombstoned(); + out.closeSend(); + } + } + + static final class StreamingResponseObserver + implements ResponseObserver { + + private final BidiUploadState state; + private final RetryContext retryContext; + private final OnSuccess onSuccess; + private final OnFailure onFailure; + + @MonotonicNonNull private StreamController controller; + + StreamingResponseObserver( + BidiUploadState state, + RetryContext retryContext, + OnSuccess onSuccess, + OnFailure onFailure) { + this.state = state; + this.retryContext = retryContext; + this.onSuccess = onSuccess; + this.onFailure = onFailure; + } + + @EnsuresNonNull("controller") + @Override + public void onStart(StreamController controller) { + this.controller = controller; + controller.disableAutoInboundFlowControl(); + controller.request(1); + } + + @RequiresNonNull("controller") + @Override + public void onResponse(BidiWriteObjectResponse response) { + try { + controller.request(1); + @Nullable StorageException se = state.onResponse(response); + if (se != null) { + retryContext.recordError(se, onSuccess, onFailure); + } + } catch (Throwable t) { + // catch an error that might happen while processing and forward it to our retry context + retryContext.recordError(t, onSuccess, onFailure); + } + } + + @Override + public void onError(Throwable t) { + retryContext.recordError(t, onSuccess, onFailure); + } + + @Override + public void onComplete() { + // ignore + } + } + + static final class RedirectHandlingResponseObserver + implements ResponseObserver { + private final BidiUploadState state; + private final ResponseObserver delegate; + private final AtomicInteger redirectCounter; + private final int 
maxRedirectsAllowed; + private final Runnable beforeRedirect; + private final Runnable onRedirect; + + private volatile boolean tombstoned; + + RedirectHandlingResponseObserver( + BidiUploadState state, + ResponseObserver delegate, + AtomicInteger redirectCounter, + int maxRedirectsAllowed, + Runnable beforeRedirect, + Runnable onRedirect) { + this.state = state; + this.delegate = delegate; + this.redirectCounter = redirectCounter; + this.maxRedirectsAllowed = maxRedirectsAllowed; + this.beforeRedirect = beforeRedirect; + this.onRedirect = onRedirect; + this.tombstoned = false; + } + + /** + * mark this observer instance as tombstoned, this will cause it to ignore any invocations of + * its methods. + * + *

When we are going to retry a client detected error instead of a server detected one, we + * want to effectively ignore any following message that might already be inflight from the + * server. + */ + void flagTombstoned() { + tombstoned = true; + } + + @Override + public void onStart(StreamController controller) { + if (tombstoned) { + return; + } + delegate.onStart(controller); + } + + @Override + public void onResponse(BidiWriteObjectResponse response) { + if (tombstoned) { + return; + } + redirectCounter.set(0); + delegate.onResponse(response); + } + + @Override + public void onError(Throwable t) { + if (tombstoned) { + return; + } + BidiWriteObjectRedirectedError error = GrpcUtils.getBidiWriteObjectRedirectedError(t); + if (error == null) { + delegate.onError(t); + return; + } + int redirectCount = redirectCounter.incrementAndGet(); + if (redirectCount > maxRedirectsAllowed) { + // attach the fact we're ignoring the redirect to the original exception as a suppressed + // Exception. The lower level handler can then perform its usual handling, but if things + // bubble all the way up to the invoker we'll be able to see it in a bug report. 
+ t.addSuppressed(new MaxRedirectsExceededException(maxRedirectsAllowed, redirectCount)); + delegate.onError(t); + return; + } + beforeRedirect.run(); + state.updateFromRedirect(error); + onRedirect.run(); + } + + @Override + public void onComplete() { + if (tombstoned) { + return; + } + delegate.onComplete(); + } + } + + /** + * Prevent "already half-closed" if we previously called onComplete but then detect an error and + * call onError + */ + private static final class GracefulOutboundStream + implements ClientStream { + + private final ClientStream delegate; + private volatile boolean closing; + + private GracefulOutboundStream(ClientStream delegate) { + this.delegate = delegate; + this.closing = false; + } + + @Override + public boolean isSendReady() { + return delegate.isSendReady(); + } + + @Override + public void send(BidiWriteObjectRequest request) { + delegate.send(request); + } + + @Override + public void closeSendWithError(Throwable t) { + if (closing) { + return; + } + closing = true; + delegate.closeSendWithError(t); + } + + @Override + public void closeSend() { + if (closing) { + return; + } + closing = true; + delegate.closeSend(); + } + } + + /** + * Decorate a RetryContext to allow observing the invocation of {@link #recordError(Throwable, + * OnSuccess, OnFailure)}. This allows us to clear out the pending stream before a retry. 
+ */ + @VisibleForTesting + static final class StreamRetryContextDecorator implements RetryContext { + private final RetryContext retryContext; + private final ReentrantLock lock; + private final Runnable onRecordError; + + @VisibleForTesting + StreamRetryContextDecorator( + RetryContext retryContext, ReentrantLock lock, Runnable onRecordError) { + this.retryContext = retryContext; + this.lock = lock; + this.onRecordError = onRecordError; + } + + @Override + public boolean inBackoff() { + return retryContext.inBackoff(); + } + + @Override + public void reset() { + retryContext.reset(); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + lock.lock(); + try { + try { + onRecordError.run(); + } catch (Throwable tt) { + t.addSuppressed(tt); + onFailure.onFailure(t); + return; + } + retryContext.recordError(t, onSuccess, onFailure); + } finally { + lock.unlock(); + } + } + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java index b79a290969..e6f9167ac7 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java @@ -23,6 +23,7 @@ import com.google.cloud.storage.Storage.BlobWriteOption; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.ClosedChannelException; import java.nio.channels.WritableByteChannel; import java.util.concurrent.TimeUnit; @@ -90,6 +91,8 @@ public interface BlobAppendableUpload extends BlobWriteSession { *

This interface allows writing bytes to an Appendable Upload, and provides methods to close * this channel -- optionally finalizing the upload. * + *

The {@link #write(ByteBuffer)} method of this channel is non-blocking. + * * @since 2.51.0 This new api is in preview and is subject to breaking changes. */ @BetaApi @@ -97,7 +100,34 @@ public interface BlobAppendableUpload extends BlobWriteSession { interface AppendableUploadWriteableByteChannel extends WritableByteChannel { /** - * Finalize the upload and close this instance to further {@link #write(ByteBuffer)}ing. This + * This method is non-blocking + * + *

Consume as many bytes as can fit in the underlying outbound queue. The size of the + * outbound queue is determined from {@link BlobAppendableUploadConfig#getFlushPolicy()}{@code + * .}{@link FlushPolicy#getMaxPendingBytes() getMaxPendingBytes()}. If the outbound queue is + * full, and can not fit more bytes, this method will return 0. + * + *

If your application needs to empty its ByteBuffer before progressing, use our helper + * method {@link StorageChannelUtils#blockingEmptyTo(ByteBuffer, WritableByteChannel)} like so: + * + *

{@code
+     * try (AppendableUploadWriteableByteChannel channel = session.open()) {
+     *   int written = StorageChannelUtils.blockingEmptyTo(byteBuffer, channel);
+     * }
+     * }
+ * + * @param src The buffer from which bytes are to be retrieved + * @return The number of bytes written, possibly zero + * @throws ClosedChannelException If this channel is closed + * @throws IOException If some other I/O error occurs + */ + @Override + int write(ByteBuffer src) throws IOException; + + /** + * This method is blocking + * + *

Finalize the upload and close this instance to further {@link #write(ByteBuffer)}ing. This * will close any underlying stream and release any releasable resources once out of scope. * *

Once this method is called, and returns no more writes to the object will be allowed by @@ -116,8 +146,11 @@ interface AppendableUploadWriteableByteChannel extends WritableByteChannel { void finalizeAndClose() throws IOException; /** - * Close this instance to further {@link #write(ByteBuffer)}ing without finalizing the upload. - * This will close any underlying stream and release any releasable resources once out of scope. + * This method is blocking + * + *

Close this instance to further {@link #write(ByteBuffer)}ing without finalizing the + * upload. This will close any underlying stream and release any releasable resources once out + * of scope. * *

This method, {@link AppendableUploadWriteableByteChannel#finalizeAndClose()} and {@link * AppendableUploadWriteableByteChannel#close()} are mutually exclusive. If one of the other @@ -133,7 +166,9 @@ interface AppendableUploadWriteableByteChannel extends WritableByteChannel { void closeWithoutFinalizing() throws IOException; /** - * Close this instance to further {@link #write(ByteBuffer)}ing. + * This method is blocking + * + *

Close this instance to further {@link #write(ByteBuffer)}ing. * *

Whether the upload is finalized during this depends on the {@link * BlobAppendableUploadConfig#getCloseAction()} provided to create the {@link diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java index ae95356d74..4cd51c79fb 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadConfig.java @@ -16,24 +16,26 @@ package com.google.cloud.storage; -import static com.google.cloud.storage.ByteSizeConstants._256KiB; import static java.util.Objects.requireNonNull; +import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.api.core.BetaApi; -import com.google.api.core.InternalApi; -import com.google.api.gax.retrying.BasicResultRetryAlgorithm; -import com.google.api.gax.rpc.AbortedException; -import com.google.api.gax.rpc.ApiException; +import com.google.api.core.SettableApiFuture; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; +import com.google.cloud.storage.BidiUploadState.TakeoverAppendableUploadState; import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; import com.google.cloud.storage.BlobAppendableUploadImpl.AppendableObjectBufferedWritableByteChannel; import com.google.cloud.storage.Storage.BlobWriteOption; import com.google.cloud.storage.TransportCompatibility.Transport; import com.google.cloud.storage.UnifiedOpts.ObjectTargetOpt; import com.google.cloud.storage.UnifiedOpts.Opts; -import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.common.base.MoreObjects; +import com.google.common.base.Preconditions; import com.google.storage.v2.BidiWriteObjectResponse; -import com.google.storage.v2.Object; +import com.google.storage.v2.ServiceConstants.Values; +import java.util.Objects; 
+import java.util.function.BiFunction; import javax.annotation.concurrent.Immutable; /** @@ -51,26 +53,24 @@ public final class BlobAppendableUploadConfig { private static final BlobAppendableUploadConfig INSTANCE = new BlobAppendableUploadConfig( - FlushPolicy.minFlushSize(_256KiB), - Hasher.enabled(), - CloseAction.CLOSE_WITHOUT_FINALIZING); + FlushPolicy.minFlushSize(), CloseAction.CLOSE_WITHOUT_FINALIZING, 3); private final FlushPolicy flushPolicy; - private final Hasher hasher; private final CloseAction closeAction; + private final int maxRedirectsAllowed; private BlobAppendableUploadConfig( - FlushPolicy flushPolicy, Hasher hasher, CloseAction closeAction) { + FlushPolicy flushPolicy, CloseAction closeAction, int maxRedirectsAllowed) { this.flushPolicy = flushPolicy; - this.hasher = hasher; this.closeAction = closeAction; + this.maxRedirectsAllowed = maxRedirectsAllowed; } /** * The {@link FlushPolicy} which will be used to determine when and how many bytes to flush to * GCS. * - *

Default: {@link FlushPolicy#minFlushSize(int) FlushPolicy.minFlushSize(256 * 1024)} + *

Default: {@link FlushPolicy#minFlushSize()} * * @see #withFlushPolicy(FlushPolicy) * @since 2.51.0 This new api is in preview and is subject to breaking changes. @@ -83,7 +83,7 @@ public FlushPolicy getFlushPolicy() { /** * Return an instance with the {@code FlushPolicy} set to be the specified value. * - *

Default: {@link FlushPolicy#minFlushSize(int) FlushPolicy.minFlushSize(256 * 1024)} + *

Default: {@link FlushPolicy#minFlushSize()} * * @see #getFlushPolicy() * @since 2.51.0 This new api is in preview and is subject to breaking changes. @@ -94,7 +94,7 @@ public BlobAppendableUploadConfig withFlushPolicy(FlushPolicy flushPolicy) { if (this.flushPolicy.equals(flushPolicy)) { return this; } - return new BlobAppendableUploadConfig(flushPolicy, hasher, closeAction); + return new BlobAppendableUploadConfig(flushPolicy, closeAction, maxRedirectsAllowed); } /** @@ -112,8 +112,9 @@ public CloseAction getCloseAction() { } /** - * Return an instance with the {@code CloseAction} set to be the specified value. Default: - * {@link CloseAction#CLOSE_WITHOUT_FINALIZING} + * Return an instance with the {@code CloseAction} set to be the specified value. + * + *

Default: {@link CloseAction#CLOSE_WITHOUT_FINALIZING} * * @see #getCloseAction() * @since 2.51.0 This new api is in preview and is subject to breaking changes. @@ -124,45 +125,66 @@ public BlobAppendableUploadConfig withCloseAction(CloseAction closeAction) { if (this.closeAction == closeAction) { return this; } - return new BlobAppendableUploadConfig(flushPolicy, hasher, closeAction); + return new BlobAppendableUploadConfig(flushPolicy, closeAction, maxRedirectsAllowed); } /** - * Whether crc32c validation will be performed for bytes returned by Google Cloud Storage + * The {@code maxRedirectsAllowed} set to be the specified value. * - *

Default: {@code true} + *

Default: 3 * - * @since 2.51.0 This new api is in preview and is subject to breaking changes. + * @see #withMaxRedirectsAllowed(int) + * @since 2.56.0 This new api is in preview and is subject to breaking changes. */ @BetaApi - boolean getCrc32cValidationEnabled() { - return Hasher.enabled().equals(hasher); + int getMaxRedirectsAllowed() { + return maxRedirectsAllowed; } /** - * Return an instance with crc32c validation enabled based on {@code enabled}. + * Return an instance with the {@code maxRedirectsAllowed} set to be the specified value. * - *

Default: {@code true} + *

Default: 3 * - * @param enabled Whether crc32c validation will be performed for bytes returned by Google Cloud - * Storage - * @since 2.51.0 This new api is in preview and is subject to breaking changes. + * @see #getMaxRedirectsAllowed() + * @since 2.56.0 This new api is in preview and is subject to breaking changes. */ @BetaApi - BlobAppendableUploadConfig withCrc32cValidationEnabled(boolean enabled) { - if (enabled && Hasher.enabled().equals(hasher)) { - return this; - } else if (!enabled && Hasher.noop().equals(hasher)) { + BlobAppendableUploadConfig withMaxRedirectsAllowed(int maxRedirectsAllowed) { + Preconditions.checkArgument( + maxRedirectsAllowed >= 0, "maxRedirectsAllowed >= 0 (%s >= 0)", maxRedirectsAllowed); + if (this.maxRedirectsAllowed == maxRedirectsAllowed) { return this; } - return new BlobAppendableUploadConfig( - flushPolicy, enabled ? Hasher.enabled() : Hasher.noop(), closeAction); + return new BlobAppendableUploadConfig(flushPolicy, closeAction, maxRedirectsAllowed); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof BlobAppendableUploadConfig)) { + return false; + } + BlobAppendableUploadConfig that = (BlobAppendableUploadConfig) o; + return maxRedirectsAllowed == that.maxRedirectsAllowed + && Objects.equals(flushPolicy, that.flushPolicy) + && closeAction == that.closeAction; + } + + @Override + public int hashCode() { + return Objects.hash(flushPolicy, closeAction, maxRedirectsAllowed); } - /** Never to be made public until {@link Hasher} is public */ - @InternalApi - Hasher getHasher() { - return hasher; + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("flushPolicy", flushPolicy) + .add("closeAction", closeAction) + .add("maxRedirectsAllowed", maxRedirectsAllowed) + .toString(); } /** @@ -217,55 +239,65 @@ public enum CloseAction { } BlobAppendableUpload create(GrpcStorageImpl storage, BlobInfo info, Opts opts) { - boolean takeOver = 
info.getGeneration() != null; - BidiWriteObjectRequest req = - takeOver - ? storage.getBidiWriteObjectRequestForTakeover(info, opts) - : storage.getBidiWriteObjectRequest(info, opts); - - BidiAppendableWrite baw = new BidiAppendableWrite(req, takeOver); - + long maxPendingBytes = this.getFlushPolicy().getMaxPendingBytes(); + AppendableUploadState state = storage.getAppendableState(info, opts, maxPendingBytes); WritableByteChannelSession build = - ResumableMedia.gapic() - .write() - .bidiByteChannel(storage.storageClient.bidiWriteObjectCallable()) - .setHasher(this.getHasher()) - .setByteStringStrategy(ByteStringStrategy.copy()) - .appendable() - .withRetryConfig( - storage.retrier.withAlg( - new BasicResultRetryAlgorithm() { - @Override - public boolean shouldRetry( - Throwable previousThrowable, Object previousResponse) { - // TODO: remove this later once the redirects are not handled by the - // retry loop - ApiException apiEx = null; - if (previousThrowable instanceof StorageException) { - StorageException se = (StorageException) previousThrowable; - Throwable cause = se.getCause(); - if (cause instanceof ApiException) { - apiEx = (ApiException) cause; - } - } - if (apiEx instanceof AbortedException) { - return true; - } - return storage - .retryAlgorithmManager - .idempotent() - .shouldRetry(previousThrowable, null); - } - })) - .buffered(this.getFlushPolicy()) - .setStartAsync(ApiFutures.immediateFuture(baw)) - .setGetCallable(storage.storageClient.getObjectCallable()) - .setFinalizeOnClose(this.closeAction == CloseAction.FINALIZE_WHEN_CLOSING) - .build(); + new AppendableSession( + ApiFutures.immediateFuture(state), + (start, resultFuture) -> { + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + start, + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + maxRedirectsAllowed, + storage.storageDataClient.retryContextProvider.create()); + ChunkSegmenter chunkSegmenter = + new ChunkSegmenter( + 
Hasher.enabled(), + ByteStringStrategy.copy(), + Math.min( + Values.MAX_WRITE_CHUNK_BYTES_VALUE, Math.toIntExact(maxPendingBytes)), + /* blockSize= */ 1); + BidiAppendableUnbufferedWritableByteChannel c; + if (state instanceof TakeoverAppendableUploadState) { + // start the takeover reconciliation + stream.awaitTakeoverStateReconciliation(); + c = + new BidiAppendableUnbufferedWritableByteChannel( + stream, chunkSegmenter, state.getConfirmedBytes()); + } else { + c = new BidiAppendableUnbufferedWritableByteChannel(stream, chunkSegmenter, 0); + } + return new AppendableObjectBufferedWritableByteChannel( + flushPolicy.createBufferedChannel(c, /* blocking= */ false), + c, + this.closeAction == CloseAction.FINALIZE_WHEN_CLOSING); + }, + state.getResultFuture()); return new BlobAppendableUploadImpl( new DefaultBlobWriteSessionConfig.DecoratedWritableByteChannelSession<>( build, BidiBlobWriteSessionConfig.Factory.WRITE_OBJECT_RESPONSE_BLOB_INFO_DECODER)); } + + private static final class AppendableSession + extends ChannelSession< + AppendableUploadState, + BidiWriteObjectResponse, + AppendableObjectBufferedWritableByteChannel> + implements WritableByteChannelSession< + AppendableObjectBufferedWritableByteChannel, BidiWriteObjectResponse> { + private AppendableSession( + ApiFuture startFuture, + BiFunction< + AppendableUploadState, + SettableApiFuture, + AppendableObjectBufferedWritableByteChannel> + f, + SettableApiFuture resultFuture) { + super(startFuture, f, resultFuture); + } + } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java index cedfbcba58..909d11dfa2 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java @@ -63,18 +63,18 @@ static final class 
AppendableObjectBufferedWritableByteChannel implements BufferedWritableByteChannel, BlobAppendableUpload.AppendableUploadWriteableByteChannel { private final BufferedWritableByteChannel buffered; - private final GapicBidiUnbufferedAppendableWritableByteChannel unbuffered; + private final BidiAppendableUnbufferedWritableByteChannel unbuffered; private final boolean finalizeOnClose; private final ReentrantLock lock; AppendableObjectBufferedWritableByteChannel( BufferedWritableByteChannel buffered, - GapicBidiUnbufferedAppendableWritableByteChannel unbuffered, + BidiAppendableUnbufferedWritableByteChannel unbuffered, boolean finalizeOnClose) { this.buffered = buffered; this.unbuffered = unbuffered; this.finalizeOnClose = finalizeOnClose; - lock = new ReentrantLock(); + this.lock = new ReentrantLock(); } @Override @@ -89,7 +89,10 @@ public void flush() throws IOException { @Override public int write(ByteBuffer src) throws IOException { - lock.lock(); + boolean locked = lock.tryLock(); + if (!locked) { + return 0; + } try { return buffered.write(src); } finally { @@ -99,7 +102,6 @@ public int write(ByteBuffer src) throws IOException { @Override public boolean isOpen() { - lock.lock(); try { return buffered.isOpen(); } finally { @@ -112,8 +114,7 @@ public void finalizeAndClose() throws IOException { lock.lock(); try { if (buffered.isOpen()) { - buffered.flush(); - unbuffered.finalizeWrite(); + unbuffered.nextWriteShouldFinalize(); buffered.close(); } } finally { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/Buffers.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/Buffers.java index 571bd8cb5e..21d8c2ed98 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/Buffers.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/Buffers.java @@ -159,18 +159,11 @@ static int alignSize(int size, int alignmentMultiple) { } static int fillFrom(ByteBuffer buf, ReadableByteChannel c) throws IOException { - 
int total = 0; - while (buf.hasRemaining()) { - int read = c.read(buf); - if (read != -1) { - total += read; - } else if (total == 0) { - return -1; - } else { - break; - } - } - return total; + return StorageChannelUtils.blockingFillFrom(buf, c); + } + + static int emptyTo(ByteBuffer buf, WritableByteChannel c) throws IOException { + return StorageChannelUtils.blockingEmptyTo(buf, c); } static long totalRemaining(ByteBuffer[] buffers, int offset, int length) { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java index cbdbd94d67..463df327f5 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/ByteSizeConstants.java @@ -26,6 +26,7 @@ final class ByteSizeConstants { static final int _768KiB = 768 * _1KiB; static final int _1MiB = 1024 * _1KiB; static final int _2MiB = 2 * _1MiB; + static final int _4MiB = 4 * _1MiB; static final int _16MiB = 16 * _1MiB; static final int _32MiB = 32 * _1MiB; static final long _1GiB = 1024 * _1MiB; diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java index 25ff1e40e5..532b561bce 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/ChannelSession.java @@ -39,9 +39,16 @@ class ChannelSession { ChannelSession( ApiFuture startFuture, BiFunction, ChannelT> f) { + this(startFuture, f, SettableApiFuture.create()); + } + + ChannelSession( + ApiFuture startFuture, + BiFunction, ChannelT> f, + SettableApiFuture resultFuture) { this.startFuture = startFuture; - this.resultFuture = SettableApiFuture.create(); - this.f = (s) -> f.apply(s, resultFuture); + this.resultFuture = resultFuture; + 
this.f = (s) -> f.apply(s, this.resultFuture); } public ApiFuture openAsync() { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java index cdd964f819..7b92b25724 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java @@ -16,6 +16,8 @@ package com.google.cloud.storage; +import static com.google.common.base.Preconditions.checkState; + import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; import java.io.IOException; @@ -59,10 +61,17 @@ final class DefaultBufferedWritableByteChannel implements BufferedWritableByteCh private final BufferHandle handle; private final UnbufferedWritableByteChannel channel; + private final boolean blocking; DefaultBufferedWritableByteChannel(BufferHandle handle, UnbufferedWritableByteChannel channel) { + this(handle, channel, true); + } + + DefaultBufferedWritableByteChannel( + BufferHandle handle, UnbufferedWritableByteChannel channel, boolean blocking) { this.handle = handle; this.channel = channel; + this.blocking = blocking; } @SuppressWarnings("UnnecessaryLocalVariable") @@ -110,6 +119,7 @@ public int write(ByteBuffer src) throws IOException { Buffers.flip(buffer); ByteBuffer[] srcs = {buffer, buf}; long write = channel.write(srcs); + checkState(write >= 0, "write >= 0 (%s > 0)", write); if (write == capacity) { // we successfully wrote all the bytes we wanted to Buffers.clear(buffer); @@ -131,6 +141,10 @@ public int write(ByteBuffer src) throws IOException { Buffers.position(src, srcPosition + sliceWritten); bytesConsumed += sliceWritten; } + + if (!blocking) { + break; + } } 
} else { // no enqueued data and src is at least as large as our buffer, see if we can simply write @@ -138,16 +152,25 @@ public int write(ByteBuffer src) throws IOException { if (bufferRemaining == srcRemaining) { // the capacity of buffer and the bytes remaining in src are the same, directly // write src - bytesConsumed += channel.write(src); + int write = channel.write(src); + checkState(write >= 0, "write >= 0 (%s > 0)", write); + bytesConsumed += write; + if (write < srcRemaining && !blocking) { + break; + } } else { // the src provided is larger than our buffer. rather than copying into the buffer, simply // write a slice ByteBuffer slice = src.slice(); Buffers.limit(slice, bufferRemaining); int write = channel.write(slice); + checkState(write >= 0, "write >= 0 (%s > 0)", write); int newPosition = srcPosition + write; Buffers.position(src, newPosition); bytesConsumed += write; + if (write < bufferRemaining && !blocking) { + break; + } } } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/FlushPolicy.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/FlushPolicy.java index f8e0914e76..0f7b568b4e 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/FlushPolicy.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/FlushPolicy.java @@ -16,7 +16,9 @@ package com.google.cloud.storage; +import static com.google.cloud.storage.ByteSizeConstants._16MiB; import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.ByteSizeConstants._4MiB; import com.google.api.core.BetaApi; import com.google.api.core.InternalExtensionOnly; @@ -66,6 +68,13 @@ public static MaxFlushSizeFlushPolicy maxFlushSize(int maxFlushSize) { /** * Default instance factory method for {@link MinFlushSizeFlushPolicy}. * + *

Default: logically equivalent to the following: + * + *

+   * {@link #minFlushSize(int) FlushPolicy.minFlushSize}(4 * 1024 * 1024)
+   *     .{@link MinFlushSizeFlushPolicy#withMaxPendingBytes(long) withMaxPendingBytes}(16 * 1024 * 1024)
+   * 
+ * * @since 2.51.0 This new api is in preview and is subject to breaking changes. */ @BetaApi @@ -85,7 +94,9 @@ public static MinFlushSizeFlushPolicy minFlushSize(int minFlushSize) { } abstract BufferedWritableByteChannel createBufferedChannel( - UnbufferedWritableByteChannel unbuffered); + UnbufferedWritableByteChannel unbuffered, boolean blocking); + + abstract long getMaxPendingBytes(); @Override public abstract boolean equals(Object obj); @@ -105,6 +116,9 @@ abstract BufferedWritableByteChannel createBufferedChannel( * *

Instances of this class are immutable and thread safe. * + *

Instead of this, strategy use {@link FlushPolicy#minFlushSize()}{@code .}{@link + * MinFlushSizeFlushPolicy#withMaxPendingBytes(long) withMaxPendingBytes(long)} + * * @since 2.51.0 This new api is in preview and is subject to breaking changes. */ @Immutable @@ -114,7 +128,7 @@ public static final class MaxFlushSizeFlushPolicy extends FlushPolicy { private final int maxFlushSize; - public MaxFlushSizeFlushPolicy(int maxFlushSize) { + private MaxFlushSizeFlushPolicy(int maxFlushSize) { this.maxFlushSize = maxFlushSize; } @@ -149,9 +163,15 @@ public MaxFlushSizeFlushPolicy withMaxFlushSize(int maxFlushSize) { } @Override - BufferedWritableByteChannel createBufferedChannel(UnbufferedWritableByteChannel unbuffered) { + BufferedWritableByteChannel createBufferedChannel( + UnbufferedWritableByteChannel unbuffered, boolean blocking) { return new DefaultBufferedWritableByteChannel( - BufferHandle.allocate(maxFlushSize), unbuffered); + BufferHandle.allocate(maxFlushSize), unbuffered, blocking); + } + + @Override + long getMaxPendingBytes() { + return maxFlushSize; } @Override @@ -191,18 +211,21 @@ public String toString() { @Immutable @BetaApi public static final class MinFlushSizeFlushPolicy extends FlushPolicy { - private static final MinFlushSizeFlushPolicy INSTANCE = new MinFlushSizeFlushPolicy(_2MiB); + private static final MinFlushSizeFlushPolicy INSTANCE = + new MinFlushSizeFlushPolicy(_4MiB, _16MiB); private final int minFlushSize; + private final long maxPendingBytes; - public MinFlushSizeFlushPolicy(int minFlushSize) { + private MinFlushSizeFlushPolicy(int minFlushSize, long maxPendingBytes) { this.minFlushSize = minFlushSize; + this.maxPendingBytes = maxPendingBytes; } /** * The minimum number of bytes to include in each automatic flush * - *

Default: {@code 2097152 (2 MiB)} + *

Default: {@code 4194304 (4 MiB)} * * @see #withMinFlushSize(int) */ @@ -214,7 +237,7 @@ public int getMinFlushSize() { /** * Return an instance with the {@code minFlushSize} set to the specified value. * - *

Default: {@code 2097152 (2 MiB)} + *

Default: {@code 4194304 (4 MiB)} * * @param minFlushSize The number of bytes to buffer before flushing. * @return The new instance @@ -226,13 +249,34 @@ public MinFlushSizeFlushPolicy withMinFlushSize(int minFlushSize) { if (this.minFlushSize == minFlushSize) { return this; } - return new MinFlushSizeFlushPolicy(minFlushSize); + return new MinFlushSizeFlushPolicy(minFlushSize, maxPendingBytes); + } + + @BetaApi + public long getMaxPendingBytes() { + return maxPendingBytes; + } + + @BetaApi + public MinFlushSizeFlushPolicy withMaxPendingBytes(long maxPendingBytes) { + Preconditions.checkArgument( + maxPendingBytes >= 0, "maxPendingBytes >= 0 (%s >= 0)", maxPendingBytes); + Preconditions.checkArgument( + maxPendingBytes >= minFlushSize, + "maxPendingBytes >= minFlushSize (%s >= %s", + maxPendingBytes, + minFlushSize); + if (this.maxPendingBytes == maxPendingBytes) { + return this; + } + return new MinFlushSizeFlushPolicy(minFlushSize, maxPendingBytes); } @Override - BufferedWritableByteChannel createBufferedChannel(UnbufferedWritableByteChannel unbuffered) { + BufferedWritableByteChannel createBufferedChannel( + UnbufferedWritableByteChannel unbuffered, boolean blocking) { return new MinFlushBufferedWritableByteChannel( - BufferHandle.allocate(minFlushSize), unbuffered); + BufferHandle.allocate(minFlushSize), unbuffered, blocking); } @Override @@ -244,17 +288,20 @@ public boolean equals(Object o) { return false; } MinFlushSizeFlushPolicy that = (MinFlushSizeFlushPolicy) o; - return minFlushSize == that.minFlushSize; + return minFlushSize == that.minFlushSize && maxPendingBytes == that.maxPendingBytes; } @Override public int hashCode() { - return Objects.hashCode(minFlushSize); + return Objects.hash(minFlushSize, maxPendingBytes); } @Override public String toString() { - return MoreObjects.toStringHelper(this).add("minFlushSize", minFlushSize).toString(); + return MoreObjects.toStringHelper(this) + .add("minFlushSize", minFlushSize) + .add("maxPendingBytes", 
maxPendingBytes) + .toString(); } } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedAppendableWriteableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedAppendableWriteableByteChannel.java deleted file mode 100644 index 2e7cfc4277..0000000000 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedAppendableWriteableByteChannel.java +++ /dev/null @@ -1,783 +0,0 @@ -/* - * Copyright 2023 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.storage; - -import com.google.api.core.SettableApiFuture; -import com.google.api.gax.grpc.GrpcCallContext; -import com.google.api.gax.rpc.ApiException; -import com.google.api.gax.rpc.ApiStreamObserver; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.ErrorDetails; -import com.google.api.gax.rpc.NotFoundException; -import com.google.api.gax.rpc.OutOfRangeException; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; -import com.google.cloud.storage.Conversions.Decoder; -import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; -import com.google.cloud.storage.Retrying.RetrierWithAlg; -import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.ByteString; -import com.google.protobuf.FieldMask; -import com.google.storage.v2.AppendObjectSpec; -import com.google.storage.v2.BidiWriteHandle; -import com.google.storage.v2.BidiWriteObjectRedirectedError; -import com.google.storage.v2.BidiWriteObjectRequest; -import com.google.storage.v2.BidiWriteObjectResponse; -import com.google.storage.v2.ChecksummedData; -import com.google.storage.v2.GetObjectRequest; -import com.google.storage.v2.Object; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.Semaphore; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Supplier; -import 
java.util.stream.Collectors; -import java.util.stream.Stream; -import org.checkerframework.checker.nullness.qual.NonNull; -import org.checkerframework.checker.nullness.qual.Nullable; - -final class GapicBidiUnbufferedAppendableWritableByteChannel - implements UnbufferedWritableByteChannel { - private final BidiStreamingCallable write; - private final UnaryCallable get; - private final RetrierWithAlg retrier; - private final SettableApiFuture resultFuture; - private final ChunkSegmenter chunkSegmenter; - private final BidiWriteCtx writeCtx; - private final GrpcCallContext context; - private final RedirectHandlingResponseObserver responseObserver; - - private volatile ApiStreamObserver stream; - private boolean open = true; - private boolean first = true; - private boolean redirecting = false; - volatile boolean retry = false; - private long begin; - private volatile BidiWriteObjectRequest lastWrittenRequest; - private final AtomicInteger redirectCounter; - private final int maxRedirectsAllowed = 3; - private final AtomicReference<@Nullable BidiWriteHandle> bidiWriteHandle = - new AtomicReference<>(); - private final AtomicReference<@Nullable String> routingToken = new AtomicReference<>(); - private final AtomicLong generation = new AtomicLong(); - private final ReentrantLock lock = new ReentrantLock(); - private final Supplier baseContextSupplier; - private volatile List messages; - - GapicBidiUnbufferedAppendableWritableByteChannel( - BidiStreamingCallable write, - UnaryCallable get, - RetrierWithAlg retrier, - SettableApiFuture resultFuture, - ChunkSegmenter chunkSegmenter, - BidiWriteCtx writeCtx, - Supplier baseContextSupplier) { - this.write = write; - this.get = get; - this.retrier = retrier; - this.resultFuture = resultFuture; - this.chunkSegmenter = chunkSegmenter; - this.writeCtx = writeCtx; - this.responseObserver = new RedirectHandlingResponseObserver(new BidiObserver()); - this.baseContextSupplier = baseContextSupplier; - this.context = 
baseContextSupplier.get().withExtraHeaders(getHeaders()); - this.redirectCounter = new AtomicInteger(); - } - - @Override - public long write(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { - return internalWrite(srcs, srcsOffset, srcsLength); - } - - @Override - public long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { - long written = internalWrite(srcs, offset, length); - close(); - return written; - } - - @Override - public boolean isOpen() { - return open; - } - - @Override - public void close() throws IOException { - if (!open) { - return; - } - try { - if (stream != null) { - stream.onCompleted(); - responseObserver.await(); - } - - } finally { - open = false; - stream = null; - lastWrittenRequest = null; - } - } - - public void finalizeWrite() throws IOException { - if (stream == null) { - restart(); - } - BidiWriteObjectRequest message = finishMessage(); - lastWrittenRequest = message; - begin = writeCtx.getConfirmedBytes().get(); - this.messages = Collections.singletonList(message); - flush(); - close(); - } - - /** - * After a reconnect, opens a new stream by using an AppendObjectSpec with a state lookup to get - * the persisted size. We expect to be able to retry anything failed as normal after calling this - * method, on the new stream. 
- */ - @VisibleForTesting - void restart() { - Preconditions.checkState( - stream == null, "attempting to restart stream when stream is already active"); - - ReconnectArguments reconnectArguments = getReconnectArguments(); - BidiWriteObjectRequest req = reconnectArguments.getReq(); - if (!resultFuture.isDone()) { - ApiStreamObserver requestStream1 = - openedStream(reconnectArguments.getCtx()); - if (req != null) { - requestStream1.onNext(req); - lastWrittenRequest = req; - responseObserver.await(); - first = false; - } else { - // This means we did a metadata lookup and determined that GCS never received the initial - // WriteObjectSpec, - // So we can just start over and send it again - first = true; - } - } - } - - public void startAppendableTakeoverStream() { - BidiWriteObjectRequest req = - writeCtx.newRequestBuilder().setFlush(true).setStateLookup(true).build(); - generation.set(req.getAppendObjectSpec().getGeneration()); - this.messages = Collections.singletonList(req); - flush(); - first = false; - } - - @VisibleForTesting - BidiWriteCtx getWriteCtx() { - return writeCtx; - } - - private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength) - throws ClosedChannelException { - if (!open) { - throw new ClosedChannelException(); - } - - begin = writeCtx.getConfirmedBytes().get(); - - ChunkSegment[] data = chunkSegmenter.segmentBuffers(srcs, srcsOffset, srcsLength, true); - if (data.length == 0) { - return 0; - } - - ImmutableList.Builder messages = new ImmutableList.Builder<>(); - - for (int i = 0; i < data.length; i++) { - ChunkSegment datum = data[i]; - Crc32cLengthKnown crc32c = datum.getCrc32c(); - ByteString b = datum.getB(); - int contentSize = b.size(); - long offset = writeCtx.getTotalSentBytes().getAndAdd(contentSize); - ChecksummedData.Builder checksummedData = ChecksummedData.newBuilder().setContent(b); - if (crc32c != null) { - checksummedData.setCrc32C(crc32c.getValue()); - } - BidiWriteObjectRequest.Builder builder = 
writeCtx.newRequestBuilder(); - if (!first) { - builder.clearUploadId(); - builder.clearObjectChecksums(); - builder.clearWriteObjectSpec(); - builder.clearAppendObjectSpec(); - } else { - first = false; - } - builder.setWriteOffset(offset).setChecksummedData(checksummedData.build()); - - if (i == data.length - 1) { - builder.setFlush(true).setStateLookup(true); - } - BidiWriteObjectRequest build = builder.build(); - messages.add(build); - } - - this.messages = messages.build(); - - try { - flush(); - } catch (Exception e) { - open = false; - resultFuture.setException(e); - throw e; - } - - long end = writeCtx.getConfirmedBytes().get(); - - long bytesConsumed = end - begin; - return bytesConsumed; - } - - @NonNull - private BidiWriteObjectRequest finishMessage() { - long offset = writeCtx.getTotalSentBytes().get(); - - BidiWriteObjectRequest.Builder b = writeCtx.newRequestBuilder(); - - b.clearUploadId().clearObjectChecksums().clearWriteObjectSpec().clearAppendObjectSpec(); - - b.setFinishWrite(true).setWriteOffset(offset); - BidiWriteObjectRequest message = b.build(); - return message; - } - - private ApiStreamObserver openedStream( - @Nullable GrpcCallContext context) { - if (stream == null) { - synchronized (this) { - if (stream == null) { - responseObserver.reset(); - stream = - new GracefulOutboundStream(this.write.bidiStreamingCall(responseObserver, context)); - } - } - } - return stream; - } - - private void flush() { - retrier.run( - () -> { - if (retry) { - retry = false; - restart(); - processRetryingMessages(); - if (this.messages.isEmpty()) { - // This can happen if proccessRetryingMessages ends up dropping every message - return null; - } - } - try { - ApiStreamObserver opened = openedStream(context); - for (BidiWriteObjectRequest message : this.messages) { - - opened.onNext(message); - lastWrittenRequest = message; - } - if (lastWrittenRequest.getFinishWrite()) { - opened.onCompleted(); - } - responseObserver.await(); - return null; - } catch 
(Throwable t) { - retry = true; - stream = null; - t.addSuppressed(new AsyncStorageTaskException()); - throw t; - } - }, - Decoder.identity()); - } - - /** - * Handles a retry. Processes segments by skipping any necessary bytes and stripping - * first-specific elements, then restarts the stream and flushes the processed segments. - */ - private void processRetryingMessages() { - ImmutableList.Builder segmentsToRetry = new ImmutableList.Builder<>(); - long confirmed = writeCtx.getConfirmedBytes().get(); - long bytesSeen = begin; - boolean caughtUp = false; - for (BidiWriteObjectRequest message : this.messages) { - if (message.hasAppendObjectSpec() && first) { - // If this is the first message of a takeover, then running the restart() method will - // actually get us to the state we want to be in (i.e. the persisted_size has been - // captured), so we don't actually need to try to write the original message again--we just - // drop it entirely - continue; - } - if (message.hasWriteObjectSpec() - && redirecting) { // This is a first message and we got a Redirect - message = message.toBuilder().clearWriteObjectSpec().clearObjectChecksums().build(); - } - if (!caughtUp) { - bytesSeen += message.getChecksummedData().getContent().size(); - if (bytesSeen <= confirmed) { - // We already flushed this message and persisted the bytes, skip it - continue; - } - ByteString before = message.getChecksummedData().getContent(); - long beforeSize = before.size(); - if ((bytesSeen - confirmed) != beforeSize) { - // This means a partial flush occurred--we need to skip over some of the bytes and adjust - // the offset - long delta = bytesSeen - confirmed; - int bytesToSkip = Math.toIntExact(beforeSize - delta); - ByteString after = before.substring(bytesToSkip); - - if (after.size() == 0) { // GCS somehow flushed the whole request but still errored - continue; - } - message = - message.toBuilder() - .setChecksummedData(ChecksummedData.newBuilder().setContent(after).build()) - 
.setWriteOffset(confirmed) - .build(); - } - caughtUp = true; - } - segmentsToRetry.add(message); - } - this.messages = segmentsToRetry.build(); - } - - private class BidiObserver implements ApiStreamObserver { - - private final Semaphore sem; - private volatile BidiWriteObjectResponse lastResponseWithResource; - private volatile StorageException clientDetectedError; - private volatile RuntimeException previousError; - - private BidiObserver() { - this.sem = new Semaphore(0); - } - - @Override - public void onNext(BidiWriteObjectResponse value) { - if (value.hasWriteHandle()) { - bidiWriteHandle.set(value.getWriteHandle()); - } - if (lastWrittenRequest.hasAppendObjectSpec() && first) { - long persistedSize = - value.hasPersistedSize() ? value.getPersistedSize() : value.getResource().getSize(); - writeCtx.getConfirmedBytes().set(persistedSize); - writeCtx.getTotalSentBytes().set(persistedSize); - ok(value); - return; - } - boolean finalizing = lastWrittenRequest.getFinishWrite(); - boolean firstResponse = !finalizing && value.hasResource(); - if (firstResponse) { - generation.set(value.getResource().getGeneration()); - } - - if (!finalizing && (firstResponse || value.hasPersistedSize())) { // incremental - long totalSentBytes = writeCtx.getTotalSentBytes().get(); - long persistedSize = - firstResponse ? 
value.getResource().getSize() : value.getPersistedSize(); - - // todo: replace this with a state tracking variable - if (lastWrittenRequest.hasAppendObjectSpec()) { - writeCtx.getConfirmedBytes().set(persistedSize); - ok(value); - } else if (totalSentBytes == persistedSize) { - writeCtx.getConfirmedBytes().set(persistedSize); - ok(value); - } else if (persistedSize < totalSentBytes) { - writeCtx.getConfirmedBytes().set(persistedSize); - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_9.toStorageException( - ImmutableList.of(lastWrittenRequest), value, context, null)); - } else { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_7.toStorageException( - ImmutableList.of(lastWrittenRequest), value, context, null)); - } - } else if (finalizing && value.hasResource()) { - long totalSentBytes = writeCtx.getTotalSentBytes().get(); - long finalSize = value.getResource().getSize(); - if (totalSentBytes == finalSize) { - writeCtx.getConfirmedBytes().set(finalSize); - ok(value); - } else if (finalSize < totalSentBytes) { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_4_1.toStorageException( - ImmutableList.of(lastWrittenRequest), value, context, null)); - } else { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_4_2.toStorageException( - ImmutableList.of(lastWrittenRequest), value, context, null)); - } - } else if (finalizing && value.hasPersistedSize()) { - long totalSentBytes = writeCtx.getTotalSentBytes().get(); - long persistedSize = value.getPersistedSize(); - // if a flush: true, state_lookup: true message is in the stream along with a - // finish_write: true, GCS can respond with the incremental update, gracefully handle this - // message - if (totalSentBytes == persistedSize) { - writeCtx.getConfirmedBytes().set(persistedSize); - } else if (persistedSize < totalSentBytes) { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_3.toStorageException( - ImmutableList.of(lastWrittenRequest), 
value, context, null)); - } else { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_2.toStorageException( - ImmutableList.of(lastWrittenRequest), value, context, null)); - } - } else { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_0.toStorageException( - ImmutableList.of(lastWrittenRequest), value, context, null)); - } - } - - @Override - public void onError(Throwable t) { - if (t instanceof OutOfRangeException) { - OutOfRangeException oore = (OutOfRangeException) t; - ErrorDetails ed = oore.getErrorDetails(); - if (!(ed != null - && ed.getErrorInfo() != null - && ed.getErrorInfo().getReason().equals("GRPC_MISMATCHED_UPLOAD_SIZE"))) { - clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_5.toStorageException( - ImmutableList.of(lastWrittenRequest), null, context, oore)); - return; - } - } - if (t instanceof ApiException) { - // use StorageExceptions logic to translate from ApiException to our status codes ensuring - // things fall in line with our retry handlers. - // This is suboptimal, as it will initialize a second exception, however this is the - // unusual case, and it should not cause a significant overhead given its rarity. - StorageException tmp = StorageException.asStorageException((ApiException) t); - previousError = - ResumableSessionFailureScenario.toStorageException( - tmp.getCode(), - tmp.getMessage(), - tmp.getReason(), - lastWrittenRequest != null - ? 
ImmutableList.of(lastWrittenRequest) - : ImmutableList.of(), - null, - context, - t); - sem.release(); - } else if (t instanceof RuntimeException) { - previousError = (RuntimeException) t; - sem.release(); - } - } - - @Override - public void onCompleted() { - if (lastResponseWithResource != null) { - BidiWriteObjectResponse.Builder withSize = lastResponseWithResource.toBuilder(); - withSize.getResourceBuilder().setSize(writeCtx.getConfirmedBytes().longValue()); - resultFuture.set(withSize.build()); - } - sem.release(); - } - - private void ok(BidiWriteObjectResponse value) { - if (value.hasResource()) { - lastResponseWithResource = value; - } - first = false; - sem.release(); - } - - private void clientDetectedError(StorageException storageException) { - clientDetectedError = storageException; - // yes, check that previousError is not the same instance as e - if (previousError != null && previousError != storageException) { - storageException.addSuppressed(previousError); - previousError = null; - } - if (previousError == null) { - previousError = storageException; - } - sem.release(); - } - - void await() { - try { - sem.acquire(); - } catch (InterruptedException e) { - if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else { - throw new RuntimeException(e); - } - } - StorageException e = clientDetectedError; - RuntimeException err = previousError; - clientDetectedError = null; - previousError = null; - if ((e != null || err != null) && stream != null) { - if (lastWrittenRequest.getFinishWrite()) { - stream.onCompleted(); - } - } - if (e != null) { - throw e; - } - if (err != null) { - throw err; - } - } - - public void reset() { - sem.drainPermits(); - lastResponseWithResource = null; - clientDetectedError = null; - previousError = null; - } - } - - /** - * Prevent "already half-closed" if we previously called onComplete but then detect an error and - * call onError - */ - private static final class 
GracefulOutboundStream - implements ApiStreamObserver { - - private final ApiStreamObserver delegate; - private volatile boolean closing; - - private GracefulOutboundStream(ApiStreamObserver delegate) { - this.delegate = delegate; - this.closing = false; - } - - @Override - public void onNext(BidiWriteObjectRequest value) { - delegate.onNext(value); - } - - @Override - public void onError(Throwable t) { - if (closing) { - return; - } - closing = true; - delegate.onError(t); - } - - @Override - public void onCompleted() { - if (closing) { - return; - } - closing = true; - delegate.onCompleted(); - } - } - - private final class RedirectHandlingResponseObserver - implements ApiStreamObserver { - private final BidiObserver delegate; - - private RedirectHandlingResponseObserver(BidiObserver delegate) { - this.delegate = delegate; - } - - @Override - public void onNext(BidiWriteObjectResponse response) { - redirectCounter.set(0); - delegate.onNext(response); - } - - @Override - public void onError(Throwable t) { - BidiWriteObjectRedirectedError error = GrpcUtils.getBidiWriteObjectRedirectedError(t); - if (error == null) { - delegate.onError(t); - return; - } - redirecting = true; - stream = null; - int redirectCount = redirectCounter.incrementAndGet(); - if (redirectCount > maxRedirectsAllowed) { - // attach the fact we're ignoring the redirect to the original exception as a suppressed - // Exception. The lower level handler can then perform its usual handling, but if things - // bubble all the way up to the invoker we'll be able to see it in a bug report. 
- redirecting = false; // disable the special case that makes ABORTED retryable - t.addSuppressed(new MaxRedirectsExceededException(maxRedirectsAllowed, redirectCount)); - delegate.onError(t); - resultFuture.setException(t); - return; - } - if (error.hasWriteHandle()) { - bidiWriteHandle.set(error.getWriteHandle()); - } - if (error.hasRoutingToken()) { - routingToken.set(error.getRoutingToken()); - } - if (error.hasGeneration()) { - generation.set(error.getGeneration()); - } - delegate.onError(t); - } - - public void await() { - delegate.await(); - } - - public void reset() { - delegate.reset(); - } - - @Override - public void onCompleted() { - delegate.onCompleted(); - } - } - - ReconnectArguments getReconnectArguments() { - lock.lock(); - try { - BidiWriteObjectRequest.Builder b = writeCtx.newRequestBuilder(); - - AppendObjectSpec.Builder spec; - if (b.hasAppendObjectSpec()) { - spec = b.getAppendObjectSpec().toBuilder(); - } else { - spec = - AppendObjectSpec.newBuilder() - .setBucket(b.getWriteObjectSpec().getResource().getBucket()) - .setObject(b.getWriteObjectSpec().getResource().getName()); - } - - // Reconnects always use AppendObjectSpec, never WriteObjectSpec - b.clearWriteObjectSpec(); - - String routingToken = this.routingToken.get(); - if (routingToken != null) { - spec.setRoutingToken(routingToken); - } - - long generation = this.generation.get(); - if (generation > 0) { - spec.setGeneration(generation); - } else { - GetObjectRequest req = - GetObjectRequest.newBuilder() - .setBucket(spec.getBucket()) - .setObject(spec.getObject()) - .setReadMask( - FieldMask.newBuilder() - .addPaths(Storage.BlobField.GENERATION.getGrpcName()) - .build()) - .build(); - boolean objectNotFound = false; - try { - retrier.run( - () -> { - this.generation.set(get.call(req).getGeneration()); - return null; - }, - Decoder.identity()); - } catch (Throwable t) { - if (t.getCause() instanceof NotFoundException) { - objectNotFound = true; - } else { - t.addSuppressed(new 
AsyncStorageTaskException()); - throw t; - } - } - generation = this.generation.get(); - if (generation > 0) { - spec.setGeneration(generation); - } else if (objectNotFound) { - // If the object wasn't found, that means GCS never saw the initial WriteObjectSpec, which - // means we'll need - // to send it again. We can process this retry by just starting over again - return ReconnectArguments.of( - baseContextSupplier.get().withExtraHeaders(getHeaders()), null); - } - } - - BidiWriteHandle bidiWriteHandle = this.bidiWriteHandle.get(); - if (bidiWriteHandle != null) { - spec.setWriteHandle(bidiWriteHandle); - } - - b.setAppendObjectSpec(spec.build()); - b.setFlush(true).setStateLookup(true); - - return ReconnectArguments.of( - baseContextSupplier.get().withExtraHeaders(getHeaders()), b.build()); - } finally { - lock.unlock(); - } - } - - static final class ReconnectArguments { - private final GrpcCallContext ctx; - private final BidiWriteObjectRequest req; - - private ReconnectArguments(GrpcCallContext ctx, BidiWriteObjectRequest req) { - this.ctx = ctx; - this.req = req; - } - - public GrpcCallContext getCtx() { - return ctx; - } - - public BidiWriteObjectRequest getReq() { - return req; - } - - public static ReconnectArguments of(GrpcCallContext ctx, BidiWriteObjectRequest req) { - return new ReconnectArguments(ctx, req); - } - } - - private Map> getHeaders() { - return ImmutableMap.of( - "x-goog-request-params", - ImmutableList.of( - Stream.of( - "bucket=" + writeCtx.getRequestFactory().bucketName(), - "appendable=true", - this.routingToken.get() != null - ? 
"routing_token=" + this.routingToken.get() - : null) - .filter(Objects::nonNull) - .collect(Collectors.joining("&")))); - } -} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java index 2aea670454..a5b4904a0f 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiUnbufferedWritableByteChannel.java @@ -290,7 +290,7 @@ public void onNext(BidiWriteObjectResponse value) { ok(value); } else { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_7.toStorageException( + UploadFailureScenario.SCENARIO_7.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } } else if (finalizing && value.hasResource()) { @@ -301,16 +301,16 @@ public void onNext(BidiWriteObjectResponse value) { ok(value); } else if (finalSize < totalSentBytes) { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_4_1.toStorageException( + UploadFailureScenario.SCENARIO_4_1.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } else { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_4_2.toStorageException( + UploadFailureScenario.SCENARIO_4_2.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } } else if (!finalizing && value.hasResource()) { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_1.toStorageException( + UploadFailureScenario.SCENARIO_1.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } else if (finalizing && value.hasPersistedSize()) { long totalSentBytes = writeCtx.getTotalSentBytes().get(); @@ -322,16 +322,16 @@ public void onNext(BidiWriteObjectResponse value) { writeCtx.getConfirmedBytes().set(persistedSize); } else if (persistedSize < 
totalSentBytes) { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_3.toStorageException( + UploadFailureScenario.SCENARIO_3.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } else { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_2.toStorageException( + UploadFailureScenario.SCENARIO_2.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } } else { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_0.toStorageException( + UploadFailureScenario.SCENARIO_0.toStorageException( nullSafeList(lastWrittenRequest), value, context, null)); } } @@ -345,7 +345,7 @@ public void onError(Throwable t) { && ed.getErrorInfo() != null && ed.getErrorInfo().getReason().equals("GRPC_MISMATCHED_UPLOAD_SIZE"))) { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_5.toStorageException( + UploadFailureScenario.SCENARIO_5.toStorageException( nullSafeList(lastWrittenRequest), null, context, oore)); return; } @@ -357,7 +357,7 @@ public void onError(Throwable t) { // unusual case, and it should not cause a significant overhead given its rarity. 
StorageException tmp = StorageException.asStorageException((ApiException) t); previousError = - ResumableSessionFailureScenario.toStorageException( + UploadFailureScenario.toStorageException( tmp.getCode(), tmp.getMessage(), tmp.getReason(), diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java index 1e32da70ec..84629f0efc 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicBidiWritableByteChannelSessionBuilder.java @@ -21,15 +21,11 @@ import com.google.api.core.ApiFuture; import com.google.api.core.SettableApiFuture; import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.storage.BlobAppendableUploadImpl.AppendableObjectBufferedWritableByteChannel; import com.google.cloud.storage.ChannelSession.BufferedWriteSession; import com.google.cloud.storage.Retrying.RetrierWithAlg; import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; import com.google.storage.v2.BidiWriteObjectRequest; import com.google.storage.v2.BidiWriteObjectResponse; -import com.google.storage.v2.GetObjectRequest; -import com.google.storage.v2.Object; import com.google.storage.v2.ServiceConstants.Values; import java.nio.ByteBuffer; import java.util.function.BiFunction; @@ -84,10 +80,6 @@ GapicBidiWritableByteChannelSessionBuilder.ResumableUploadBuilder resumable() { return new GapicBidiWritableByteChannelSessionBuilder.ResumableUploadBuilder(); } - GapicBidiWritableByteChannelSessionBuilder.AppendableUploadBuilder appendable() { - return new GapicBidiWritableByteChannelSessionBuilder.AppendableUploadBuilder(); - } - final class ResumableUploadBuilder { private RetrierWithAlg 
retrier; @@ -166,111 +158,4 @@ BufferedWritableByteChannelSession build() { } } } - - final class AppendableUploadBuilder { - private RetrierWithAlg retrier; - - AppendableUploadBuilder() { - this.retrier = RetrierWithAlg.attemptOnce(); - } - - AppendableUploadBuilder withRetryConfig(RetrierWithAlg retrier) { - this.retrier = requireNonNull(retrier, "retrier must be non null"); - return this; - } - - BufferedAppendableUploadBuilder buffered(FlushPolicy flushPolicy) { - return new BufferedAppendableUploadBuilder(flushPolicy); - } - - final class BufferedAppendableUploadBuilder { - private final FlushPolicy flushPolicy; - private boolean finalizeOnClose; - private ApiFuture start; - private UnaryCallable get; - - BufferedAppendableUploadBuilder(FlushPolicy flushPolicy) { - this.flushPolicy = flushPolicy; - } - - BufferedAppendableUploadBuilder setFinalizeOnClose(boolean finalizeOnClose) { - this.finalizeOnClose = finalizeOnClose; - return this; - } - - /** - * Set the Future which will contain the AppendableWrite information necessary to open the - * Write stream. - */ - BufferedAppendableUploadBuilder setStartAsync(ApiFuture start) { - this.start = requireNonNull(start, "start must be non null"); - return this; - } - - public BufferedAppendableUploadBuilder setGetCallable( - UnaryCallable get) { - this.get = get; - return this; - } - - WritableByteChannelSession< - AppendableObjectBufferedWritableByteChannel, BidiWriteObjectResponse> - build() { - // it is theoretically possible that the setter methods for the following variables could - // be called again between when this method is invoked and the resulting function is - // invoked. - // To ensure we are using the specified values at the point in time they are bound to the - // function read them into local variables which will be closed over rather than the class - // fields. 
- ByteStringStrategy boundStrategy = byteStringStrategy; - Hasher boundHasher = hasher; - RetrierWithAlg boundRetrier = retrier; - UnaryCallable boundGet = - requireNonNull(get, "get must be non null"); - boolean boundFinalizeOnClose = finalizeOnClose; - return new AppendableSession( - requireNonNull(start, "start must be non null"), - ((BiFunction< - BidiAppendableWrite, - SettableApiFuture, - GapicBidiUnbufferedAppendableWritableByteChannel>) - (start, resultFuture) -> - new GapicBidiUnbufferedAppendableWritableByteChannel( - write, - boundGet, - boundRetrier, - resultFuture, - new ChunkSegmenter( - boundHasher, boundStrategy, Values.MAX_WRITE_CHUNK_BYTES_VALUE), - new BidiWriteCtx<>(start), - Retrying::newCallContext)) - .andThen( - c -> { - boolean takeOver = - c.getWriteCtx().getRequestFactory().getReq().hasAppendObjectSpec(); - if (takeOver) { - c.startAppendableTakeoverStream(); - } - return new AppendableObjectBufferedWritableByteChannel( - flushPolicy.createBufferedChannel(c), c, boundFinalizeOnClose); - })); - } - } - } - - private static final class AppendableSession - extends ChannelSession< - BidiAppendableWrite, BidiWriteObjectResponse, AppendableObjectBufferedWritableByteChannel> - implements WritableByteChannelSession< - AppendableObjectBufferedWritableByteChannel, BidiWriteObjectResponse> { - private AppendableSession( - ApiFuture startFuture, - BiFunction< - BidiAppendableWrite, - SettableApiFuture, - AppendableObjectBufferedWritableByteChannel> - f) { - super(startFuture, f); - } - } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java index be7d7802da..b24851390f 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java +++ 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedChunkedResumableWritableByteChannel.java @@ -122,6 +122,9 @@ private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength, bo if (data.length == 0) { return 0; } + // we consumed some bytes from srcs, flag our content as dirty since we aren't writing + // those bytes to implicitly flag as dirty. + content.flagDirty(); List messages = new ArrayList<>(); @@ -267,8 +270,7 @@ public void onError(Throwable t) { && ed.getErrorInfo() != null && ed.getErrorInfo().getReason().equals("GRPC_MISMATCHED_UPLOAD_SIZE"))) { StorageException storageException = - ResumableSessionFailureScenario.SCENARIO_5.toStorageException( - segments, null, context, oore); + UploadFailureScenario.SCENARIO_5.toStorageException(segments, null, context, oore); invocationHandle.setException(storageException); return; } @@ -280,7 +282,7 @@ public void onError(Throwable t) { // unusual case, and it should not cause a significant overhead given its rarity. 
StorageException tmp = StorageException.asStorageException((ApiException) t); StorageException storageException = - ResumableSessionFailureScenario.toStorageException( + UploadFailureScenario.toStorageException( tmp.getCode(), tmp.getMessage(), tmp.getReason(), segments, null, context, t); invocationHandle.setException(storageException); } @@ -305,7 +307,7 @@ public void onCompleted() { writeCtx.getTotalSentBytes().set(persistedSize); writeCtx.getConfirmedBytes().set(persistedSize); } else { - throw ResumableSessionFailureScenario.SCENARIO_7.toStorageException( + throw UploadFailureScenario.SCENARIO_7.toStorageException( segments, last, context, null); } } else if (finalizing && last.hasResource()) { @@ -315,28 +317,26 @@ public void onCompleted() { writeCtx.getConfirmedBytes().set(finalSize); resultFuture.set(last); } else if (finalSize < totalSentBytes) { - throw ResumableSessionFailureScenario.SCENARIO_4_1.toStorageException( + throw UploadFailureScenario.SCENARIO_4_1.toStorageException( segments, last, context, null); } else { - throw ResumableSessionFailureScenario.SCENARIO_4_2.toStorageException( + throw UploadFailureScenario.SCENARIO_4_2.toStorageException( segments, last, context, null); } } else if (!finalizing && last.hasResource()) { - throw ResumableSessionFailureScenario.SCENARIO_1.toStorageException( - segments, last, context, null); + throw UploadFailureScenario.SCENARIO_1.toStorageException(segments, last, context, null); } else if (finalizing && last.hasPersistedSize()) { long totalSentBytes = writeCtx.getTotalSentBytes().get(); long persistedSize = last.getPersistedSize(); if (persistedSize < totalSentBytes) { - throw ResumableSessionFailureScenario.SCENARIO_3.toStorageException( + throw UploadFailureScenario.SCENARIO_3.toStorageException( segments, last, context, null); } else { - throw ResumableSessionFailureScenario.SCENARIO_2.toStorageException( + throw UploadFailureScenario.SCENARIO_2.toStorageException( segments, last, context, null); } } 
else { - throw ResumableSessionFailureScenario.SCENARIO_0.toStorageException( - segments, last, context, null); + throw UploadFailureScenario.SCENARIO_0.toStorageException(segments, last, context, null); } } catch (Throwable se) { open = false; diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java index 5e67440a7e..aa6bcacec1 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedDirectWritableByteChannel.java @@ -222,7 +222,7 @@ public void onError(Throwable t) { // unusual case, and it should not cause a significant overhead given its rarity. StorageException tmp = StorageException.asStorageException((ApiException) t); StorageException storageException = - ResumableSessionFailureScenario.toStorageException( + UploadFailureScenario.toStorageException( tmp.getCode(), tmp.getMessage(), tmp.getReason(), getRequests(), null, context, t); invocationHandle.setException(storageException); } else { @@ -243,14 +243,14 @@ public void onCompleted() { writeCtx.getConfirmedBytes().set(finalSize); resultFuture.set(last); } else if (finalSize < totalSentBytes) { - throw ResumableSessionFailureScenario.SCENARIO_4_1.toStorageException( + throw UploadFailureScenario.SCENARIO_4_1.toStorageException( getRequests(), last, context, null); } else { - throw ResumableSessionFailureScenario.SCENARIO_4_2.toStorageException( + throw UploadFailureScenario.SCENARIO_4_2.toStorageException( getRequests(), last, context, null); } } else { - throw ResumableSessionFailureScenario.SCENARIO_0.toStorageException( + throw UploadFailureScenario.SCENARIO_0.toStorageException( getRequests(), last, context, null); } } catch (Throwable se) { diff --git 
a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java index e85363b227..32248227cd 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GapicUnbufferedFinalizeOnCloseResumableWritableByteChannel.java @@ -226,7 +226,7 @@ public void onError(Throwable t) { // unusual case, and it should not cause a significant overhead given its rarity. StorageException tmp = StorageException.asStorageException((ApiException) t); StorageException storageException = - ResumableSessionFailureScenario.toStorageException( + UploadFailureScenario.toStorageException( tmp.getCode(), tmp.getMessage(), tmp.getReason(), @@ -247,7 +247,7 @@ public void onCompleted() { boolean finalizing = lastWrittenRequest.getFinishWrite(); if (last == null) { clientDetectedError( - ResumableSessionFailureScenario.toStorageException( + UploadFailureScenario.toStorageException( 0, "onComplete without preceding onNext, unable to determine success.", "invalid", @@ -262,16 +262,16 @@ public void onCompleted() { ok(finalSize); } else if (finalSize < totalSentBytes) { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_4_1.toStorageException( + UploadFailureScenario.SCENARIO_4_1.toStorageException( nullSafeList(lastWrittenRequest), last, context, null)); } else { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_4_2.toStorageException( + UploadFailureScenario.SCENARIO_4_2.toStorageException( nullSafeList(lastWrittenRequest), last, context, null)); } } else if (!finalizing || last.hasPersistedSize()) { // unexpected incremental response clientDetectedError( - ResumableSessionFailureScenario.toStorageException( + 
UploadFailureScenario.toStorageException( 0, "Unexpected incremental response for finalizing request.", "invalid", @@ -281,7 +281,7 @@ public void onCompleted() { null)); } else { clientDetectedError( - ResumableSessionFailureScenario.SCENARIO_0.toStorageException( + UploadFailureScenario.SCENARIO_0.toStorageException( nullSafeList(lastWrittenRequest), last, context, null)); } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java index 7d11924644..ce3e6c62d2 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/GrpcStorageImpl.java @@ -33,6 +33,7 @@ import com.google.api.core.ApiFuture; import com.google.api.core.ApiFutures; import com.google.api.core.BetaApi; +import com.google.api.core.SettableApiFuture; import com.google.api.gax.grpc.GrpcCallContext; import com.google.api.gax.paging.AbstractPage; import com.google.api.gax.paging.Page; @@ -47,6 +48,7 @@ import com.google.cloud.Policy; import com.google.cloud.WriteChannel; import com.google.cloud.storage.Acl.Entity; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; import com.google.cloud.storage.BlobWriteSessionConfig.WriterFactory; import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; import com.google.cloud.storage.Conversions.Decoder; @@ -1493,6 +1495,34 @@ private Blob getBlob(ApiFuture result) { } } + public AppendableUploadState getAppendableState( + BlobInfo info, Opts opts, long maxPendingBytes) { + boolean takeOver = info.getGeneration() != null; + BidiWriteObjectRequest req = + takeOver + ? 
getBidiWriteObjectRequestForTakeover(info, opts) + : getBidiWriteObjectRequest(info, opts, /* appendable= */ true); + AppendableUploadState state; + if (takeOver) { + state = + BidiUploadState.appendableTakeover( + req, + Retrying::newCallContext, + maxPendingBytes, + SettableApiFuture.create(), + /* initialCrc32c= */ null); + } else { + state = + BidiUploadState.appendableNew( + req, + Retrying::newCallContext, + maxPendingBytes, + SettableApiFuture.create(), + opts.getHasher().initialValue()); + } + return state; + } + /** Bind some decoders for our "Syntax" classes to this instance of GrpcStorageImpl */ private final class SyntaxDecoders { @@ -1742,19 +1772,22 @@ WriteObjectRequest getWriteObjectRequest(BlobInfo info, Opts op return opts.writeObjectRequest().apply(requestBuilder).build(); } - BidiWriteObjectRequest getBidiWriteObjectRequest(BlobInfo info, Opts opts) { + BidiWriteObjectRequest getBidiWriteObjectRequest( + BlobInfo info, Opts opts, boolean appendable) { Object object = codecs.blobInfo().encode(info); Object.Builder objectBuilder = object.toBuilder() - // required if the data is changing + // clear out the checksums, if a crc32cMatch is specified it'll come back via opts .clearChecksums() - // trimmed to shave payload size .clearGeneration() .clearMetageneration() .clearSize() .clearCreateTime() .clearUpdateTime(); WriteObjectSpec.Builder specBuilder = WriteObjectSpec.newBuilder().setResource(objectBuilder); + if (appendable) { + specBuilder.setAppendable(true); + } BidiWriteObjectRequest.Builder requestBuilder = BidiWriteObjectRequest.newBuilder().setWriteObjectSpec(specBuilder); diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionPutTask.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionPutTask.java index 9ebd8e5868..92de549bd8 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionPutTask.java +++ 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionPutTask.java @@ -108,7 +108,7 @@ public void rewindTo(long offset) { int code = response.getStatusCode(); - if (!finalizing && ResumableSessionFailureScenario.isContinue(code)) { + if (!finalizing && UploadFailureScenario.isContinue(code)) { long effectiveEnd = ((HttpContentRange.HasRange) contentRange).range().endOffset(); @Nullable String range = response.getHeaders().getRange(); ByteRangeSpec ackRange = ByteRangeSpec.parse(range); @@ -121,11 +121,11 @@ public void rewindTo(long offset) { return ResumableOperationResult.incremental(ackRange.endOffset()); } else { StorageException se = - ResumableSessionFailureScenario.SCENARIO_7.toStorageException(uploadId, response); + UploadFailureScenario.SCENARIO_7.toStorageException(uploadId, response); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } - } else if (finalizing && ResumableSessionFailureScenario.isOk(code)) { + } else if (finalizing && UploadFailureScenario.isOk(code)) { @Nullable StorageObject storageObject; BigInteger actualSize = BigInteger.ZERO; @@ -152,7 +152,7 @@ public void rewindTo(long offset) { } else { response.ignore(); StorageException se = - ResumableSessionFailureScenario.SCENARIO_0_1.toStorageException( + UploadFailureScenario.SCENARIO_0_1.toStorageException( uploadId, response, null, () -> null); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; @@ -165,35 +165,35 @@ public void rewindTo(long offset) { return ResumableOperationResult.complete(storageObject, actualSize.longValue()); } else if (compare > 0) { StorageException se = - ResumableSessionFailureScenario.SCENARIO_4_1.toStorageException( + UploadFailureScenario.SCENARIO_4_1.toStorageException( uploadId, response, null, toString(storageObject)); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } else { StorageException se = - 
ResumableSessionFailureScenario.SCENARIO_4_2.toStorageException( + UploadFailureScenario.SCENARIO_4_2.toStorageException( uploadId, response, null, toString(storageObject)); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } - } else if (!finalizing && ResumableSessionFailureScenario.isOk(code)) { + } else if (!finalizing && UploadFailureScenario.isOk(code)) { StorageException se = - ResumableSessionFailureScenario.SCENARIO_1.toStorageException(uploadId, response); + UploadFailureScenario.SCENARIO_1.toStorageException(uploadId, response); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; - } else if (finalizing && ResumableSessionFailureScenario.isContinue(code)) { + } else if (finalizing && UploadFailureScenario.isContinue(code)) { // in order to finalize the content range must have a size, cast down to read it HttpContentRange.HasSize size = (HttpContentRange.HasSize) contentRange; ByteRangeSpec range = ByteRangeSpec.parse(response.getHeaders().getRange()); if (range.endOffsetInclusive() < size.getSize()) { StorageException se = - ResumableSessionFailureScenario.SCENARIO_3.toStorageException(uploadId, response); + UploadFailureScenario.SCENARIO_3.toStorageException(uploadId, response); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } else { StorageException se = - ResumableSessionFailureScenario.SCENARIO_2.toStorageException(uploadId, response); + UploadFailureScenario.SCENARIO_2.toStorageException(uploadId, response); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } @@ -205,8 +205,7 @@ public void rewindTo(long offset) { // a 503 with plain text content // Attempt to detect this very loosely as to minimize impact of modified error message // This is accurate circa 2023-06 - if ((!ResumableSessionFailureScenario.isOk(code) - && !ResumableSessionFailureScenario.isContinue(code)) + if ((!UploadFailureScenario.isOk(code) && 
!UploadFailureScenario.isContinue(code)) && contentType != null && contentType.startsWith("text/plain") && contentLength != null @@ -215,14 +214,13 @@ public void rewindTo(long offset) { if (errorMessage.contains("content-range") && !errorMessage.contains("earlier")) { // TODO: exclude "earlier request" StorageException se = - ResumableSessionFailureScenario.SCENARIO_5.toStorageException( + UploadFailureScenario.SCENARIO_5.toStorageException( uploadId, response, cause, cause::getContent); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } } - StorageException se = - ResumableSessionFailureScenario.toStorageException(response, cause, uploadId); + StorageException se = UploadFailureScenario.toStorageException(response, cause, uploadId); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } @@ -235,7 +233,7 @@ public void rewindTo(long offset) { throw e; } catch (Exception e) { StorageException se = - ResumableSessionFailureScenario.SCENARIO_0.toStorageException(uploadId, response, e); + UploadFailureScenario.SCENARIO_0.toStorageException(uploadId, response, e); span.setStatus(Status.UNKNOWN.withDescription(se.getMessage())); throw se; } finally { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionQueryTask.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionQueryTask.java index 40b96dbe8f..f9d4a6e040 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionQueryTask.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/JsonResumableSessionQueryTask.java @@ -62,7 +62,7 @@ final class JsonResumableSessionQueryTask response = req.execute(); int code = response.getStatusCode(); - if (ResumableSessionFailureScenario.isOk(code)) { + if (UploadFailureScenario.isOk(code)) { @Nullable StorageObject storageObject; @Nullable BigInteger actualSize; @@ -81,7 +81,7 @@ final class 
JsonResumableSessionQueryTask storageObject = null; } else { response.ignore(); - throw ResumableSessionFailureScenario.SCENARIO_0_1.toStorageException( + throw UploadFailureScenario.SCENARIO_0_1.toStorageException( uploadId, response, null, () -> null); } if (actualSize != null) { @@ -91,13 +91,13 @@ final class JsonResumableSessionQueryTask return ResumableOperationResult.incremental(actualSize.longValue()); } } else { - throw ResumableSessionFailureScenario.SCENARIO_0.toStorageException( + throw UploadFailureScenario.SCENARIO_0.toStorageException( uploadId, response, null, () -> storageObject != null ? storageObject.toString() : null); } - } else if (ResumableSessionFailureScenario.isContinue(code)) { + } else if (UploadFailureScenario.isContinue(code)) { String range1 = response.getHeaders().getRange(); if (range1 != null) { ByteRangeSpec range = ByteRangeSpec.parse(range1); @@ -118,24 +118,23 @@ final class JsonResumableSessionQueryTask // a 503 with plain text content // Attempt to detect this very loosely as to minimize impact of modified error message // This is accurate circa 2023-06 - if ((!ResumableSessionFailureScenario.isOk(code) - && !ResumableSessionFailureScenario.isContinue(code)) + if ((!UploadFailureScenario.isOk(code) && !UploadFailureScenario.isContinue(code)) && contentType != null && contentType.startsWith("text/plain") && contentLength != null && contentLength > 0) { String errorMessage = cause.getContent().toLowerCase(Locale.US); if (errorMessage.contains("content-range")) { - throw ResumableSessionFailureScenario.SCENARIO_5.toStorageException( + throw UploadFailureScenario.SCENARIO_5.toStorageException( uploadId, response, cause, cause::getContent); } } - throw ResumableSessionFailureScenario.toStorageException(response, cause, uploadId); + throw UploadFailureScenario.toStorageException(response, cause, uploadId); } } catch (StorageException se) { throw se; } catch (Exception e) { - throw 
ResumableSessionFailureScenario.SCENARIO_0.toStorageException(uploadId, response, e); + throw UploadFailureScenario.SCENARIO_0.toStorageException(uploadId, response, e); } finally { if (response != null) { try { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java index 6de7704413..30e8206ea6 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java @@ -16,6 +16,8 @@ package com.google.cloud.storage; +import static com.google.common.base.Preconditions.checkState; + import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; import java.io.IOException; @@ -55,10 +57,17 @@ final class MinFlushBufferedWritableByteChannel implements BufferedWritableByteC private final BufferHandle handle; private final UnbufferedWritableByteChannel channel; + private final boolean blocking; MinFlushBufferedWritableByteChannel(BufferHandle handle, UnbufferedWritableByteChannel channel) { + this(handle, channel, true); + } + + MinFlushBufferedWritableByteChannel( + BufferHandle handle, UnbufferedWritableByteChannel channel, boolean blocking) { this.handle = handle; this.channel = channel; + this.blocking = blocking; } @Override @@ -81,27 +90,43 @@ public int write(ByteBuffer src) throws IOException { } int capacity = handle.capacity(); + int position = handle.position(); int bufferPending = capacity - bufferRemaining; int totalPending = Math.addExact(srcRemaining, bufferPending); - if (totalPending >= capacity) { - ByteBuffer[] srcs; - if (enqueuedBytes()) { - ByteBuffer buffer = handle.get(); - Buffers.flip(buffer); - srcs = new 
ByteBuffer[] {buffer, src}; - } else { - srcs = new ByteBuffer[] {src}; - } - long write = channel.write(srcs); - if (enqueuedBytes()) { - // we didn't write enough bytes to consume the whole buffer. - Buffers.compact(handle.get()); - } else if (handle.position() == handle.capacity()) { + ByteBuffer[] srcs; + boolean usingBuffer = false; + if (enqueuedBytes()) { + usingBuffer = true; + ByteBuffer buffer = handle.get(); + Buffers.flip(buffer); + srcs = new ByteBuffer[] {buffer, src}; + } else { + srcs = new ByteBuffer[] {src}; + } + long written = channel.write(srcs); + checkState(written >= 0, "written >= 0 (%s > 0)", written); + if (usingBuffer) { + if (written >= bufferPending) { // we wrote enough to consume the buffer Buffers.clear(handle.get()); + } else if (written > 0) { + // we didn't write enough bytes to consume the whole buffer. + Buffers.compact(handle.get()); + } else /*if (written == 0)*/ { + // if none of the buffer was consumed, flip it back so we retain all bytes + Buffers.position(handle.get(), position); + Buffers.limit(handle.get(), capacity); } - int srcConsumed = Math.toIntExact(write) - bufferPending; - bytesConsumed += srcConsumed; + } + + int srcConsumed = Math.max(0, Math.toIntExact(written) - bufferPending); + bytesConsumed += srcConsumed; + + if (!blocking && written != totalPending) { + // we're configured in non-blocking mode, and we weren't able to make any progress on our + // call, break out to allow more bytes to be written to us or to allow underlying space + // to clear. 
+ break; } } return bytesConsumed; diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java index 37e78198b2..6f02b16866 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/ObjectReadSessionStream.java @@ -257,7 +257,7 @@ private BidiReadObjectResponseObserver() {} public void onStart(StreamController controller) { ObjectReadSessionStream.this.controller = controller; controller.disableAutoInboundFlowControl(); - controller.request(2); + controller.request(1); } @SuppressWarnings("rawtypes") diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java index 6733631091..8cfa7b031d 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java @@ -1516,15 +1516,13 @@ public BlobAppendableUpload blobAppendableUpload( try (Scope ignore = span.makeCurrent()) { return new OtelDecoratingBlobAppendableUpload( - delegate.blobAppendableUpload(blobInfo, uploadConfig, options)); + delegate.blobAppendableUpload(blobInfo, uploadConfig, options), span); } catch (Throwable t) { span.recordException(t); span.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); span.end(); throw t; - } finally { - span.end(); } } @@ -2110,10 +2108,12 @@ public String toString() { final class OtelDecoratingBlobAppendableUpload implements BlobAppendableUpload { private final BlobAppendableUpload delegate; + private final Span uploadSpan; private final Tracer tracer; - private OtelDecoratingBlobAppendableUpload(BlobAppendableUpload delegate) { + private 
OtelDecoratingBlobAppendableUpload(BlobAppendableUpload delegate, Span uploadSpan) { this.delegate = delegate; + this.uploadSpan = uploadSpan; this.tracer = TracerDecorator.decorate( Context.current(), @@ -2159,9 +2159,12 @@ public void finalizeAndClose() throws IOException { } catch (IOException | RuntimeException e) { openSpan.recordException(e); openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + uploadSpan.recordException(e); + uploadSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); throw e; } finally { openSpan.end(); + uploadSpan.end(); } } @@ -2173,9 +2176,12 @@ public void closeWithoutFinalizing() throws IOException { } catch (IOException | RuntimeException e) { openSpan.recordException(e); openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + uploadSpan.recordException(e); + uploadSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); throw e; } finally { openSpan.end(); + uploadSpan.end(); } } @@ -2187,9 +2193,12 @@ public void close() throws IOException { } catch (IOException | RuntimeException e) { openSpan.recordException(e); openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + uploadSpan.recordException(e); + uploadSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); throw e; } finally { openSpan.end(); + uploadSpan.end(); } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java index a4a0b99c25..7afd38d82e 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/RetryContext.java @@ -235,7 +235,7 @@ public T invokeAny(Collection> tasks, long timeout, Ti @Override public void execute(Runnable command) { - throw new UnsupportedOperationException(); + command.run(); } // diff --git 
a/google-cloud-storage/src/main/java/com/google/cloud/storage/RewindableContent.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/RewindableContent.java index c765d61a87..8d299bfb54 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/RewindableContent.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/RewindableContent.java @@ -48,6 +48,8 @@ private RewindableContent() { abstract long writeTo(GatheringByteChannel gbc) throws IOException; + abstract void flagDirty(); + @Override public final boolean retrySupported() { return false; @@ -106,6 +108,9 @@ long writeTo(GatheringByteChannel gbc) { @Override protected void rewindTo(long offset) {} + + @Override + void flagDirty() {} } private static final class PathRewindableContent extends RewindableContent { @@ -157,6 +162,9 @@ long writeTo(GatheringByteChannel gbc) throws IOException { return ByteStreams.copy(in, gbc); } } + + @Override + void flagDirty() {} } private static final class ByteBufferContent extends RewindableContent { @@ -247,5 +255,10 @@ void rewindTo(long offset) { } this.offset = offset; } + + @Override + void flagDirty() { + this.dirty = true; + } } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageChannelUtils.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageChannelUtils.java new file mode 100644 index 0000000000..d720591045 --- /dev/null +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageChannelUtils.java @@ -0,0 +1,79 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; + +/** + * Set of utility methods for working with non-blocking channels returned by this library. + * + * @since 2.56.0 + */ +public final class StorageChannelUtils { + + private StorageChannelUtils() {} + + /** + * Attempt to fill {@code buf} from {@code c}, blocking the invoking thread if necessary in order + * to do so. + * + *

This method will not close {@code c}. + * + * @return The number of bytes read, possibly zero, or {@code -1} if the channel has reached + * end-of-stream + * @throws IOException any IOException from calling {@link ReadableByteChannel#read(ByteBuffer)} + * @since 2.56.0 + */ + public static int blockingFillFrom(ByteBuffer buf, ReadableByteChannel c) throws IOException { + int total = 0; + while (buf.hasRemaining()) { + int read = c.read(buf); + if (read != -1) { + total += read; + } else if (total == 0) { + return -1; + } else { + break; + } + } + return total; + } + + /** + * Attempt to empty {@code buf} to {@code c}, blocking the invoking thread if necessary in order + * to do so. + * + *

This method will not close {@code c} + * + * @return The number of bytes written, possibly zero + * @throws IOException any IOException from calling {@link WritableByteChannel#write(ByteBuffer)} + * @since 2.56.0 + */ + public static int blockingEmptyTo(ByteBuffer buf, WritableByteChannel c) throws IOException { + int total = 0; + while (buf.hasRemaining()) { + int written = c.write(buf); + if (written != 0) { + total += written; + } + } + return total; + } +} diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java index 19fa8b5cde..43fd503a36 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageDataClient.java @@ -36,11 +36,11 @@ @InternalApi final class StorageDataClient implements AutoCloseable { - private final ScheduledExecutorService executor; + final ScheduledExecutorService executor; private final Duration terminationAwaitDuration; private final ZeroCopyBidiStreamingCallable bidiReadObject; - private final RetryContextProvider retryContextProvider; + final RetryContextProvider retryContextProvider; private final IOAutoCloseable onClose; private StorageDataClient( diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java index 0bc8e8e121..bffa932b13 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/StorageV2ProtoUtils.java @@ -16,13 +16,22 @@ package com.google.cloud.storage; -import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.ByteString; import com.google.protobuf.MessageOrBuilder; -import com.google.protobuf.util.JsonFormat; -import 
com.google.protobuf.util.JsonFormat.Printer; +import com.google.protobuf.TextFormat; +import com.google.storage.v2.BidiReadObjectResponse; +import com.google.storage.v2.BidiWriteObjectRequest; import com.google.storage.v2.BucketAccessControl; +import com.google.storage.v2.ChecksummedData; import com.google.storage.v2.ObjectAccessControl; +import com.google.storage.v2.ObjectRangeData; import com.google.storage.v2.ReadObjectRequest; +import com.google.storage.v2.ReadObjectResponse; +import com.google.storage.v2.WriteObjectRequest; +import java.util.ArrayList; +import java.util.List; +import java.util.Locale; +import java.util.function.Function; import java.util.function.Predicate; import org.checkerframework.checker.nullness.qual.NonNull; @@ -31,9 +40,6 @@ final class StorageV2ProtoUtils { private static final String VALIDATION_TEMPLATE = "offset >= 0 && limit >= 0 (%s >= 0 && %s >= 0)"; - private static final Printer PROTO_PRINTER = - JsonFormat.printer().omittingInsignificantWhitespace().preservingProtoFieldNames(); - private StorageV2ProtoUtils() {} // TODO: can we eliminate this method all together? 
@@ -53,13 +59,120 @@ static ReadObjectRequest seekReadObjectRequest( return req; } + @FunctionalInterface + interface MsgFmt extends Function {} + + @NonNull + public static String fmtProto(@NonNull Object obj) { + return fmtProtoWithFmt(obj, TextFormat.printer()::shortDebugString); + } + + @NonNull + public static String fmtProtoWithFmt(@NonNull Object obj, MsgFmt fmt) { + if (obj instanceof WriteObjectRequest) { + return fmtProtoWithFmt((WriteObjectRequest) obj, fmt); + } else if (obj instanceof BidiWriteObjectRequest) { + return fmtProtoWithFmt((BidiWriteObjectRequest) obj, fmt); + } else if (obj instanceof ReadObjectResponse) { + return fmtProtoWithFmt((ReadObjectResponse) obj, fmt); + } else if (obj instanceof BidiReadObjectResponse) { + return fmtProtoWithFmt((BidiReadObjectResponse) obj, fmt); + } else if (obj instanceof ChecksummedData) { + return fmtProtoWithFmt((ChecksummedData) obj, fmt); + } else if (obj instanceof MessageOrBuilder) { + return fmt.apply((MessageOrBuilder) obj); + } else { + return obj.toString(); + } + } + + @NonNull + private static String fmtProtoWithFmt(ChecksummedData data, MsgFmt fmt) { + ByteString content = data.getContent(); + if (content.size() > 20) { + ChecksummedData.Builder b = data.toBuilder(); + ByteString trim = snipBytes(content); + b.setContent(trim); + + return fmt.apply(b.build()); + } + return fmt.apply(data); + } + @NonNull - static String fmtProto(@NonNull final MessageOrBuilder msg) { - try { - return PROTO_PRINTER.print(msg); - } catch (InvalidProtocolBufferException e) { - throw new RuntimeException(e); + private static String fmtProtoWithFmt(@NonNull WriteObjectRequest msg, MsgFmt fmt) { + if (msg.hasChecksummedData()) { + ByteString content = msg.getChecksummedData().getContent(); + if (content.size() > 20) { + WriteObjectRequest.Builder b = msg.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + + return fmt.apply(b.build()); + } } + return fmt.apply(msg); 
+ } + + @NonNull + private static String fmtProtoWithFmt(@NonNull BidiWriteObjectRequest msg, MsgFmt fmt) { + if (msg.hasChecksummedData()) { + ByteString content = msg.getChecksummedData().getContent(); + if (content.size() > 20) { + BidiWriteObjectRequest.Builder b = msg.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + + return fmt.apply(b.build()); + } + } + return fmt.apply(msg); + } + + @NonNull + private static String fmtProtoWithFmt(@NonNull ReadObjectResponse msg, MsgFmt fmt) { + if (msg.hasChecksummedData()) { + ByteString content = msg.getChecksummedData().getContent(); + if (content.size() > 20) { + ReadObjectResponse.Builder b = msg.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + + return fmt.apply(b.build()); + } + } + return fmt.apply(msg); + } + + @NonNull + private static String fmtProtoWithFmt(@NonNull BidiReadObjectResponse msg, MsgFmt fmt) { + List rangeData = msg.getObjectDataRangesList(); + if (!rangeData.isEmpty()) { + List snips = new ArrayList<>(); + for (ObjectRangeData rd : rangeData) { + if (rd.hasChecksummedData()) { + ByteString content = rd.getChecksummedData().getContent(); + if (content.size() > 20) { + ObjectRangeData.Builder b = rd.toBuilder(); + ByteString trim = snipBytes(content); + b.getChecksummedDataBuilder().setContent(trim); + snips.add(b.build()); + } else { + snips.add(rd); + } + } + } + BidiReadObjectResponse snipped = + msg.toBuilder().clearObjectDataRanges().addAllObjectDataRanges(snips).build(); + return fmt.apply(snipped); + } + return fmt.apply(msg); + } + + private static ByteString snipBytes(ByteString content) { + ByteString snip = + ByteString.copyFromUtf8(java.lang.String.format(Locale.US, "", content.size())); + return content.substring(0, 20).concat(snip); } /** diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableSessionFailureScenario.java 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/UploadFailureScenario.java similarity index 93% rename from google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableSessionFailureScenario.java rename to google-cloud-storage/src/main/java/com/google/cloud/storage/UploadFailureScenario.java index 4e99fa21b1..a8312cc19b 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/ResumableSessionFailureScenario.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/UploadFailureScenario.java @@ -44,22 +44,22 @@ import org.checkerframework.checker.nullness.qual.Nullable; @ParametersAreNonnullByDefault -enum ResumableSessionFailureScenario { +enum UploadFailureScenario { // TODO: send more bytes than are in the Content-Range header SCENARIO_0(BaseServiceException.UNKNOWN_CODE, null, "Unknown Error"), SCENARIO_0_1(BaseServiceException.UNKNOWN_CODE, null, "Response not application/json."), SCENARIO_1( BaseServiceException.UNKNOWN_CODE, "invalid", - "Attempt to append to already finalized resumable session."), + "Attempt to append to already finalized upload session."), SCENARIO_2( BaseServiceException.UNKNOWN_CODE, "invalid", - "Attempt to finalize resumable session with fewer bytes than the backend has received."), + "Attempt to finalize upload session with fewer bytes than the backend has received."), SCENARIO_3( BaseServiceException.UNKNOWN_CODE, "dataLoss", - "Attempt to finalize resumable session with more bytes than the backend has received."), + "Attempt to finalize upload session with more bytes than the backend has received."), SCENARIO_4(200, "ok", "Attempt to finalize an already finalized session with same object size"), SCENARIO_4_1( BaseServiceException.UNKNOWN_CODE, @@ -72,7 +72,7 @@ enum ResumableSessionFailureScenario { SCENARIO_5( BaseServiceException.UNKNOWN_CODE, "dataLoss", - "Client side data loss detected. Attempt to append to a resumable session with an offset" + "Client side data loss detected. 
Attempt to append to a upload session with an offset" + " higher than the backend has"), SCENARIO_7( BaseServiceException.UNKNOWN_CODE, @@ -103,12 +103,16 @@ enum ResumableSessionFailureScenario { @Nullable private final String reason; private final String message; - ResumableSessionFailureScenario(int code, @Nullable String reason, String message) { + UploadFailureScenario(int code, @Nullable String reason, String message) { this.code = code; this.reason = reason; this.message = message; } + String getMessage() { + return message; + } + StorageException toStorageException(String uploadId, HttpResponse resp) { return toStorageException( uploadId, resp, null, () -> CharStreams.toString(new InputStreamReader(resp.getContent()))); @@ -132,10 +136,8 @@ StorageException toStorageException( return toStorageException(code, message, reason, uploadId, resp, cause, contentCallable); } - StorageException toStorageException( - /*In java List is not a sub-type of List despite WriteObjectRequest being a Message. 
- * intentionally only define List so the compiler doesn't complain */ - @SuppressWarnings("rawtypes") @NonNull List reqs, + StorageException toStorageException( + @NonNull List reqs, @Nullable Message resp, @NonNull GrpcCallContext context, @Nullable Throwable cause) { @@ -146,7 +148,7 @@ static StorageException toStorageException( HttpResponse response, HttpResponseException cause, String uploadId) { String statusMessage = cause.getStatusMessage(); StorageException se = - ResumableSessionFailureScenario.toStorageException( + UploadFailureScenario.toStorageException( cause.getStatusCode(), String.format( Locale.US, @@ -161,11 +163,11 @@ static StorageException toStorageException( return se; } - static StorageException toStorageException( + static StorageException toStorageException( int code, String message, @Nullable String reason, - @NonNull List reqs, + @NonNull List reqs, @Nullable Message resp, @NonNull GrpcCallContext context, @Nullable Throwable cause) { @@ -184,7 +186,7 @@ static StorageException toStorageException( } else { sb.append(","); } - Message req = (Message) reqs.get(i); + Message req = reqs.get(i); fmt(req, PREFIX_O, Indentation.T1, sb); sb.append("\n").append(PREFIX_O).append("\t}"); if (i == length - 1) { @@ -380,7 +382,7 @@ private static void fmtWriteObjectRequest( writeOffset, writeOffset + checksummedData.getContent().size())); if (checksummedData.hasCrc32C()) { - sb.append(", crc32c: ").append(checksummedData.getCrc32C()); + sb.append(", crc32c: ").append(Integer.toUnsignedString(checksummedData.getCrc32C())); } sb.append("}"); } else { @@ -416,7 +418,7 @@ private static void fmtBidiWriteObjectRequest( writeOffset, writeOffset + checksummedData.getContent().size())); if (checksummedData.hasCrc32C()) { - sb.append(", crc32c: ").append(checksummedData.getCrc32C()); + sb.append(", crc32c: ").append(Integer.toUnsignedString(checksummedData.getCrc32C())); } sb.append("}"); } else { diff --git 
a/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java new file mode 100644 index 0000000000..4fcbd070c1 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadStreamingStreamPropertyTest.java @@ -0,0 +1,459 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.BidiUploadTestUtils.incremental; +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static org.junit.Assert.fail; + +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Message; +import com.google.storage.v2.AppendObjectSpec; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.ChecksummedData; +import com.google.storage.v2.Object; +import com.google.storage.v2.WriteObjectSpec; +import java.util.ArrayList; +import java.util.List; 
+import java.util.stream.Collectors; +import net.jqwik.api.Arbitraries; +import net.jqwik.api.Arbitrary; +import net.jqwik.api.Combinators; +import net.jqwik.api.Example; +import net.jqwik.api.ForAll; +import net.jqwik.api.Property; +import net.jqwik.api.Provide; +import net.jqwik.api.Tuple; +import net.jqwik.api.providers.TypeUsage; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; + +final class BidiUploadStreamingStreamPropertyTest { + + @Example + public void edgeCases() { + JqwikTest.report( + TypeUsage.of(ScenarioWithLastWrittenRequest.class), + arbitrarySendViaScenarioWithLastWrittenRequest()); + } + + @Example + public void sendViaShouldCompactWithLastWrittenRequest_s1() { + + ScenarioWithLastWrittenRequest scenario = + makeScenario( + /* lastSentRequest= */ 0, + /* beginOffset= */ 0L, + BidiUploadTest.appendRequestNew, + ImmutableList.of(ChecksummedTestContent.gen(1).asChecksummedData()), + BidiUploadTestUtils.finishAt(0)); + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void sendViaShouldCompactWithLastWrittenRequest_s2() { + ScenarioWithLastWrittenRequest scenario = + makeScenario( + /* lastSentRequest= */ 2, + /* beginOffset= */ 0L, + BidiUploadTest.appendRequestNew, + ImmutableList.of( + ChecksummedTestContent.gen(1).asChecksummedData(), + ChecksummedTestContent.gen(1).asChecksummedData()), + BidiUploadTestUtils.finishAt(2)); + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void sendViaShouldCompactWithLastWrittenRequest_s3() { + ScenarioWithLastWrittenRequest scenario = + makeScenario( + /* lastSentRequest= */ 0, + /* beginOffset= */ 1L, + BidiUploadTest.appendRequestNew, + ImmutableList.of( + ChecksummedTestContent.gen(1).asChecksummedData(), + ChecksummedTestContent.gen(1).asChecksummedData(), + ChecksummedTestContent.gen(1).asChecksummedData()), + BidiUploadTestUtils.finishAt(4)); + 
sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void sendViaShouldCompactWithLastWrittenRequest_s4() { + ScenarioWithLastWrittenRequest scenario = + new ScenarioWithLastWrittenRequest( + /* lastSentRequest= */ -1, + /* beginOffset= */ 10L, + BidiUploadTest.appendRequestNew, + ImmutableList.of( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setFlush(true) + .setStateLookup(true) + .build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(10).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(11).build(), + BidiUploadTestUtils.finishAt(12))); + + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void sendViaShouldCompactWithLastWrittenRequest_s5() { + ScenarioWithLastWrittenRequest scenario = + new ScenarioWithLastWrittenRequest( + /* lastSentRequest= */ -1, + /* beginOffset= */ 0, + BidiUploadTest.appendRequestNew, + ImmutableList.of( + BidiUploadTest.appendRequestNew, + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(0).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(1).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(2).build(), + BidiUploadTestUtils.finishAt(3))); + + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void sendViaShouldCompactWithLastWrittenRequest_s6() { + ScenarioWithLastWrittenRequest scenario = + new ScenarioWithLastWrittenRequest( + /* lastSentRequest= */ -1, + /* beginOffset= */ 1, + BidiUploadTest.appendRequestNew, + ImmutableList.of( + BidiUploadTest.appendRequestNew, + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(1).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(2).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(3).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(4).build(), + ChecksummedTestContent.gen(1).asBidiWrite().setWriteOffset(5).build(), + 
BidiUploadTestUtils.finishAt(6))); + + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void makeScenario_4() { + + ScenarioWithLastWrittenRequest scenario = + makeScenario( + /* lastSentRequest= */ 1, + /* beginOffset= */ 0L, + BidiUploadTest.appendRequestNew, + ImmutableList.of(ChecksummedTestContent.gen(1).asChecksummedData()), + BidiUploadTestUtils.finishAt(1)); + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void makeScenario_5() { + + ScenarioWithLastWrittenRequest scenario = + makeScenario( + /* lastSentRequest= */ 0, + /* beginOffset= */ 0L, + BidiUploadTest.appendRequestNew, + ImmutableList.of( + ChecksummedTestContent.gen(1).asChecksummedData(), + ChecksummedTestContent.gen(1).asChecksummedData()), + BidiUploadTestUtils.finishAt(2)); + sendViaShouldCompactWithLastWrittenRequest(scenario); + } + + @Example + public void sendViaWithOnlyAFirstMessage_shouldSendCleanly() { + AppendableUploadState state = + BidiUploadState.appendableNew( + BidiUploadTest.appendRequestNew, + GrpcCallContext::createDefault, + 37, + SettableApiFuture.create(), + Crc32cValue.zero()); + state.updateStateFromResponse(incremental(0)); + assertThat(state.offer(BidiUploadTest.appendRequestNew)).isTrue(); + + List expected = ImmutableList.of(BidiUploadTest.appendRequestNew); + + List actual = sinkToList(state); + + String actualS = fmt(actual); + String expectedS = fmt(expected); + + assertThat(actualS).isEqualTo(expectedS); + } + + @Property(tries = 1_000) + public void sendViaShouldCompactWithLastWrittenRequest( + @ForAll("sendViaScenarioWithLastWrittenRequest") ScenarioWithLastWrittenRequest s) { + AppendableUploadState state = s.makeBidiUploadState(); + assertThat(state.onResponse(incremental(s.beginFromOffset))).isNull(); + for (BidiWriteObjectRequest m : s.messages) { + assertThat(state.offer(m)).isTrue(); + } + state.lastSentRequestIndex = s.lastSentRequestIndex; + + List actual = sinkToList(state); + 
assertThat(state.lastSentRequestIndex).isEqualTo(s.messages.size() - 1); + + if (actual.isEmpty()) { + assertThat(s.lastSentRequestIndex).isEqualTo(s.messages.size() - 1); + } + + long writeOffset = getExpectedBeginOffset(s); + assertSaneMessageSequence(actual, s.lastSentRequestIndex, writeOffset); + } + + static List sinkToList(BidiUploadState state) { + ImmutableList.Builder b = ImmutableList.builder(); + state.sendVia(b::add); + return b.build(); + } + + private static long getExpectedBeginOffset(ScenarioWithLastWrittenRequest s) { + long writeOffset = s.beginFromOffset; + if (!s.messages.isEmpty()) { + for (int i = s.messages.size() - 1; i > s.lastSentRequestIndex; i--) { + BidiWriteObjectRequest msg = s.messages.get(i); + if (msg.hasOneof(BidiUploadState.FIRST_MESSAGE_DESCRIPTOR)) { + writeOffset = s.beginFromOffset; + } else { + writeOffset = msg.getWriteOffset(); + } + } + } + return writeOffset; + } + + private static void assertSaneMessageSequence( + List actual, int lastWrittenRequest, long beginFromOffset) { + String msg = "Actual message sequence: " + fmt(actual); + if (!actual.isEmpty() && actual.get(0).hasOneof(BidiUploadState.FIRST_MESSAGE_DESCRIPTOR)) { + assertWithMessage("Received an unexpected first_message " + msg) + .that(lastWrittenRequest) + .isEqualTo(-1); + } + + long startOffset = beginFromOffset; + for (int i = 0, actualSize = actual.size(), lastIdx = actualSize - 1; i < actualSize; i++) { + BidiWriteObjectRequest req = actual.get(i); + assertWithMessage("Non-contiguous message " + msg) + .that(req.getWriteOffset()) + .isEqualTo(startOffset); + if (req.getFinishWrite()) { + assertWithMessage("finish_write: true not last " + msg).that(i).isEqualTo(lastIdx); + } + startOffset = req.getWriteOffset() + req.getChecksummedData().getContent().size(); + } + } + + @Provide("sendViaScenarioWithLastWrittenRequest") + Arbitrary arbitrarySendViaScenarioWithLastWrittenRequest() { + return beginOffset() + .flatMap( + beginOffset -> + 
Combinators.combine( + Arbitraries.just(beginOffset), + firstMessage(), + dataMessage(), + finishMessage()) + .as(Tuple::of)) + .flatMap( + t -> { + Long beginOffset = t.get1(); + BidiWriteObjectRequest first = t.get2(); + List<@NonNull ChecksummedData> data = t.get3(); + BidiWriteObjectRequest finish = t.get4(); + + int messageCount = data.size(); + if (first != null) { + messageCount++; + } + if (finish != null) { + messageCount++; + } + // maybe select one of our existing messages as the lastWrittenRequest + return Arbitraries.integers() + .between(-1, messageCount - 1) + .map(lwr -> makeScenario(lwr, beginOffset, first, data, finish)); + }); + } + + private static @NonNull ScenarioWithLastWrittenRequest makeScenario( + int lastSentRequest, + Long beginOffset, + BidiWriteObjectRequest first, + List cds, + BidiWriteObjectRequest last) { + long offset = beginOffset; + List data = new ArrayList<>(); + data.add(first); + for (ChecksummedData cd : cds) { + data.add( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(offset) + .setChecksummedData(cd) + .build()); + offset += cd.getContent().size(); + } + + if (last != null) { + BidiWriteObjectRequest lastWithOffset = last.toBuilder().setWriteOffset(offset).build(); + data.add(lastWithOffset); + } + + return new ScenarioWithLastWrittenRequest( + /* lastWrittenRequest= */ lastSentRequest, /* beginFromOffset= */ beginOffset, first, data); + } + + Arbitrary beginOffset() { + return Arbitraries.longs().between(0, 256 * 1024); + } + + Arbitrary<@NonNull BidiWriteObjectRequest> firstMessage() { + return Arbitraries.of( + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder().setBucket("projects/_/buckets/b").setName("o").build()) + .build()) + .build(), + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .build()) + .build() /*, + 
BidiWriteObjectRequest.newBuilder() + .setUploadId("upload-id") + .build(),*/); + } + + Arbitrary<@NonNull List> dataMessage() { + // keep data fairly small, we are mainly testing message handling not data handling + return Arbitraries.integers() + .between(1, 17) + .map( + numBytes -> { + ChecksummedTestContent content = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(numBytes)); + return content.asChecksummedData(); + }) + .list() + .ofMinSize(1) + .ofMaxSize(5); + } + + Arbitrary<@Nullable BidiWriteObjectRequest> finishMessage() { + return Arbitraries.of( + BidiWriteObjectRequest.newBuilder().setFinishWrite(true).build(), + BidiWriteObjectRequest.newBuilder().setFlush(true).setStateLookup(true).build()); + } + + private static String fmt(List l) { + return l.stream().map(StorageV2ProtoUtils::fmtProto).collect(BidiUploadTest.joiner); + } + + private static String fmt(Message msg) { + if (msg == null) { + return "null"; + } + return fmtProto(msg); + } + + static final class ScenarioWithLastWrittenRequest { + private static final long MAX_BYTES = 50_000; + private final int lastSentRequestIndex; + private final long beginFromOffset; + private final BidiWriteObjectRequest firstMessage; + private final List messages; + + private ScenarioWithLastWrittenRequest( + int lastWrittenRequest, + long beginFromOffset, + BidiWriteObjectRequest firstMessage, + List messages) { + this.lastSentRequestIndex = lastWrittenRequest; + this.beginFromOffset = beginFromOffset; + this.firstMessage = firstMessage; + this.messages = messages; + } + + public @NonNull AppendableUploadState makeBidiUploadState() { + if (firstMessage.hasWriteObjectSpec()) { + AppendableUploadState state = + BidiUploadState.appendableNew( + firstMessage, + GrpcCallContext::createDefault, + MAX_BYTES, + SettableApiFuture.create(), + Crc32cValue.zero()); + state.totalSentBytes = beginFromOffset; + return state; + } else if (firstMessage.hasAppendObjectSpec()) { + AppendableUploadState state = 
+ BidiUploadState.appendableTakeover( + firstMessage, + GrpcCallContext::createDefault, + MAX_BYTES, + SettableApiFuture.create(), + Crc32cValue.zero()); + state.awaitTakeoverStateReconciliation( + () -> { + state.retrying(); + assertThat( + state.onResponse(BidiUploadTest.resourceFor(firstMessage, beginFromOffset))) + .isNull(); + }); + return state; + } else { + //noinspection JUnit5AssertionsConverter + fail("Unhandled firstMessage type: " + fmtProto(firstMessage)); + return null; + } + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("\n lastWrittenRequest", lastSentRequestIndex) + .add("\n beginFromOffset", beginFromOffset) + .add("\n maxBytes", MAX_BYTES) + .add("\n firstMessage", BidiUploadStreamingStreamPropertyTest.fmt(firstMessage)) + .add("\n messages", fmt(messages)) + .addValue("\n") + .toString(); + } + + private static String fmt(List msgs) { + return msgs.stream() + .map(BidiUploadStreamingStreamPropertyTest::fmt) + .collect(Collectors.joining(",\n ", "[\n ", "\n ]")); + } + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java new file mode 100644 index 0000000000..bfb561b593 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java @@ -0,0 +1,2264 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.BidiUploadState.appendableNew; +import static com.google.cloud.storage.BidiUploadTestUtils.finishAt; +import static com.google.cloud.storage.BidiUploadTestUtils.makeRedirect; +import static com.google.cloud.storage.BidiUploadTestUtils.packRedirectIntoAbortedException; +import static com.google.cloud.storage.BidiUploadTestUtils.timestampNow; +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.cloud.storage.TestUtils.GRPC_STATUS_DETAILS_KEY; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.defaultRetryingDeps; +import static com.google.common.base.Preconditions.checkState; +import static com.google.common.truth.Truth.assertThat; +import static com.google.common.truth.Truth.assertWithMessage; +import static java.lang.String.format; +import static org.junit.Assert.assertThrows; +import static org.junit.Assert.fail; + +import com.google.api.core.ApiFuture; +import com.google.api.core.SettableApiFuture; +import com.google.api.gax.grpc.GrpcCallContext; +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.ApiCallContext; +import com.google.api.gax.rpc.ApiExceptionFactory; +import com.google.api.gax.rpc.BidiStreamingCallable; +import com.google.api.gax.rpc.ClientStream; +import com.google.api.gax.rpc.ClientStreamReadyObserver; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.ResponseObserver; +import com.google.api.gax.rpc.StreamController; +import com.google.cloud.storage.Backoff.Jitterer; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; +import com.google.cloud.storage.BidiUploadState.BaseUploadState; +import com.google.cloud.storage.BidiUploadState.State; +import com.google.cloud.storage.BidiUploadStreamingStream.RedirectHandlingResponseObserver; +import 
com.google.cloud.storage.BidiUploadStreamingStream.StreamRetryContextDecorator; +import com.google.cloud.storage.BidiUploadStreamingStream.StreamingResponseObserver; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.ITAppendableUploadFakeTest.FakeStorage; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.BoundType; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Range; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; +import com.google.rpc.Code; +import com.google.storage.v2.AppendObjectSpec; +import com.google.storage.v2.BidiWriteHandle; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.Object; +import com.google.storage.v2.ObjectChecksums; +import com.google.storage.v2.WriteObjectSpec; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.ReentrantLock; +import 
java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Supplier; +import java.util.function.UnaryOperator; +import java.util.stream.Collector; +import java.util.stream.Collectors; +import java.util.stream.IntStream; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.runners.Enclosed; +import org.junit.rules.TestName; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameters; + +@RunWith(Enclosed.class) +@SuppressWarnings({"unused", "UnnecessaryLocalVariable", "SameParameterValue"}) +public final class BidiUploadTest { + static final Collector joiner = joiner(1); + + private static Collector joiner(int indentation) { + String i0 = " "; + String i_1 = IntStream.range(0, indentation - 1).mapToObj(x -> i0).reduce("", String::concat); + String i = IntStream.range(0, indentation).mapToObj(x -> i0).reduce("", String::concat); + + return Collectors.joining(",\n" + i, "[\n" + i, "\n" + i_1 + "]"); + } + + public static final ChecksummedTestContent content = + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(64)); + + static final BidiWriteObjectRequest appendRequestNew = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder().setBucket("projects/_/buckets/b").setName("o").build()) + .setAppendable(true) + .build()) + .build(); + static final BidiWriteObjectRequest appendRequestTakeover = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .build()) + .build(); + + static final BidiWriteObjectRequest uploadId = + BidiWriteObjectRequest.newBuilder().setUploadId("uploadId").build(); + static final 
BidiWriteObjectRequest writeObjectSpec = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec(WriteObjectSpec.newBuilder().setIfGenerationMatch(0).build()) + .build(); + static final BidiWriteObjectRequest appendableObjectSpec = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec(AppendObjectSpec.newBuilder().setObject("obj").build()) + .build(); + + static final BidiWriteObjectRequest onlyBytes_00 = + BidiWriteObjectRequest.newBuilder() + .setChecksummedData(content.slice(0, 10).asChecksummedData()) + .setWriteOffset(0) + .build(); + static final BidiWriteObjectRequest onlyBytes_10 = + BidiWriteObjectRequest.newBuilder() + .setChecksummedData(content.slice(10, 10).asChecksummedData()) + .setWriteOffset(10) + .build(); + static final BidiWriteObjectRequest onlyBytes_20 = + BidiWriteObjectRequest.newBuilder() + .setChecksummedData(content.slice(20, 10).asChecksummedData()) + .setWriteOffset(20) + .build(); + static final BidiWriteObjectRequest onlyBytes_30 = + BidiWriteObjectRequest.newBuilder() + .setChecksummedData(content.slice(30, 10).asChecksummedData()) + .setWriteOffset(30) + .build(); + + static final BidiWriteObjectRequest onlyFlush = + BidiWriteObjectRequest.newBuilder().setFlush(true).build(); + static final BidiWriteObjectRequest onlyFinishWrite = + BidiWriteObjectRequest.newBuilder().setFinishWrite(true).build(); + + @SuppressWarnings("ClassEscapesDefinedScope") + @RunWith(Parameterized.class) + public static final class BidiUploadStateCommonTest { + + private static final Function fmt = + message -> message == null ? 
"null" : TextFormat.printer().shortDebugString(message); + + private final BidiUploadStateFactory factory; + + public BidiUploadStateCommonTest(BidiUploadStateFactory factory) { + this.factory = factory; + } + + @Parameters(name = "{0}") + public static ImmutableList factories() { + return ImmutableList.of(new AppendableNewFactory(), new AppendableTakeoverFactory()); + } + + @Test + public void offer() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(uploadId)).isTrue(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.offer(onlyBytes_20)).isFalse(); + + assertThat(state.peekFirst()).isSameInstanceAs(uploadId); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + } + + @Test + public void setConfirmedBytesOffset_oneFullMessage() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_10); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + } + + @Test + public void setConfirmedBytesOffset_anyFirstMessageEvicted() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(uploadId)).isTrue(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + 
assertThat(state.peekFirst()).isSameInstanceAs(uploadId); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_10); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + } + + @Test + public void setConfirmedBytesOffset_onlyFullMessagesAreEvicted() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(11)); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_10); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_10); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + } + + @Test + public void setConfirmedBytesOffset_allMessagesAreEvicted() { + BidiUploadState state = factory.createInitialized(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(20)); + + assertThat(state.peekFirst()).isNull(); + assertThat(state.peekLast()).isNull(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(state.getConfirmedBytes()).isEqualTo(20); + } + + @Test + public void multipleOfferAckCycles() { + BidiUploadState state = factory.createInitialized(); + + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + assertThat(state.offer(onlyBytes_00)).isTrue(); + 
assertThat(state.getTotalSentBytes()).isEqualTo(10); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + assertThat(state.getConfirmedBytes()).isEqualTo(10); + + assertThat(state.offer(onlyBytes_10)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(20)); + assertThat(state.getConfirmedBytes()).isEqualTo(20); + + assertThat(state.offer(onlyBytes_20)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(30); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(30)); + assertThat(state.getConfirmedBytes()).isEqualTo(30); + + assertThat(state.peekFirst()).isNull(); + assertThat(state.peekLast()).isNull(); + } + + @Test + public void ackOfferLessThanSent() { + BidiUploadState state = factory.createInitialized(); + + assertThat(state.offer(uploadId)).isTrue(); + assertThat(state.offer(onlyBytes_00)).isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(0)); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + assertThat(state.getConfirmedBytes()).isEqualTo(0); + + assertThat(state.peekFirst()).isSameInstanceAs(onlyBytes_00); + assertThat(state.peekLast()).isSameInstanceAs(onlyBytes_00); + } + + @Test + public void offerWithBytesRejectedIfNoAvailableCapacity() { + BidiUploadState state = factory.createInitialized(4); + + assertThat(state.availableCapacity()).isEqualTo(4); + assertThat( + state.offer( + BidiUploadTestUtils.createSegment(content.slice(0, 5).asChecksummedData()))) + .isFalse(); + } + + @Test + public void initialRequestMessage_shouldNotMergeIntoDataRequest() { + BidiUploadState state = factory.createInitialized(17); + + 
assertThat(state.offer(BidiUploadTestUtils.createSegment(onlyBytes_00.getChecksummedData()))) + .isTrue(); + + assertThat(state.peekFirst()).isEqualTo(onlyBytes_00); + } + + @Test + public void redirectToken_appendable_previousSuccessfulFlush() throws Exception { + BidiWriteObjectRequest req = appendRequestNew; + GrpcCallContext baseContext = + GrpcCallContext.createDefault() + .withExtraHeaders(ImmutableMap.of("something", ImmutableList.of("or", "other"))); + BidiUploadState state = factory.createInitialized(() -> baseContext, 17); + + state.updateStateFromResponse(BidiUploadTestUtils.incremental(10)); + state.pendingRetry(); + state.updateFromRedirect( + BidiWriteObjectRedirectedError.newBuilder() + .setGeneration(1) + .setRoutingToken("routing-token") + .setWriteHandle( + BidiWriteHandle.newBuilder().setHandle(ByteString.copyFromUtf8("handle")).build()) + .build()); + state.retrying(); + + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + List requests = BidiUploadTestUtils.sinkToList(state); + + BidiWriteObjectRequest expectedRequest = + appendRequestNew.toBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .setWriteHandle( + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("handle")) + .build()) + .setRoutingToken("routing-token") + .build()) + .setStateLookup(true) + .build(); + ImmutableMap> expectedHeaders = + ImmutableMap.of( + "something", + ImmutableList.of("or", "other"), + "x-goog-request-params", + ImmutableList.of( + "bucket=projects/_/buckets/b&appendable=true&routing_token=routing-token")); + assertAll( + () -> assertThat(requests).isEqualTo(ImmutableList.of(expectedRequest)), + () -> assertThat(actualCtx).isNotEqualTo(baseContext), + () -> assertThat(actualCtx.getExtraHeaders()).isEqualTo(expectedHeaders)); + } + + @Test + public void sendVia_onlySendsFirstMessageWhenRetrying() { + BidiUploadState state = 
factory.create(20); + assertThat(state.enqueueFirstMessageAndGetGrpcCallContext()).isNotNull(); + List requests1 = BidiUploadTestUtils.sinkToList(state); + assertThat(requests1).hasSize(1); + assertThat(state.onResponse(resourceWithSize(0))).isNull(); + assertThat(state.getState()).isEqualTo(State.RUNNING); + + ChecksummedTestContent.gen(20).chunkup(5).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(s -> assertThat(state.offer(s)).isTrue()); + + List requests2 = BidiUploadTestUtils.sinkToList(state); + assertThat(requests2).hasSize(4); + assertThat(requests2).containsNoneIn(requests1); + + assertThat(state.onResponse(BidiUploadTestUtils.incremental(4))).isNull(); + + state.pendingRetry(); + state.retrying(); + assertThat(state.enqueueFirstMessageAndGetGrpcCallContext()).isNotNull(); + + List requests3 = BidiUploadTestUtils.sinkToList(state); + + assertThat(requests3) + .isEqualTo( + ImmutableList.of( + BidiWriteObjectRequest.newBuilder() + .setStateLookup(true) + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .build()) + .build())); + } + + @Test + public void redirectToken_appendable_noPreviousSuccessfulFlush() throws Exception { + GrpcCallContext baseContext = + GrpcCallContext.createDefault() + .withExtraHeaders(ImmutableMap.of("something", ImmutableList.of("or", "other"))); + BidiUploadState state = factory.create(() -> baseContext, 17); + + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + List requests = BidiUploadTestUtils.sinkToList(state); + + ImmutableMap> expectedHeaders = + ImmutableMap.of( + "something", + ImmutableList.of("or", "other"), + "x-goog-request-params", + ImmutableList.of("bucket=projects/_/buckets/b&appendable=true")); + assertAll( + () -> { + // because we're running parameterized, we don't know which initial request is + // specifically needed for this 
assertion. But we do know all the valid request chains. + // Enumerate them here and validate that one of them is matched. + List> all = + ImmutableList.of( + ImmutableList.of(appendRequestNew), ImmutableList.of(appendRequestTakeover)); + boolean contains = all.contains(requests); + String msg = + format( + "Requests does not match a valid list of expected requests.%n" + + "expected: %s" + + "%n" + + "%n" + + "but was: %s", + all.stream() + .map(l -> l.stream().map(StorageV2ProtoUtils::fmtProto).collect(joiner(2))) + .collect(joiner), + requests.stream().map(StorageV2ProtoUtils::fmtProto).collect(joiner)); + assertWithMessage(msg).that(contains).isTrue(); + }, + () -> assertThat(actualCtx).isNotEqualTo(baseContext), + () -> assertThat(actualCtx.getExtraHeaders()).isEqualTo(expectedHeaders)); + } + + private abstract static class BidiUploadStateFactory { + final BidiUploadState createInitialized() { + return createInitialized(25); + } + + final BidiUploadState createInitialized(long maxBytes) { + return createInitialized( + GrpcCallContext::createDefault, + maxBytes, + SettableApiFuture.create(), + Crc32cValue.zero()); + } + + final BidiUploadState createInitialized( + Supplier grpcContextSupplier, long maxBytes) { + return createInitialized( + grpcContextSupplier, maxBytes, SettableApiFuture.create(), Crc32cValue.zero()); + } + + final BidiUploadState create() { + return create(25); + } + + final BidiUploadState create(long maxBytes) { + return create( + GrpcCallContext::createDefault, + maxBytes, + SettableApiFuture.create(), + Crc32cValue.zero()); + } + + final BidiUploadState create(Supplier grpcContextSupplier, long maxBytes) { + return create( + grpcContextSupplier, maxBytes, SettableApiFuture.create(), Crc32cValue.zero()); + } + + abstract BidiUploadState createInitialized( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c); + + abstract BidiUploadState create( + Supplier 
baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c); + + @Override + public final String toString() { + return this.getClass().getSimpleName(); + } + } + + private static final class AppendableNewFactory extends BidiUploadStateFactory { + @Override + BidiUploadState createInitialized( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + BidiUploadState state = create(baseCallContext, maxBytes, resultFuture, initialCrc32c); + state.enqueueFirstMessageAndGetGrpcCallContext(); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(0)); + return state; + } + + @Override + BidiUploadState create( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + return appendableNew( + appendRequestNew, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + } + + private static final class AppendableTakeoverFactory extends BidiUploadStateFactory { + @Override + BidiUploadState createInitialized( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + BidiUploadState state = create(baseCallContext, maxBytes, resultFuture, initialCrc32c); + state.awaitTakeoverStateReconciliation( + () -> { + state.retrying(); + assertThat(state.onResponse(BidiUploadTest.resourceFor(appendRequestTakeover, 0))) + .isNull(); + }); + return state; + } + + @Override + BidiUploadState create( + Supplier baseCallContext, + long maxBytes, + SettableApiFuture resultFuture, + @Nullable Crc32cLengthKnown initialCrc32c) { + return BidiUploadState.appendableTakeover( + appendRequestTakeover, baseCallContext, maxBytes, resultFuture, initialCrc32c); + } + } + } + + public static final class BidiUploadStateConcatenateTest { + + @Test + public void concatenate_bothChecksummedData_resultsInIllegalArgument() { + 
IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, + () -> BidiUploadState.concatenate(onlyBytes_00, onlyBytes_10)); + } + + @Test + public void concatenate_writeOffsetLesserWhenBothSpecified() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setFinishWrite(true) + .setChecksummedData(onlyBytes_10.getChecksummedData()) + .build(); + BidiWriteObjectRequest finish_20 = onlyFinishWrite.toBuilder().setWriteOffset(20).build(); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(onlyBytes_10, finish_20); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_left0_right10_shouldBe0() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setChecksummedData(onlyBytes_00.getChecksummedData()) + .build(); + BidiWriteObjectRequest finish_10 = onlyFinishWrite.toBuilder().setWriteOffset(10).build(); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(onlyBytes_00, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_leftFirst_rightFinish10_shouldHaveWriteOffset10() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec(appendRequestNew.getWriteObjectSpec()) + .setWriteOffset(10) + .setFinishWrite(true) + .build(); + BidiWriteObjectRequest finish_10 = onlyFinishWrite.toBuilder().setWriteOffset(10).build(); + BidiWriteObjectRequest concatenated = + BidiUploadState.concatenate(appendRequestNew, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_leftFirstWith00_rightFinish10_shouldHaveWriteOffset0() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec(appendRequestNew.getWriteObjectSpec()) + .setChecksummedData(onlyBytes_00.getChecksummedData()) + .setWriteOffset(0) + .setFinishWrite(true) + .build(); + 
BidiWriteObjectRequest first_00 = + appendRequestNew.toBuilder().mergeFrom(onlyBytes_00).build(); + BidiWriteObjectRequest finish_10 = onlyFinishWrite.toBuilder().setWriteOffset(10).build(); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(first_00, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_left00_rightFinish10_shouldHaveWriteOffset0() { + BidiWriteObjectRequest expected = + BidiWriteObjectRequest.newBuilder() + .setChecksummedData(onlyBytes_00.getChecksummedData()) + .setWriteOffset(0) + .setFinishWrite(true) + .build(); + BidiWriteObjectRequest first_00 = onlyBytes_00; + BidiWriteObjectRequest finish_10 = finishAt(10); + BidiWriteObjectRequest concatenated = BidiUploadState.concatenate(first_00, finish_10); + assertThat(concatenated).isEqualTo(expected); + } + + @Test + public void concatenate_left10_rightFinish21_shouldThrowIllegalArgumentException() { + IllegalArgumentException iae = + assertThrows( + IllegalArgumentException.class, + () -> BidiUploadState.concatenate(onlyBytes_10, finishAt(21))); + } + } + + public static final class AppendableUploadStateTest { + private static @NonNull AppendableUploadState getAppendable() { + AppendableUploadState uploadState = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + uploadState.enqueueFirstMessageAndGetGrpcCallContext(); + BidiUploadTestUtils.sinkToList(uploadState); + assertThat(uploadState.onResponse(resourceWithSize(0))).isNull(); + ChecksummedTestContent.gen(10).chunkup(1).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(c -> assertThat(uploadState.offer(c)).isTrue()); + return uploadState; + } + + private static void runExpectNoException( + AppendableUploadState state, @NonNull BidiWriteObjectResponse response) { + StorageException se = state.onResponse(response); + 
assertThat(se).isNull(); + } + + private static void runExpectException( + AppendableUploadState state, + BidiWriteObjectResponse response, + UploadFailureScenario scenario) { + StorageException se = state.onResponse(response); + assertThat(se).isNotNull(); + assertThat(se).hasMessageThat().contains(scenario.getMessage()); + } + + @Test + public void onResponse_responseWithoutPersistedSizeAndWithoutResource_IllegalStateException() { + AppendableUploadState state = getAppendable(); + + IllegalStateException ise = + assertThrows( + IllegalStateException.class, + () -> { + BidiWriteObjectResponse response = BidiWriteObjectResponse.getDefaultInstance(); + StorageException se = state.onResponse(response); + if (se != null) { + throw se; + } + }); + + assertThat(ise).hasMessageThat().contains("persistedSize > -1"); + } + + @Test + public void onResponse_writeHandleUpdated() { + BidiWriteHandle handle = + BidiWriteHandle.newBuilder().setHandle(ByteString.copyFromUtf8("new-handle")).build(); + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException( + state, BidiUploadTestUtils.incremental(10).toBuilder().setWriteHandle(handle).build()); + assertThat(state.writeHandle).isEqualTo(handle); + } + + @Test + public void onResponse_notFinalizing_incremental_ackEq() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, BidiUploadTestUtils.incremental(10)); + assertThat(state.confirmedBytes).isEqualTo(10); + } + + @Test + public void onResponse_notFinalizing_incremental_ackLt() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, BidiUploadTestUtils.incremental(9)); + assertThat(state.confirmedBytes).isEqualTo(9); + } + + @Test + public void onResponse_notFinalizing_incremental_ackGt() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + 
runExpectException( + state, BidiUploadTestUtils.incremental(11), UploadFailureScenario.SCENARIO_7); + } + + @Test + public void onResponse_notFinalizing_notIncremental_ackEq() { + AppendableUploadState state = getAppendable(); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, resourceWithSize(10)); + } + + @Test + public void onResponse_finalizing_notIncremental_ackLt_inRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + // runExpectException(state, resourceWithSize(9), UploadFailureScenario.SCENARIO_9); + runExpectNoException(state, resourceWithSize(9)); + assertThat(state.confirmedBytes).isEqualTo(9); + } + + @Test + public void onResponse_finalizing_notIncremental_ackLt_outOfRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(3)); + runExpectException(state, resourceWithSize(2), UploadFailureScenario.SCENARIO_4_1); + } + + @Test + public void onResponse_finalizing_notIncremental_ackGt() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + runExpectException(state, resourceWithSize(11), UploadFailureScenario.SCENARIO_4_2); + } + + @Test + public void onResponse_finalizing_incremental_ackLt_inRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + 
BidiUploadTestUtils.sinkToList(state); + // runExpectException(state, incremental(9), UploadFailureScenario.SCENARIO_9); + runExpectNoException(state, BidiUploadTestUtils.incremental(9)); + assertThat(state.confirmedBytes).isEqualTo(9); + } + + @Test + public void onResponse_finalizing_incremental_ackLt_outOfRangeOfBufferedBytes() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + state.updateStateFromResponse(BidiUploadTestUtils.incremental(9)); + runExpectException( + state, BidiUploadTestUtils.incremental(8), UploadFailureScenario.SCENARIO_3); + } + + @Test + public void onResponse_finalizing_incremental_ackEq() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, BidiUploadTestUtils.incremental(10)); + assertThat(state.confirmedBytes).isEqualTo(10); + } + + @Test + public void onResponse_finalizing_incremental_ackGt() { + // BidiUploadState state = new TestState(Flag.FINALIZING); + AppendableUploadState state = getAppendable(); + // updateBasedOnResponseRunExpectFailure(state, UploadFailureScenario.SCENARIO_2, + // incremental(11)); + } + + @Test + public void onResponse_finalizing_notIncremental_ackEq() { + AppendableUploadState state = getAppendable(); + state.offer( + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(state.totalSentBytes) + .setFinishWrite(true) + .build()); + BidiUploadTestUtils.sinkToList(state); + runExpectNoException(state, resourceWithSize(10)); + assertThat(state.confirmedBytes).isEqualTo(10); + } + + @Test + public void complexSequence_1() throws Exception { + AppendableUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 
50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + assertThat(actualCtx).isNotNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(0), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(Crc32cValue.zero())); + + ChecksummedTestContent b_10 = ChecksummedTestContent.gen(10); + Crc32cLengthKnown cumulative0 = Crc32cValue.of(b_10.getCrc32c(), b_10.length()); + b_10.chunkup(1).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(c -> assertThat(state.offer(c)).isTrue()); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.lastSentRequestIndex).isIn(range(-1, state.queue.size())), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative0)); + + BidiUploadTestUtils.sinkToList(state); + assertThat(state.onResponse(resourceWithSize(1))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.lastSentRequestIndex).isIn(range(-1, state.queue.size())), + () -> assertThat(state.confirmedBytes).isEqualTo(1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative0)); + + BidiUploadTestUtils.sinkToList(state); + assertThat(state.onResponse(BidiUploadTestUtils.incremental(10))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(10), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> 
assertThat(state.cumulativeCrc32c).isEqualTo(cumulative0)); + + ChunkSegment segment1 = BidiUploadTestUtils.createSegment(1); + Crc32cLengthKnown cumulative1 = cumulative0.concat(segment1.getCrc32c()); + assertThat(state.offer(segment1)).isTrue(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(11), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(10), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative1)); + BidiUploadTestUtils.sinkToList(state); + assertThat(state.lastSentRequestIndex).isEqualTo(0); + + ChunkSegment segment2 = BidiUploadTestUtils.createSegment(2); + Crc32cLengthKnown cumulative2 = cumulative1.concat(segment2.getCrc32c()); + assertThat(state.offer(segment2)).isTrue(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(13), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(0), + () -> assertThat(state.confirmedBytes).isEqualTo(10), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative2)); + BidiUploadTestUtils.sinkToList(state); + assertThat(state.lastSentRequestIndex).isEqualTo(1); + + BidiUploadTestUtils.sinkToList(state); + assertThat(state.onResponse(BidiUploadTestUtils.incremental(11))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(13), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(0), + () -> assertThat(state.confirmedBytes).isEqualTo(11), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(cumulative2)); + } + + @Test + public void complexScenario_2_retries() throws Exception { + ChecksummedTestContent b_10 = ChecksummedTestContent.of("ABCDEFGHIJ"); + Crc32cLengthKnown cumulative0 = Crc32cValue.of(b_10.getCrc32c(), b_10.length()); + ChecksummedTestContent abc = b_10.slice(0, 3); + ChecksummedTestContent def = b_10.slice(3, 3); + 
ChecksummedTestContent ghi = b_10.slice(6, 3); + ChecksummedTestContent j = b_10.slice(9, 1); + + AppendableUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + assertThat(actualCtx).isNotNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(0), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(Crc32cValue.zero())); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(abc.asChecksummedData()))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(1); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(def.asChecksummedData()))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(2); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(ghi.asChecksummedData()))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(3); + + assertThat(state.offer(BidiUploadTestUtils.createSegment(j.asChecksummedData()))).isTrue(); + assertThat(state.offer(flushOffset(10))).isTrue(); + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(1); + assertThat(state.lastSentRequestIndex).isEqualTo(5); + + // send incremental response, ack'ing 3 bytes + assertThat(state.onResponse(resourceFor(abc))).isNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(10), + () -> assertThat(state.confirmedBytes).isEqualTo(3), + () -> assertThat(state.isFinalizing()).isFalse()); + + // error returned, transition to pending retry + state.pendingRetry(); + // error is retryable, 
and backoff has elapsed, transition to retrying + state.retrying(); + // resolve the opening request and call context + // todo: better method name + state.enqueueFirstMessageAndGetGrpcCallContext(); + + BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setStateLookup(true) + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(appendRequestNew.getWriteObjectSpec().getResource().getBucket()) + .setObject(appendRequestNew.getWriteObjectSpec().getResource().getName()) + .setGeneration(1) + .build()) + .build(); + assertThat(state.lastSentRequestIndex).isEqualTo(-1); + ApiFuture reconciliation = state.beginReconciliation(); + assertThat(BidiUploadTestUtils.sinkToList(state)).isEqualTo(ImmutableList.of(reconnect)); + + assertThat(state.onResponse(BidiUploadTestUtils.incremental(6))).isNull(); + reconciliation.get(137, TimeUnit.MILLISECONDS); + + assertThat(BidiUploadTestUtils.sinkToList(state)).hasSize(2); + } + + @Test + public void resultFutureNotResolvedForResourceWithoutFinalizeTime() throws Exception { + AppendableUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 50_000, + SettableApiFuture.create(), + Crc32cValue.zero()); + GrpcCallContext actualCtx = state.enqueueFirstMessageAndGetGrpcCallContext(); + assertThat(actualCtx).isNotNull(); + assertAll( + () -> assertThat(state.totalSentBytes).isEqualTo(0), + () -> assertThat(state.lastSentRequestIndex).isEqualTo(-1), + () -> assertThat(state.confirmedBytes).isEqualTo(-1), + () -> assertThat(state.isFinalizing()).isFalse(), + () -> assertThat(state.cumulativeCrc32c).isEqualTo(Crc32cValue.zero())); + + // simulate a scenario where multiple messages are sent to gcs before we receive any + // resource response. Even if this resource is the expected size, we should not resolved the + // result future when it doesn't have finalize_time set. 
+ ChecksummedTestContent b_10 = ChecksummedTestContent.gen(10); + Crc32cLengthKnown cumulative0 = Crc32cValue.of(b_10.getCrc32c(), b_10.length()); + b_10.chunkup(1).stream() + .map(ChecksummedTestContent::asChecksummedData) + .map(BidiUploadTestUtils::createSegment) + .forEach(c -> assertThat(state.offer(c)).isTrue()); + assertThat(state.offer(finishAt(10))).isTrue(); + assertThat(state.onResponse(resourceFor(appendRequestNew, 10))).isNull(); + assertThat(state.getResultFuture().isDone()).isFalse(); + + BidiWriteObjectResponse response = + resourceFor( + appendRequestNew, + b -> + b.setSize(10) + .setFinalizeTime(timestampNow()) + .setChecksums( + ObjectChecksums.newBuilder().setCrc32C(b_10.getCrc32c()).build())); + assertThat(state.onResponse(response)).isNull(); + assertThat(state.getResultFuture().isDone()).isTrue(); + assertThat(state.getResultFuture().get()).isEqualTo(response); + } + + private Range range(int min, int maxExclusive) { + return Range.range(min, BoundType.CLOSED, maxExclusive, BoundType.OPEN); + } + } + + public static final class StreamingStreamTest { + + public static final int MAX_REDIRECTS_ALLOWED = 3; + @Rule public final TestName name = new TestName(); + + @Test + public void simple() throws InterruptedException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + BidiUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 2 * 1024 * 1024, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + long writeOffset = request.getWriteOffset(); + ByteString content = request.getChecksummedData().getContent(); + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setPersistedSize(writeOffset + content.size()) + .build()); + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + 
ChecksummedTestContent content = + ChecksummedTestContent.of( + DataGenerator.base64Characters().genBytes(4 * 1024 * 1024 + 17)); + List chunked = content.chunkup(2 * 1024 * 1024); + + for (ChecksummedTestContent checksummedTestContent : chunked) { + int attemptCounter = 0; + boolean accepted; + do { + attemptCounter++; + accepted = + stream.append( + BidiUploadTestUtils.createSegment(checksummedTestContent.asChecksummedData())); + if (!accepted) { + if (attemptCounter == 3) { + fail(); + } + Thread.sleep(300); + } + } while (!accepted); + } + } + + @Test + public void finishWrite_emptyObject() + throws InterruptedException, ExecutionException, TimeoutException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + AppendableUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 2 * 1024 * 1024, + SettableApiFuture.create(), + Crc32cValue.zero()); + + ObjectChecksums expectedObjectChecksums = + ObjectChecksums.newBuilder().setCrc32C(Crc32cValue.zero().getValue()).build(); + + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + if (request.equals(appendRequestNew)) { + respond.onResponse(BidiUploadTestUtils.incremental(0)); + } else { + assertThat(request.getFinishWrite()).isTrue(); + long writeOffset = request.getWriteOffset(); + assertThat(writeOffset).isEqualTo(0); + ObjectChecksums objectChecksums = request.getObjectChecksums(); + assertThat(objectChecksums).isEqualTo(expectedObjectChecksums); + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setResource( + appendRequestNew + .getWriteObjectSpec() + .getResource() + .toBuilder() + .setGeneration(1) + .setChecksums(objectChecksums) + .setFinalizeTime(timestampNow()) + .build()) + .build()); + respond.onComplete(); + } + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + stream.finishWrite(0); + 
BidiWriteObjectResponse response = stream.getResultFuture().get(3, TimeUnit.SECONDS); + + assertThat(response.hasResource()).isTrue(); + Object resource = response.getResource(); + assertThat(resource.getSize()).isEqualTo(0); + assertThat(resource.getChecksums()).isEqualTo(expectedObjectChecksums); + assertThat(resource.getGeneration()).isGreaterThan(0); + assertThat(state.peekLast()).isNull(); + } + + @Test + public void finishWrite_2MessageObject() + throws InterruptedException, ExecutionException, TimeoutException { + AppendableUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 2 * 1024 * 1024, + SettableApiFuture.create(), + Crc32cValue.zero()); + + ObjectChecksums expectedObjectChecksums = + ObjectChecksums.newBuilder() + .setCrc32C(content.slice(0, 20).asChecksummedData().getCrc32C()) + .build(); + + BidiWriteObjectRequest baseWith00 = + appendRequestNew.toBuilder().mergeFrom(onlyBytes_00).build(); + BidiWriteObjectRequest expectedFinish = + BidiWriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setWriteOffset(20) + .setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(content.slice(0, 20).getCrc32c()).build()) + .build(); + + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + if (request.equals(baseWith00)) { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } else if (request.equals(onlyBytes_10)) { + respond.onResponse(BidiUploadTestUtils.incremental(20)); + } else if (request.equals(expectedFinish)) { + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setResource( + appendRequestNew + .getWriteObjectSpec() + .getResource() + .toBuilder() + .setSize(20) + .setGeneration(1) + .setChecksums(expectedFinish.getObjectChecksums()) + .setFinalizeTime(timestampNow()) + .build()) + .build()); + // respond.onComplete(); + } else { + respond.onError( 
+ FakeStorage.unexpectedRequest( + request, + ImmutableList.of(baseWith00, onlyBytes_10, expectedFinish))); + } + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat( + stream.append( + BidiUploadTestUtils.createSegment(content.slice(0, 10).asChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + assertThat( + stream.append( + BidiUploadTestUtils.createSegment(content.slice(10, 10).asChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(stream.finishWrite(20)).isTrue(); + BidiWriteObjectResponse response = stream.getResultFuture().get(3, TimeUnit.SECONDS); + + assertThat(response.hasResource()).isTrue(); + Object resource = response.getResource(); + assertThat(resource.getSize()).isEqualTo(20); + assertThat(resource.getChecksums()).isEqualTo(expectedObjectChecksums); + assertThat(resource.getGeneration()).isGreaterThan(0); + assertThat(state.peekFirst()).isNull(); + assertThat(state.peekLast()).isNull(); + } + + @Test + public void appendDoesNotSendWhenStateDoesNotAcceptOffer() { + BidiUploadState state = + new BidiUploadState(name.getMethodName()) { + @Override + public boolean offer(@NonNull ChunkSegment data) { + return false; + } + }; + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + alwaysErrorBidiStreamingCallable(Status.UNIMPLEMENTED), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat(stream.append(BidiUploadTestUtils.createSegment(content.asChecksummedData()))) + .isFalse(); + } + + @Test + public void finishWriteDoesNotSendWhenStateDoesNotAcceptOffer() { + BidiUploadState state = + new BidiUploadState(name.getMethodName()) { + @Override + public boolean offer(@NonNull BidiWriteObjectRequest e) { + return false; + } + + @Override + Crc32cValue.@Nullable Crc32cLengthKnown getCumulativeCrc32c() { + return Crc32cValue.zero(); + } + + @Override + boolean 
isFinalizing() { + return false; + } + + @Override + long getTotalSentBytes() { + return 0; + } + }; + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + alwaysErrorBidiStreamingCallable(Status.UNIMPLEMENTED), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat(stream.finishWrite(0)).isFalse(); + } + + @Test + public void available() { + AtomicLong available = new AtomicLong(2 * 1024 * 1024); + BidiUploadState state = + new BidiUploadState(name.getMethodName()) { + @Override + public long availableCapacity() { + return available.get(); + } + }; + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + alwaysErrorBidiStreamingCallable(Status.UNIMPLEMENTED), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat(stream.availableCapacity()).isEqualTo(2 * 1024 * 1024); + available.set(MAX_REDIRECTS_ALLOWED); + assertThat(stream.availableCapacity()).isEqualTo(MAX_REDIRECTS_ALLOWED); + } + + @Test + public void redirect() throws ExecutionException, InterruptedException, TimeoutException { + SettableApiFuture resultFuture = SettableApiFuture.create(); + BaseUploadState state = + BidiUploadState.appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 20, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiWriteObjectRequest expectedRedirectRequest1 = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket("projects/_/buckets/b") + .setObject("o") + .setGeneration(1) + .setRoutingToken("token") + .setWriteHandle( + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("handle")) + .build()) + .build()) + .setStateLookup(true) + .build(); + BidiWriteObjectRequest baseWith00 = + appendRequestNew.toBuilder().mergeFrom(onlyBytes_00).build(); + BidiWriteObjectRequest finish_20 = + BidiWriteObjectRequest.newBuilder() 
+ .setWriteOffset(20) + .setFinishWrite(true) + .setObjectChecksums( + ObjectChecksums.newBuilder().setCrc32C(content.slice(0, 20).getCrc32c()).build()) + .build(); + BidiWriteObjectRequest finish_20with10 = + finish_20.toBuilder().mergeFrom(onlyBytes_10).build(); + AtomicInteger bytes10SeenCount = new AtomicInteger(0); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> + request -> { + if (request.equals(baseWith00)) { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } else if (request.equals(onlyBytes_10)) { + int i = bytes10SeenCount.getAndIncrement(); + if (i == 0) { + BidiWriteObjectRedirectedError redirect = + BidiWriteObjectRedirectedError.newBuilder() + .setWriteHandle( + BidiWriteHandle.newBuilder() + .setHandle(ByteString.copyFromUtf8("handle")) + .build()) + .setRoutingToken("token") + .setGeneration(1) + .build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(Code.ABORTED_VALUE) + .setMessage("redirect") + .addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException statusRuntimeException = + Status.ABORTED + .withDescription("redirect") + .asRuntimeException(trailers); + respond.onError( + ApiExceptionFactory.createException( + statusRuntimeException, + GrpcStatusCode.of(Status.Code.ABORTED), + true, + ErrorDetails.builder() + .setRawErrorMessages(grpcStatusDetails.getDetailsList()) + .build())); + } else { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } + } else if (request.equals(expectedRedirectRequest1)) { + respond.onResponse(BidiUploadTestUtils.incremental(10)); + } else if (request.equals(finish_20) || request.equals(finish_20with10)) { + respond.onResponse( + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + 
.setBucket("projects/_/buckets/b") + .setName("o") + .setGeneration(1) + .setSize(20) + .setFinalizeTime(timestampNow()) + .build()) + .build()); + respond.onComplete(); + } else { + respond.onError( + FakeStorage.unexpectedRequest( + request, + ImmutableList.of( + baseWith00, + onlyBytes_10, + expectedRedirectRequest1, + finish_20, + finish_20with10))); + } + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_00.getChecksummedData()))) + .isTrue(); + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_10.getChecksummedData()))) + .isTrue(); + assertThat(stream.finishWrite(20)).isTrue(); + BidiWriteObjectResponse response = stream.getResultFuture().get(1_500, TimeUnit.MILLISECONDS); + assertThat(response.hasResource()).isTrue(); + assertThat(response.getResource().getSize()).isEqualTo(20); + } + + @Test + public void canNotOpenStreamAfterFirstOpenButCanEnqueueForBackgroundRetry() { + SettableApiFuture resultFuture = SettableApiFuture.create(); + AtomicInteger streamOpenCounter = new AtomicInteger(); + BidiUploadState state = + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 20, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + RetryContext.directScheduledExecutorService(), + adaptOnlySend( + respond -> { + streamOpenCounter.getAndIncrement(); + return request -> {}; + }), + MAX_REDIRECTS_ALLOWED, + RetryContext.neverRetry()); + + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_00.getChecksummedData()))) + .isTrue(); + assertThat(state.getTotalSentBytes()).isEqualTo(10); + // TODO: remove when state reconciliation is better + state.updateStateFromResponse(BidiUploadTestUtils.incremental(0)); + stream.reset(); + assertThat( + stream.append(BidiUploadTestUtils.createSegment(onlyBytes_10.getChecksummedData()))) + .isTrue(); + 
assertThat(state.getTotalSentBytes()).isEqualTo(20); + assertThat(stream.finishWrite(20)).isTrue(); + assertThat(streamOpenCounter.get()).isEqualTo(1); + } + + @Test + public void reset_forwardsAnyUncaughtThrowableToRetryContext() { + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + new BidiUploadState(name.getMethodName()) { + @Override + void pendingRetry() { + //noinspection DataFlowIssue + checkState(false, "bad state"); + } + + @Override + long getTotalSentBytes() { + return 0; + } + + @Override + boolean offer(@NonNull BidiWriteObjectRequest e) { + return true; + } + + @Override + State getState() { + return State.INITIALIZING; + } + + @Override + @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + @Nullable BidiWriteObjectRequest peekLast() { + return null; + } + + @Override + void sendVia(Consumer consumer) {} + }, + RetryContext.directScheduledExecutorService(), + adaptOnlySend(respond -> request -> {}), + /* maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isInstanceOf(IllegalStateException.class); + recordErrorCalled.set(true); + } + }); + + stream.flush(); + stream.reset(); + + assertThat(recordErrorCalled.get()).isTrue(); + } + + @Test + public void restart_reconciliationErrorPropagation_failure() throws Exception { + SettableApiFuture beginReconciliation = SettableApiFuture.create(); + RuntimeException boomBoom = new RuntimeException("boom boom"); + + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + AtomicInteger sendViaCallCount = new AtomicInteger(0); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + new BidiUploadState(name.getMethodName()) { + 
@Override + void retrying() {} + + @Override + ApiFuture beginReconciliation() { + return beginReconciliation; + } + + @Override + State getState() { + return State.INITIALIZING; + } + + @Override + void pendingRetry() {} + + @Override + @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + void sendVia(Consumer consumer) { + sendViaCallCount.getAndIncrement(); + } + }, + RetryContext.directScheduledExecutorService(), + adaptOnlySend(respond -> request -> {}), + /* maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isSameInstanceAs(boomBoom); + recordErrorCalled.set(true); + } + }); + + stream.restart(); + beginReconciliation.setException(boomBoom); + + assertAll( + () -> assertThat(recordErrorCalled.get()).isTrue(), + () -> assertThat(sendViaCallCount.get()).isEqualTo(1)); + } + + @Test + public void restart_reconciliationErrorPropagation_success() throws Exception { + SettableApiFuture beginReconciliation = SettableApiFuture.create(); + + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + AtomicInteger sendViaCallCount = new AtomicInteger(0); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + new BidiUploadState(name.getMethodName()) { + @Override + void retrying() {} + + @Override + ApiFuture beginReconciliation() { + return beginReconciliation; + } + + @Override + State getState() { + return State.INITIALIZING; + } + + @Override + @NonNull GrpcCallContext enqueueFirstMessageAndGetGrpcCallContext() { + return GrpcCallContext.createDefault(); + } + + @Override + void sendVia(Consumer consumer) { + sendViaCallCount.getAndIncrement(); + } + }, + RetryContext.directScheduledExecutorService(), + adaptOnlySend(respond -> request -> {}), + /* 
maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + fail("unexpected recordError call"); + } + }); + + stream.restart(); + beginReconciliation.set(null); + + assertAll(() -> assertThat(sendViaCallCount.get()).isEqualTo(2)); + } + + /** + * imagine a reconciliation that happens across multiple retries or redirects. The stream would + * attempt to register its reconciliation callback. Make sure it's only actually registered + * once. + */ + @Test + public void longRunningReconciliationFailureOnlyReportsToRetryContextOnce() throws Exception { + SettableApiFuture resultFuture = SettableApiFuture.create(); + + BidiWriteObjectRequest flush3 = flushOffset(3); + List recordedErrors = Collections.synchronizedList(new ArrayList<>()); + AtomicInteger sendViaCallCount = new AtomicInteger(0); + AtomicInteger redirectCount = new AtomicInteger(0); + ScheduledExecutorService exec1 = Executors.newSingleThreadScheduledExecutor(); + ExecutorService exec2 = Executors.newCachedThreadPool(); + ScheduledExecutorService exec3 = Executors.newSingleThreadScheduledExecutor(); + RetryContext retryContext = + RetryContext.of(exec3, defaultRetryingDeps(), Retrying.neverRetry(), Jitterer.noJitter()); + CountDownLatch cdl = new CountDownLatch(2); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + appendableNew( + appendRequestNew, + GrpcCallContext::createDefault, + 15, + resultFuture, + Crc32cValue.zero()), + exec1, + adaptOnlySend( + respond -> + request -> + exec2.execute( + () -> + respond.onError( + packRedirectIntoAbortedException( + makeRedirect( + String.format( + "{redirect_%02d}", + redirectCount.incrementAndGet())))))), + /* maxRedirectsAllowed= */ 3, + new RetryContext() { + @Override + public boolean inBackoff() { + return retryContext.inBackoff(); + } + + @Override + 
public void reset() { + retryContext.reset(); + } + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + recordedErrors.add(t); + retryContext.recordError(t, onSuccess, onFailure); + cdl.countDown(); + } + }); + + try { + stream.flush(); + assertThat(cdl.await(3, TimeUnit.SECONDS)).isTrue(); + ExecutionException ee = + assertThrows( + ExecutionException.class, () -> stream.getResultFuture().get(3, TimeUnit.SECONDS)); + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(AbortedException.class); + + ImmutableList errorsForAssertion = ImmutableList.copyOf(recordedErrors); + + assertAll( + () -> assertThat(redirectCount.get()).isEqualTo(4), + () -> assertThat(errorsForAssertion).hasSize(2), + () -> + assertThat( + errorsForAssertion.stream() + .filter(t -> t instanceof AbortedException) + .count()) + .isEqualTo(1), + () -> + assertThat( + errorsForAssertion.stream() + .filter(t -> t instanceof CancellationException) + .count()) + .isEqualTo(1)); + } finally { + exec3.shutdownNow(); + exec2.shutdownNow(); + exec1.shutdownNow(); + } + } + + private static BidiStreamingCallable + alwaysErrorBidiStreamingCallable(Status status) { + return adaptOnlySend(respond -> request -> respond.onError(status.asRuntimeException())); + } + + private static BidiStreamingCallable adaptOnlySend( + Function, OnlySendClientStream> func) { + return adapt(func::apply); + } + + private static BidiStreamingCallable adapt( + Function, ClientStream> func) { + return adapt( + (respond, onReady, context) -> { + ClientStream clientStream = func.apply(respond); + StreamController controller = TestUtils.nullStreamController(); + respond.onStart(controller); + return clientStream; + }); + } + + /** + * BidiStreamingCallable isn't functional even though it's a single abstract method. + * + *

Define a method that can adapt a TriFunc as the required implementation of {@link + * BidiStreamingCallable#internalCall(ResponseObserver, ClientStreamReadyObserver, + * ApiCallContext)}. + * + *

Saves several lines of boilerplate in each test. + */ + private static BidiStreamingCallable adapt( + StreamingStreamTest.TriFunc< + ResponseObserver, + ClientStreamReadyObserver, + ApiCallContext, + ClientStream> + func) { + return new BidiStreamingCallable() { + @Override + public ClientStream internalCall( + ResponseObserver respond, + ClientStreamReadyObserver onReady, + ApiCallContext context) { + return func.apply(respond, onReady, context); + } + }; + } + + @FunctionalInterface + interface TriFunc { + R apply(A a, B b, C c); + } + } + + public static final class BidiUploadStreamingStreamResponseObserverTest { + @Rule public final TestName name = new TestName(); + + @Test + public void onError() { + RetryContext retryContext = RetryContext.neverRetry(); + AtomicReference failure = new AtomicReference<>(); + @NonNull BidiUploadState state = + new BidiUploadStreamingStreamResponseObserverTest.TestState( + BidiUploadStreamingStreamResponseObserverTest.Flag.NOT_FINALIZING); + StreamingResponseObserver obs = + new StreamingResponseObserver( + state, retryContext, RetryContextTest.failOnSuccess(), failure::set); + obs.onStart(TestUtils.nullStreamController()); + + RuntimeException t = new RuntimeException("Kablamo~~~"); + obs.onError(t); + + assertThat(failure.get()).isSameInstanceAs(t); + } + + enum Flag { + FINALIZING, + NOT_FINALIZING + } + + private class TestState extends BidiUploadState { + private final BidiUploadStreamingStreamResponseObserverTest.Flag flag; + + private TestState(BidiUploadStreamingStreamResponseObserverTest.Flag flag) { + super(name.getMethodName()); + this.flag = flag; + } + + @Override + public boolean isFinalizing() { + return flag == BidiUploadStreamingStreamResponseObserverTest.Flag.FINALIZING; + } + + @Override + @Nullable BidiWriteObjectRequest peekLast() { + return BidiWriteObjectRequest.newBuilder() + .setChecksummedData( + ChecksummedTestContent.gen(Math.toIntExact(getTotalSentBytes())) + .asChecksummedData()) + .build(); + } + + 
@Override + void updateStateFromResponse(BidiWriteObjectResponse response) { + fail("unexpected call to setConfirmedBytesOffset(" + response + ")"); + } + + @Override + long getTotalSentBytes() { + return 10; + } + } + } + + public static final class RedirectHandlingResponseObserverTest { + @Rule public final TestName name = new TestName(); + + @Test + public void tombstoned_noop() throws Exception { + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) {}, + new TestResponseObserver(), + new AtomicInteger(0), + 3, + () -> fail("beforeRedirect()"), + () -> fail("onRedirect")); + obs.flagTombstoned(); + assertAll( + () -> obs.onStart(TestUtils.nullStreamController()), + () -> obs.onResponse(BidiUploadTestUtils.incremental(10)), + obs::onComplete, + () -> obs.onError(new RuntimeException("should not cause error"))); + } + + @Test + public void onError_shouldNotDelegateWhenARedirectErrorIsSpecified() { + BidiWriteObjectRedirectedError redirect = BidiUploadTestUtils.makeRedirect("routing-token"); + + AbortedException abortedException = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect); + + AtomicBoolean beforeRedirectCalled = new AtomicBoolean(false); + AtomicBoolean onRedirectCalled = new AtomicBoolean(false); + AtomicBoolean updateFromRedirectCalled = new AtomicBoolean(false); + + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError r) { + assertThat(beforeRedirectCalled.get()).isTrue(); + assertThat(r).isEqualTo(redirect); + updateFromRedirectCalled.set(true); + } + }, + new TestResponseObserver(), + new AtomicInteger(0), + 3, + () -> beforeRedirectCalled.set(true), + () -> { + assertThat(beforeRedirectCalled.get()).isTrue(); + onRedirectCalled.set(true); + }); + + obs.onError(abortedException); + + 
assertThat(updateFromRedirectCalled.get()).isTrue(); + assertThat(onRedirectCalled.get()).isTrue(); + } + + @Test + public void onError_shouldDelegateWhenNoRedirectErrorIsSpecified() throws Exception { + + AbortedException abortedException = BidiUploadTestUtils.newAbortedException("{aborted}"); + + AtomicBoolean delegateOnErrorCalled = new AtomicBoolean(false); + + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) {}, + new TestResponseObserver() { + @Override + public void onError(Throwable t) { + assertThat(t).isEqualTo(abortedException); + delegateOnErrorCalled.set(true); + } + }, + new AtomicInteger(0), + 3, + () -> fail("beforeRedirect()"), + () -> fail("onRedirect")); + + obs.onError(abortedException); + + assertThat(delegateOnErrorCalled.get()).isTrue(); + } + + @Test + public void onError_shouldDelegateWhenMaxRedirectsExceeded() throws Exception { + + BidiWriteObjectRedirectedError redirect1 = BidiUploadTestUtils.makeRedirect("{token 1}"); + BidiWriteObjectRedirectedError redirect2 = BidiUploadTestUtils.makeRedirect("{token 2}"); + BidiWriteObjectRedirectedError redirect3 = BidiUploadTestUtils.makeRedirect("{token 3}"); + BidiWriteObjectRedirectedError redirect4 = BidiUploadTestUtils.makeRedirect("{token 4}"); + AbortedException abortedException1 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect1); + AbortedException abortedException2 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect2); + AbortedException abortedException3 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect3); + AbortedException abortedException4 = + BidiUploadTestUtils.packRedirectIntoAbortedException(redirect4); + + AtomicInteger beforeRedirectCalled = new AtomicInteger(0); + AtomicInteger onRedirectCalled = new AtomicInteger(0); + AtomicInteger onErrorCalled = new AtomicInteger(0); + + int maxRedirectsAllowed = 3; + // the closure passed to the constructor of obs needs 
to do things with the obs instance + // but obs hasn't finished initializing yet. make an indirect reference to it which can be + // accessed in the closure. + AtomicReference lifecycleIsDifficult = + new AtomicReference<>(); + List redirects = new ArrayList<>(); + RedirectHandlingResponseObserver obs = + new RedirectHandlingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + void updateFromRedirect(@NonNull BidiWriteObjectRedirectedError redirect) { + redirects.add(redirect); + } + }, + new TestResponseObserver() { + @Override + public void onError(Throwable t) { + assertThat(t).isEqualTo(abortedException4); + assertThat(t.getSuppressed()).hasLength(1); + assertThat(t.getSuppressed()[0]) + .isInstanceOf(MaxRedirectsExceededException.class); + onErrorCalled.getAndIncrement(); + } + }, + new AtomicInteger(0), + maxRedirectsAllowed, + beforeRedirectCalled::getAndIncrement, + () -> { + int i = onRedirectCalled.getAndIncrement(); + switch (i) { + case 0: + lifecycleIsDifficult.get().onError(abortedException2); + break; + case 1: + lifecycleIsDifficult.get().onError(abortedException3); + break; + case 2: + lifecycleIsDifficult.get().onError(abortedException4); + break; + default: + fail("invocation: " + i); + break; + } + }); + lifecycleIsDifficult.set(obs); + + obs.onError(abortedException1); + + assertAll( + () -> assertThat(beforeRedirectCalled.get()).isEqualTo(maxRedirectsAllowed), + () -> assertThat(onRedirectCalled.get()).isEqualTo(maxRedirectsAllowed), + () -> assertThat(onErrorCalled.get()).isEqualTo(1), + () -> assertThat(redirects).isEqualTo(ImmutableList.of(redirect1, redirect2, redirect3))); + } + + private static class TestResponseObserver implements ResponseObserver { + + @Override + public void onStart(StreamController controller) { + fail("onStart(" + controller + ")"); + } + + @Override + public void onResponse(BidiWriteObjectResponse response) { + fail("onResponse(" + fmtProto(response) + ")"); + } + + @Override + public void 
onError(Throwable t) { + fail("onError(" + t.getMessage() + ")"); + } + + @Override + public void onComplete() { + fail("onComplete()"); + } + } + } + + public static final class StreamRetryContextDecoratorTest { + @Test + public void onRecordError_calledBeforeRecordError() { + AtomicBoolean onRecordErrorCalled = new AtomicBoolean(false); + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + RetryContext ctx = + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(onRecordErrorCalled.get()).isTrue(); + recordErrorCalled.set(true); + } + }; + StreamRetryContextDecorator dec = + new StreamRetryContextDecorator( + ctx, new ReentrantLock(), () -> onRecordErrorCalled.set(true)); + + dec.recordError( + new RuntimeException("blamo"), + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); + assertThat(recordErrorCalled.get()).isTrue(); + } + } + + public static final class StreamingResponseObserverTest { + @Rule public final TestName name = new TestName(); + + @Test + public void onResponse_stateErrorForwardedToRetryContext() { + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + StreamingResponseObserver obs = + new StreamingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + StorageException onResponse(BidiWriteObjectResponse response) { + return new StorageException(0, "test-error", null); + } + }, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isInstanceOf(StorageException.class); + assertThat(((StorageException) t).getCode()).isEqualTo(0); + recordErrorCalled.set(true); + } + }, + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); 
+ + obs.onStart(TestUtils.nullStreamController()); + obs.onResponse(resourceWithSize(0)); + + assertThat(recordErrorCalled.get()).isTrue(); + } + + @Test + public void onResponse_exceptionFromStateOnResponseForwardedToRetryContext() { + AtomicBoolean recordErrorCalled = new AtomicBoolean(false); + StreamingResponseObserver obs = + new StreamingResponseObserver( + new BidiUploadState(name.getMethodName()) { + @Override + StorageException onResponse(BidiWriteObjectResponse response) { + //noinspection DataFlowIssue + checkState(false, "kblamo"); + return null; + } + }, + new RetryContext() { + @Override + public boolean inBackoff() { + return false; + } + + @Override + public void reset() {} + + @Override + public void recordError( + T t, OnSuccess onSuccess, OnFailure onFailure) { + assertThat(t).isInstanceOf(IllegalStateException.class); + assertThat(t).hasMessageThat().contains("kblamo"); + recordErrorCalled.set(true); + } + }, + RetryContextTest.failOnSuccess(), + RetryContextTest.failOnFailure()); + + obs.onStart(TestUtils.nullStreamController()); + obs.onResponse(resourceWithSize(0)); + + assertThat(recordErrorCalled.get()).isTrue(); + } + } + + static BidiWriteObjectRequest flushOffset(long offset) { + return onlyFlush.toBuilder().setWriteOffset(offset).setStateLookup(true).build(); + } + + static @NonNull BidiWriteObjectResponse resourceWithSize(int size) { + return resourceFor(appendRequestNew, size); + } + + static @NonNull BidiWriteObjectResponse resourceFor(ChecksummedTestContent ctc) { + return resourceFor( + appendRequestNew, + b -> + b.setSize(ctc.length()) + .setChecksums(ObjectChecksums.newBuilder().setCrc32C(ctc.getCrc32c()).build())); + } + + static @NonNull BidiWriteObjectResponse resourceFor(BidiWriteObjectRequest req, long size) { + return resourceFor(req, b -> b.setSize(size)); + } + + static @NonNull BidiWriteObjectResponse resourceFor( + BidiWriteObjectRequest req, UnaryOperator f) { + Object.Builder b = Object.newBuilder(); + if 
(req.hasWriteObjectSpec()) { + WriteObjectSpec spec = req.getWriteObjectSpec(); + b.setBucket(spec.getResource().getBucket()) + .setName(spec.getResource().getName()) + .setGeneration(1); + } else if (req.hasAppendObjectSpec()) { + AppendObjectSpec spec = req.getAppendObjectSpec(); + b.setBucket(spec.getBucket()).setName(spec.getObject()).setGeneration(spec.getGeneration()); + } else { + fail("Unhandled request shape: " + fmtProto(req)); + } + return BidiWriteObjectResponse.newBuilder().setResource(f.apply(b)).build(); + } + + @FunctionalInterface + private interface OnlySendClientStream extends ClientStream { + @Override + default void closeSendWithError(Throwable t) {} + + @Override + default void closeSend() {} + + @Override + default boolean isSendReady() { + return true; + } + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java new file mode 100644 index 0000000000..827f1320d3 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTestUtils.java @@ -0,0 +1,125 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.common.truth.Truth.assertThat; + +import com.google.api.gax.grpc.GrpcStatusCode; +import com.google.api.gax.rpc.AbortedException; +import com.google.api.gax.rpc.ErrorDetails; +import com.google.cloud.storage.ChunkSegmenter.ChunkSegment; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.collect.ImmutableList; +import com.google.protobuf.Any; +import com.google.protobuf.ByteString; +import com.google.protobuf.Timestamp; +import com.google.rpc.Code; +import com.google.storage.v2.BidiWriteObjectRedirectedError; +import com.google.storage.v2.BidiWriteObjectRequest; +import com.google.storage.v2.BidiWriteObjectResponse; +import com.google.storage.v2.ChecksummedData; +import io.grpc.Metadata; +import io.grpc.Status; +import io.grpc.StatusRuntimeException; +import java.nio.ByteBuffer; +import java.time.OffsetDateTime; +import java.util.List; +import org.checkerframework.checker.nullness.qual.NonNull; + +final class BidiUploadTestUtils { + + private BidiUploadTestUtils() {} + + static @NonNull BidiWriteObjectRedirectedError makeRedirect(String routingToken) { + return BidiWriteObjectRedirectedError.newBuilder() + .setRoutingToken(routingToken) + .setGeneration(1) + .build(); + } + + static @NonNull AbortedException newAbortedException(String message) { + return new AbortedException(message, null, GrpcStatusCode.of(Status.Code.ABORTED), false); + } + + static @NonNull AbortedException packRedirectIntoAbortedException( + BidiWriteObjectRedirectedError redirect) { + String description = fmtProto(redirect); + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(Code.ABORTED_VALUE) + .setMessage(description) + .addDetails(Any.pack(redirect)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(TestUtils.GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + 
StatusRuntimeException statusRuntimeException = + Status.ABORTED.withDescription(description).asRuntimeException(trailers); + ErrorDetails errorDetails = + ErrorDetails.builder().setRawErrorMessages(grpcStatusDetails.getDetailsList()).build(); + return new AbortedException( + statusRuntimeException, GrpcStatusCode.of(Status.Code.ABORTED), true, errorDetails); + } + + static @NonNull BidiWriteObjectResponse incremental(long persistedSize) { + return BidiWriteObjectResponse.newBuilder().setPersistedSize(persistedSize).build(); + } + + static ChunkSegment createSegment(int length) { + return createSegment(ChecksummedTestContent.gen(length).asChecksummedData()); + } + + static ChunkSegment createSegment(ChecksummedData cd) { + ByteString content = cd.getContent(); + ChunkSegmenter segmenter = + new ChunkSegmenter( + Hasher.enabled(), ByteStringStrategy.copy(), content.size(), content.size()); + ChunkSegment[] segments = + segmenter.segmentBuffers(new ByteBuffer[] {content.asReadOnlyByteBuffer()}); + assertThat(segments).hasLength(1); + return segments[0]; + } + + static List sinkToList(BidiUploadState state) { + ImmutableList.Builder b = ImmutableList.builder(); + state.sendVia(b::add); + return b.build(); + } + + static @NonNull BidiWriteObjectRequest finishAt(int totalOffset) { + return BidiWriteObjectRequest.newBuilder() + .setFinishWrite(true) + .setWriteOffset(totalOffset) + .build(); + } + + static BidiWriteObjectRequest withRedirectToken( + BidiWriteObjectRequest redirectReconcile, String routingToken) { + BidiWriteObjectRequest.Builder b = redirectReconcile.toBuilder(); + b.getAppendObjectSpecBuilder().setRoutingToken(routingToken); + return b.build(); + } + + static BidiWriteObjectRequest withFlushAndStateLookup(BidiWriteObjectRequest orig) { + return orig.toBuilder().setFlush(true).setStateLookup(true).build(); + } + + static Timestamp timestampNow() { + return Conversions.grpc().timestampCodec.encode(OffsetDateTime.now()); + } +} diff --git 
a/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java index 3721dbc48f..f0d760cb85 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/BuffersTest.java @@ -16,15 +16,10 @@ package com.google.cloud.storage; -import static com.google.cloud.storage.TestUtils.assertAll; -import static com.google.cloud.storage.TestUtils.xxd; import static com.google.common.truth.Truth.assertThat; -import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.channels.ReadableByteChannel; import java.security.SecureRandom; -import java.util.concurrent.atomic.AtomicInteger; import org.junit.Test; public final class BuffersTest { @@ -77,87 +72,4 @@ public void allocateAligned_evenlyDivisible_capacityGtAlignment() { ByteBuffer b1 = Buffers.allocateAligned(8, 4); assertThat(b1.capacity()).isEqualTo(8); } - - @Test - public void fillFrom_handles_0SizeRead_someBytesRead() throws Exception { - byte[] bytes = new byte[14]; - ByteBuffer buf = ByteBuffer.wrap(bytes); - - byte[] expected = - new byte[] { - (byte) 'A', - (byte) 'B', - (byte) 'C', - (byte) 'A', - (byte) 'B', - (byte) 'A', - (byte) 'A', - (byte) 'A', - (byte) 'B', - (byte) 'A', - (byte) 'B', - (byte) 'C', - (byte) 0, - (byte) 0 - }; - - int[] acceptSequence = new int[] {3, 2, 1, 0, 0, 1, 2, 3}; - AtomicInteger readCount = new AtomicInteger(0); - - ReadableByteChannel c = - new ReadableByteChannel() { - @Override - public int read(ByteBuffer dst) throws IOException { - int i = readCount.getAndIncrement(); - if (i == acceptSequence.length) { - return -1; - } - int bytesToRead = acceptSequence[i]; - if (bytesToRead > 0) { - long copy = - Buffers.copy(DataGenerator.base64Characters().genByteBuffer(bytesToRead), dst); - assertThat(copy).isEqualTo(bytesToRead); - } - - return bytesToRead; - } - - @Override - public boolean 
isOpen() { - return true; - } - - @Override - public void close() throws IOException {} - }; - int filled = Buffers.fillFrom(buf, c); - - assertAll( - () -> assertThat(filled).isEqualTo(12), - () -> assertThat(xxd(bytes)).isEqualTo(xxd(expected))); - } - - @Test - public void fillFrom_handles_0SizeRead_noBytesRead() throws Exception { - ByteBuffer buf = ByteBuffer.allocate(3); - - ReadableByteChannel c = - new ReadableByteChannel() { - @Override - public int read(ByteBuffer dst) throws IOException { - return -1; - } - - @Override - public boolean isOpen() { - return true; - } - - @Override - public void close() throws IOException {} - }; - int filled = Buffers.fillFrom(buf, c); - - assertThat(filled).isEqualTo(-1); - } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java index f8f879ae04..fa88ec76ec 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java @@ -24,7 +24,9 @@ import static org.junit.Assert.fail; import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; +import com.google.cloud.storage.MinFlushBufferedWritableByteChannelTest.OnlyConsumeNBytes; import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.it.ChecksummedTestContent; import com.google.common.collect.ImmutableList; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -401,6 +403,200 @@ public void close() throws IOException { assertThat(closed.get()).isTrue(); } + @Example + void nonBlockingWrite0DoesNotBlock() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + DefaultBufferedWritableByteChannel c = + new 
DefaultBufferedWritableByteChannel(handle, new OnlyConsumeNBytes(0, 1), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_8_3 = ByteBuffer.wrap(all.slice(0, 3).getBytes()); + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + + int written3 = c.write(s_8_3); + assertThat(written3).isEqualTo(0); + assertThat(s_8_3.remaining()).isEqualTo(3); + + assertThat(handle.remaining()).isEqualTo(1); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withoutBuffering() throws IOException { + BufferHandle handle = BufferHandle.allocate(4); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(4, 4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + + // write all 4 bytes + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(4); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + + // Attempt to write 4 bytes, but 0 will be consumed, break out without consuming any + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(4); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withoutBuffering_oversized() throws IOException { + BufferHandle handle = BufferHandle.allocate(2); + OnlyConsumeNBytes channel = 
new OnlyConsumeNBytes(4, 2); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + + // write all 4 bytes + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + + // Attempt to write 4 bytes, but 0 will be consumed, break out without consuming any + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(4); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withBuffering() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(5, 5); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + ByteBuffer s_8_12 = ByteBuffer.wrap(all.slice(8, 4).getBytes()); + + // write all 4 bytes + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(1); + assertThat(channel.getBytesConsumed()).isEqualTo(0); + + // + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(4); + assertThat(s_4_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(5); + + int written3 = c.write(s_8_12); + 
assertThat(written3).isEqualTo(0); + assertThat(s_8_12.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(5); + } + + @Example + void nonBlockingWritePartialDoesNotBlock_withBuffering_oversized() throws IOException { + BufferHandle handle = BufferHandle.allocate(3); + OnlyConsumeNBytes channel = new OnlyConsumeNBytes(6, 3); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, channel, false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(13); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(4, 4).getBytes()); + ByteBuffer s_8_12 = ByteBuffer.wrap(all.slice(8, 4).getBytes()); + + // slice 3 bytes and consume them, then enqueue the remaining 1 byte + int written1_1 = c.write(s_0_4); + assertThat(written1_1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(2); + assertThat(channel.getBytesConsumed()).isEqualTo(3); + + // write 1 buffered byte and 2 sliced bytes, enqueue 2 remaining + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(4); + assertThat(s_4_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(1); + assertThat(channel.getBytesConsumed()).isEqualTo(6); + + // attempt to write 4 bytes, non will be consumed and the buffer should remain the same + int written3 = c.write(s_8_12); + assertThat(written3).isEqualTo(0); + assertThat(s_8_12.remaining()).isEqualTo(4); + assertThat(handle.remaining()).isEqualTo(1); + assertThat(channel.getBytesConsumed()).isEqualTo(6); + } + + @Example + void illegalStateExceptionIfWrittenLt0_slice_eqBuffer() { + BufferHandle handle = BufferHandle.allocate(4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new NegativeOneWritableByteChannel(), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + 
IllegalStateException ise = + assertThrows(IllegalStateException.class, () -> c.write(all.slice(0, 4).asByteBuffer())); + ise.printStackTrace(System.out); + } + + @Example + void illegalStateExceptionIfWrittenLt0_slice_gtBuffer() { + BufferHandle handle = BufferHandle.allocate(4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new NegativeOneWritableByteChannel(), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + IllegalStateException ise = + assertThrows(IllegalStateException.class, () -> c.write(all.slice(0, 5).asByteBuffer())); + ise.printStackTrace(System.out); + } + + @Example + void illegalStateExceptionIfWrittenLt0_slice_ltBuffer() { + BufferHandle handle = BufferHandle.allocate(4); + DefaultBufferedWritableByteChannel c = + new DefaultBufferedWritableByteChannel(handle, new NegativeOneWritableByteChannel(), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + IllegalStateException ise = + assertThrows( + IllegalStateException.class, + () -> { + int written1 = c.write(all.slice(0, 3).asByteBuffer()); + assertThat(written1).isEqualTo(3); + c.write(all.slice(3, 3).asByteBuffer()); + fail("should have errored in previous write call"); + }); + ise.printStackTrace(System.out); + } + + @Example + void test() { + illegalStateExceptionIfWrittenLt0_slice_eqBuffer(); + illegalStateExceptionIfWrittenLt0_slice_gtBuffer(); + illegalStateExceptionIfWrittenLt0_slice_ltBuffer(); + } + @Property void bufferAllocationShouldOnlyHappenWhenNeeded(@ForAll("BufferSizes") WriteOps writeOps) throws IOException { @@ -697,4 +893,20 @@ public ByteBuffer get() { return delegate.get(); } } + + private static class NegativeOneWritableByteChannel implements UnbufferedWritableByteChannel { + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + return -1; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + } } diff --git 
a/google-cloud-storage/src/test/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannelTest.java index f74b21fb28..27d96ef6f0 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannelTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/GapicUnbufferedReadableByteChannelTest.java @@ -23,7 +23,6 @@ import com.google.api.gax.rpc.ApiCallContext; import com.google.api.gax.rpc.ResponseObserver; import com.google.api.gax.rpc.ServerStreamingCallable; -import com.google.api.gax.rpc.StreamController; import com.google.cloud.storage.GrpcUtils.ZeroCopyServerStreamingCallable; import com.google.cloud.storage.Retrying.Retrier; import com.google.cloud.storage.it.ChecksummedTestContent; @@ -56,7 +55,7 @@ public void call( ReadObjectRequest request, ResponseObserver respond, ApiCallContext context) { - respond.onStart(new NullStreamController()); + respond.onStart(TestUtils.nullStreamController()); respond.onResponse( ReadObjectResponse.newBuilder() .setChecksummedData(testContent.asChecksummedData()) @@ -76,16 +75,4 @@ public void call( assertThat(close.get()).isTrue(); } } - - private static class NullStreamController implements StreamController { - - @Override - public void cancel() {} - - @Override - public void disableAutoInboundFlowControl() {} - - @Override - public void request(int count) {} - } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java index 65804c19df..f9dff67481 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadFakeTest.java @@ -16,25 +16,34 @@ package com.google.cloud.storage; +import static 
com.google.cloud.storage.BidiUploadTestUtils.makeRedirect; +import static com.google.cloud.storage.BidiUploadTestUtils.packRedirectIntoAbortedException; +import static com.google.cloud.storage.BidiUploadTestUtils.timestampNow; import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.StorageV2ProtoUtils.fmtProto; +import static com.google.cloud.storage.TestUtils.assertAll; import static com.google.common.truth.Truth.assertThat; import static org.junit.Assert.assertThrows; +import com.google.api.core.ApiFuture; import com.google.api.core.SettableApiFuture; import com.google.api.gax.grpc.GrpcCallContext; import com.google.api.gax.rpc.AbortedException; +import com.google.cloud.storage.BidiUploadState.AppendableUploadState; import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; +import com.google.cloud.storage.Storage.BlobField; import com.google.cloud.storage.it.ChecksummedTestContent; import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; import com.google.protobuf.Any; import com.google.protobuf.ByteString; import com.google.protobuf.FieldMask; +import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; import com.google.rpc.Code; +import com.google.rpc.DebugInfo; import com.google.storage.v2.AppendObjectSpec; import com.google.storage.v2.BidiWriteHandle; -import com.google.storage.v2.BidiWriteObjectRedirectedError; import com.google.storage.v2.BidiWriteObjectRequest; import com.google.storage.v2.BidiWriteObjectResponse; import com.google.storage.v2.BucketName; @@ -42,33 +51,34 @@ import com.google.storage.v2.GetObjectRequest; import com.google.storage.v2.Object; import com.google.storage.v2.ObjectChecksums; -import com.google.storage.v2.StorageClient; import com.google.storage.v2.StorageGrpc; import com.google.storage.v2.WriteObjectSpec; import io.grpc.Metadata; 
import io.grpc.Status; import io.grpc.StatusRuntimeException; -import io.grpc.protobuf.ProtoUtils; import io.grpc.stub.StreamObserver; +import java.io.IOException; import java.nio.ByteBuffer; import java.time.Duration; +import java.util.Collection; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Consumer; +import java.util.stream.Collectors; +import org.checkerframework.checker.nullness.qual.NonNull; +import org.checkerframework.checker.nullness.qual.Nullable; +import org.junit.Ignore; import org.junit.Test; public class ITAppendableUploadFakeTest { private static final byte[] ALL_OBJECT_BYTES = DataGenerator.base64Characters().genBytes(64); - private static final Metadata.Key GRPC_STATUS_DETAILS_KEY = - Metadata.Key.of( - "grpc-status-details-bin", - ProtoUtils.metadataMarshaller(com.google.rpc.Status.getDefaultInstance())); - private static final Object METADATA = Object.newBuilder() .setBucket(BucketName.format("_", "b")) @@ -92,182 +102,152 @@ public class ITAppendableUploadFakeTest { private static final BlobAppendableUploadConfig UPLOAD_CONFIG = BlobAppendableUploadConfig.of() - .withFlushPolicy(FlushPolicy.maxFlushSize(5)) - .withCrc32cValidationEnabled(false) + .withFlushPolicy(FlushPolicy.maxFlushSize(3)) .withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING); - /** - * - * - *

    - *
  1. Create a new appendable object - *
  2. First results give redirect error - *
  3. Retry using a new AppendObjectSpec with routing token, generation, write handle specified - * -- retry succeeds - *
  4. Finish writing the data as normal on the new stream - *
- */ - @Test - public void bidiWriteObjectRedirectedError() throws Exception { - - String routingToken = UUID.randomUUID().toString(); - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - BidiWriteObjectRequest req2 = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .setRoutingToken(routingToken) - .setWriteHandle(writeHandle) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req3 = - BidiWriteObjectRequest.newBuilder() - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABCDE")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - - BidiWriteObjectRequest req4 = - BidiWriteObjectRequest.newBuilder() - .setWriteOffset(5) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("FGHIJ")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - BidiWriteObjectRequest req5 = - BidiWriteObjectRequest.newBuilder().setWriteOffset(10).setFinishWrite(true).build(); - - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - BidiWriteObjectResponse res2 = BidiWriteObjectResponse.newBuilder().setPersistedSize(0).build(); - BidiWriteObjectResponse res3 = BidiWriteObjectResponse.newBuilder().setPersistedSize(5).build(); - - BidiWriteObjectResponse res4 = - BidiWriteObjectResponse.newBuilder().setPersistedSize(10).build(); - - BidiWriteObjectResponse res5 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(10) - // real object would have some extra fields like metageneration and storage - // class - .build()) - 
.setWriteHandle(writeHandle) - .build(); - - FakeStorage fake = - FakeStorage.of( - ImmutableMap.of( - REQ_OPEN.toBuilder().setFlush(true).setStateLookup(true).build(), - respond -> { - BidiWriteObjectRedirectedError redirect = - BidiWriteObjectRedirectedError.newBuilder() - .setWriteHandle(writeHandle) - .setRoutingToken(routingToken) - .setGeneration(METADATA.getGeneration()) - .build(); - - com.google.rpc.Status grpcStatusDetails = - com.google.rpc.Status.newBuilder() - .setCode(Code.ABORTED_VALUE) - .setMessage("redirect") - .addDetails(Any.pack(redirect)) - .build(); - - Metadata trailers = new Metadata(); - trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); - StatusRuntimeException statusRuntimeException = - Status.ABORTED.withDescription("redirect").asRuntimeException(trailers); - respond.onError(statusRuntimeException); - }, - req2, - respond -> respond.onNext(res2), - req3, - respond -> respond.onNext(res3), - req4, - respond -> respond.onNext(res4), - req5, - respond -> respond.onNext(res5))); + private static final ChecksummedTestContent content = + ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); + private static final ObjectChecksums checksums = + ObjectChecksums.newBuilder().setCrc32C(content.getCrc32c()).build(); + private static final BidiWriteObjectRequest flushLookup = + BidiWriteObjectRequest.newBuilder().setFlush(true).setStateLookup(true).build(); + private static final BidiWriteObjectRequest abc = incrementalRequest(0, "ABC"); + private static final BidiWriteObjectRequest def = incrementalRequest(3, "DEF"); + private static final BidiWriteObjectRequest ghi = incrementalRequest(6, "GHI"); + private static final BidiWriteObjectRequest j = incrementalRequest(9, "J"); + private static final BidiWriteObjectRequest j_flush = + j.toBuilder().mergeFrom(flushLookup).build(); + private static final BidiWriteObjectRequest j_finish = + j.toBuilder().setFinishWrite(true).setObjectChecksums(checksums).build(); + private static final 
BidiWriteObjectRequest finish_10 = + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(10) + .setFinishWrite(true) + .setObjectChecksums(checksums) + .build(); - try (FakeServer fakeServer = FakeServer.of(fake); - Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { + private static final BidiWriteObjectRequest open_abc = + REQ_OPEN.toBuilder().mergeFrom(abc).build(); + private static final BidiWriteObjectResponse res_abc = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(3) + .setChecksums( + ObjectChecksums.newBuilder() + .setCrc32C(content.slice(0, 3).getCrc32c()) + .build()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + private static final BidiWriteObjectRequest reconnect = + BidiWriteObjectRequest.newBuilder() + .setAppendObjectSpec( + AppendObjectSpec.newBuilder() + .setBucket(METADATA.getBucket()) + .setObject(METADATA.getName()) + .setGeneration(METADATA.getGeneration()) + .build()) + .setStateLookup(true) + .build(); + private static final BidiWriteObjectResponse resource_10 = + BidiWriteObjectResponse.newBuilder() + .setResource( + Object.newBuilder() + .setName(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setGeneration(METADATA.getGeneration()) + .setSize(10) + .setChecksums(checksums) + .setFinalizeTime(timestampNow()) + // real object would have some extra fields like metageneration and storage + // class + .build()) + .build(); + public static final GetObjectRequest get_generation_mask = + GetObjectRequest.newBuilder() + .setObject(METADATA.getName()) + .setBucket(METADATA.getBucket()) + .setReadMask(FieldMask.newBuilder().addPaths(BlobField.GENERATION.getGrpcName()).build()) + .build(); - BlobId id = BlobId.of("b", "o"); - BlobAppendableUpload b = - 
storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); - try (AppendableUploadWriteableByteChannel channel = b.open()) { - channel.write(ByteBuffer.wrap(content.getBytes())); - } - BlobInfo bi = b.getResult().get(5, TimeUnit.SECONDS); - assertThat(bi.getSize()).isEqualTo(10); - } - } + private static final ChunkSegmenter smallSegmenter = + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); @Test public void bidiWriteObjectRedirectedError_maxAttempts() throws Exception { - // todo: This test fails currently - String routingToken = UUID.randomUUID().toString(); + String routingToken1 = "routingToken1"; + String routingToken2 = "routingToken2"; + String routingToken3 = "routingToken3"; + String routingToken4 = "routingToken4"; + String routingToken5 = "routingToken5"; BidiWriteHandle writeHandle = BidiWriteHandle.newBuilder() .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) .build(); - BidiWriteObjectRequest req2 = + BidiWriteObjectRequest redirectReconcile = BidiWriteObjectRequest.newBuilder() .setAppendObjectSpec( AppendObjectSpec.newBuilder() .setBucket(METADATA.getBucket()) .setObject(METADATA.getName()) .setGeneration(METADATA.getGeneration()) - .setRoutingToken(routingToken) .setWriteHandle(writeHandle) .build()) - .setFlush(true) .setStateLookup(true) .build(); - BidiWriteObjectRedirectedError redirect = - BidiWriteObjectRedirectedError.newBuilder() - .setWriteHandle(writeHandle) - .setRoutingToken(routingToken) - .setGeneration(METADATA.getGeneration()) - .build(); - - com.google.rpc.Status grpcStatusDetails = - com.google.rpc.Status.newBuilder() - .setCode(Code.ABORTED_VALUE) - .setMessage("redirect") - .addDetails(Any.pack(redirect)) - .build(); - - Metadata trailers = new Metadata(); - trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); - StatusRuntimeException statusRuntimeException = - Status.ABORTED.withDescription("redirect").asRuntimeException(trailers); + BidiWriteObjectRequest 
redirectRequest1 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken1); + BidiWriteObjectRequest redirectRequest2 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken2); + BidiWriteObjectRequest redirectRequest3 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken3); + BidiWriteObjectRequest redirectRequest4 = + BidiUploadTestUtils.withRedirectToken(redirectReconcile, routingToken4); - // TODO: assert number of redirects returned + AtomicInteger redirectCounter = new AtomicInteger(); FakeStorage fake = FakeStorage.of( ImmutableMap.of( - REQ_OPEN.toBuilder().setFlush(true).setStateLookup(true).build(), - respond -> respond.onError(statusRuntimeException), - req2, - respond -> respond.onError(statusRuntimeException))); + BidiUploadTestUtils.withFlushAndStateLookup(open_abc), + respond -> { + BidiWriteObjectResponse.Builder b = res_abc.toBuilder(); + b.setWriteHandle(writeHandle); + BidiWriteObjectResponse resAbcWithHandle = b.build(); + respond.onNext(resAbcWithHandle); + }, + BidiUploadTestUtils.withFlushAndStateLookup(def), + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken1))); + }, + redirectRequest1, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken2))); + }, + redirectRequest2, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken3))); + }, + redirectRequest3, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken4))); + }, + redirectRequest4, + respond -> { + redirectCounter.getAndIncrement(); + respond.onError(packRedirectIntoAbortedException(makeRedirect(routingToken5))); + })); try (FakeServer fakeServer = FakeServer.of(fake); Storage storage = @@ -281,248 +261,33 @@ public void 
bidiWriteObjectRedirectedError_maxAttempts() throws Exception { .getService()) { BlobId id = BlobId.of("b", "o"); + BlobAppendableUploadConfig config = + BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.maxFlushSize(3)) + .withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING); BlobAppendableUpload b = - storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); - AppendableUploadWriteableByteChannel channel = b.open(); - try { - StorageException e = - assertThrows( - StorageException.class, - () -> { - channel.write(ByteBuffer.wrap("ABCDE".getBytes())); - }); - assertThat(e).hasCauseThat().isInstanceOf(AbortedException.class); - } finally { - channel.close(); - } - } - } - - /** - * - * - *
    - *
  1. Create a new appendable object, write 5 bytes, first result succeeds - *
  2. Write 5 more bytes--server responds with a retryable error - *
  3. Retry using a new AppendObjectSpec with generation, write handle specified -- retry - * succeeds - *
  4. Finish writing the data as normal on the new stream - *
- */ - @Test - public void bidiWriteObjectRetryableError() throws Exception { - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - BidiWriteObjectResponse res1 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(5) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - BidiWriteObjectRequest req2 = - BidiWriteObjectRequest.newBuilder() - .setWriteOffset(5) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("FGHIJ")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - - BidiWriteObjectRequest req3 = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .setWriteHandle(writeHandle) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req5 = - BidiWriteObjectRequest.newBuilder().setWriteOffset(10).setFinishWrite(true).build(); - - BidiWriteObjectResponse res3 = BidiWriteObjectResponse.newBuilder().setPersistedSize(5).build(); - - BidiWriteObjectResponse res4 = - BidiWriteObjectResponse.newBuilder().setPersistedSize(10).build(); - - BidiWriteObjectResponse res5 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(10) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - final AtomicBoolean retried = new AtomicBoolean(false); - - FakeStorage fake = - FakeStorage.of( - 
ImmutableMap.of( - REQ_OPEN.toBuilder().setFlush(true).setStateLookup(true).build(), - respond -> respond.onNext(res1), - req2, - respond -> { - // This same request gets run twice, the first time (as the second request), - // it gets an error. The second time (as the fourth request) it succeeds. - if (!retried.get()) { - respond.onError(Status.INTERNAL.asRuntimeException()); - retried.set(true); - } else { - respond.onNext(res4); - } - }, - req3, - respond -> respond.onNext(res3), - req5, - respond -> respond.onNext(res5))); - - try (FakeServer fakeServer = FakeServer.of(fake); - Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - - BlobId id = BlobId.of("b", "o"); - BlobAppendableUpload b = - storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - try (AppendableUploadWriteableByteChannel channel = b.open()) { - channel.write(ByteBuffer.wrap(content.getBytes())); - } - BlobInfo bi = b.getResult().get(5, TimeUnit.SECONDS); - assertThat(bi.getSize()).isEqualTo(10); - } - } - - /** - * - * - *
    - *
  1. Create a new appendable object, write 5 bytes, first result succeeds - *
  2. Write 5 more bytes--server responds with a retryable error - *
  3. Retry using a new AppendObjectSpec with generation, write handle specified - *
  4. GCS responds with a persisted size indicating a partial write - *
  5. Client responds by taking the partial success into account and skipping some bytes on the - * retry - *
  6. Finish writing the data as normal on the new stream - *
- */ - @Test - public void retryableErrorIncompleteFlush() throws Exception { - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - BidiWriteObjectResponse res1 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(5) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - BidiWriteObjectRequest req2 = - BidiWriteObjectRequest.newBuilder() - .setWriteOffset(5) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("FGHIJ")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - - BidiWriteObjectRequest req3 = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .setWriteHandle(writeHandle) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req5 = - BidiWriteObjectRequest.newBuilder().setWriteOffset(10).setFinishWrite(true).build(); - - BidiWriteObjectResponse res3 = BidiWriteObjectResponse.newBuilder().setPersistedSize(7).build(); - - BidiWriteObjectRequest req4 = - BidiWriteObjectRequest.newBuilder() - .setWriteOffset(7) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("HIJ")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - - BidiWriteObjectResponse res4 = - BidiWriteObjectResponse.newBuilder().setPersistedSize(10).build(); - - BidiWriteObjectResponse res5 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - 
.setSize(10) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - FakeStorage fake = - FakeStorage.of( - ImmutableMap.of( - REQ_OPEN.toBuilder().setFlush(true).setStateLookup(true).build(), - respond -> respond.onNext(res1), - req2, - respond -> respond.onError(Status.INTERNAL.asRuntimeException()), - req3, - respond -> respond.onNext(res3), - req4, - respond -> respond.onNext(res4), - req5, - respond -> respond.onNext(res5))); - - try (FakeServer fakeServer = FakeServer.of(fake); - Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - - BlobId id = BlobId.of("b", "o"); - BlobAppendableUpload b = - storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - try (AppendableUploadWriteableByteChannel channel = b.open()) { - channel.write(ByteBuffer.wrap(content.getBytes())); - } - BlobInfo bi = b.getResult().get(5, TimeUnit.SECONDS); - assertThat(bi.getSize()).isEqualTo(10); + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), config); + IOException ioe = + assertThrows( + IOException.class, + () -> { + AppendableUploadWriteableByteChannel channel = b.open(); + ByteBuffer wrap = ByteBuffer.wrap(content.getBytes()); + Buffers.emptyTo(wrap, channel); + channel.close(); + }); + + assertAll( + () -> assertThat(redirectCounter.get()).isEqualTo(4), + () -> { + ExecutionException ee = + assertThrows( + ExecutionException.class, () -> b.getResult().get(3, TimeUnit.SECONDS)); + assertThat(ee).hasCauseThat().isInstanceOf(StorageException.class); + assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(AbortedException.class); + }, + () -> assertThat(ioe).hasCauseThat().isInstanceOf(StorageException.class), + () -> assertThat(ioe).hasCauseThat().hasCauseThat().isInstanceOf(AbortedException.class)); } } @@ -532,96 +297,25 @@ 
public void retryableErrorIncompleteFlush() throws Exception { */ @Test public void testFlushMultipleSegments() throws Exception { - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - - ChunkSegmenter smallSegmenter = - new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), 3, 3); - - BidiWriteObjectRequest req1 = - REQ_OPEN.toBuilder() - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABC"))) - .build(); - - BidiWriteObjectResponse res1 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(10) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - BidiWriteObjectResponse last = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(10) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .build(); FakeStorage fake = FakeStorage.of( ImmutableMap.of( - req1, + open_abc, respond -> {}, - incrementalRequest(3, "DEF"), + def, respond -> {}, - incrementalRequest(6, "GHI"), + ghi, respond -> {}, - incrementalRequest(9, "J", true), - respond -> respond.onNext(res1), - finishMessage(10), - respond -> respond.onNext(last))); - - try (FakeServer fakeServer = FakeServer.of(fake); - GrpcStorageImpl storage = - (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - StorageClient storageClient = storage.storageClient; - BidiWriteCtx writeCtx = - new BidiWriteCtx<>( - new BidiAppendableWrite( - BidiWriteObjectRequest.newBuilder() - .setWriteObjectSpec( - WriteObjectSpec.newBuilder() - 
.setResource( - Object.newBuilder() - .setBucket(METADATA.getBucket()) - .setName(METADATA.getName())) - .setAppendable(true) - .build()) - .build())); - SettableApiFuture done = SettableApiFuture.create(); + j_flush, + respond -> respond.onNext(incrementalResponse(10)), + finish_10, + respond -> { + respond.onNext(resource_10); + respond.onCompleted(); + })); - GapicBidiUnbufferedAppendableWritableByteChannel channel = - new GapicBidiUnbufferedAppendableWritableByteChannel( - storageClient.bidiWriteObjectCallable(), - storageClient.getObjectCallable(), - TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()) - .withAlg( - fakeServer.getGrpcStorageOptions().getRetryAlgorithmManager().idempotent()), - done, - smallSegmenter, - writeCtx, - GrpcCallContext::createDefault); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - channel.write(ByteBuffer.wrap(content.getBytes())); - channel.finalizeWrite(); - assertThat(done.get().getResource().getSize()).isEqualTo(10); - } + runTestFlushMultipleSegments(fake); } /** @@ -633,133 +327,39 @@ public void testFlushMultipleSegments() throws Exception { */ @Test public void testFlushMultipleSegments_failsHalfway() throws Exception { - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - - ChunkSegmenter smallSegmenter = - new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), 3, 3); - - BidiWriteObjectRequest req1 = - REQ_OPEN.toBuilder() - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABC"))) - .build(); - - BidiWriteObjectResponse res1 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(3) - // real object would have some extra fields like metageneration and storage - // class - .build()) - 
.setWriteHandle(writeHandle) - .build(); - - BidiWriteObjectRequest req2 = incrementalRequest(3, "DEF"); - BidiWriteObjectRequest req3 = incrementalRequest(6, "GHI"); - - BidiWriteObjectRequest reconnect = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req4 = incrementalRequest(9, "J", true); - BidiWriteObjectRequest req5 = finishMessage(10); - - BidiWriteObjectResponse last = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(10) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .build(); Map map = new ConcurrentHashMap<>(); - + Consumer> finish10Respond = + maxRetries(j_finish, resource_10, map, 1); FakeStorage fake = FakeStorage.of( ImmutableMap.of( - req1, - maxRetries(req1, null, map, 1), - req2, - maxRetries(req2, null, map, 1), - req3, - retryableErrorOnce(req3, null, map, 2), + open_abc, + maxRetries(open_abc, res_abc, map, 1), + def, + maxRetries(def, map, 1), + ghi, + retryableErrorOnce(ghi, map, 2), reconnect, - maxRetries(reconnect, incrementalResponse(6), map, 2), - req4, - maxRetries(req4, incrementalResponse(10), map, 1), - req5, - maxRetries(req5, last, map, 1)), + maxRetries(reconnect, incrementalResponse(6), map, 1), + j_finish, + respond -> { + finish10Respond.accept(respond); + respond.onCompleted(); + }), ImmutableMap.of( - GetObjectRequest.newBuilder() - .setObject(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setReadMask( - (FieldMask.newBuilder() - .addPaths(Storage.BlobField.GENERATION.getGrpcName()) - .build())) - .build(), + get_generation_mask, 
Object.newBuilder().setGeneration(METADATA.getGeneration()).build())); - - try (FakeServer fakeServer = FakeServer.of(fake); - GrpcStorageImpl storage = - (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - StorageClient storageClient = storage.storageClient; - BidiWriteCtx writeCtx = - new BidiWriteCtx<>( - new BidiAppendableWrite( - BidiWriteObjectRequest.newBuilder() - .setWriteObjectSpec( - WriteObjectSpec.newBuilder() - .setResource( - Object.newBuilder() - .setBucket(METADATA.getBucket()) - .setName(METADATA.getName())) - .setAppendable(true) - .build()) - .build())); - SettableApiFuture done = SettableApiFuture.create(); - - GapicBidiUnbufferedAppendableWritableByteChannel channel = - new GapicBidiUnbufferedAppendableWritableByteChannel( - storageClient.bidiWriteObjectCallable(), - storageClient.getObjectCallable(), - TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()) - .withAlg( - fakeServer.getGrpcStorageOptions().getRetryAlgorithmManager().idempotent()), - done, - smallSegmenter, - writeCtx, - GrpcCallContext::createDefault); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - channel.write(ByteBuffer.wrap(content.getBytes())); - channel.finalizeWrite(); - assertThat(done.get().getResource().getSize()).isEqualTo(10); - - assertThat(map.get(req1)).isEqualTo(1); - assertThat(map.get(req2)).isEqualTo(1); - assertThat(map.get(req3)).isEqualTo(2); - assertThat(map.get(req4)).isEqualTo(1); - assertThat(map.get(req5)).isEqualTo(1); - } + + runTestFlushMultipleSegments(fake); + + assertThat(map) + .isEqualTo( + ImmutableMap.of( + open_abc, 1, + def, 1, + ghi, 2, + reconnect, 1, + j_finish, 1)); } /** @@ -769,6 +369,7 @@ public void testFlushMultipleSegments_failsHalfway() throws Exception { * and only sending "HI", and updating the offsets accordingly. 
*/ @Test + @Ignore("messages splitting") public void testFlushMultipleSegments_failsHalfway_partialFlush() throws Exception { BidiWriteHandle writeHandle = BidiWriteHandle.newBuilder() @@ -776,7 +377,7 @@ public void testFlushMultipleSegments_failsHalfway_partialFlush() throws Excepti .build(); ChunkSegmenter smallSegmenter = - new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), 3, 3); + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); BidiWriteObjectRequest req1 = REQ_OPEN.toBuilder() @@ -826,6 +427,7 @@ public void testFlushMultipleSegments_failsHalfway_partialFlush() throws Excepti .setBucket(METADATA.getBucket()) .setGeneration(METADATA.getGeneration()) .setSize(10) + .setFinalizeTime(timestampNow()) // real object would have some extra fields like metageneration and storage // class .build()) @@ -863,37 +465,36 @@ public void testFlushMultipleSegments_failsHalfway_partialFlush() throws Excepti try (FakeServer fakeServer = FakeServer.of(fake); GrpcStorageImpl storage = (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - StorageClient storageClient = storage.storageClient; - BidiWriteCtx writeCtx = - new BidiWriteCtx<>( - new BidiAppendableWrite( - BidiWriteObjectRequest.newBuilder() - .setWriteObjectSpec( - WriteObjectSpec.newBuilder() - .setResource( - Object.newBuilder() - .setBucket(METADATA.getBucket()) - .setName(METADATA.getName())) - .setAppendable(true) - .build()) - .build())); SettableApiFuture done = SettableApiFuture.create(); - - GapicBidiUnbufferedAppendableWritableByteChannel channel = - new GapicBidiUnbufferedAppendableWritableByteChannel( - storageClient.bidiWriteObjectCallable(), - storageClient.getObjectCallable(), - TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()) - .withAlg( - fakeServer.getGrpcStorageOptions().getRetryAlgorithmManager().idempotent()), - done, + BidiAppendableUnbufferedWritableByteChannel channel = + new 
BidiAppendableUnbufferedWritableByteChannel( + new BidiUploadStreamingStream( + BidiUploadState.appendableNew( + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(), + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()), + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()), smallSegmenter, - writeCtx, - GrpcCallContext::createDefault); + 0); ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - channel.write(ByteBuffer.wrap(content.getBytes())); - channel.finalizeWrite(); - assertThat(done.get().getResource().getSize()).isEqualTo(10); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + assertThat(done.get(777, TimeUnit.MILLISECONDS).getResource().getSize()).isEqualTo(10); assertThat(map.get(req1)).isEqualTo(1); assertThat(map.get(req2)).isEqualTo(1); @@ -912,6 +513,7 @@ public void testFlushMultipleSegments_failsHalfway_partialFlush() throws Excepti * in the channel works properly */ @Test + @Ignore("partial message eviction") public void testFlushMultipleSegmentsTwice_firstSucceeds_secondFailsHalfway_partialFlush() throws Exception { BidiWriteHandle writeHandle = @@ -920,7 +522,7 @@ public void testFlushMultipleSegmentsTwice_firstSucceeds_secondFailsHalfway_part .build(); ChunkSegmenter smallSegmenter = - new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), 3, 3); + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); BidiWriteObjectRequest req1 = REQ_OPEN.toBuilder() @@ -972,6 +574,7 @@ public void testFlushMultipleSegmentsTwice_firstSucceeds_secondFailsHalfway_part 
.setBucket(METADATA.getBucket()) .setGeneration(METADATA.getGeneration()) .setSize(20) + .setFinalizeTime(timestampNow()) // real object would have some extra fields like metageneration and storage // class .build()) @@ -1011,39 +614,38 @@ public void testFlushMultipleSegmentsTwice_firstSucceeds_secondFailsHalfway_part try (FakeServer fakeServer = FakeServer.of(fake); GrpcStorageImpl storage = (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - StorageClient storageClient = storage.storageClient; - BidiWriteCtx writeCtx = - new BidiWriteCtx<>( - new BidiAppendableWrite( - BidiWriteObjectRequest.newBuilder() - .setWriteObjectSpec( - WriteObjectSpec.newBuilder() - .setResource( - Object.newBuilder() - .setBucket(METADATA.getBucket()) - .setName(METADATA.getName())) - .setAppendable(true) - .build()) - .build())); SettableApiFuture done = SettableApiFuture.create(); - - GapicBidiUnbufferedAppendableWritableByteChannel channel = - new GapicBidiUnbufferedAppendableWritableByteChannel( - storageClient.bidiWriteObjectCallable(), - storageClient.getObjectCallable(), - TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()) - .withAlg( - fakeServer.getGrpcStorageOptions().getRetryAlgorithmManager().idempotent()), - done, + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel( + new BidiUploadStreamingStream( + BidiUploadState.appendableNew( + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(), + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()), + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()), smallSegmenter, - writeCtx, - 
GrpcCallContext::createDefault); + 0); ChecksummedTestContent content1 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); ChecksummedTestContent content2 = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); - channel.write(ByteBuffer.wrap(content1.getBytes())); - channel.write(ByteBuffer.wrap(content2.getBytes())); - channel.finalizeWrite(); - assertThat(done.get().getResource().getSize()).isEqualTo(20); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content1.getBytes()), channel); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content2.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + assertThat(done.get(777, TimeUnit.MILLISECONDS).getResource().getSize()).isEqualTo(20); assertThat(map.get(reconnect)).isEqualTo(1); assertThat(map.get(req2)).isEqualTo(1); @@ -1062,6 +664,12 @@ public void testFlushMultipleSegmentsTwice_firstSucceeds_secondFailsHalfway_part * skipping the partially ack'd bytes */ @Test + /* + @Ignore("Ignore until the new implementation handles partial message consumption. 
\n" + + "[0:3] + [3:3] + [6:3] -> 8\n" + + "Today we only replay whole messages") + */ + @Ignore("messages splitting") public void testFlushMultipleSegments_200ResponsePartialFlushHalfway() throws Exception { BidiWriteHandle writeHandle = BidiWriteHandle.newBuilder() @@ -1069,7 +677,7 @@ public void testFlushMultipleSegments_200ResponsePartialFlushHalfway() throws Ex .build(); ChunkSegmenter smallSegmenter = - new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), 3, 3); + new ChunkSegmenter(Hasher.enabled(), ByteStringStrategy.copy(), 3, 3); BidiWriteObjectRequest req1 = REQ_OPEN.toBuilder() @@ -1120,6 +728,7 @@ public void testFlushMultipleSegments_200ResponsePartialFlushHalfway() throws Ex .setBucket(METADATA.getBucket()) .setGeneration(METADATA.getGeneration()) .setSize(10) + .setFinalizeTime(timestampNow()) // real object would have some extra fields like metageneration and storage // class .build()) @@ -1155,38 +764,40 @@ public void testFlushMultipleSegments_200ResponsePartialFlushHalfway() throws Ex try (FakeServer fakeServer = FakeServer.of(fake); GrpcStorageImpl storage = - (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - StorageClient storageClient = storage.storageClient; - BidiWriteCtx writeCtx = - new BidiWriteCtx<>( - new BidiAppendableWrite( - BidiWriteObjectRequest.newBuilder() - .setWriteObjectSpec( - WriteObjectSpec.newBuilder() - .setResource( - Object.newBuilder() - .setBucket(METADATA.getBucket()) - .setName(METADATA.getName())) - .setAppendable(true) - .build()) - .build())); - SettableApiFuture done = SettableApiFuture.create(); - - GapicBidiUnbufferedAppendableWritableByteChannel channel = - new GapicBidiUnbufferedAppendableWritableByteChannel( - storageClient.bidiWriteObjectCallable(), - storageClient.getObjectCallable(), - TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()) - .withAlg( - fakeServer.getGrpcStorageOptions().getRetryAlgorithmManager().idempotent()), - done, 
- smallSegmenter, - writeCtx, - GrpcCallContext::createDefault); + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + BidiWriteObjectRequest initial = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + .setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(); + AppendableUploadState uploadState = + BidiUploadState.appendableNew( + initial, + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + uploadState, + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()); + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel(stream, smallSegmenter, 0); ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 10); - channel.write(ByteBuffer.wrap(content.getBytes())); - channel.finalizeWrite(); - assertThat(done.get().getResource().getSize()).isEqualTo(10); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + assertThat(stream.getResultFuture().get(777, TimeUnit.MILLISECONDS).getResource().getSize()) + .isEqualTo(10); assertThat(map.get(req1)).isEqualTo(1); assertThat(map.get(req2)).isEqualTo(1); @@ -1198,323 +809,6 @@ public void testFlushMultipleSegments_200ResponsePartialFlushHalfway() throws Ex } } - /** - * If the last message in a flush of multiple segments (or the only message in a flush with just - * one segment) returns a 200 response but does a partial flush, we won't get a server side error - * like in the previous test, because we won't try to do a write with a larger offset than the - * persisted size. 
Instead, the channel keeps a manual count for this case, and throws an error if - * it happens, which triggers a retry, and the retry loop handles flushing the last request again - * while skipping the partially ack'd bytes - */ - @Test - public void testFlushMultipleSegments_200ResponsePartialFlushOnLastMessage() throws Exception { - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - - ChunkSegmenter smallSegmenter = - new ChunkSegmenter(Hasher.noop(), ByteStringStrategy.copy(), 3, 3); - - BidiWriteObjectRequest req1 = - REQ_OPEN.toBuilder() - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("ABC"))) - .build(); - - BidiWriteObjectResponse res1 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(7) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - BidiWriteObjectRequest req2 = incrementalRequest(3, "DEF"); - BidiWriteObjectRequest req3 = incrementalRequest(6, "GHI", true); - - BidiWriteObjectRequest reconnect = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .setWriteHandle(writeHandle) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req4 = incrementalRequest(7, "HI", true); - - BidiWriteObjectRequest req5 = finishMessage(9); - - BidiWriteObjectResponse last = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(9) - // real object would have some extra 
fields like metageneration and storage - // class - .build()) - .build(); - Map map = new HashMap<>(); - - FakeStorage fake = - FakeStorage.of( - ImmutableMap.of( - req1, - maxRetries(req1, null, map, 1), - req2, - maxRetries(req2, null, map, 1), - req3, - maxRetries(req3, res1, map, 1), - reconnect, - maxRetries(reconnect, incrementalResponse(7), map, 1), - req4, - maxRetries(req4, incrementalResponse(9), map, 1), - req5, - maxRetries(req5, last, map, 1))); - - try (FakeServer fakeServer = FakeServer.of(fake); - GrpcStorageImpl storage = - (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - StorageClient storageClient = storage.storageClient; - BidiWriteCtx writeCtx = - new BidiWriteCtx<>( - new BidiAppendableWrite( - BidiWriteObjectRequest.newBuilder() - .setWriteObjectSpec( - WriteObjectSpec.newBuilder() - .setResource( - Object.newBuilder() - .setBucket(METADATA.getBucket()) - .setName(METADATA.getName())) - .setAppendable(true) - .build()) - .build())); - SettableApiFuture done = SettableApiFuture.create(); - - GapicBidiUnbufferedAppendableWritableByteChannel channel = - new GapicBidiUnbufferedAppendableWritableByteChannel( - storageClient.bidiWriteObjectCallable(), - storageClient.getObjectCallable(), - TestUtils.retrierFromStorageOptions(fakeServer.getGrpcStorageOptions()) - .withAlg( - fakeServer.getGrpcStorageOptions().getRetryAlgorithmManager().idempotent()), - done, - smallSegmenter, - writeCtx, - GrpcCallContext::createDefault); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 9); - channel.write(ByteBuffer.wrap(content.getBytes())); - channel.finalizeWrite(); - assertThat(done.get().getResource().getSize()).isEqualTo(9); - - assertThat(map.get(req1)).isEqualTo(1); - assertThat(map.get(req2)).isEqualTo(1); - assertThat(map.get(req3)).isEqualTo(1); - assertThat(map.get(req4)).isEqualTo(1); - assertThat(map.get(req5)).isEqualTo(1); - assertThat(map.get(reconnect)).isEqualTo(1); - } - 
} - - @Test - public void takeoverRedirectError() throws Exception { - BidiWriteHandle writeHandle = - BidiWriteHandle.newBuilder() - .setHandle(ByteString.copyFromUtf8(UUID.randomUUID().toString())) - .build(); - String routingToken = UUID.randomUUID().toString(); - - BidiWriteObjectRequest req1 = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req2 = - BidiWriteObjectRequest.newBuilder() - .setAppendObjectSpec( - AppendObjectSpec.newBuilder() - .setBucket(METADATA.getBucket()) - .setObject(METADATA.getName()) - .setGeneration(METADATA.getGeneration()) - .setWriteHandle(writeHandle) - .setRoutingToken(routingToken) - .build()) - .setFlush(true) - .setStateLookup(true) - .build(); - - BidiWriteObjectRequest req3 = - BidiWriteObjectRequest.newBuilder() - .setWriteOffset(10) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("KLMNO")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - - BidiWriteObjectRequest req4 = - BidiWriteObjectRequest.newBuilder() - .setWriteOffset(15) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8("PQRST")).build()) - .setStateLookup(true) - .setFlush(true) - .build(); - - BidiWriteObjectRequest req5 = - BidiWriteObjectRequest.newBuilder().setWriteOffset(20).setFinishWrite(true).build(); - - BidiWriteObjectResponse res2 = - BidiWriteObjectResponse.newBuilder().setPersistedSize(10).build(); - - BidiWriteObjectResponse res3 = - BidiWriteObjectResponse.newBuilder().setPersistedSize(15).build(); - - BidiWriteObjectResponse res4 = - BidiWriteObjectResponse.newBuilder().setPersistedSize(20).build(); - - BidiWriteObjectResponse res5 = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - 
.setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(20) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .setWriteHandle(writeHandle) - .build(); - - FakeStorage fake = - FakeStorage.of( - ImmutableMap.of( - req1, - respond -> { - BidiWriteObjectRedirectedError redirect = - BidiWriteObjectRedirectedError.newBuilder() - .setWriteHandle(writeHandle) - .setRoutingToken(routingToken) - .setGeneration(METADATA.getGeneration()) - .build(); - - com.google.rpc.Status grpcStatusDetails = - com.google.rpc.Status.newBuilder() - .setCode(Code.ABORTED_VALUE) - .setMessage("redirect") - .addDetails(Any.pack(redirect)) - .build(); - - Metadata trailers = new Metadata(); - trailers.put(GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); - StatusRuntimeException statusRuntimeException = - Status.ABORTED.withDescription("redirect").asRuntimeException(trailers); - respond.onError(statusRuntimeException); - }, - req2, - respond -> respond.onNext(res2), - req3, - respond -> respond.onNext(res3), - req4, - respond -> respond.onNext(res4), - req5, - respond -> respond.onNext(res5))); - - try (FakeServer fakeServer = FakeServer.of(fake); - Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - - BlobId id = BlobId.of("b", "o", METADATA.getGeneration()); - BlobAppendableUpload b = - storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 10, 10); - try (AppendableUploadWriteableByteChannel channel = b.open()) { - channel.write(ByteBuffer.wrap(content.getBytes())); - } - BlobInfo bi = b.getResult().get(5, TimeUnit.SECONDS); - assertThat(bi.getSize()).isEqualTo(20); - } - } - - /** - * We get a retryable error in our first flush. 
We don't have a generation so we do a metadata - * lookup, but we get an ObjectNotFound, which means that GCS never received the WriteObjectSpec - * and never created the object. Thus, we just send the WriteObjectSpec again - */ - @Test - public void retryableError_ObjectNotFound() throws Exception { - BidiWriteObjectRequest req1 = REQ_OPEN.toBuilder().setFlush(true).setStateLookup(true).build(); - - Map map = new ConcurrentHashMap<>(); - BidiWriteObjectResponse res = - BidiWriteObjectResponse.newBuilder() - .setResource( - Object.newBuilder() - .setName(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setGeneration(METADATA.getGeneration()) - .setSize(5) - // real object would have some extra fields like metageneration and storage - // class - .build()) - .build(); - - BidiWriteObjectRequest req2 = finishMessage(5); - - FakeStorage fake = - FakeStorage.of( - ImmutableMap.of( - req1, retryableErrorOnce(req1, res, map, 2), req2, maxRetries(req2, res, map, 1)), - ImmutableMap.of( - GetObjectRequest.newBuilder() - .setObject(METADATA.getName()) - .setBucket(METADATA.getBucket()) - .setReadMask( - (FieldMask.newBuilder() - .addPaths(Storage.BlobField.GENERATION.getGrpcName()) - .build())) - .build(), - Object.getDefaultInstance())); - - try (FakeServer fakeServer = FakeServer.of(fake); - Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { - - BlobId id = BlobId.of("b", "o"); - BlobAppendableUpload b = - storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), UPLOAD_CONFIG); - ChecksummedTestContent content = ChecksummedTestContent.of(ALL_OBJECT_BYTES, 0, 5); - try (AppendableUploadWriteableByteChannel channel = b.open()) { - channel.write(ByteBuffer.wrap(content.getBytes())); - } - BlobInfo bi = b.getResult().get(5, TimeUnit.SECONDS); - assertThat(bi.getSize()).isEqualTo(5); - - assertThat(map.get(req1)).isEqualTo(2); - assertThat(map.get(req2)).isEqualTo(1); - } - } - @Test public void crc32cWorks() throws Exception 
{ byte[] b = new byte[25]; @@ -1569,7 +863,11 @@ public void crc32cWorks() throws Exception { .build(); BidiWriteObjectResponse res5 = incrementalResponse(25); BidiWriteObjectRequest req6 = - BidiWriteObjectRequest.newBuilder().setWriteOffset(25).setFinishWrite(true).build(); + BidiWriteObjectRequest.newBuilder() + .setWriteOffset(25) + .setFinishWrite(true) + .setObjectChecksums(ObjectChecksums.newBuilder().setCrc32C(all.getCrc32c()).build()) + .build(); BidiWriteObjectResponse res6 = BidiWriteObjectResponse.newBuilder() .setResource( @@ -1578,6 +876,7 @@ public void crc32cWorks() throws Exception { .setBucket(METADATA.getBucket()) .setGeneration(METADATA.getGeneration()) .setSize(25) + .setFinalizeTime(timestampNow()) .setChecksums(ObjectChecksums.newBuilder().setCrc32C(all.getCrc32c()).build()) // real object would have some extra fields like metageneration and storage // class @@ -1587,40 +886,76 @@ public void crc32cWorks() throws Exception { FakeStorage fake = FakeStorage.of( ImmutableMap.of( - req1, respond -> respond.onNext(res1), - req2, respond -> respond.onNext(res2), - req3, respond -> respond.onNext(res3), - req4, respond -> respond.onNext(res4), - req5, respond -> respond.onNext(res5), - req6, respond -> respond.onNext(res6))); + req1, + respond -> respond.onNext(res1), + req2, + respond -> respond.onNext(res2), + req3, + respond -> respond.onNext(res3), + req4, + respond -> respond.onNext(res4), + req5, + respond -> respond.onNext(res5), + req6, + respond -> { + respond.onNext(res6); + respond.onCompleted(); + })); try (FakeServer fakeServer = FakeServer.of(fake); Storage storage = fakeServer.getGrpcStorageOptions().toBuilder().build().getService()) { BlobId id = BlobId.of("b", "o"); - BlobAppendableUploadConfig uploadConfig = UPLOAD_CONFIG.withCrc32cValidationEnabled(true); + BlobAppendableUploadConfig config = + BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.maxFlushSize(5)) + .withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING); 
BlobAppendableUpload upload = - storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), uploadConfig); + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), config); try (AppendableUploadWriteableByteChannel channel = upload.open()) { - channel.write(ByteBuffer.wrap(b)); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(b), channel); } - upload.getResult().get(5, TimeUnit.SECONDS); + ApiFuture result = upload.getResult(); + result.get(5, TimeUnit.SECONDS); } } - private Consumer> maxRetries( - BidiWriteObjectRequest req, - BidiWriteObjectResponse res, - Map retryMap, + private static Consumer> maxRetries( + @NonNull BidiWriteObjectRequest req, + Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, + int maxAttempts) { + return maxRetries(req, null, retryMap, maxAttempts); + } + + private static Consumer> maxRetries( + @NonNull BidiWriteObjectRequest req, + @Nullable BidiWriteObjectResponse res, + @NonNull Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, int maxAttempts) { return respond -> { - retryMap.putIfAbsent(req, 0); - int attempts = retryMap.get(req) + 1; - retryMap.put(req, attempts); + int attempts = retryMap.compute(req, (r, count) -> count == null ? 
1 : count + 1); if (attempts > maxAttempts) { - respond.onError( + DebugInfo details = + DebugInfo.newBuilder().setDetail(TextFormat.printer().shortDebugString(req)).build(); + + com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(com.google.rpc.Code.ABORTED_VALUE) + .setMessage("details") + .addDetails(Any.pack(details)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(TestUtils.GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StatusRuntimeException t = Status.ABORTED - .withDescription("maxRetriesMethod exceed maxAttempts in fake") - .asRuntimeException()); + .withDescription( + String.format( + Locale.US, + "request received %d times, but only allowed %d times", + attempts, + maxAttempts)) + .asRuntimeException(trailers); + respond.onError(t); } else { if (res != null) { respond.onNext(res); @@ -1629,15 +964,20 @@ private Consumer> maxRetries( }; } - private Consumer> retryableErrorOnce( - BidiWriteObjectRequest req, - BidiWriteObjectResponse res, - Map retryMap, + private static Consumer> retryableErrorOnce( + @NonNull BidiWriteObjectRequest req, + @NonNull Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, + int maxAttempts) { + return retryableErrorOnce(req, null, retryMap, maxAttempts); + } + + private static Consumer> retryableErrorOnce( + @NonNull BidiWriteObjectRequest req, + @Nullable BidiWriteObjectResponse res, + @NonNull Map<@NonNull BidiWriteObjectRequest, Integer> retryMap, int maxAttempts) { return respond -> { - retryMap.putIfAbsent(req, 0); - int attempts = retryMap.get(req) + 1; - retryMap.put(req, attempts); + int attempts = retryMap.compute(req, (r, count) -> count == null ? 
1 : count + 1); if (attempts == 1) { respond.onError(Status.INTERNAL.asRuntimeException()); } else if (attempts > maxAttempts) { @@ -1653,12 +993,12 @@ private Consumer> retryableErrorOnce( }; } - private BidiWriteObjectRequest incrementalRequest(long offset, String content, boolean flush) { + private static BidiWriteObjectRequest incrementalRequest( + long offset, String content, boolean flush) { BidiWriteObjectRequest.Builder builder = BidiWriteObjectRequest.newBuilder() .setWriteOffset(offset) - .setChecksummedData( - ChecksummedData.newBuilder().setContent(ByteString.copyFromUtf8(content))); + .setChecksummedData(ChecksummedTestContent.of(content).asChecksummedData()); if (flush) { builder.setFlush(true).setStateLookup(true); @@ -1666,18 +1006,59 @@ private BidiWriteObjectRequest incrementalRequest(long offset, String content, b return builder.build(); } - private BidiWriteObjectRequest incrementalRequest(long offset, String content) { + private static BidiWriteObjectRequest incrementalRequest(long offset, String content) { return incrementalRequest(offset, content, false); } - private BidiWriteObjectResponse incrementalResponse(long perSize) { + private static BidiWriteObjectResponse incrementalResponse(long perSize) { return BidiWriteObjectResponse.newBuilder().setPersistedSize(perSize).build(); } - private BidiWriteObjectRequest finishMessage(long offset) { + private static BidiWriteObjectRequest finishMessage(long offset) { return BidiWriteObjectRequest.newBuilder().setWriteOffset(offset).setFinishWrite(true).build(); } + private static void runTestFlushMultipleSegments(FakeStorage fake) throws Exception { + try (FakeServer fakeServer = FakeServer.of(fake); + GrpcStorageImpl storage = + (GrpcStorageImpl) fakeServer.getGrpcStorageOptions().getService()) { + + BidiWriteObjectRequest initialRequest = + BidiWriteObjectRequest.newBuilder() + .setWriteObjectSpec( + WriteObjectSpec.newBuilder() + .setResource( + Object.newBuilder() + 
.setBucket(METADATA.getBucket()) + .setName(METADATA.getName())) + .setAppendable(true) + .build()) + .build(); + AppendableUploadState state = + BidiUploadState.appendableNew( + initialRequest, + GrpcCallContext::createDefault, + 32, + SettableApiFuture.create(), + Crc32cValue.zero()); + BidiUploadStreamingStream stream = + new BidiUploadStreamingStream( + state, + storage.storageDataClient.executor, + storage.storageClient.bidiWriteObjectCallable(), + 3, + storage.storageDataClient.retryContextProvider.create()); + BidiAppendableUnbufferedWritableByteChannel channel = + new BidiAppendableUnbufferedWritableByteChannel(stream, smallSegmenter, 0); + StorageChannelUtils.blockingEmptyTo(ByteBuffer.wrap(content.getBytes()), channel); + channel.nextWriteShouldFinalize(); + channel.close(); + BidiWriteObjectResponse response = stream.getResultFuture().get(777, TimeUnit.MILLISECONDS); + assertThat(response.getResource().getSize()).isEqualTo(10); + assertThat(response.getResource().getChecksums().getCrc32C()).isEqualTo(content.getCrc32c()); + } + } + static final class FakeStorage extends StorageGrpc.StorageImplBase { private final Map>> db; @@ -1706,8 +1087,7 @@ public void getObject(GetObjectRequest request, StreamObserver responseO responseObserver.onCompleted(); } } else { - responseObserver.onError( - TestUtils.apiException(Status.Code.UNIMPLEMENTED, "Unexpected request")); + responseObserver.onError(unexpectedRequest(request, getdb.keySet())); } } @@ -1720,13 +1100,37 @@ public void onNext(BidiWriteObjectRequest req) { if (db.containsKey(req)) { db.get(req).accept(respond); } else { - respond.onError( - TestUtils.apiException(Status.Code.UNIMPLEMENTED, "Unexpected request")); + respond.onError(unexpectedRequest(req, db.keySet())); } } }; } + static @NonNull StatusRuntimeException unexpectedRequest( + Message req, Collection messages) { + DebugInfo details = + DebugInfo.newBuilder().setDetail(TextFormat.printer().shortDebugString(req)).build(); + + 
com.google.rpc.Status grpcStatusDetails = + com.google.rpc.Status.newBuilder() + .setCode(Code.UNIMPLEMENTED_VALUE) + .setMessage("details") + .addDetails(Any.pack(details)) + .build(); + + Metadata trailers = new Metadata(); + trailers.put(TestUtils.GRPC_STATUS_DETAILS_KEY, grpcStatusDetails); + StringBuilder sb = new StringBuilder(); + sb.append("Unexpected request.").append("\n"); + sb.append(" actual: ").append("\n ").append(fmtProto(req)).append("\n"); + sb.append(" expected one of: "); + sb.append( + messages.stream() + .map(StorageV2ProtoUtils::fmtProto) + .collect(Collectors.joining(",\n ", "[\n ", "\n ]"))); + return Status.UNIMPLEMENTED.withDescription(sb.toString()).asRuntimeException(trailers); + } + static FakeStorage of( Map>> db) { return new FakeStorage(db); @@ -1737,10 +1141,6 @@ static FakeStorage of( Map getdb) { return new FakeStorage(db, getdb); } - - static FakeStorage from(Map db) { - return new FakeStorage(Maps.transformValues(db, resp -> (respond) -> respond.onNext(resp))); - } } abstract static class AbstractObserver implements StreamObserver { diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java index e9f7dff88b..b21896a1c7 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java @@ -15,29 +15,41 @@ */ package com.google.cloud.storage; -import static com.google.cloud.storage.ByteSizeConstants._2MiB; +import static com.google.cloud.storage.ByteSizeConstants._1MiB; import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assume.assumeFalse; +import static org.junit.Assume.assumeTrue; +import com.google.api.core.ApiFuture; import 
com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.FlushPolicy.MaxFlushSizeFlushPolicy; +import com.google.cloud.storage.FlushPolicy.MinFlushSizeFlushPolicy; +import com.google.cloud.storage.ITAppendableUploadTest.UploadConfigParameters; +import com.google.cloud.storage.MetadataField.PartRange; import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ChecksummedTestContent; import com.google.cloud.storage.it.runner.StorageITRunner; import com.google.cloud.storage.it.runner.annotations.Backend; import com.google.cloud.storage.it.runner.annotations.BucketFixture; import com.google.cloud.storage.it.runner.annotations.BucketType; import com.google.cloud.storage.it.runner.annotations.CrossRun; import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.annotations.Parameterized; +import com.google.cloud.storage.it.runner.annotations.Parameterized.Parameter; +import com.google.cloud.storage.it.runner.annotations.Parameterized.ParametersProvider; import com.google.cloud.storage.it.runner.registry.Generator; -import com.google.common.io.ByteStreams; +import com.google.common.base.MoreObjects; +import com.google.common.collect.ImmutableList; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.SeekableByteChannel; -import java.nio.file.Files; import java.nio.file.Paths; -import java.nio.file.StandardOpenOption; -import java.util.Arrays; +import java.util.List; +import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -48,6 +60,7 @@ @CrossRun( backends = {Backend.TEST_BENCH}, transports = Transport.GRPC) +@Parameterized(UploadConfigParameters.class) public final 
class ITAppendableUploadTest { @Inject public Generator generator; @@ -58,148 +71,245 @@ public final class ITAppendableUploadTest { @BucketFixture(BucketType.RAPID) public BucketInfo bucket; + @Inject public Backend backend; + + @Parameter public Param p; + @Test - public void testAppendableBlobUpload() + public void appendableUpload_emptyObject() throws IOException, ExecutionException, InterruptedException, TimeoutException { - BlobAppendableUploadConfig uploadConfig = - BlobAppendableUploadConfig.of() - .withFlushPolicy(FlushPolicy.maxFlushSize(2000)) - .withCloseAction(CloseAction.FINALIZE_WHEN_CLOSING); + assumeTrue( + "only run once", + p.content.length() == UploadConfigParameters.objectSizes.get(0) + && p.uploadConfig.getCloseAction() == UploadConfigParameters.closeActions.get(0) + && p.uploadConfig.getFlushPolicy().equals(UploadConfigParameters.flushPolicies.get(0))); + BlobAppendableUpload upload = storage.blobAppendableUpload( - BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(), uploadConfig); - - byte[] bytes = DataGenerator.base64Characters().genBytes(512 * 1024); - byte[] a1 = Arrays.copyOfRange(bytes, 0, bytes.length / 2); - byte[] a2 = Arrays.copyOfRange(bytes, bytes.length / 2 + 1, bytes.length); - try (AppendableUploadWriteableByteChannel channel = upload.open()) { - channel.write(ByteBuffer.wrap(a1)); - channel.write(ByteBuffer.wrap(a2)); - } - BlobInfo blob = upload.getResult().get(5, TimeUnit.SECONDS); + BlobInfo.newBuilder(bucket, UUID.randomUUID().toString()).build(), p.uploadConfig); - assertThat(blob.getSize()).isEqualTo(a1.length + a2.length); + upload.open().close(); BlobInfo actual = upload.getResult().get(5, TimeUnit.SECONDS); - BlobInfo blob1 = storage.get(actual.getBlobId()); - assertThat(actual).isEqualTo(blob1); + assertThat(actual.getSize()).isEqualTo(0); + assertThat(actual.getCrc32c()) + .isEqualTo(Utils.crc32cCodec.encode(Crc32cValue.zero().getValue())); + + assumeFalse( + "Testbench doesn't handle {read_id: 1, 
read_offset: 0} for a 0 byte object", + backend == Backend.TEST_BENCH); + byte[] actualBytes = readAllBytes(actual); + assertThat(xxd(actualBytes)).isEqualTo(xxd(new byte[0])); } @Test - public void appendableBlobUploadWithoutFinalizing() throws Exception { - BlobAppendableUploadConfig uploadConfig = - BlobAppendableUploadConfig.of().withFlushPolicy(FlushPolicy.maxFlushSize(256 * 1024)); - BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); - BlobAppendableUpload upload = storage.blobAppendableUpload(info, uploadConfig); + public void appendableUpload_bytes() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + checkTestbenchIssue733(); + + BlobAppendableUpload upload = + storage.blobAppendableUpload( + BlobInfo.newBuilder(bucket, UUID.randomUUID().toString()).build(), p.uploadConfig); - byte[] bytes = DataGenerator.base64Characters().genBytes(512 * 1024); - byte[] a1 = Arrays.copyOfRange(bytes, 0, bytes.length / 2); - byte[] a2 = Arrays.copyOfRange(bytes, bytes.length / 2 + 1, bytes.length); + // cut out the middle + 1 byte + int length = p.content.length(); + int mid = length / 2; + ChecksummedTestContent a1 = p.content.slice(0, mid); + ChecksummedTestContent a2 = p.content.slice(mid + 1, length - mid - 1); + ChecksummedTestContent a1_a2 = a1.concat(a2); + Crc32cLengthKnown c1_c2 = Crc32cValue.of(a1_a2.getCrc32c(), a1_a2.length()); try (AppendableUploadWriteableByteChannel channel = upload.open()) { - channel.write(ByteBuffer.wrap(a1)); - channel.write(ByteBuffer.wrap(a2)); + int written1 = Buffers.emptyTo(ByteBuffer.wrap(a1.getBytes()), channel); + assertThat(written1).isEqualTo(a1.length()); + int written2 = Buffers.emptyTo(ByteBuffer.wrap(a2.getBytes()), channel); + assertThat(written2).isEqualTo(a2.length()); } + BlobInfo actual = upload.getResult().get(5, TimeUnit.SECONDS); - assertAll( - () -> assertThat(actual).isNotNull(), - () -> assertThat(actual.getSize()).isEqualTo(512 * 1024 - 1), - () -> 
{ - // TODO: re-enable this when crc32c behavior is better defined when multiple flushes - // and state lookups happen for incomplete uploads. - if (false) { - String crc32c = actual.getCrc32c(); - // prod is null - boolean crc32cNull = crc32c == null; - // testbench v0.54.0+ will have the crc32c of the first flush, regardless if more has - // been flushed since then. - // While the following assertion can pass for v0.54.0 and v0.55.0 it's janky, and not - // something I want to depend upon. So, for now it's skipped, with this comment and - // code left as a skeleton of what should be filled in. - Crc32cLengthKnown a1hash = Hasher.enabled().hash(ByteBuffer.wrap(a1)); - boolean crc32cZero = - Utils.crc32cCodec.encode(a1hash.getValue()).equalsIgnoreCase(crc32c); - assertThat(crc32cNull || crc32cZero).isTrue(); - } - }); + assertThat(actual.getSize()).isEqualTo(c1_c2.getLength()); + assertThat(actual.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(c1_c2.getValue())); + + byte[] actualBytes = readAllBytes(actual); + assertThat(xxd(actualBytes)).isEqualTo(xxd(a1_a2.getBytes())); } @Test - // Pending work in testbench, manually verified internally on 2025-03-25 + // Pending work in testbench: https://github.com/googleapis/storage-testbench/issues/723 + // manually verified internally on 2025-03-25 @CrossRun.Ignore(backends = {Backend.TEST_BENCH}) public void appendableBlobUploadTakeover() throws Exception { - BlobAppendableUploadConfig uploadConfig = - BlobAppendableUploadConfig.of().withFlushPolicy(FlushPolicy.maxFlushSize(5)); - BlobId bid = BlobId.of(bucket.getName(), generator.randomObjectName()); - BlobAppendableUpload upload = - storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), uploadConfig); - byte[] bytes = "ABCDEFGHIJ".getBytes(); + List chunks = p.content.chunkup((p.content.length() / 2) + 1); + assertThat(chunks).hasSize(2); + + ChecksummedTestContent c1 = chunks.get(0); + ChecksummedTestContent c2 = chunks.get(1); + + BlobId id = 
BlobId.of(bucket.getName(), UUID.randomUUID().toString()); + BlobAppendableUploadConfig doNotFinalizeConfig = + p.uploadConfig.withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING); + BlobAppendableUpload upload = + storage.blobAppendableUpload(BlobInfo.newBuilder(id).build(), doNotFinalizeConfig); try (AppendableUploadWriteableByteChannel channel = upload.open()) { - channel.write(ByteBuffer.wrap(bytes)); + int written = Buffers.emptyTo(ByteBuffer.wrap(c1.getBytes()), channel); + assertThat(written).isEqualTo(c1.length()); } - BlobInfo blob = upload.getResult().get(5, TimeUnit.SECONDS); + BlobInfo done1 = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(done1.getSize()).isEqualTo(c1.length()); + assertThat(done1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(c1.getCrc32c())); - byte[] bytes2 = "KLMNOPQRST".getBytes(); BlobAppendableUpload takeOver = - storage.blobAppendableUpload(BlobInfo.newBuilder(blob.getBlobId()).build(), uploadConfig); + storage.blobAppendableUpload( + BlobInfo.newBuilder(done1.getBlobId()).build(), p.uploadConfig); try (AppendableUploadWriteableByteChannel channel = takeOver.open()) { - channel.write(ByteBuffer.wrap(bytes2)); + int written = Buffers.emptyTo(ByteBuffer.wrap(c2.getBytes()), channel); + assertThat(written).isEqualTo(c2.length()); } - BlobInfo i = takeOver.getResult().get(5, TimeUnit.SECONDS); - assertThat(i.getSize()).isEqualTo(20); + BlobInfo done2 = takeOver.getResult().get(5, TimeUnit.SECONDS); + + assertThat(done2.getSize()).isEqualTo(p.content.length()); + assertThat(done2.getCrc32c()).isAnyOf(Utils.crc32cCodec.encode(p.content.getCrc32c()), null); } @Test public void testUploadFileUsingAppendable() throws Exception { - BlobAppendableUploadConfig uploadConfig = - BlobAppendableUploadConfig.of().withFlushPolicy(FlushPolicy.minFlushSize(_2MiB)); + checkTestbenchIssue733(); - BlobId bid = BlobId.of(bucket.getName(), generator.randomObjectName()); + String objectName = UUID.randomUUID().toString(); + String fileName 
= + ParallelCompositeUploadBlobWriteSessionConfig.PartNamingStrategy.noPrefix() + .fmtName(objectName, PartRange.of(1)); + BlobId bid = BlobId.of(bucket.getName(), objectName); + int fileSize = p.content.length(); try (TmpFile tmpFile = - DataGenerator.base64Characters() - .tempFile(Paths.get(System.getProperty("java.io.tmpdir")), 100 * 1024 * 1024)) { + TmpFile.of(Paths.get(System.getProperty("java.io.tmpdir")), fileName + ".", ".bin")) { + try (SeekableByteChannel w = tmpFile.writer()) { + int written = Buffers.emptyTo(ByteBuffer.wrap(p.content.getBytes()), w); + assertThat(written).isEqualTo(p.content.length()); + } BlobAppendableUpload appendable = - storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), uploadConfig); - try (AppendableUploadWriteableByteChannel channel = appendable.open(); - SeekableByteChannel r = - Files.newByteChannel(tmpFile.getPath(), StandardOpenOption.READ)) { - ByteStreams.copy(r, channel); + storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), p.uploadConfig); + try (SeekableByteChannel r = tmpFile.reader(); + AppendableUploadWriteableByteChannel w = appendable.open()) { + long copied = Buffers.copyUsingBuffer(Buffers.allocate(8 * _1MiB), r, w); + assertThat(copied).isEqualTo(fileSize); } BlobInfo bi = appendable.getResult().get(5, TimeUnit.SECONDS); - assertThat(bi.getSize()).isEqualTo(100 * 1024 * 1024); + assertThat(bi.getSize()).isEqualTo(fileSize); } } @Test - // Pending work in testbench, manually verified internally on 2025-03-25 + // Pending work in testbench: https://github.com/googleapis/storage-testbench/issues/723 + // manually verified internally on 2025-03-25 @CrossRun.Ignore(backends = {Backend.TEST_BENCH}) public void takeoverJustToFinalizeWorks() throws Exception { - BlobAppendableUploadConfig uploadConfig = - BlobAppendableUploadConfig.of().withFlushPolicy(FlushPolicy.maxFlushSize(5)); - BlobId bid = BlobId.of(bucket.getName(), generator.randomObjectName()); + BlobId bid = 
BlobId.of(bucket.getName(), UUID.randomUUID().toString()); + assumeTrue( + "manually finalizing", + p.uploadConfig.getCloseAction() != CloseAction.FINALIZE_WHEN_CLOSING); BlobAppendableUpload upload = - storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), uploadConfig); - + storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), p.uploadConfig); try (AppendableUploadWriteableByteChannel channel = upload.open()) { - channel.write(DataGenerator.base64Characters().genByteBuffer(20)); + int written = Buffers.emptyTo(ByteBuffer.wrap(p.content.getBytes()), channel); + assertThat(written).isEqualTo(p.content.length()); } - - BlobInfo blob = upload.getResult().get(5, TimeUnit.SECONDS); + BlobInfo done1 = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(done1.getSize()).isEqualTo(p.content.length()); + assertThat(done1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(p.content.getCrc32c())); BlobAppendableUpload takeOver = - storage.blobAppendableUpload(BlobInfo.newBuilder(blob.getBlobId()).build(), uploadConfig); + storage.blobAppendableUpload( + BlobInfo.newBuilder(done1.getBlobId()).build(), p.uploadConfig); takeOver.open().finalizeAndClose(); - BlobInfo i = takeOver.getResult().get(5, TimeUnit.SECONDS); - assertThat(i.getSize()).isEqualTo(20); - BlobInfo actual = takeOver.getResult().get(5, TimeUnit.SECONDS); + BlobInfo done2 = takeOver.getResult().get(5, TimeUnit.SECONDS); assertAll( - () -> assertThat(actual).isNotNull(), - () -> assertThat(actual.getSize()).isEqualTo(20), - () -> assertThat(actual.getCrc32c()).isNotNull()); + () -> assertThat(done2).isNotNull(), + () -> assertThat(done2.getSize()).isEqualTo(p.content.length()), + () -> assertThat(done2.getCrc32c()).isNotNull()); + } + + private void checkTestbenchIssue733() { + if (p.uploadConfig.getCloseAction() == CloseAction.FINALIZE_WHEN_CLOSING) { + int estimatedMessageCount = 0; + FlushPolicy flushPolicy = p.uploadConfig.getFlushPolicy(); + if (flushPolicy instanceof 
MinFlushSizeFlushPolicy) { + MinFlushSizeFlushPolicy min = (MinFlushSizeFlushPolicy) flushPolicy; + estimatedMessageCount = p.content.length() / min.getMinFlushSize(); + } else if (flushPolicy instanceof MaxFlushSizeFlushPolicy) { + MaxFlushSizeFlushPolicy max = (MaxFlushSizeFlushPolicy) flushPolicy; + estimatedMessageCount = p.content.length() / max.getMaxFlushSize(); + } + // if our int division results in a partial message, ensure we are counting at least one + // message. We have a separate test specifically for empty objects. + estimatedMessageCount = Math.max(estimatedMessageCount, 1); + assumeTrue( + "testbench broken https://github.com/googleapis/storage-testbench/issues/733", + estimatedMessageCount > 1); + } + } + + private byte[] readAllBytes(BlobInfo actual) + throws IOException, InterruptedException, ExecutionException, TimeoutException { + ApiFuture blobReadSessionFuture = storage.blobReadSession(actual.getBlobId()); + try (BlobReadSession read = blobReadSessionFuture.get(2_372, TimeUnit.MILLISECONDS)) { + ApiFuture futureBytes = read.readAs(ReadProjectionConfigs.asFutureBytes()); + return futureBytes.get(2_273, TimeUnit.MILLISECONDS); + } + } + + public static final class UploadConfigParameters implements ParametersProvider { + + private static final ImmutableList flushPolicies = + ImmutableList.of( + FlushPolicy.minFlushSize(1_000), + FlushPolicy.minFlushSize(1_000).withMaxPendingBytes(5_000), + FlushPolicy.maxFlushSize(500_000), + FlushPolicy.minFlushSize(), + FlushPolicy.maxFlushSize()); + private static final ImmutableList closeActions = + ImmutableList.copyOf(CloseAction.values()); + public static final ImmutableList objectSizes = + ImmutableList.of(5, 500, 5_000, 500_000, 5_000_000); + + @Override + public ImmutableList parameters() { + ImmutableList.Builder builder = ImmutableList.builder(); + for (FlushPolicy fp : flushPolicies) { + for (CloseAction ca : closeActions) { + for (int size : objectSizes) { + Param param = + new Param( + 
ChecksummedTestContent.gen(size), + BlobAppendableUploadConfig.of().withFlushPolicy(fp).withCloseAction(ca)); + builder.add(param); + } + } + } + return builder.build(); + } + } + + public static final class Param { + private final ChecksummedTestContent content; + private final BlobAppendableUploadConfig uploadConfig; + + private Param(ChecksummedTestContent content, BlobAppendableUploadConfig uploadConfig) { + this.content = content; + this.uploadConfig = uploadConfig; + } + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("byteCount", content) + .add("uploadConfig", uploadConfig) + .toString(); + } } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java new file mode 100644 index 0000000000..8742482733 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITBidiAppendableUnbufferedWritableByteChannelTest.java @@ -0,0 +1,90 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.common.truth.Truth.assertThat; + +import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; +import com.google.cloud.storage.BlobAppendableUploadConfig.CloseAction; +import com.google.cloud.storage.TransportCompatibility.Transport; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.cloud.storage.it.runner.StorageITRunner; +import com.google.cloud.storage.it.runner.annotations.Backend; +import com.google.cloud.storage.it.runner.annotations.BucketFixture; +import com.google.cloud.storage.it.runner.annotations.BucketType; +import com.google.cloud.storage.it.runner.annotations.CrossRun; +import com.google.cloud.storage.it.runner.annotations.Inject; +import com.google.cloud.storage.it.runner.registry.Generator; +import java.nio.ByteBuffer; +import java.nio.channels.WritableByteChannel; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import org.junit.Test; +import org.junit.runner.RunWith; + +@RunWith(StorageITRunner.class) +@CrossRun( + backends = {Backend.TEST_BENCH}, + transports = Transport.GRPC) +public final class ITBidiAppendableUnbufferedWritableByteChannelTest { + + @Inject public Generator generator; + + @Inject public Storage storage; + + @Inject + @BucketFixture(BucketType.RAPID) + public BucketInfo bucket; + + @Inject public Backend backend; + + @Test + public void nonBufferAlignedWritesLeaveBuffersInTheCorrectState() throws Exception { + BlobId bid = BlobId.of(bucket.getName(), UUID.randomUUID().toString()); + BlobAppendableUploadConfig config = + BlobAppendableUploadConfig.of() + .withFlushPolicy(FlushPolicy.minFlushSize(8 * 1024).withMaxPendingBytes(16 * 1024)) + .withCloseAction(CloseAction.CLOSE_WITHOUT_FINALIZING); + ChecksummedTestContent ctc = ChecksummedTestContent.gen(16 * 1024 + 5); + BlobAppendableUpload upload = + 
storage.blobAppendableUpload(BlobInfo.newBuilder(bid).build(), config); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + // enqueue 4 bytes, this makes it so the following 8K writes don't evenly fit + checkedEmptyTo(ctc.slice(0, 4).asByteBuffer(), channel); + checkedEmptyTo(ctc.slice(4, 8192).asByteBuffer(), channel); + checkedEmptyTo(ctc.slice(4 + 8192, 8192).asByteBuffer(), channel); + checkedEmptyTo(ctc.slice(4 + 8192 + 8192, 1).asByteBuffer(), channel); + } + BlobInfo done1 = upload.getResult().get(5, TimeUnit.SECONDS); + assertThat(done1.getSize()).isEqualTo(ctc.length()); + assertThat(done1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(ctc.getCrc32c())); + } + + private static int checkedEmptyTo(ByteBuffer buf, WritableByteChannel c) throws Exception { + int remaining = buf.remaining(); + int position = buf.position(); + int remaining1 = buf.remaining(); + int written = StorageChannelUtils.blockingEmptyTo(buf, c); + assertAll( + () -> assertThat(written).isEqualTo(position + remaining1), + () -> assertThat(buf.position()).isEqualTo(position + written), + () -> assertThat(buf.remaining()).isEqualTo(remaining1 - written)); + assertThat(written).isEqualTo(remaining); + return written; + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java index 91bb7719f3..1cb3270498 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionFakeTest.java @@ -18,6 +18,7 @@ import static com.google.cloud.storage.ByteSizeConstants._2MiB; import static com.google.cloud.storage.PackagePrivateMethodWorkarounds.maybeGetStorageDataClient; +import static com.google.cloud.storage.TestUtils.GRPC_STATUS_DETAILS_KEY; import static com.google.cloud.storage.TestUtils.apiException; import 
static com.google.cloud.storage.TestUtils.assertAll; import static com.google.cloud.storage.TestUtils.getChecksummedData; @@ -76,7 +77,6 @@ import io.grpc.Status; import io.grpc.Status.Code; import io.grpc.StatusRuntimeException; -import io.grpc.protobuf.ProtoUtils; import io.grpc.stub.StreamObserver; import java.io.ByteArrayOutputStream; import java.nio.ByteBuffer; @@ -108,10 +108,6 @@ import org.junit.function.ThrowingRunnable; public final class ITObjectReadSessionFakeTest { - private static final Metadata.Key GRPC_STATUS_DETAILS_KEY = - Metadata.Key.of( - "grpc-status-details-bin", - ProtoUtils.metadataMarshaller(com.google.rpc.Status.getDefaultInstance())); private static final Object METADATA = Object.newBuilder() diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java index 321ba8e9a0..cd3eb3170d 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITObjectReadSessionTest.java @@ -28,6 +28,7 @@ import com.google.api.gax.rpc.OutOfRangeException; import com.google.cloud.storage.BlobAppendableUpload.AppendableUploadWriteableByteChannel; import com.google.cloud.storage.Crc32cValue.Crc32cLengthKnown; +import com.google.cloud.storage.FlushPolicy.MinFlushSizeFlushPolicy; import com.google.cloud.storage.Storage.BlobWriteOption; import com.google.cloud.storage.TransportCompatibility.Transport; import com.google.cloud.storage.ZeroCopySupport.DisposableByteString; @@ -77,6 +78,8 @@ public final class ITObjectReadSessionTest { @Inject public Generator generator; + @Inject public Backend backend; + @Test public void bytes() throws ExecutionException, InterruptedException, TimeoutException, IOException { @@ -325,10 +328,11 @@ public void seekable() throws Exception { @Test public void outOfRange() throws ExecutionException, 
InterruptedException, TimeoutException, IOException { + int objectSize = 4 * 1024 * 1024; ChecksummedTestContent testContent = - ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(4)); - BlobInfo obj512KiB = create(testContent); - BlobId blobId = obj512KiB.getBlobId(); + ChecksummedTestContent.of(DataGenerator.base64Characters().genBytes(objectSize)); + BlobInfo gen1 = create(testContent); + BlobId blobId = gen1.getBlobId(); try (BlobReadSession blobReadSession = storage.blobReadSession(blobId).get(30, TimeUnit.SECONDS)) { @@ -338,14 +342,15 @@ public void outOfRange() ReadAsFutureBytes cfg = ReadProjectionConfigs.asFutureBytes(); - ApiFuture f2 = blobReadSession.readAs(cfg.withRangeSpec(RangeSpec.beginAt(5))); + ApiFuture f2 = + blobReadSession.readAs(cfg.withRangeSpec(RangeSpec.beginAt(objectSize + 1))); ExecutionException ee = assertThrows(ExecutionException.class, () -> f2.get(30, TimeUnit.SECONDS)); assertThat(ee).hasCauseThat().hasCauseThat().isInstanceOf(OutOfRangeException.class); ApiFuture f1 = blobReadSession.readAs(cfg.withRangeSpec(RangeSpec.all())); byte[] bytes1 = f1.get(30, TimeUnit.SECONDS); - assertThat(bytes1.length).isEqualTo(4); + assertThat(bytes1.length).isEqualTo(objectSize); } } @@ -353,11 +358,17 @@ private BlobInfo create(ChecksummedTestContent content) throws IOException, ExecutionException, InterruptedException, TimeoutException { BlobInfo info = BlobInfo.newBuilder(bucket, generator.randomObjectName()).build(); + BlobAppendableUploadConfig config = BlobAppendableUploadConfig.of(); + if (backend == Backend.TEST_BENCH) { + // workaround for https://github.com/googleapis/storage-testbench/issues/733 + MinFlushSizeFlushPolicy flushPolicy = + FlushPolicy.minFlushSize(256 * 1024).withMaxPendingBytes(4 * 1024 * 1024); + config = config.withFlushPolicy(flushPolicy); + } BlobAppendableUpload upload = - storage.blobAppendableUpload( - info, BlobAppendableUploadConfig.of(), BlobWriteOption.doesNotExist()); - try 
(AppendableUploadWriteableByteChannel channel = upload.open(); ) { - channel.write(ByteBuffer.wrap(content.getBytes())); + storage.blobAppendableUpload(info, config, BlobWriteOption.doesNotExist()); + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + Buffers.emptyTo(ByteBuffer.wrap(content.getBytes()), channel); channel.finalizeAndClose(); } return upload.getResult().get(5, TimeUnit.SECONDS); diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java index 14d146e533..255d0e4bea 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java @@ -27,6 +27,8 @@ import com.google.cloud.storage.DefaultBufferedWritableByteChannelTest.AuditingBufferHandle; import com.google.cloud.storage.DefaultBufferedWritableByteChannelTest.CountingWritableByteChannelAdapter; import com.google.cloud.storage.UnbufferedWritableByteChannelSession.UnbufferedWritableByteChannel; +import com.google.cloud.storage.it.ChecksummedTestContent; +import com.google.common.base.MoreObjects; import com.google.common.collect.ImmutableList; import java.io.ByteArrayOutputStream; import java.io.IOException; @@ -52,14 +54,93 @@ import net.jqwik.api.Provide; import net.jqwik.api.providers.TypeUsage; import org.checkerframework.checker.nullness.qual.NonNull; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.slf4j.Marker; +import org.slf4j.MarkerFactory; public final class MinFlushBufferedWritableByteChannelTest { + private static final Logger LOGGER = + LoggerFactory.getLogger(MinFlushBufferedWritableByteChannelTest.class); + private static final Marker TRACE_ENTER = MarkerFactory.getMarker("enter"); + private static final Marker TRACE_EXIT = 
MarkerFactory.getMarker("exit"); @Example void edgeCases() { JqwikTest.report(TypeUsage.of(WriteOps.class), arbitraryWriteOps()); } + @Example + void nonBlockingWrite0DoesNotBlock() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + MinFlushBufferedWritableByteChannel c = + new MinFlushBufferedWritableByteChannel(handle, new OnlyConsumeNBytes(0, 1), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_8_3 = ByteBuffer.wrap(all.slice(0, 3).getBytes()); + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(0); + assertThat(s_4_4.remaining()).isEqualTo(4); + + int written3 = c.write(s_8_3); + assertThat(written3).isEqualTo(0); + assertThat(s_8_3.remaining()).isEqualTo(3); + + assertThat(handle.remaining()).isEqualTo(1); + } + + @Example + void nonBlockingWritePartialDoesNotBlock() throws IOException { + BufferHandle handle = BufferHandle.allocate(5); + MinFlushBufferedWritableByteChannel c = + new MinFlushBufferedWritableByteChannel(handle, new OnlyConsumeNBytes(6, 5), false); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + ByteBuffer s_4_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + int written1 = c.write(s_0_4); + assertThat(written1).isEqualTo(4); + assertThat(s_0_4.remaining()).isEqualTo(0); + assertThat(handle.remaining()).isEqualTo(1); + + int written2 = c.write(s_4_4); + assertThat(written2).isEqualTo(1); + assertThat(s_4_4.remaining()).isEqualTo(3); + assertThat(handle.remaining()).isEqualTo(5); + } + + @Example + void illegalStateExceptionIfWrittenLt0() throws IOException { + BufferHandle handle = BufferHandle.allocate(4); + 
MinFlushBufferedWritableByteChannel c = + new MinFlushBufferedWritableByteChannel( + handle, + new UnbufferedWritableByteChannel() { + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + return -1; + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + }); + + ChecksummedTestContent all = ChecksummedTestContent.gen(11); + ByteBuffer s_0_4 = ByteBuffer.wrap(all.slice(0, 4).getBytes()); + assertThrows(IllegalStateException.class, () -> c.write(s_0_4)); + } + @Property void bufferingEagerlyFlushesWhenFull(@ForAll("WriteOps") WriteOps writeOps) throws IOException { ByteBuffer buffer = ByteBuffer.allocate(writeOps.bufferSize); @@ -580,4 +661,64 @@ static WriteOps of(int numBytes, int bufferSize, int writeSize) { dbgExpectedWriteSizes); } } + + static final class OnlyConsumeNBytes implements UnbufferedWritableByteChannel { + private static final Logger LOGGER = LoggerFactory.getLogger(OnlyConsumeNBytes.class); + private final long bytesToConsume; + private final int consumptionIncrement; + private long bytesConsumed; + + OnlyConsumeNBytes(int bytesToConsume, int consumptionIncrement) { + this.bytesToConsume = bytesToConsume; + this.consumptionIncrement = consumptionIncrement; + this.bytesConsumed = 0; + } + + long getBytesConsumed() { + return bytesConsumed; + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + LOGGER.info(TRACE_ENTER, "write(srcs : {}, offset : {}, length : {})", srcs, offset, length); + try { + if (bytesConsumed >= bytesToConsume) { + return 0; + } + + long consumed = 0; + int toConsume = consumptionIncrement; + for (int i = offset; i < length && toConsume > 0; i++) { + ByteBuffer src = srcs[i]; + int remaining = src.remaining(); + int position = src.position(); + int consumable = Math.min(toConsume, remaining); + toConsume -= consumable; + consumed += consumable; + src.position(position + consumable); + } + bytesConsumed += consumed; + return 
consumed; + } finally { + LOGGER.info(TRACE_EXIT, "write(srcs : {}, offset : {}, length : {})", srcs, offset, length); + } + } + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + + @Override + public String toString() { + return MoreObjects.toStringHelper(this) + .add("bytesToConsume", bytesToConsume) + .add("consumptionIncrement", consumptionIncrement) + .add("bytesConsumed", bytesConsumed) + .toString(); + } + } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java index b3f45526f6..2b3dce86f6 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/PackagePrivateMethodWorkarounds.java @@ -23,6 +23,7 @@ import com.google.cloud.storage.OtelStorageDecorator.OtelDecoratedReadChannel; import com.google.cloud.storage.OtelStorageDecorator.OtelDecoratedWriteChannel; import com.google.common.collect.ImmutableList; +import com.google.protobuf.MessageOrBuilder; import com.google.storage.v2.StorageClient; import java.util.Optional; import java.util.concurrent.ExecutionException; @@ -134,4 +135,8 @@ public static void ifNonNull(@Nullable T1 t, Function map, Cons public static BlobInfo noAcl(BlobInfo bi) { return bi.toBuilder().setOwner(null).setAcl(ImmutableList.of()).build(); } + + public static String fmtProto(Object msg, Function fmt) { + return StorageV2ProtoUtils.fmtProtoWithFmt(msg, fmt::apply); + } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java index e55241a948..e0fd73dee7 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java +++ 
b/google-cloud-storage/src/test/java/com/google/cloud/storage/RetryContextTest.java @@ -441,7 +441,9 @@ private MaxAttemptRetryingDependencies maxAttempts(int maxAttempts) { static OnFailure failOnFailure() { InvocationTracer invocationTracer = new InvocationTracer("Unexpected onFailure invocation"); return t -> { - invocationTracer.addSuppressed(t); + if (t != invocationTracer) { + invocationTracer.addSuppressed(t); + } throw invocationTracer; }; } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java new file mode 100644 index 0000000000..5c6d87c164 --- /dev/null +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/StorageChannelUtilsTest.java @@ -0,0 +1,270 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.google.cloud.storage; + +import static com.google.cloud.storage.StorageChannelUtils.blockingEmptyTo; +import static com.google.cloud.storage.StorageChannelUtils.blockingFillFrom; +import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.TestUtils.xxd; +import static com.google.common.truth.Truth.assertThat; +import static org.junit.Assert.assertThrows; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.channels.ReadableByteChannel; +import java.nio.channels.WritableByteChannel; +import java.util.concurrent.atomic.AtomicInteger; +import org.junit.Test; + +public final class StorageChannelUtilsTest { + + @Test + public void emptyTo_fullyConsumed() throws Exception { + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger writeInvocationCount = new AtomicInteger(0); + int written = + blockingEmptyTo( + buf, + new SimpleWritableByteChannel() { + @Override + public int write(ByteBuffer src) { + int i = writeInvocationCount.getAndIncrement(); + if (i % 2 == 0) { + return 0; + } else { + src.get(); + return 1; + } + } + }); + assertAll( + () -> assertThat(written).isEqualTo(16), + () -> assertThat(writeInvocationCount.get()).isEqualTo(32), + () -> assertThat(buf.hasRemaining()).isFalse()); + } + + @Test + public void emptyTo_errorPropagated() throws Exception { + ByteBuffer buf = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger writeInvocationCount = new AtomicInteger(0); + IOException ioException = + assertThrows( + IOException.class, + () -> + blockingEmptyTo( + buf, + new SimpleWritableByteChannel() { + @Override + public int write(ByteBuffer src) throws IOException { + int i = writeInvocationCount.incrementAndGet(); + if (i == 0) { + return 0; + } else if (i == 3) { + throw new IOException("boom boom"); + } else { + src.get(); + return 1; + } + } + })); + assertAll( + () -> assertThat(ioException).hasMessageThat().isEqualTo("boom 
boom"), + () -> assertThat(writeInvocationCount.get()).isEqualTo(3), + () -> assertThat(buf.position()).isEqualTo(2)); + } + + @Test + public void fillFrom_fullyConsumed_dstGtEq_data() throws Exception { + ByteBuffer data = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(32); + int read = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + readInvocationCount.getAndIncrement(); + if (!data.hasRemaining()) { + return -1; + } else { + dst.put(data.get()); + return 1; + } + } + }); + assertAll( + () -> assertThat(read).isEqualTo(16), + () -> assertThat(readInvocationCount.get()).isEqualTo(16 + 1), // + 1 to read EOF + () -> assertThat(data.hasRemaining()).isFalse(), + () -> assertThat(buf.position()).isEqualTo(16)); + } + + @Test + public void fillFrom_fullyConsumed_dstLt_data() throws Exception { + ByteBuffer data = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(8); + int read = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + readInvocationCount.getAndIncrement(); + if (!data.hasRemaining()) { + return -1; + } else { + dst.put(data.get()); + return 1; + } + } + }); + assertAll( + () -> assertThat(read).isEqualTo(8), + () -> assertThat(readInvocationCount.get()).isEqualTo(8), + () -> assertThat(data.hasRemaining()).isTrue(), + () -> assertThat(buf.position()).isEqualTo(8)); + } + + @Test + public void fillFrom_eofPropagated() throws Exception { + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(8); + int read = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + readInvocationCount.getAndIncrement(); + return -1; + } + }); + 
assertAll( + () -> assertThat(read).isEqualTo(-1), + () -> assertThat(readInvocationCount.get()).isEqualTo(1), + () -> assertThat(buf.position()).isEqualTo(0)); + } + + @Test + public void fillFrom_errorPropagated() throws Exception { + ByteBuffer data = DataGenerator.base64Characters().genByteBuffer(16); + AtomicInteger readInvocationCount = new AtomicInteger(0); + ByteBuffer buf = ByteBuffer.allocate(32); + IOException ioException = + assertThrows( + IOException.class, + () -> + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) throws IOException { + int i = readInvocationCount.incrementAndGet(); + if (i == 0) { + return 0; + } else if (i == 3) { + throw new IOException("boom boom"); + } else { + dst.put(data.get()); + return 1; + } + } + })); + assertAll( + () -> assertThat(ioException).hasMessageThat().isEqualTo("boom boom"), + () -> assertThat(readInvocationCount.get()).isEqualTo(3), + () -> assertThat(buf.position()).isEqualTo(2), + () -> assertThat(buf.position()).isEqualTo(2)); + } + + @Test + public void fillFrom_handles_0SizeRead_someBytesRead() throws Exception { + byte[] bytes = new byte[14]; + ByteBuffer buf = ByteBuffer.wrap(bytes); + + byte[] expected = + new byte[] { + (byte) 'A', + (byte) 'B', + (byte) 'C', + (byte) 'A', + (byte) 'B', + (byte) 'A', + (byte) 'A', + (byte) 'A', + (byte) 'B', + (byte) 'A', + (byte) 'B', + (byte) 'C', + (byte) 0, + (byte) 0 + }; + + int[] acceptSequence = new int[] {3, 2, 1, 0, 0, 1, 2, 3}; + AtomicInteger readCount = new AtomicInteger(0); + + int filled = + blockingFillFrom( + buf, + new SimpleReadableByteChannel() { + @Override + public int read(ByteBuffer dst) { + int i = readCount.getAndIncrement(); + if (i == acceptSequence.length) { + return -1; + } + int bytesToRead = acceptSequence[i]; + if (bytesToRead > 0) { + long copy = + Buffers.copy( + DataGenerator.base64Characters().genByteBuffer(bytesToRead), dst); + assertThat(copy).isEqualTo(bytesToRead); + } + 
+ return bytesToRead; + } + }); + + assertAll( + () -> assertThat(filled).isEqualTo(12), + () -> assertThat(xxd(bytes)).isEqualTo(xxd(expected))); + } + + private abstract static class SimpleWritableByteChannel implements WritableByteChannel { + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + } + + private abstract static class SimpleReadableByteChannel implements ReadableByteChannel { + + @Override + public boolean isOpen() { + return true; + } + + @Override + public void close() {} + } +} diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java index 5fd68dfc83..f493570acc 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/TestUtils.java @@ -25,6 +25,7 @@ import com.google.api.gax.rpc.ApiException; import com.google.api.gax.rpc.ApiExceptionFactory; import com.google.api.gax.rpc.ErrorDetails; +import com.google.api.gax.rpc.StreamController; import com.google.cloud.RetryHelper; import com.google.cloud.RetryHelper.RetryHelperException; import com.google.cloud.http.BaseHttpServiceException; @@ -42,10 +43,12 @@ import com.google.rpc.DebugInfo; import com.google.storage.v2.ChecksummedData; import com.google.storage.v2.WriteObjectRequest; +import io.grpc.Metadata; import io.grpc.Status.Code; import io.grpc.StatusRuntimeException; import io.grpc.netty.shaded.io.netty.buffer.ByteBufUtil; import io.grpc.netty.shaded.io.netty.buffer.Unpooled; +import io.grpc.protobuf.ProtoUtils; import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; @@ -81,6 +84,11 @@ public final class TestUtils { + public static final Metadata.Key GRPC_STATUS_DETAILS_KEY = + Metadata.Key.of( + "grpc-status-details-bin", + ProtoUtils.metadataMarshaller(com.google.rpc.Status.getDefaultInstance())); + private 
TestUtils() {} public static byte[] gzipBytes(byte[] bytes) { @@ -397,4 +405,23 @@ public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOEx } }); } + + public static StreamController nullStreamController() { + return NullStreamController.INSTANCE; + } + + static class NullStreamController implements StreamController { + private static final NullStreamController INSTANCE = new NullStreamController(); + + private NullStreamController() {} + + @Override + public void cancel() {} + + @Override + public void disableAutoInboundFlowControl() {} + + @Override + public void request(int count) {} + } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ResumableSessionFailureScenarioTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/UploadFailureScenarioTest.java similarity index 94% rename from google-cloud-storage/src/test/java/com/google/cloud/storage/ResumableSessionFailureScenarioTest.java rename to google-cloud-storage/src/test/java/com/google/cloud/storage/UploadFailureScenarioTest.java index 4c17e19c44..5d54ad0d2c 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ResumableSessionFailureScenarioTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/UploadFailureScenarioTest.java @@ -18,10 +18,10 @@ import static com.google.cloud.storage.ByteSizeConstants._256KiB; import static com.google.cloud.storage.ByteSizeConstants._512KiB; -import static com.google.cloud.storage.ResumableSessionFailureScenario.SCENARIO_1; -import static com.google.cloud.storage.ResumableSessionFailureScenario.isContinue; -import static com.google.cloud.storage.ResumableSessionFailureScenario.isOk; import static com.google.cloud.storage.TestUtils.assertAll; +import static com.google.cloud.storage.UploadFailureScenario.SCENARIO_1; +import static com.google.cloud.storage.UploadFailureScenario.isContinue; +import static com.google.cloud.storage.UploadFailureScenario.isOk; import static 
com.google.common.truth.Truth.assertThat; import com.google.api.client.http.EmptyContent; @@ -53,7 +53,7 @@ import java.nio.charset.StandardCharsets; import org.junit.Test; -public final class ResumableSessionFailureScenarioTest { +public final class UploadFailureScenarioTest { private static final GsonFactory gson = GsonFactory.getDefaultInstance(); @Test @@ -84,7 +84,7 @@ public void toStorageException_ioExceptionDuringContentResolutionAddedAsSuppress resp.getHeaders().setContentType("text/plain; charset=utf-8").setContentLength(5L); StorageException storageException = - ResumableSessionFailureScenario.SCENARIO_1.toStorageException( + UploadFailureScenario.SCENARIO_1.toStorageException( "uploadId", resp, new Cause(), @@ -125,8 +125,7 @@ public void multilineResponseBodyIsProperlyPrefixed() throws Exception { .setContentLength((long) bytes.length); StorageException storageException = - ResumableSessionFailureScenario.SCENARIO_0.toStorageException( - "uploadId", resp, null, () -> json); + UploadFailureScenario.SCENARIO_0.toStorageException("uploadId", resp, null, () -> json); assertThat(storageException.getCode()).isEqualTo(0); assertThat(storageException).hasMessageThat().contains("\t|< \"generation\": \"1\",\n"); @@ -148,8 +147,7 @@ public void xGoogStoredHeadersIncludedIfPresent() throws IOException { .setContentLength(0L); StorageException storageException = - ResumableSessionFailureScenario.SCENARIO_0.toStorageException( - "uploadId", resp, null, () -> null); + UploadFailureScenario.SCENARIO_0.toStorageException("uploadId", resp, null, () -> null); assertThat(storageException.getCode()).isEqualTo(0); assertThat(storageException).hasMessageThat().contains("|< x-goog-stored-content-length: 5"); @@ -171,8 +169,7 @@ public void xGoogGcsIdempotencyTokenHeadersIncludedIfPresent() throws IOExceptio resp.getHeaders().set("X-Goog-Gcs-Idempotency-Token", "5").setContentLength(0L); StorageException storageException = - 
ResumableSessionFailureScenario.SCENARIO_0.toStorageException( - "uploadId", resp, null, () -> null); + UploadFailureScenario.SCENARIO_0.toStorageException("uploadId", resp, null, () -> null); assertThat(storageException.getCode()).isEqualTo(0); assertThat(storageException).hasMessageThat().contains("|< x-goog-gcs-idempotency-token: 5"); diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java index 6ccb05f524..7050638c05 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/ChecksummedTestContent.java @@ -19,6 +19,7 @@ import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkPositionIndexes; +import com.google.cloud.storage.DataGenerator; import com.google.common.base.MoreObjects; import com.google.common.collect.ImmutableList; import com.google.common.hash.Hashing; @@ -26,8 +27,10 @@ import com.google.common.primitives.Ints; import com.google.protobuf.ByteString; import com.google.protobuf.UnsafeByteOperations; +import com.google.storage.v2.BidiWriteObjectRequest; import com.google.storage.v2.ChecksummedData; import java.io.ByteArrayInputStream; +import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; @@ -50,6 +53,10 @@ public byte[] getBytes() { return bytes; } + public int length() { + return bytes.length; + } + public byte[] getBytes(int beginIndex) { return UnsafeByteOperations.unsafeWrap(bytes).substring(beginIndex).toByteArray(); } @@ -88,6 +95,13 @@ public byte[] concat(byte b) { return newBytes; } + public ChecksummedTestContent concat(ChecksummedTestContent ctc) { + byte[] newBytes = new byte[this.length() + ctc.length()]; + System.arraycopy(bytes, 0, newBytes, 0, 
bytes.length); + System.arraycopy(ctc.bytes, 0, newBytes, bytes.length, ctc.length()); + return ChecksummedTestContent.of(newBytes); + } + public ByteArrayInputStream bytesAsInputStream() { return new ByteArrayInputStream(bytes); } @@ -111,6 +125,14 @@ public List chunkup(int chunkSize) { return ImmutableList.copyOf(elements); } + public BidiWriteObjectRequest.Builder asBidiWrite() { + return BidiWriteObjectRequest.newBuilder().setChecksummedData(asChecksummedData()); + } + + public ByteBuffer asByteBuffer() { + return ByteBuffer.wrap(bytes); + } + @Override public String toString() { return MoreObjects.toStringHelper(this) @@ -124,6 +146,11 @@ public static ChecksummedTestContent of(String content) { return of(bytes); } + public static ChecksummedTestContent gen(int length) { + byte[] bytes1 = DataGenerator.base64Characters().genBytes(length); + return of(bytes1); + } + public static ChecksummedTestContent of(byte[] bytes) { int crc32c = Hashing.crc32c().hashBytes(bytes).asInt(); String md5Base64 = Base64.getEncoder().encodeToString(Hashing.md5().hashBytes(bytes).asBytes()); diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor.java index 5c55cb8613..b39f6dd1c4 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptor.java @@ -17,22 +17,16 @@ package com.google.cloud.storage.it; import com.google.api.gax.grpc.GrpcInterceptorProvider; +import com.google.cloud.storage.PackagePrivateMethodWorkarounds; import com.google.common.annotations.VisibleForTesting; import com.google.common.collect.ImmutableList; import com.google.protobuf.Any; -import com.google.protobuf.ByteString; import com.google.protobuf.InvalidProtocolBufferException; import 
com.google.protobuf.Message; -import com.google.protobuf.MessageOrBuilder; import com.google.protobuf.TextFormat; import com.google.protobuf.UnsafeByteOperations; import com.google.rpc.DebugInfo; import com.google.rpc.ErrorInfo; -import com.google.storage.v2.BidiReadObjectResponse; -import com.google.storage.v2.BidiWriteObjectRequest; -import com.google.storage.v2.ObjectRangeData; -import com.google.storage.v2.ReadObjectResponse; -import com.google.storage.v2.WriteObjectRequest; import io.grpc.CallOptions; import io.grpc.Channel; import io.grpc.ClientCall; @@ -43,7 +37,6 @@ import io.grpc.MethodDescriptor; import io.grpc.Status; import java.nio.charset.StandardCharsets; -import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.Map; @@ -159,101 +152,8 @@ public void sendMessage(ReqT message) { } @NonNull - static String fmtProto(@NonNull Object obj) { - if (obj instanceof WriteObjectRequest) { - return fmtProto((WriteObjectRequest) obj); - } else if (obj instanceof BidiWriteObjectRequest) { - return fmtProto((BidiWriteObjectRequest) obj); - } else if (obj instanceof ReadObjectResponse) { - return fmtProto((ReadObjectResponse) obj); - } else if (obj instanceof BidiReadObjectResponse) { - return fmtProto((BidiReadObjectResponse) obj); - } else if (obj instanceof MessageOrBuilder) { - return fmtProto((MessageOrBuilder) obj); - } else { - return obj.toString(); - } - } - - @NonNull - static String fmtProto(@NonNull final MessageOrBuilder msg) { - return TextFormat.printer().printToString(msg); - } - - @NonNull - static String fmtProto(@NonNull WriteObjectRequest msg) { - if (msg.hasChecksummedData()) { - ByteString content = msg.getChecksummedData().getContent(); - if (content.size() > 20) { - WriteObjectRequest.Builder b = msg.toBuilder(); - ByteString trim = snipBytes(content); - b.getChecksummedDataBuilder().setContent(trim); - - return fmtProto((MessageOrBuilder) b.build()); - } - } - return fmtProto((MessageOrBuilder) msg); - } - 
- @NonNull - static String fmtProto(@NonNull BidiWriteObjectRequest msg) { - if (msg.hasChecksummedData()) { - ByteString content = msg.getChecksummedData().getContent(); - if (content.size() > 20) { - BidiWriteObjectRequest.Builder b = msg.toBuilder(); - ByteString trim = snipBytes(content); - b.getChecksummedDataBuilder().setContent(trim); - - return fmtProto((MessageOrBuilder) b.build()); - } - } - return fmtProto((MessageOrBuilder) msg); - } - - @NonNull - static String fmtProto(@NonNull ReadObjectResponse msg) { - if (msg.hasChecksummedData()) { - ByteString content = msg.getChecksummedData().getContent(); - if (content.size() > 20) { - ReadObjectResponse.Builder b = msg.toBuilder(); - ByteString trim = snipBytes(content); - b.getChecksummedDataBuilder().setContent(trim); - - return fmtProto((MessageOrBuilder) b.build()); - } - } - return msg.toString(); - } - - @NonNull - public static String fmtProto(@NonNull BidiReadObjectResponse msg) { - List rangeData = msg.getObjectDataRangesList(); - if (!rangeData.isEmpty()) { - List snips = new ArrayList<>(); - for (ObjectRangeData rd : rangeData) { - if (rd.hasChecksummedData()) { - ByteString content = rd.getChecksummedData().getContent(); - if (content.size() > 20) { - ObjectRangeData.Builder b = rd.toBuilder(); - ByteString trim = snipBytes(content); - b.getChecksummedDataBuilder().setContent(trim); - snips.add(b.build()); - } else { - snips.add(rd); - } - } - } - BidiReadObjectResponse snipped = - msg.toBuilder().clearObjectDataRanges().addAllObjectDataRanges(snips).build(); - return fmtProto((MessageOrBuilder) snipped); - } - return fmtProto((MessageOrBuilder) msg); - } - - private static ByteString snipBytes(ByteString content) { - ByteString snip = - ByteString.copyFromUtf8(String.format(Locale.US, "", content.size())); - return content.substring(0, 20).concat(snip); + public static String fmtProto(@NonNull Object obj) { + return PackagePrivateMethodWorkarounds.fmtProto(obj, 
TextFormat.printer()::printToString); } // Suppress DataFlowIssue warnings for this method. diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java index 39b46cbf14..6ade93ac1c 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/GrpcPlainRequestLoggingInterceptorTest.java @@ -16,6 +16,7 @@ package com.google.cloud.storage.it; +import static com.google.cloud.storage.TestUtils.GRPC_STATUS_DETAILS_KEY; import static com.google.common.truth.Truth.assertThat; import com.google.common.io.Resources; @@ -29,7 +30,6 @@ import com.google.storage.v2.ReadRangeError; import io.grpc.Metadata; import io.grpc.Status; -import io.grpc.protobuf.ProtoUtils; import java.io.IOException; import java.net.URL; import java.nio.charset.StandardCharsets; @@ -39,10 +39,6 @@ import org.junit.Test; public final class GrpcPlainRequestLoggingInterceptorTest { - private static final Metadata.Key GRPC_STATUS_DETAILS_KEY = - Metadata.Key.of( - "grpc-status-details-bin", - ProtoUtils.metadataMarshaller(com.google.rpc.Status.getDefaultInstance())); @Test public void lazyOnCloseLogStringGolden() throws IOException { diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java index 7734794a9f..4d93407620 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/it/runner/registry/TestBench.java @@ -106,6 +106,8 @@ public final class TestBench implements ManagedLifecycle { private Path outPath; private Path errPath; + private boolean 
runningOutsideAlready; + private TestBench( boolean ignorePullError, String baseUri, @@ -199,6 +201,14 @@ public Object get() { @Override public void start() { + try { + listRetryTests(); + LOGGER.info("Using testbench running outside test suite."); + runningOutsideAlready = true; + return; + } catch (IOException ignore) { + // expected when the server isn't running already + } try { tempDirectory = Files.createTempDirectory(containerName); outPath = tempDirectory.resolve("stdout"); @@ -308,6 +318,10 @@ public boolean shouldRetry( @Override public void stop() { + if (runningOutsideAlready) { + // if the server was running outside the tests already simply return + return; + } try { process.destroy(); process.waitFor(2, TimeUnit.SECONDS); diff --git a/google-cloud-storage/src/test/resources/logback.xml b/google-cloud-storage/src/test/resources/logback.xml index 3dbad20390..779e3112d3 100644 --- a/google-cloud-storage/src/test/resources/logback.xml +++ b/google-cloud-storage/src/test/resources/logback.xml @@ -87,6 +87,7 @@ + From d6587f42b65a586a2e3f30e0559975801726a812 Mon Sep 17 00:00:00 2001 From: BenWhitehead Date: Thu, 21 Aug 2025 13:27:08 -0400 Subject: [PATCH 12/16] fix: update otel integration to properly activate span context for lazy RPCs such as reads & writes (#3255) * Fixes for WriteChannel * Fixes for RadChannel * Fixes for BlobWriteSession * Fixes for BlobReadSession `s$com.google.cloud.storage.Storage/readAs$com.google.cloud.storage.Storage/blobReadSession/readAs$g` * Fixes for BlobAppendableUpload --- .../cloud/storage/OtelStorageDecorator.java | 101 ++++++++++++++++-- 1 file changed, 91 insertions(+), 10 deletions(-) diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java index 8cfa7b031d..3bcc22cae8 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java +++ 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java @@ -64,11 +64,13 @@ import org.checkerframework.checker.nullness.qual.Nullable; @SuppressWarnings("DuplicatedCode") -final class OtelStorageDecorator implements Storage { +public final class OtelStorageDecorator implements Storage { /** Becomes the {@code otel.scope.name} attribute in a span */ private static final String OTEL_SCOPE_NAME = "cloud.google.com/java/storage"; + private static final String BLOB_READ_SESSION = "blobReadSession"; + @VisibleForTesting final Storage delegate; private final OpenTelemetry otel; private final Attributes baseAttributes; @@ -1434,13 +1436,11 @@ public BlobWriteSession blobWriteSession(BlobInfo blobInfo, BlobWriteOption... o .startSpan(); try (Scope ignore = sessionSpan.makeCurrent()) { BlobWriteSession session = delegate.blobWriteSession(blobInfo, options); - return new OtelDecoratedBlobWriteSession(session); + return new OtelDecoratedBlobWriteSession(session, sessionSpan); } catch (Throwable t) { sessionSpan.recordException(t); sessionSpan.setStatus(StatusCode.ERROR, t.getClass().getSimpleName()); throw t; - } finally { - sessionSpan.end(); } } @@ -1467,12 +1467,12 @@ public Blob moveBlob(MoveBlobRequest request) { public ApiFuture blobReadSession(BlobId id, BlobSourceOption... 
options) { Span blobReadSessionSpan = tracer - .spanBuilder("blobReadSession") + .spanBuilder(BLOB_READ_SESSION) .setAttribute("gsutil.uri", id.toGsUtilUriWithGeneration()) .startSpan(); try (Scope ignore1 = blobReadSessionSpan.makeCurrent()) { Context blobReadSessionContext = Context.current(); - Span ready = tracer.spanBuilder("blobReadSession/ready").startSpan(); + Span ready = tracer.spanBuilder(BLOB_READ_SESSION + "/ready").startSpan(); ApiFuture blobReadSessionApiFuture = delegate.blobReadSession(id, options); ApiFuture futureDecorated = ApiFutures.transform( @@ -1561,7 +1561,7 @@ static UnaryOperator retryContextDecorator(OpenTelemetry otel) { return String.format(Locale.US, "gs://%s/", bucket); } - private static final class TracerDecorator implements Tracer { + public static final class TracerDecorator implements Tracer { @Nullable private final Context parentContextOverride; private final Tracer delegate; private final Attributes baseAttributes; @@ -1578,7 +1578,7 @@ private TracerDecorator( this.spanNamePrefix = spanNamePrefix; } - private static TracerDecorator decorate( + public static TracerDecorator decorate( @Nullable Context parentContextOverride, OpenTelemetry otel, Attributes baseAttributes, @@ -1608,6 +1608,8 @@ static final class OtelDecoratedReadChannel implements ReadChannel { @VisibleForTesting final ReadChannel reader; private final Span span; + private volatile Scope scope; + private OtelDecoratedReadChannel(ReadChannel reader, Span span) { this.reader = reader; this.span = span; @@ -1615,6 +1617,7 @@ private OtelDecoratedReadChannel(ReadChannel reader, Span span) { @Override public void seek(long position) throws IOException { + clearScope(); reader.seek(position); } @@ -1630,6 +1633,7 @@ public RestorableState capture() { @Override public ReadChannel limit(long limit) { + clearScope(); return reader.limit(limit); } @@ -1640,6 +1644,7 @@ public long limit() { @Override public int read(ByteBuffer dst) throws IOException { + setScope(); 
return reader.read(dst); } @@ -1650,21 +1655,38 @@ public boolean isOpen() { @Override public void close() { + setScope(); try { reader.close(); } finally { span.end(); + clearScope(); + } + } + + private void clearScope() { + try (Scope ignore = scope) { + scope = null; + } + } + + public void setScope() { + if (scope != null) { + clearScope(); } + scope = span.makeCurrent(); } } private final class OtelDecoratedBlobWriteSession implements BlobWriteSession { private final BlobWriteSession delegate; + private final Span sessionSpan; private final Tracer tracer; - public OtelDecoratedBlobWriteSession(BlobWriteSession delegate) { + public OtelDecoratedBlobWriteSession(BlobWriteSession delegate, Span sessionSpan) { this.delegate = delegate; + this.sessionSpan = sessionSpan; this.tracer = TracerDecorator.decorate( Context.current(), @@ -1696,6 +1718,8 @@ private class OtelDecoratingWritableByteChannel implements WritableByteChannel { private final WritableByteChannel delegate; private final Span openSpan; + private Scope scope; + private OtelDecoratingWritableByteChannel(WritableByteChannel delegate, Span openSpan) { this.delegate = delegate; this.openSpan = openSpan; @@ -1703,6 +1727,7 @@ private OtelDecoratingWritableByteChannel(WritableByteChannel delegate, Span ope @Override public int write(ByteBuffer src) throws IOException { + setScope(); return delegate.write(src); } @@ -1713,16 +1738,34 @@ public boolean isOpen() { @Override public void close() throws IOException { + setScope(); try { delegate.close(); } catch (IOException | RuntimeException e) { openSpan.recordException(e); openSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); + sessionSpan.recordException(e); + sessionSpan.setStatus(StatusCode.ERROR, e.getClass().getSimpleName()); throw e; } finally { openSpan.end(); + sessionSpan.end(); + clearScope(); + } + } + + private void clearScope() { + try (Scope ignore = scope) { + scope = null; } } + + public void setScope() { + if (scope != null) { 
+ clearScope(); + } + scope = openSpan.makeCurrent(); + } } } @@ -1731,6 +1774,8 @@ static final class OtelDecoratedWriteChannel implements WriteChannel { @VisibleForTesting final WriteChannel delegate; private final Span openSpan; + private Scope scope; + private OtelDecoratedWriteChannel(WriteChannel delegate, Span openSpan) { this.delegate = delegate; this.openSpan = openSpan; @@ -1748,6 +1793,7 @@ public RestorableState capture() { @Override public int write(ByteBuffer src) throws IOException { + setScope(); return delegate.write(src); } @@ -1758,6 +1804,7 @@ public boolean isOpen() { @Override public void close() throws IOException { + setScope(); try { delegate.close(); } catch (IOException | RuntimeException e) { @@ -1766,7 +1813,21 @@ public void close() throws IOException { throw e; } finally { openSpan.end(); + clearScope(); + } + } + + private void clearScope() { + try (Scope ignore = scope) { + scope = null; + } + } + + public void setScope() { + if (scope != null) { + clearScope(); } + scope = openSpan.makeCurrent(); } } @@ -1962,7 +2023,7 @@ public BlobInfo getBlobInfo() { public Projection readAs(ReadProjectionConfig config) { Span readRangeSpan = tracer - .spanBuilder("readAs") + .spanBuilder(BLOB_READ_SESSION + "/readAs") .setAttribute("gsutil.uri", id.toGsUtilUriWithGeneration()) .setParent(blobReadSessionContext) .startSpan(); @@ -2145,6 +2206,8 @@ private final class OtelDecoratingAppendableUploadWriteableByteChannel private final AppendableUploadWriteableByteChannel delegate; private final Span openSpan; + private volatile Scope scope; + private OtelDecoratingAppendableUploadWriteableByteChannel( AppendableUploadWriteableByteChannel delegate, Span openSpan) { this.delegate = delegate; @@ -2165,6 +2228,7 @@ public void finalizeAndClose() throws IOException { } finally { openSpan.end(); uploadSpan.end(); + clearScope(); } } @@ -2182,12 +2246,14 @@ public void closeWithoutFinalizing() throws IOException { } finally { openSpan.end(); 
uploadSpan.end(); + clearScope(); } } @Override @BetaApi public void close() throws IOException { + setScope(); try { delegate.close(); } catch (IOException | RuntimeException e) { @@ -2199,11 +2265,13 @@ public void close() throws IOException { } finally { openSpan.end(); uploadSpan.end(); + clearScope(); } } @Override public int write(ByteBuffer src) throws IOException { + setScope(); return delegate.write(src); } @@ -2211,6 +2279,19 @@ public int write(ByteBuffer src) throws IOException { public boolean isOpen() { return delegate.isOpen(); } + + private void clearScope() { + try (Scope ignore = scope) { + scope = null; + } + } + + public void setScope() { + if (scope != null) { + clearScope(); + } + scope = openSpan.makeCurrent(); + } } } } From bc3470a30329253186b7850eec33f756689cd51c Mon Sep 17 00:00:00 2001 From: BenWhitehead Date: Thu, 21 Aug 2025 15:20:59 -0400 Subject: [PATCH 13/16] chore: make OtelStorageDecorator package private again (#3260) Accidentally opened after some debugging in #3255 --- .../java/com/google/cloud/storage/OtelStorageDecorator.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java index 3bcc22cae8..1742833ecb 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java @@ -64,7 +64,7 @@ import org.checkerframework.checker.nullness.qual.Nullable; @SuppressWarnings("DuplicatedCode") -public final class OtelStorageDecorator implements Storage { +final class OtelStorageDecorator implements Storage { /** Becomes the {@code otel.scope.name} attribute in a span */ private static final String OTEL_SCOPE_NAME = "cloud.google.com/java/storage"; @@ -1561,7 +1561,7 @@ static UnaryOperator retryContextDecorator(OpenTelemetry 
otel) { return String.format(Locale.US, "gs://%s/", bucket); } - public static final class TracerDecorator implements Tracer { + private static final class TracerDecorator implements Tracer { @Nullable private final Context parentContextOverride; private final Tracer delegate; private final Attributes baseAttributes; @@ -1578,7 +1578,7 @@ private TracerDecorator( this.spanNamePrefix = spanNamePrefix; } - public static TracerDecorator decorate( + private static TracerDecorator decorate( @Nullable Context parentContextOverride, OpenTelemetry otel, Attributes baseAttributes, From 950c56f0e622d75faff51257d5cbc9f3ddc7e1ce Mon Sep 17 00:00:00 2001 From: BenWhitehead Date: Mon, 25 Aug 2025 12:55:46 -0400 Subject: [PATCH 14/16] feat: add AppendableUploadWriteableByteChannel#flush() (#3261) Allows blocking the invoking thread until the number of bytes acknowledged by GCS matches the number of written bytes prior to calling flush(). --- .../clirr-ignored-differences.xml | 8 ++++ ...pendableUnbufferedWritableByteChannel.java | 5 +++ .../google/cloud/storage/BidiUploadState.java | 38 ++++++++++++++--- .../storage/BidiUploadStreamingStream.java | 4 ++ .../cloud/storage/BlobAppendableUpload.java | 13 ++++++ .../storage/BlobAppendableUploadImpl.java | 9 ++++ .../BufferedWritableByteChannelSession.java | 2 + .../DefaultBufferedWritableByteChannel.java | 2 +- .../MinFlushBufferedWritableByteChannel.java | 2 +- .../cloud/storage/OtelStorageDecorator.java | 8 ++++ .../UnbufferedWritableByteChannelSession.java | 6 +++ .../google/cloud/storage/BidiUploadTest.java | 42 +++++++++++++++++++ ...efaultBufferedWritableByteChannelTest.java | 2 +- .../cloud/storage/ITAppendableUploadTest.java | 28 +++++++++++++ ...nFlushBufferedWritableByteChannelTest.java | 2 +- 15 files changed, 161 insertions(+), 10 deletions(-) diff --git a/google-cloud-storage/clirr-ignored-differences.xml b/google-cloud-storage/clirr-ignored-differences.xml index bdc578c4c1..9cb223aebc 100644 --- 
a/google-cloud-storage/clirr-ignored-differences.xml +++ b/google-cloud-storage/clirr-ignored-differences.xml @@ -184,4 +184,12 @@ int write(java.nio.ByteBuffer) + + + 7012 + com/google/cloud/storage/BlobAppendableUpload$AppendableUploadWriteableByteChannel + void flush() + + + diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java index 7f105d758b..28663f813b 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiAppendableUnbufferedWritableByteChannel.java @@ -106,6 +106,11 @@ public void nextWriteShouldFinalize() { this.nextWriteShouldFinalize = true; } + void flush() throws InterruptedException { + stream.flush(); + stream.awaitAckOf(writeOffset); + } + private long internalWrite(ByteBuffer[] srcs, int srcsOffset, int srcsLength) throws IOException { if (!open) { throw new ClosedChannelException(); diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java index 151ff402cb..08ed0c414f 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadState.java @@ -254,6 +254,10 @@ public void awaitTakeoverStateReconciliation(Runnable restart) { unimplemented(); } + public void awaitAck(long writeOffset) throws InterruptedException { + unimplemented(); + } + enum State { INITIALIZING, TAKEOVER, @@ -286,6 +290,7 @@ abstract static class BaseUploadState extends BidiUploadState { protected final Supplier baseCallContext; protected final ReentrantLock lock; protected final Condition stateUpdated; + protected final Condition 
confirmedBytesUpdated; /** The maximum number of bytes allowed to be enqueued in {@link #queue} across all messages. */ protected final long maxBytes; @@ -345,6 +350,7 @@ private BaseUploadState( this.enqueuedBytes = 0; this.lock = new ReentrantLock(); this.stateUpdated = lock.newCondition(); + this.confirmedBytesUpdated = lock.newCondition(); this.lastSentRequestIndex = -1; this.minByteOffset = 0; this.totalSentBytes = 0; @@ -501,6 +507,11 @@ final boolean offer(@NonNull BidiWriteObjectRequest e) { } } + protected void setConfirmedBytes(long newConfirmedBytes) { + this.confirmedBytes = newConfirmedBytes; + this.confirmedBytesUpdated.signalAll(); + } + @Override final void updateStateFromResponse(BidiWriteObjectResponse response) { lock.lock(); @@ -525,7 +536,7 @@ final void updateStateFromResponse(BidiWriteObjectResponse response) { // todo: test more permutations where this might be true // 1. retry, object not yet created if (state == State.INITIALIZING) { - confirmedBytes = persistedSize; + setConfirmedBytes(persistedSize); totalSentBytes = Math.max(totalSentBytes, persistedSize); } if (state == State.INITIALIZING || state == State.RETRYING) { @@ -541,7 +552,7 @@ final void updateStateFromResponse(BidiWriteObjectResponse response) { long endOffset = peek.getWriteOffset() + size; if (endOffset <= persistedSize) { poll(); - confirmedBytes = endOffset; + setConfirmedBytes(endOffset); enqueuedBytes -= size; minByteOffset = peek.getWriteOffset(); } else { @@ -551,11 +562,11 @@ final void updateStateFromResponse(BidiWriteObjectResponse response) { poll(); } else if (peek.getFlush()) { if (finalFlushSent && persistedSize == totalSentBytes) { - confirmedBytes = persistedSize; + setConfirmedBytes(persistedSize); signalTerminalSuccess = true; poll(); } else if (persistedSize >= peek.getWriteOffset()) { - confirmedBytes = persistedSize; + setConfirmedBytes(persistedSize); poll(); } else { break; @@ -565,7 +576,7 @@ final void 
updateStateFromResponse(BidiWriteObjectResponse response) { enqueuedBytes == 0, "attempting to evict finish_write: true while bytes are still enqueued"); if (response.hasResource() && persistedSize == totalSentBytes) { - confirmedBytes = persistedSize; + setConfirmedBytes(persistedSize); if (response.getResource().hasFinalizeTime()) { signalTerminalSuccess = true; poll(); @@ -883,6 +894,21 @@ public void awaitTakeoverStateReconciliation(Runnable restart) { throw StorageException.coalesce(e); } } + + @Override + public void awaitAck(long writeOffset) throws InterruptedException { + lock.lock(); + try { + while (confirmedBytes < writeOffset + && !confirmedBytesUpdated.await(5, TimeUnit.MILLISECONDS)) { + if (resultFuture.isDone()) { + return; + } + } + } finally { + lock.unlock(); + } + } } abstract static class AppendableUploadState extends BaseUploadState { @@ -950,7 +976,7 @@ private AppendableUploadState( checkState(persistedSize > -1, "persistedSize > -1 (%s > -1)", persistedSize); if (state == State.TAKEOVER || stateToReturnToAfterRetry == State.TAKEOVER) { totalSentBytes = persistedSize; - confirmedBytes = persistedSize; + setConfirmedBytes(persistedSize); if (response.hasResource() && response.getResource().hasChecksums() && response.getResource().getChecksums().hasCrc32C()) { diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java index 3cdfb76e2e..6da243e541 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BidiUploadStreamingStream.java @@ -218,6 +218,10 @@ public void awaitTakeoverStateReconciliation() { state.awaitTakeoverStateReconciliation(this::restart); } + void awaitAckOf(long writeOffset) throws InterruptedException { + state.awaitAck(writeOffset); + } + /** * It is possible for this value to 
change after reading, however it is guaranteed that the amount * of available capacity will only ever increase. diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java index e6f9167ac7..056f665ab6 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUpload.java @@ -124,6 +124,19 @@ interface AppendableUploadWriteableByteChannel extends WritableByteChannel { @Override int write(ByteBuffer src) throws IOException; + /** + * This method is blocking + * + *

Block the invoking thread, waiting until the number of bytes written so far has been + * acknowledged by Google Cloud Storage. + * + * @throws IOException if an error happens while waiting for the flush to complete + * @throws java.io.InterruptedIOException if the current thread is interrupted while waiting + * @since 2.56.0 This new api is in preview and is subject to breaking changes. + */ + @BetaApi + void flush() throws IOException; + /** * This method is blocking * diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java index 909d11dfa2..cc3bac3f1a 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/BlobAppendableUploadImpl.java @@ -21,6 +21,7 @@ import com.google.cloud.storage.BufferedWritableByteChannelSession.BufferedWritableByteChannel; import com.google.common.base.Preconditions; import java.io.IOException; +import java.io.InterruptedIOException; import java.nio.ByteBuffer; import java.util.concurrent.locks.ReentrantLock; @@ -82,6 +83,14 @@ public void flush() throws IOException { lock.lock(); try { buffered.flush(); + try { + unbuffered.flush(); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + InterruptedIOException interruptedIOException = new InterruptedIOException(); + interruptedIOException.initCause(e); + throw interruptedIOException; + } } finally { lock.unlock(); } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferedWritableByteChannelSession.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferedWritableByteChannelSession.java index 67cf231333..cf9c19602f 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferedWritableByteChannelSession.java +++ 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/BufferedWritableByteChannelSession.java @@ -24,6 +24,8 @@ interface BufferedWritableByteChannelSession extends WritableByteChannelSession { interface BufferedWritableByteChannel extends WritableByteChannel { + + /** Block the invoking thread until all written bytes are accepted by the lower layer */ void flush() throws IOException; } } diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java index 7b92b25724..4e9a7c107f 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/DefaultBufferedWritableByteChannel.java @@ -200,7 +200,7 @@ public void close() throws IOException { @Override public void flush() throws IOException { - if (enqueuedBytes()) { + while (enqueuedBytes()) { ByteBuffer buffer = handle.get(); Buffers.flip(buffer); channel.write(buffer); diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java index 30e8206ea6..4bd4d9eaa5 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannel.java @@ -155,7 +155,7 @@ public void close() throws IOException { @Override public void flush() throws IOException { - if (enqueuedBytes()) { + while (enqueuedBytes()) { ByteBuffer buffer = handle.get(); Buffers.flip(buffer); channel.write(buffer); diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java 
b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java index 1742833ecb..0a5eae9577 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/OtelStorageDecorator.java @@ -2217,6 +2217,7 @@ private OtelDecoratingAppendableUploadWriteableByteChannel( @Override @BetaApi public void finalizeAndClose() throws IOException { + setScope(); try { delegate.finalizeAndClose(); } catch (IOException | RuntimeException e) { @@ -2235,6 +2236,7 @@ public void finalizeAndClose() throws IOException { @Override @BetaApi public void closeWithoutFinalizing() throws IOException { + setScope(); try { delegate.closeWithoutFinalizing(); } catch (IOException | RuntimeException e) { @@ -2269,6 +2271,12 @@ public void close() throws IOException { } } + @Override + public void flush() throws IOException { + setScope(); + delegate.flush(); + } + @Override public int write(ByteBuffer src) throws IOException { setScope(); diff --git a/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java b/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java index d7a5fcef60..2210822fa0 100644 --- a/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java +++ b/google-cloud-storage/src/main/java/com/google/cloud/storage/UnbufferedWritableByteChannelSession.java @@ -26,24 +26,30 @@ interface UnbufferedWritableByteChannelSession extends WritableByteChannelSession { interface UnbufferedWritableByteChannel extends WritableByteChannel, GatheringByteChannel { + + /** Default assumed to be blocking, non-blocking allowed but must be documented. 
*/ @Override default int write(ByteBuffer src) throws IOException { return Math.toIntExact(write(new ByteBuffer[] {src}, 0, 1)); } + /** Default assumed to be blocking, non-blocking allowed but must be documented. */ @Override default long write(ByteBuffer[] srcs) throws IOException { return write(srcs, 0, srcs.length); } + /** This method must block until terminal state is reached. */ default int writeAndClose(ByteBuffer src) throws IOException { return Math.toIntExact(writeAndClose(new ByteBuffer[] {src}, 0, 1)); } + /** This method must block until terminal state is reached. */ default long writeAndClose(ByteBuffer[] srcs) throws IOException { return writeAndClose(srcs, 0, srcs.length); } + /** This method must block until terminal state is reached. */ default long writeAndClose(ByteBuffer[] srcs, int offset, int length) throws IOException { long write = write(srcs, offset, length); close(); diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java index bfb561b593..52b6ebb5fe 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/BidiUploadTest.java @@ -17,7 +17,9 @@ package com.google.cloud.storage; import static com.google.cloud.storage.BidiUploadState.appendableNew; +import static com.google.cloud.storage.BidiUploadTestUtils.createSegment; import static com.google.cloud.storage.BidiUploadTestUtils.finishAt; +import static com.google.cloud.storage.BidiUploadTestUtils.incremental; import static com.google.cloud.storage.BidiUploadTestUtils.makeRedirect; import static com.google.cloud.storage.BidiUploadTestUtils.packRedirectIntoAbortedException; import static com.google.cloud.storage.BidiUploadTestUtils.timestampNow; @@ -84,6 +86,7 @@ import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; import 
java.util.concurrent.Executors; +import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -480,6 +483,45 @@ public void redirectToken_appendable_noPreviousSuccessfulFlush() throws Exceptio () -> assertThat(actualCtx.getExtraHeaders()).isEqualTo(expectedHeaders)); } + @Test + public void awaitAck_alreadyThere() throws InterruptedException { + BidiUploadState state = factory.createInitialized(17); + + assertThat(state.offer(createSegment(2))).isTrue(); + assertThat(state.onResponse(incremental(2))).isNull(); + + state.awaitAck(2); + } + + @Test + public void awaitAck_multipleResponses() + throws InterruptedException, ExecutionException, TimeoutException { + BidiUploadState state = factory.createInitialized(17); + + assertThat(state.offer(createSegment(4))).isTrue(); + ExecutorService exec = Executors.newSingleThreadExecutor(); + try { + Future f = + exec.submit( + () -> { + try { + Thread.sleep(10); + assertThat(state.onResponse(incremental(2))).isNull(); + Thread.sleep(10); + assertThat(state.onResponse(incremental(4))).isNull(); + return 3; + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + state.awaitAck(4); + assertThat(f.get(3, TimeUnit.SECONDS)).isEqualTo(3); + } finally { + exec.shutdownNow(); + } + } + private abstract static class BidiUploadStateFactory { final BidiUploadState createInitialized() { return createInitialized(25); diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java index fa88ec76ec..954d2917fd 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/DefaultBufferedWritableByteChannelTest.java @@ -259,7 
+259,7 @@ void manualFlushingIsAccurate() throws IOException { assertWithMessage("Unexpected total flushed length") .that(adapter.writeEndPoints) - .isEqualTo(ImmutableList.of(3L, 5L, 10L, 12L)); + .isEqualTo(ImmutableList.of(3L, 5L, 6L, 11L, 12L)); assertThat(baos.toByteArray()).isEqualTo(allData); } } diff --git a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java index b21896a1c7..eeb24f59c5 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/ITAppendableUploadTest.java @@ -134,6 +134,34 @@ public void appendableUpload_bytes() assertThat(xxd(actualBytes)).isEqualTo(xxd(a1_a2.getBytes())); } + @Test + public void explicitFlush() + throws IOException, ExecutionException, InterruptedException, TimeoutException { + checkTestbenchIssue733(); + + BlobAppendableUpload upload = + storage.blobAppendableUpload( + BlobInfo.newBuilder(bucket, UUID.randomUUID().toString()).build(), p.uploadConfig); + + try (AppendableUploadWriteableByteChannel channel = upload.open()) { + ByteBuffer src = p.content.asByteBuffer(); + ByteBuffer zed = src.slice(); + zed.limit(zed.position() + 1); + src.position(src.position() + 1); + + int written = channel.write(zed); + assertThat(written).isEqualTo(1); + channel.flush(); + + written = StorageChannelUtils.blockingEmptyTo(src, channel); + assertThat(written).isEqualTo(p.content.length() - 1); + } + + BlobInfo gen1 = upload.getResult().get(3, TimeUnit.SECONDS); + assertThat(gen1.getSize()).isEqualTo(p.content.length()); + assertThat(gen1.getCrc32c()).isEqualTo(Utils.crc32cCodec.encode(p.content.getCrc32c())); + } + @Test // Pending work in testbench: https://github.com/googleapis/storage-testbench/issues/723 // manually verified internally on 2025-03-25 diff --git 
a/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java b/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java index 255d0e4bea..6ccd5c669f 100644 --- a/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java +++ b/google-cloud-storage/src/test/java/com/google/cloud/storage/MinFlushBufferedWritableByteChannelTest.java @@ -338,7 +338,7 @@ void manualFlushingIsAccurate() throws IOException { assertWithMessage("Unexpected total flushed length") .that(adapter.writeEndPoints) - .isEqualTo(ImmutableList.of(3L, 5L, 12L)); + .isEqualTo(ImmutableList.of(3L, 5L, 6L, 12L)); assertThat(baos.toByteArray()).isEqualTo(allData); } } From 87afe1ac5f500053e4c0639d5b824304d03796f4 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Mon, 25 Aug 2025 21:27:03 +0200 Subject: [PATCH 15/16] deps: update dependency com.google.apis:google-api-services-storage to v1-rev20250815-2.0.0 (#3245) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 8a93606d1a..12de36d5fc 100644 --- a/pom.xml +++ b/pom.xml @@ -87,7 +87,7 @@ com.google.apis google-api-services-storage - v1-rev20250718-2.0.0 + v1-rev20250815-2.0.0 com.google.cloud From b52eac5360b8bb6265d39e185308ec1b3f44aff2 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Mon, 25 Aug 2025 16:35:42 -0400 Subject: [PATCH 16/16] chore(main): release 2.56.0 (#3240) * chore(main): release 2.56.0 * chore: generate libraries at Mon Aug 25 19:27:55 UTC 2025 --------- Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> Co-authored-by: cloud-java-bot --- CHANGELOG.md | 24 +++++++++++++++++++ README.md | 6 ++--- gapic-google-cloud-storage-v2/pom.xml | 4 ++-- google-cloud-storage-bom/pom.xml | 16 ++++++------- google-cloud-storage-control/pom.xml | 4 ++-- 
google-cloud-storage/pom.xml | 4 ++-- grpc-google-cloud-storage-control-v2/pom.xml | 4 ++-- grpc-google-cloud-storage-v2/pom.xml | 4 ++-- pom.xml | 16 ++++++------- proto-google-cloud-storage-control-v2/pom.xml | 4 ++-- proto-google-cloud-storage-v2/pom.xml | 4 ++-- samples/snapshot/pom.xml | 6 ++--- storage-shared-benchmarking/pom.xml | 4 ++-- versions.txt | 14 +++++------ 14 files changed, 69 insertions(+), 45 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b51e08214f..6cb0a9ae77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## [2.56.0](https://github.com/googleapis/java-storage/compare/v2.55.0...v2.56.0) (2025-08-25) + + +### Features + +* *breaking behavior* rewrite Storage.blobAppendableUpload to be non-blocking and have improved throughput ([#3231](https://github.com/googleapis/java-storage/issues/3231)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Add AppendableUploadWriteableByteChannel#flush() ([#3261](https://github.com/googleapis/java-storage/issues/3261)) ([950c56f](https://github.com/googleapis/java-storage/commit/950c56f0e622d75faff51257d5cbc9f3ddc7e1ce)) +* Add MinFlushSizeFlushPolicy#withMaxPendingBytes(long) ([#3231](https://github.com/googleapis/java-storage/issues/3231)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Add StorageChannelUtils to provide helper methods to perform blocking read/write to/from non-blocking channels ([#3231](https://github.com/googleapis/java-storage/issues/3231)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) + + +### Bug Fixes + +* Make FlushPolicy${Min,Max}FlushSizeFlushPolicy constructors private ([#3217](https://github.com/googleapis/java-storage/issues/3217)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Update BlobAppendableUploadConfig 
and FlushPolicy.MinFlushSizeFlushPolicy to default to 4MiB minFlushSize and 16MiB maxPendingBytes ([#3249](https://github.com/googleapis/java-storage/issues/3249)) ([7bd73d3](https://github.com/googleapis/java-storage/commit/7bd73d3104f5c47299f5a9c8d68dec82933eeda5)) +* Update otel integration to properly activate span context for lazy RPCs such as reads & writes ([#3255](https://github.com/googleapis/java-storage/issues/3255)) ([d6587f4](https://github.com/googleapis/java-storage/commit/d6587f42b65a586a2e3f30e0559975801726a812)) + + +### Dependencies + +* Update actions/checkout action to v5 ([#3239](https://github.com/googleapis/java-storage/issues/3239)) ([33f024b](https://github.com/googleapis/java-storage/commit/33f024b1ae094bf3e3605e1a835cb55eb5c9e750)) +* Update dependency com.google.apis:google-api-services-storage to v1-rev20250815-2.0.0 ([#3245](https://github.com/googleapis/java-storage/issues/3245)) ([87afe1a](https://github.com/googleapis/java-storage/commit/87afe1ac5f500053e4c0639d5b824304d03796f4)) +* Update dependency com.google.cloud:sdk-platform-java-config to v3.52.0 ([#3250](https://github.com/googleapis/java-storage/issues/3250)) ([0782e62](https://github.com/googleapis/java-storage/commit/0782e62fc9534e3cecfaaa4d78b58904ecf699d6)) + ## [2.55.0](https://github.com/googleapis/java-storage/compare/v2.54.0...v2.55.0) (2025-08-05) diff --git a/README.md b/README.md index a01d688302..045ffb52a6 100644 --- a/README.md +++ b/README.md @@ -66,13 +66,13 @@ implementation 'com.google.cloud:google-cloud-storage' If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-storage:2.55.0' +implementation 'com.google.cloud:google-cloud-storage:2.56.0' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-storage" % "2.55.0" +libraryDependencies += "com.google.cloud" % "google-cloud-storage" % "2.56.0" ``` ## Authentication 
@@ -523,7 +523,7 @@ Java is a registered trademark of Oracle and/or its affiliates. [kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-storage/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-storage.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-storage/2.55.0 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-storage/2.56.0 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/gapic-google-cloud-storage-v2/pom.xml b/gapic-google-cloud-storage-v2/pom.xml index f85d90ef24..86c78e9b9c 100644 --- a/gapic-google-cloud-storage-v2/pom.xml +++ b/gapic-google-cloud-storage-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc gapic-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 gapic-google-cloud-storage-v2 GRPC library for gapic-google-cloud-storage-v2 com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 diff --git a/google-cloud-storage-bom/pom.xml b/google-cloud-storage-bom/pom.xml index a6f26c1311..ba57b70c08 100644 --- a/google-cloud-storage-bom/pom.xml +++ b/google-cloud-storage-bom/pom.xml @@ -19,7 +19,7 @@ 4.0.0 com.google.cloud google-cloud-storage-bom - 2.55.1-SNAPSHOT + 2.56.0 pom com.google.cloud @@ -69,37 +69,37 @@ com.google.cloud google-cloud-storage - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc gapic-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc grpc-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc proto-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.cloud google-cloud-storage-control - 2.55.1-SNAPSHOT + 2.56.0 
com.google.api.grpc grpc-google-cloud-storage-control-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc proto-google-cloud-storage-control-v2 - 2.55.1-SNAPSHOT + 2.56.0 diff --git a/google-cloud-storage-control/pom.xml b/google-cloud-storage-control/pom.xml index 0517d81d37..0a29a76216 100644 --- a/google-cloud-storage-control/pom.xml +++ b/google-cloud-storage-control/pom.xml @@ -5,13 +5,13 @@ 4.0.0 com.google.cloud google-cloud-storage-control - 2.55.1-SNAPSHOT + 2.56.0 google-cloud-storage-control GRPC library for google-cloud-storage-control com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 diff --git a/google-cloud-storage/pom.xml b/google-cloud-storage/pom.xml index a58682f862..d8e343f743 100644 --- a/google-cloud-storage/pom.xml +++ b/google-cloud-storage/pom.xml @@ -2,7 +2,7 @@ 4.0.0 google-cloud-storage - 2.55.1-SNAPSHOT + 2.56.0 jar Google Cloud Storage https://github.com/googleapis/java-storage @@ -12,7 +12,7 @@ com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 google-cloud-storage diff --git a/grpc-google-cloud-storage-control-v2/pom.xml b/grpc-google-cloud-storage-control-v2/pom.xml index 3c132d31e7..9dbd36e81a 100644 --- a/grpc-google-cloud-storage-control-v2/pom.xml +++ b/grpc-google-cloud-storage-control-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-storage-control-v2 - 2.55.1-SNAPSHOT + 2.56.0 grpc-google-cloud-storage-control-v2 GRPC library for google-cloud-storage com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 diff --git a/grpc-google-cloud-storage-v2/pom.xml b/grpc-google-cloud-storage-v2/pom.xml index eebe6e2f0e..c4c31b3477 100644 --- a/grpc-google-cloud-storage-v2/pom.xml +++ b/grpc-google-cloud-storage-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 grpc-google-cloud-storage-v2 GRPC library for grpc-google-cloud-storage-v2 com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT 
+ 2.56.0 diff --git a/pom.xml b/pom.xml index 12de36d5fc..c8ca49833d 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-storage-parent pom - 2.55.1-SNAPSHOT + 2.56.0 Storage Parent https://github.com/googleapis/java-storage @@ -82,7 +82,7 @@ com.google.cloud google-cloud-storage - 2.55.1-SNAPSHOT + 2.56.0 com.google.apis @@ -104,32 +104,32 @@ com.google.api.grpc proto-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc grpc-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc gapic-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc grpc-google-cloud-storage-control-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.api.grpc proto-google-cloud-storage-control-v2 - 2.55.1-SNAPSHOT + 2.56.0 com.google.cloud google-cloud-storage-control - 2.55.1-SNAPSHOT + 2.56.0 com.google.cloud diff --git a/proto-google-cloud-storage-control-v2/pom.xml b/proto-google-cloud-storage-control-v2/pom.xml index 4f630ef121..b43ee86fa1 100644 --- a/proto-google-cloud-storage-control-v2/pom.xml +++ b/proto-google-cloud-storage-control-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-storage-control-v2 - 2.55.1-SNAPSHOT + 2.56.0 proto-google-cloud-storage-control-v2 Proto library for proto-google-cloud-storage-control-v2 com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 diff --git a/proto-google-cloud-storage-v2/pom.xml b/proto-google-cloud-storage-v2/pom.xml index 9b898f142d..f451e9e9b5 100644 --- a/proto-google-cloud-storage-v2/pom.xml +++ b/proto-google-cloud-storage-v2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-storage-v2 - 2.55.1-SNAPSHOT + 2.56.0 proto-google-cloud-storage-v2 PROTO library for proto-google-cloud-storage-v2 com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 9e263e88a2..470b33feb2 100644 --- a/samples/snapshot/pom.xml +++ 
b/samples/snapshot/pom.xml @@ -28,12 +28,12 @@ com.google.cloud google-cloud-storage - 2.55.1-SNAPSHOT + 2.56.0 com.google.cloud google-cloud-storage-control - 2.55.1-SNAPSHOT + 2.56.0 compile @@ -70,7 +70,7 @@ com.google.cloud google-cloud-storage - 2.55.1-SNAPSHOT + 2.56.0 tests test diff --git a/storage-shared-benchmarking/pom.xml b/storage-shared-benchmarking/pom.xml index 30105dbe71..8a4dc63a5f 100644 --- a/storage-shared-benchmarking/pom.xml +++ b/storage-shared-benchmarking/pom.xml @@ -10,7 +10,7 @@ com.google.cloud google-cloud-storage-parent - 2.55.1-SNAPSHOT + 2.56.0 @@ -31,7 +31,7 @@ com.google.cloud google-cloud-storage - 2.55.1-SNAPSHOT + 2.56.0 tests diff --git a/versions.txt b/versions.txt index a05bde7968..d6f3b4816d 100644 --- a/versions.txt +++ b/versions.txt @@ -1,10 +1,10 @@ # Format: # module:released-version:current-version -google-cloud-storage:2.55.0:2.55.1-SNAPSHOT -gapic-google-cloud-storage-v2:2.55.0:2.55.1-SNAPSHOT -grpc-google-cloud-storage-v2:2.55.0:2.55.1-SNAPSHOT -proto-google-cloud-storage-v2:2.55.0:2.55.1-SNAPSHOT -google-cloud-storage-control:2.55.0:2.55.1-SNAPSHOT -proto-google-cloud-storage-control-v2:2.55.0:2.55.1-SNAPSHOT -grpc-google-cloud-storage-control-v2:2.55.0:2.55.1-SNAPSHOT +google-cloud-storage:2.56.0:2.56.0 +gapic-google-cloud-storage-v2:2.56.0:2.56.0 +grpc-google-cloud-storage-v2:2.56.0:2.56.0 +proto-google-cloud-storage-v2:2.56.0:2.56.0 +google-cloud-storage-control:2.56.0:2.56.0 +proto-google-cloud-storage-control-v2:2.56.0:2.56.0 +grpc-google-cloud-storage-control-v2:2.56.0:2.56.0