From 12875a7c5588e93bb6e8a130f9a935deb248ab15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 13:08:00 +0700 Subject: [PATCH 01/17] test(storage): rename `testbench` to `emulator` Rename `UsingTestbench` to `UsingEmulator` in `google/cloud/storage/` --- .../storage/benchmarks/throughput_experiment_test.cc | 4 ++-- .../storage/examples/storage_bucket_acl_samples.cc | 2 +- .../storage/examples/storage_bucket_cors_samples.cc | 2 +- .../storage_bucket_default_kms_key_samples.cc | 2 +- .../storage/examples/storage_bucket_iam_samples.cc | 2 +- .../storage_bucket_requester_pays_samples.cc | 2 +- .../examples/storage_default_object_acl_samples.cc | 2 +- .../examples/storage_event_based_hold_samples.cc | 2 +- .../storage/examples/storage_examples_common.cc | 2 +- .../cloud/storage/examples/storage_examples_common.h | 2 +- .../examples/storage_lifecycle_management_samples.cc | 2 +- .../storage/examples/storage_notification_samples.cc | 2 +- .../storage/examples/storage_object_acl_samples.cc | 2 +- .../storage/examples/storage_object_cmek_samples.cc | 2 +- .../cloud/storage/examples/storage_object_samples.cc | 2 +- .../examples/storage_object_versioning_samples.cc | 2 +- .../storage/examples/storage_policy_doc_samples.cc | 2 +- .../examples/storage_retention_policy_samples.cc | 2 +- .../examples/storage_signed_url_v2_samples.cc | 2 +- .../examples/storage_signed_url_v4_samples.cc | 2 +- .../storage/examples/storage_website_samples.cc | 2 +- .../storage/testing/storage_integration_test.cc | 6 +++--- .../cloud/storage/testing/storage_integration_test.h | 2 +- .../cloud/storage/tests/bucket_integration_test.cc | 12 ++++++------ .../tests/error_injection_integration_test.cc | 2 +- google/cloud/storage/tests/grpc_integration_test.cc | 2 +- .../cloud/storage/tests/key_file_integration_test.cc | 2 +- .../tests/object_checksum_integration_test.cc | 10 +++++----- .../storage/tests/object_file_integration_test.cc | 4 ++-- 
.../storage/tests/object_hash_integration_test.cc | 10 +++++----- .../storage/tests/object_insert_integration_test.cc | 2 +- .../storage/tests/object_media_integration_test.cc | 6 +++--- .../tests/object_resumable_write_integration_test.cc | 12 ++++++------ .../storage/tests/signed_url_integration_test.cc | 8 ++++---- .../tests/slow_reader_chunk_integration_test.cc | 8 ++++---- .../tests/slow_reader_stream_integration_test.cc | 8 ++++---- 36 files changed, 69 insertions(+), 69 deletions(-) diff --git a/google/cloud/storage/benchmarks/throughput_experiment_test.cc b/google/cloud/storage/benchmarks/throughput_experiment_test.cc index de883343f4709..d43947edaa153 100644 --- a/google/cloud/storage/benchmarks/throughput_experiment_test.cc +++ b/google/cloud/storage/benchmarks/throughput_experiment_test.cc @@ -42,7 +42,7 @@ bool ProductionOnly(ApiName api) { } TEST_P(ThroughputExperimentIntegrationTest, Upload) { - if (UsingTestbench() && ProductionOnly(GetParam())) GTEST_SKIP(); + if (UsingEmulator() && ProductionOnly(GetParam())) GTEST_SKIP(); auto client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -71,7 +71,7 @@ TEST_P(ThroughputExperimentIntegrationTest, Upload) { } TEST_P(ThroughputExperimentIntegrationTest, Download) { - if (UsingTestbench() && ProductionOnly(GetParam())) GTEST_SKIP(); + if (UsingEmulator() && ProductionOnly(GetParam())) GTEST_SKIP(); auto client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/examples/storage_bucket_acl_samples.cc b/google/cloud/storage/examples/storage_bucket_acl_samples.cc index 24492d8999b1f..709ce0dcd6c92 100644 --- a/google/cloud/storage/examples/storage_bucket_acl_samples.cc +++ b/google/cloud/storage/examples/storage_bucket_acl_samples.cc @@ -283,7 +283,7 @@ void RunAll(std::vector const& argv) { std::cout << "\nRunning RemoveBucketOwner() example" << std::endl; RemoveBucketOwner(client, {bucket_name, entity}); - if (!examples::UsingTestbench()) 
std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_bucket_cors_samples.cc b/google/cloud/storage/examples/storage_bucket_cors_samples.cc index 1b2fb169c5004..45c80ad7ab924 100644 --- a/google/cloud/storage/examples/storage_bucket_cors_samples.cc +++ b/google/cloud/storage/examples/storage_bucket_cors_samples.cc @@ -113,7 +113,7 @@ void RunAll(std::vector const& argv) { std::cout << "\nRunning the RemoveCorsConfiguration() example" << std::endl; RemoveCorsConfiguration(client, {bucket_name}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_bucket_default_kms_key_samples.cc b/google/cloud/storage/examples/storage_bucket_default_kms_key_samples.cc index 8d3b5e57af9f6..2e24e4ec8bf44 100644 --- a/google/cloud/storage/examples/storage_bucket_default_kms_key_samples.cc +++ b/google/cloud/storage/examples/storage_bucket_default_kms_key_samples.cc @@ -134,7 +134,7 @@ void RunAll(std::vector const& argv) { std::cout << "\nRunning the RemoveBucketDefaultKmsKey() example" << std::endl; RemoveBucketDefaultKmsKey(client, {bucket_name}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_bucket_iam_samples.cc b/google/cloud/storage/examples/storage_bucket_iam_samples.cc index 55a8389f4ca1e..d88643a0649ba 100644 --- a/google/cloud/storage/examples/storage_bucket_iam_samples.cc +++ b/google/cloud/storage/examples/storage_bucket_iam_samples.cc @@ -460,7 +460,7 @@ void RunAll(std::vector const& argv) 
{ std::cout << "\nRunning SetBucketPublicIam() example" << std::endl; SetBucketPublicIam(client, {bucket_name}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_bucket_requester_pays_samples.cc b/google/cloud/storage/examples/storage_bucket_requester_pays_samples.cc index 9fd9c9ecb0c36..45c4cf3435aa9 100644 --- a/google/cloud/storage/examples/storage_bucket_requester_pays_samples.cc +++ b/google/cloud/storage/examples/storage_bucket_requester_pays_samples.cc @@ -218,7 +218,7 @@ void RunAll(std::vector const& argv) { std::cout << "\nRunning GetBilling() example [3]" << std::endl; GetBilling(client, {bucket_name, project_id}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_default_object_acl_samples.cc b/google/cloud/storage/examples/storage_default_object_acl_samples.cc index 3eeaa3d887690..40a3c6aff9807 100644 --- a/google/cloud/storage/examples/storage_default_object_acl_samples.cc +++ b/google/cloud/storage/examples/storage_default_object_acl_samples.cc @@ -234,7 +234,7 @@ void RunAll(std::vector const& argv) { std::cout << "\nRunning DeleteDefaultObjectAcl() example" << std::endl; DeleteDefaultObjectAcl(client, {bucket_name, entity}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_event_based_hold_samples.cc b/google/cloud/storage/examples/storage_event_based_hold_samples.cc index 2edef228971d6..377748a368db4 100644 --- 
a/google/cloud/storage/examples/storage_event_based_hold_samples.cc +++ b/google/cloud/storage/examples/storage_event_based_hold_samples.cc @@ -140,7 +140,7 @@ void RunAll(std::vector const& argv) { DisableDefaultEventBasedHold(client, {bucket_name}); std::cout << "\nCleaning up" << std::endl; - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_examples_common.cc b/google/cloud/storage/examples/storage_examples_common.cc index cac8cf4163466..3748cf44a79e1 100644 --- a/google/cloud/storage/examples/storage_examples_common.cc +++ b/google/cloud/storage/examples/storage_examples_common.cc @@ -23,7 +23,7 @@ namespace cloud { namespace storage { namespace examples { -bool UsingTestbench() { +bool UsingEmulator() { return !google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") .value_or("") .empty(); diff --git a/google/cloud/storage/examples/storage_examples_common.h b/google/cloud/storage/examples/storage_examples_common.h index 6591d9cb79c16..08471ce8c42de 100644 --- a/google/cloud/storage/examples/storage_examples_common.h +++ b/google/cloud/storage/examples/storage_examples_common.h @@ -33,7 +33,7 @@ using ::google::cloud::testing_util::CommandType; using ::google::cloud::testing_util::Example; using ::google::cloud::testing_util::Usage; -bool UsingTestbench(); +bool UsingEmulator(); std::string MakeRandomBucketName(google::cloud::internal::DefaultPRNG& gen); std::string MakeRandomObjectName(google::cloud::internal::DefaultPRNG& gen, diff --git a/google/cloud/storage/examples/storage_lifecycle_management_samples.cc b/google/cloud/storage/examples/storage_lifecycle_management_samples.cc index 24fd7d3530219..1708db5f2411b 100644 --- a/google/cloud/storage/examples/storage_lifecycle_management_samples.cc +++ 
b/google/cloud/storage/examples/storage_lifecycle_management_samples.cc @@ -151,7 +151,7 @@ void RunAll(std::vector const& argv) { GetBucketLifecycleManagement(client, {bucket_name}); std::cout << "\nCleaning up" << std::endl; - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_notification_samples.cc b/google/cloud/storage/examples/storage_notification_samples.cc index c7c44f0e8ebd0..a2678bb72a6e9 100644 --- a/google/cloud/storage/examples/storage_notification_samples.cc +++ b/google/cloud/storage/examples/storage_notification_samples.cc @@ -177,7 +177,7 @@ void RunAll(std::vector const& argv) { std::cout << "\nRunning DeleteNotification() example [2]" << std::endl; DeleteNotification(client, {bucket_name, n2.id()}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_object_acl_samples.cc b/google/cloud/storage/examples/storage_object_acl_samples.cc index 27341c51098ae..acda2ff8fdd14 100644 --- a/google/cloud/storage/examples/storage_object_acl_samples.cc +++ b/google/cloud/storage/examples/storage_object_acl_samples.cc @@ -307,7 +307,7 @@ void RunAll(std::vector const& argv) { RemoveObjectOwner(client, {bucket_name, object_name, entity}); (void)client.DeleteObject(bucket_name, object_name); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_object_cmek_samples.cc b/google/cloud/storage/examples/storage_object_cmek_samples.cc index 
5c57a0cf825f7..87b532a28844e 100644 --- a/google/cloud/storage/examples/storage_object_cmek_samples.cc +++ b/google/cloud/storage/examples/storage_object_cmek_samples.cc @@ -157,7 +157,7 @@ void RunAll(std::vector const& argv) { } (void)client.DeleteObject(bucket_name, csek_object_name); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_object_samples.cc b/google/cloud/storage/examples/storage_object_samples.cc index 74fe47bf55255..691734d9e93d3 100644 --- a/google/cloud/storage/examples/storage_object_samples.cc +++ b/google/cloud/storage/examples/storage_object_samples.cc @@ -683,7 +683,7 @@ void RunAll(std::vector const& argv) { {bucket_name, object_name_retry, object_media}); DeleteObject(client, {bucket_name, object_name_retry}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_object_versioning_samples.cc b/google/cloud/storage/examples/storage_object_versioning_samples.cc index 7044f8c38b6b6..fe843fad3340d 100644 --- a/google/cloud/storage/examples/storage_object_versioning_samples.cc +++ b/google/cloud/storage/examples/storage_object_versioning_samples.cc @@ -220,7 +220,7 @@ void RunAll(std::vector const& argv) { GetObjectVersioning(client, {bucket_name}); (void)client.DeleteObject(bucket_name, dst_object_name); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_policy_doc_samples.cc 
b/google/cloud/storage/examples/storage_policy_doc_samples.cc index 03ffd6cd2e6a9..65476d403b3f1 100644 --- a/google/cloud/storage/examples/storage_policy_doc_samples.cc +++ b/google/cloud/storage/examples/storage_policy_doc_samples.cc @@ -149,7 +149,7 @@ void RunAll(std::vector const& argv) { << std::endl; CreatePolicyDocumentFormV4(client, {bucket_name, object_name}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_retention_policy_samples.cc b/google/cloud/storage/examples/storage_retention_policy_samples.cc index bca23ebd7a903..05d2b485a91f8 100644 --- a/google/cloud/storage/examples/storage_retention_policy_samples.cc +++ b/google/cloud/storage/examples/storage_retention_policy_samples.cc @@ -198,7 +198,7 @@ void RunAll(std::vector const& argv) { LockRetentionPolicy(client, {bucket_name}); std::cout << "\nCleaning up" << std::endl; - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/examples/storage_signed_url_v2_samples.cc b/google/cloud/storage/examples/storage_signed_url_v2_samples.cc index cd0d7e0ae9f83..ecc6d790808b6 100644 --- a/google/cloud/storage/examples/storage_signed_url_v2_samples.cc +++ b/google/cloud/storage/examples/storage_signed_url_v2_samples.cc @@ -83,7 +83,7 @@ void RunAll(std::vector const& argv) { auto client = gcs::Client::CreateDefaultClient().value(); - if (examples::UsingTestbench()) { + if (examples::UsingEmulator()) { std::cout << "Signed URL examples are only runnable against production\n"; return; } diff --git a/google/cloud/storage/examples/storage_signed_url_v4_samples.cc 
b/google/cloud/storage/examples/storage_signed_url_v4_samples.cc index cb6744e32fda4..fa36a09968f78 100644 --- a/google/cloud/storage/examples/storage_signed_url_v4_samples.cc +++ b/google/cloud/storage/examples/storage_signed_url_v4_samples.cc @@ -81,7 +81,7 @@ void RunAll(std::vector const& argv) { auto client = gcs::Client::CreateDefaultClient().value(); - if (examples::UsingTestbench()) { + if (examples::UsingEmulator()) { std::cout << "Signed URL examples are only runnable against production\n"; return; } diff --git a/google/cloud/storage/examples/storage_website_samples.cc b/google/cloud/storage/examples/storage_website_samples.cc index 20c910a19dc82..a8ee6a36b3142 100644 --- a/google/cloud/storage/examples/storage_website_samples.cc +++ b/google/cloud/storage/examples/storage_website_samples.cc @@ -158,7 +158,7 @@ void RunAll(std::vector const& argv) { << std::endl; RemoveStaticWebsiteConfiguration(client, {bucket_name}); - if (!examples::UsingTestbench()) std::this_thread::sleep_until(pause); + if (!examples::UsingEmulator()) std::this_thread::sleep_until(pause); (void)examples::RemoveBucketAndContents(client, bucket_name); } diff --git a/google/cloud/storage/testing/storage_integration_test.cc b/google/cloud/storage/testing/storage_integration_test.cc index f1642ccae5dc9..242b44b78026d 100644 --- a/google/cloud/storage/testing/storage_integration_test.cc +++ b/google/cloud/storage/testing/storage_integration_test.cc @@ -46,7 +46,7 @@ StorageIntegrationTest::MakeIntegrationTestClient() { google::cloud::StatusOr StorageIntegrationTest::MakeBucketIntegrationTestClient() { - if (UsingTestbench()) return MakeIntegrationTestClient(); + if (UsingEmulator()) return MakeIntegrationTestClient(); auto constexpr kInitialDelay = std::chrono::seconds(5); auto constexpr kMaximumBackoffDelay = std::chrono::minutes(5); @@ -105,7 +105,7 @@ StorageIntegrationTest::MakeIntegrationTestClient( std::unique_ptr StorageIntegrationTest::TestBackoffPolicy() { 
std::chrono::milliseconds initial_delay(std::chrono::seconds(1)); auto constexpr kShortDelayForTestbench = std::chrono::milliseconds(10); - if (UsingTestbench()) { + if (UsingEmulator()) { initial_delay = kShortDelayForTestbench; } @@ -159,7 +159,7 @@ EncryptionKeyData StorageIntegrationTest::MakeEncryptionKeyData() { return CreateKeyFromGenerator(generator_); } -bool StorageIntegrationTest::UsingTestbench() { +bool StorageIntegrationTest::UsingEmulator() { return google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") .has_value(); } diff --git a/google/cloud/storage/testing/storage_integration_test.h b/google/cloud/storage/testing/storage_integration_test.h index fc8dd01e49d7a..c3b3217ca3d0d 100644 --- a/google/cloud/storage/testing/storage_integration_test.h +++ b/google/cloud/storage/testing/storage_integration_test.h @@ -85,7 +85,7 @@ class StorageIntegrationTest : public ::testing::Test { std::string MakeRandomData(std::size_t desired_size); - static bool UsingTestbench(); + static bool UsingEmulator(); static bool UsingGrpc(); diff --git a/google/cloud/storage/tests/bucket_integration_test.cc b/google/cloud/storage/tests/bucket_integration_test.cc index 32c4e9de69f53..5f48113893826 100644 --- a/google/cloud/storage/tests/bucket_integration_test.cc +++ b/google/cloud/storage/tests/bucket_integration_test.cc @@ -165,13 +165,13 @@ TEST_F(BucketIntegrationTest, CreatePredefinedAcl) { EXPECT_EQ(bucket_name, metadata->name()); // Wait at least 2 seconds before trying to create / delete another bucket. - if (!UsingTestbench()) std::this_thread::sleep_for(std::chrono::seconds(2)); + if (!UsingEmulator()) std::this_thread::sleep_for(std::chrono::seconds(2)); auto status = client->DeleteBucket(bucket_name); ASSERT_STATUS_OK(status); // Wait at least 2 seconds before trying to create / delete another bucket. 
- if (!UsingTestbench()) std::this_thread::sleep_for(std::chrono::seconds(2)); + if (!UsingEmulator()) std::this_thread::sleep_for(std::chrono::seconds(2)); } } @@ -199,13 +199,13 @@ TEST_F(BucketIntegrationTest, CreatePredefinedDefaultObjectAcl) { EXPECT_EQ(bucket_name, metadata->name()); // Wait at least 2 seconds before trying to create / delete another bucket. - if (!UsingTestbench()) std::this_thread::sleep_for(std::chrono::seconds(2)); + if (!UsingEmulator()) std::this_thread::sleep_for(std::chrono::seconds(2)); auto status = client->DeleteBucket(bucket_name); ASSERT_STATUS_OK(status); // Wait at least 2 seconds before trying to create / delete another bucket. - if (!UsingTestbench()) std::this_thread::sleep_for(std::chrono::seconds(2)); + if (!UsingEmulator()) std::this_thread::sleep_for(std::chrono::seconds(2)); } } @@ -271,7 +271,7 @@ TEST_F(BucketIntegrationTest, FullPatch) { EXPECT_EQ(logging_name, logging_meta->name()); // Wait at least 2 seconds before trying to create / delete another bucket. - if (!UsingTestbench()) std::this_thread::sleep_for(std::chrono::seconds(2)); + if (!UsingEmulator()) std::this_thread::sleep_for(std::chrono::seconds(2)); // Create a Bucket, use the default settings for most fields, except the // storage class and location. Fetch the full attributes of the bucket. StatusOr const insert_meta = client->CreateBucketForProject( @@ -393,7 +393,7 @@ TEST_F(BucketIntegrationTest, FullPatch) { auto status = client->DeleteBucket(bucket_name); ASSERT_STATUS_OK(status); // Wait at least 2 seconds before trying to create / delete another bucket. 
- if (!UsingTestbench()) std::this_thread::sleep_for(std::chrono::seconds(2)); + if (!UsingEmulator()) std::this_thread::sleep_for(std::chrono::seconds(2)); status = client->DeleteBucket(logging_name); ASSERT_STATUS_OK(status); } diff --git a/google/cloud/storage/tests/error_injection_integration_test.cc b/google/cloud/storage/tests/error_injection_integration_test.cc index 2165889d376ed..c19e93bf6aee2 100644 --- a/google/cloud/storage/tests/error_injection_integration_test.cc +++ b/google/cloud/storage/tests/error_injection_integration_test.cc @@ -45,7 +45,7 @@ class ErrorInjectionIntegrationTest : public google::cloud::storage::testing::StorageIntegrationTest { protected: void SetUp() override { - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); bucket_name_ = google::cloud::internal::GetEnv( "GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME") .value_or(""); diff --git a/google/cloud/storage/tests/grpc_integration_test.cc b/google/cloud/storage/tests/grpc_integration_test.cc index a146e4038b9aa..a08615880c542 100644 --- a/google/cloud/storage/tests/grpc_integration_test.cc +++ b/google/cloud/storage/tests/grpc_integration_test.cc @@ -165,7 +165,7 @@ TEST_P(GrpcIntegrationTest, WriteResume) { ObjectMetadata meta = os.metadata().value(); EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name, meta.bucket()); - if (UsingTestbench()) { + if (UsingEmulator()) { EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); } diff --git a/google/cloud/storage/tests/key_file_integration_test.cc b/google/cloud/storage/tests/key_file_integration_test.cc index 3c9f4053c4aed..04cab8ae3aea7 100644 --- a/google/cloud/storage/tests/key_file_integration_test.cc +++ b/google/cloud/storage/tests/key_file_integration_test.cc @@ -33,7 +33,7 @@ class KeyFileIntegrationTest protected: void SetUp() override { // The testbench does not implement signed URLs. 
- if (UsingTestbench()) GTEST_SKIP(); + if (UsingEmulator()) GTEST_SKIP(); std::string const key_file_envvar = GetParam(); diff --git a/google/cloud/storage/tests/object_checksum_integration_test.cc b/google/cloud/storage/tests/object_checksum_integration_test.cc index 9750c86af6dcc..76c8896413025 100644 --- a/google/cloud/storage/tests/object_checksum_integration_test.cc +++ b/google/cloud/storage/tests/object_checksum_integration_test.cc @@ -298,7 +298,7 @@ TEST_F(ObjectChecksumIntegrationTest, DefaultCrc32cStreamingWriteJSON) { TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadXML) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -342,7 +342,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadXML) { TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadJSON) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -386,7 +386,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadJSON) { TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadXMLRead) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -422,7 +422,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadXMLRead) { TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadJSONRead) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. 
- if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -459,7 +459,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadJSONRead) { TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingWriteJSON) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/object_file_integration_test.cc b/google/cloud/storage/tests/object_file_integration_test.cc index 5786bcb9dd459..8b867ad7f03b0 100644 --- a/google/cloud/storage/tests/object_file_integration_test.cc +++ b/google/cloud/storage/tests/object_file_integration_test.cc @@ -434,7 +434,7 @@ TEST_F(ObjectFileIntegrationTest, UploadFileResumableBySize) { auto expected_str = expected.str(); ASSERT_EQ(expected_str.size(), meta->size()); - if (UsingTestbench()) { + if (UsingEmulator()) { ASSERT_TRUE(meta->has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta->metadata("x_testbench_upload")); } @@ -474,7 +474,7 @@ TEST_F(ObjectFileIntegrationTest, UploadFileResumableByOption) { auto expected_str = expected.str(); ASSERT_EQ(expected_str.size(), meta->size()); - if (UsingTestbench()) { + if (UsingEmulator()) { ASSERT_TRUE(meta->has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta->metadata("x_testbench_upload")); } diff --git a/google/cloud/storage/tests/object_hash_integration_test.cc b/google/cloud/storage/tests/object_hash_integration_test.cc index 440b05c128a45..ac1c6045be6d7 100644 --- a/google/cloud/storage/tests/object_hash_integration_test.cc +++ b/google/cloud/storage/tests/object_hash_integration_test.cc @@ -398,7 +398,7 @@ TEST_F(ObjectHashIntegrationTest, DisableHashesStreamingWriteJSON) { TEST_F(ObjectHashIntegrationTest, 
MismatchedMD5StreamingReadXML) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -440,7 +440,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXML) { TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSON) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -483,7 +483,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSON) { TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXMLRead) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -518,7 +518,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXMLRead) { TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSONRead) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -554,7 +554,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSONRead) { TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingWriteJSON) { // This test is disabled when not using the testbench as it relies on the // testbench to inject faults. 
- if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/object_insert_integration_test.cc b/google/cloud/storage/tests/object_insert_integration_test.cc index 8fb7ad2b83986..9d77cae100456 100644 --- a/google/cloud/storage/tests/object_insert_integration_test.cc +++ b/google/cloud/storage/tests/object_insert_integration_test.cc @@ -42,7 +42,7 @@ class ObjectInsertIntegrationTest : application_credentials_("GOOGLE_APPLICATION_CREDENTIALS", {}) {} void SetUp() override { - if (!UsingTestbench()) { + if (!UsingEmulator()) { // This test was chosen (more or less arbitrarily) to validate that both // P12 and JSON credentials are usable in production. The positives for // this test are (1) it is relatively short (less than 60 seconds), (2) it diff --git a/google/cloud/storage/tests/object_media_integration_test.cc b/google/cloud/storage/tests/object_media_integration_test.cc index 5051761565c72..deee576609fbe 100644 --- a/google/cloud/storage/tests/object_media_integration_test.cc +++ b/google/cloud/storage/tests/object_media_integration_test.cc @@ -639,7 +639,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureUploadFile) { } TEST_F(ObjectMediaIntegrationTest, StreamingReadTimeout) { - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); auto options = ClientOptions::CreateDefaultClientOptions(); ASSERT_STATUS_OK(options); @@ -672,7 +672,7 @@ TEST_F(ObjectMediaIntegrationTest, StreamingReadTimeout) { } TEST_F(ObjectMediaIntegrationTest, StreamingReadTimeoutContinues) { - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); auto options = ClientOptions::CreateDefaultClientOptions(); ASSERT_STATUS_OK(options); @@ -711,7 +711,7 @@ TEST_F(ObjectMediaIntegrationTest, StreamingReadTimeoutContinues) { } TEST_F(ObjectMediaIntegrationTest, StreamingReadInternalError) { - if (!UsingTestbench()) 
GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); auto options = ClientOptions::CreateDefaultClientOptions(); ASSERT_STATUS_OK(options); diff --git a/google/cloud/storage/tests/object_resumable_write_integration_test.cc b/google/cloud/storage/tests/object_resumable_write_integration_test.cc index 481ea2cff9f84..7a46bb83acc79 100644 --- a/google/cloud/storage/tests/object_resumable_write_integration_test.cc +++ b/google/cloud/storage/tests/object_resumable_write_integration_test.cc @@ -66,7 +66,7 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteWithContentType) { EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); EXPECT_EQ("text/plain", meta.content_type()); - if (UsingTestbench()) { + if (UsingEmulator()) { EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); } @@ -114,7 +114,7 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteWithUseResumable) { ObjectMetadata meta = os.metadata().value(); EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); - if (UsingTestbench()) { + if (UsingEmulator()) { EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); } @@ -153,7 +153,7 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteResume) { ObjectMetadata meta = os.metadata().value(); EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); - if (UsingTestbench()) { + if (UsingEmulator()) { EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); } @@ -229,7 +229,7 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteResumeFinalizedUpload) { ObjectMetadata meta = os.metadata().value(); EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); - if (UsingTestbench()) { + if (UsingEmulator()) { EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); } @@ 
-306,7 +306,7 @@ TEST_F(ObjectResumableWriteIntegrationTest, StreamingWriteSlow) { } TEST_F(ObjectResumableWriteIntegrationTest, WithXUploadContentLength) { - if (UsingTestbench() || UsingGrpc()) GTEST_SKIP(); + if (UsingEmulator() || UsingGrpc()) GTEST_SKIP(); auto constexpr kMiB = 1024 * 1024L; auto constexpr kChunkSize = 2 * kMiB; @@ -380,7 +380,7 @@ TEST_F(ObjectResumableWriteIntegrationTest, WithXUploadContentLengthRandom) { } TEST_F(ObjectResumableWriteIntegrationTest, WithInvalidXUploadContentLength) { - if (UsingTestbench() || UsingGrpc()) GTEST_SKIP(); + if (UsingEmulator() || UsingGrpc()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/signed_url_integration_test.cc b/google/cloud/storage/tests/signed_url_integration_test.cc index a070390f50543..36a88db50b2eb 100644 --- a/google/cloud/storage/tests/signed_url_integration_test.cc +++ b/google/cloud/storage/tests/signed_url_integration_test.cc @@ -49,7 +49,7 @@ class SignedUrlIntegrationTest TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlGet) { // The testbench does not implement signed URLs. - if (UsingTestbench()) GTEST_SKIP(); + if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -82,7 +82,7 @@ TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlGet) { TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlPut) { // The testbench does not implement signed URLs. - if (UsingTestbench()) GTEST_SKIP(); + if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -115,7 +115,7 @@ TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlPut) { TEST_F(SignedUrlIntegrationTest, CreateV4SignedUrlGet) { // The testbench does not implement signed URLs. 
- if (UsingTestbench()) GTEST_SKIP(); + if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -148,7 +148,7 @@ TEST_F(SignedUrlIntegrationTest, CreateV4SignedUrlGet) { TEST_F(SignedUrlIntegrationTest, CreateV4SignedUrlPut) { // The testbench does not implement signed URLs. - if (UsingTestbench()) GTEST_SKIP(); + if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc b/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc index bcc22104f61b3..db7a9bfc8bc9a 100644 --- a/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc +++ b/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc @@ -32,7 +32,7 @@ class SlowReaderChunkIntegrationTest protected: void SetUp() override { // Too slow to run against production. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); bucket_name_ = google::cloud::internal::GetEnv( "GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME") .value_or(""); @@ -59,7 +59,7 @@ TEST_F(SlowReaderChunkIntegrationTest, LongPauses) { // testbench we can fail quickly by asking the testbench to break the stream // in the middle. auto make_reader = [this, object_name, &client](int64_t offset) { - if (UsingTestbench()) { + if (UsingEmulator()) { return client->ReadObject( bucket_name_, object_name, CustomHeader("x-goog-testbench-instructions", "return-broken-stream"), @@ -71,8 +71,8 @@ TEST_F(SlowReaderChunkIntegrationTest, LongPauses) { ObjectReadStream stream = make_reader(0); - auto slow_reader_period = std::chrono::seconds(UsingTestbench() ? 1 : 400); - auto const period_increment = std::chrono::seconds(UsingTestbench() ? 5 : 60); + auto slow_reader_period = std::chrono::seconds(UsingEmulator() ? 1 : 400); + auto const period_increment = std::chrono::seconds(UsingEmulator() ? 
5 : 60); auto const max_slow_reader_period = std::chrono::minutes(10); std::vector buffer; std::size_t const size = 1024 * 1024; diff --git a/google/cloud/storage/tests/slow_reader_stream_integration_test.cc b/google/cloud/storage/tests/slow_reader_stream_integration_test.cc index 3edf4b71ca0c7..d62c7aeb89ea9 100644 --- a/google/cloud/storage/tests/slow_reader_stream_integration_test.cc +++ b/google/cloud/storage/tests/slow_reader_stream_integration_test.cc @@ -32,7 +32,7 @@ class SlowReaderStreamIntegrationTest protected: void SetUp() override { // Too slow to run against production. - if (!UsingTestbench()) GTEST_SKIP(); + if (!UsingEmulator()) GTEST_SKIP(); bucket_name_ = google::cloud::internal::GetEnv( "GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME") .value_or(""); @@ -60,7 +60,7 @@ TEST_F(SlowReaderStreamIntegrationTest, LongPauses) { // in the middle. ObjectReadStream stream; - if (UsingTestbench()) { + if (UsingEmulator()) { stream = client->ReadObject( bucket_name_, object_name, CustomHeader("x-goog-testbench-instructions", "return-broken-stream")); @@ -68,8 +68,8 @@ TEST_F(SlowReaderStreamIntegrationTest, LongPauses) { stream = client->ReadObject(bucket_name_, object_name); } - auto slow_reader_period = std::chrono::seconds(UsingTestbench() ? 1 : 400); - auto const period_increment = std::chrono::seconds(UsingTestbench() ? 5 : 60); + auto slow_reader_period = std::chrono::seconds(UsingEmulator() ? 1 : 400); + auto const period_increment = std::chrono::seconds(UsingEmulator() ? 
5 : 60); auto const max_slow_reader_period = std::chrono::minutes(10); std::vector buffer; std::int64_t read_count = 0; From 8e7abed66d1d4a499f22545d55167589f781be9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 14:16:33 +0700 Subject: [PATCH 02/17] delete `google/cloud/storage/testbench` --- .gitignore | 2 - google/cloud/storage/testbench/README.md | 52 - .../cloud/storage/testbench/error_response.py | 37 - google/cloud/storage/testbench/gcs_bucket.py | 775 -------------- google/cloud/storage/testbench/gcs_iam.py | 50 - google/cloud/storage/testbench/gcs_object.py | 977 ------------------ google/cloud/storage/testbench/gcs_project.py | 315 ------ .../cloud/storage/testbench/requirements.txt | 4 - google/cloud/storage/testbench/testbench.py | 968 ----------------- .../storage/testbench/testbench_utils.py | 413 -------- 10 files changed, 3593 deletions(-) delete mode 100644 google/cloud/storage/testbench/README.md delete mode 100644 google/cloud/storage/testbench/error_response.py delete mode 100644 google/cloud/storage/testbench/gcs_bucket.py delete mode 100644 google/cloud/storage/testbench/gcs_iam.py delete mode 100644 google/cloud/storage/testbench/gcs_object.py delete mode 100644 google/cloud/storage/testbench/gcs_project.py delete mode 100644 google/cloud/storage/testbench/requirements.txt delete mode 100644 google/cloud/storage/testbench/testbench.py delete mode 100644 google/cloud/storage/testbench/testbench_utils.py diff --git a/.gitignore b/.gitignore index ad73899e9b65f..6d2c9fb3c6cef 100644 --- a/.gitignore +++ b/.gitignore @@ -25,7 +25,5 @@ cmake-build-*/ # Ignore staging files for the ci/kokoro/install builds ci/kokoro/install/ccache-contents/ -google/cloud/storage/testbench/__pycache__/ -google/cloud/storage/testbench/*.pyc google/cloud/storage/emulator/**/__pycache__/ google/cloud/storage/emulator/**/*.pyc diff --git a/google/cloud/storage/testbench/README.md 
b/google/cloud/storage/testbench/README.md deleted file mode 100644 index 9209c531cbddc..0000000000000 --- a/google/cloud/storage/testbench/README.md +++ /dev/null @@ -1,52 +0,0 @@ -# Storage Test Bench for Python 3 - -This is a testbench for the Storage JSON API, with limited support for the XML API. While it emulates most of the behaviors of Google Cloud Storage, it was developed mostly as a testbench for the client library, the testbench, for example, performs far fewer error checks and no permission checks (ACL/IAM). Generally, the error codes are similar to the ones generated by GCS, but the error messages are not. In short, this is intended for testing, and not as a general purpose emulator. - -## Install Dependencies - -```bash -pip install -r requirements.txt -``` - -## Run Test Bench - -```bash -python3 testbench.py --port 8080 -``` - -For more information use: `python3 testbench.py -h` - -## Force Failures - -You can force the following failures by using the `x-goog-testbench-instructions` header. - -### return-broken-stream - -Set request headers with `x-goog-testbench-instructions: return-broken-stream`. -Emulator will fail after sending 1024*1024 bytes. - -### return-corrupted-data - -Set request headers with `x-goog-testbench-instructions: return-corrupted-data`. -Emulator will return corrupted data. - -### stall-always - -Set request headers with `x-goog-testbench-instructions: stall-always`. -Emulator will stall at the beginning. - -### stall-at-256KiB - -Set request headers with `x-goog-testbench-instructions: stall-at-256KiB`. -Emulator will stall at 256KiB bytes. - -### return-503-after-256K - -Set request headers with `x-goog-testbench-instructions: return-503-after-256K`. -Emulator will return a `HTTP 503` after sending 256KiB bytes. - -### return-503-after-256K/retry-N - -Set request headers with `x-goog-testbench-instructions: return-503-after-256K/retry-1` up to `x-goog-testbench-instructions: return-503-after-256K/retry-N`. 
- -For N==1 and N==2 behave like `return-305-after-256K`, for `N>=3` ignore the failure instruction and return successfully. This is used to test failures during retry, the client cooperates by sending the retry counter in the failure instructions. diff --git a/google/cloud/storage/testbench/error_response.py b/google/cloud/storage/testbench/error_response.py deleted file mode 100644 index 7d045e8a67717..0000000000000 --- a/google/cloud/storage/testbench/error_response.py +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python -# Copyright 2018 Google LLC. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""A helper class to send error responses in the storage client test bench.""" - -import flask - - -class ErrorResponse(Exception): - """Simplify generation of error responses.""" - - status_code = 400 - - def __init__(self, message, status_code=None, payload=None): - Exception.__init__(self) - self.message = message - if status_code is not None: - self.status_code = status_code - self.payload = payload - - def as_response(self): - kv = dict(self.payload or ()) - kv["message"] = self.message - response = flask.jsonify(kv) - response.status_code = self.status_code - return response diff --git a/google/cloud/storage/testbench/gcs_bucket.py b/google/cloud/storage/testbench/gcs_bucket.py deleted file mode 100644 index 949e880dab327..0000000000000 --- a/google/cloud/storage/testbench/gcs_bucket.py +++ /dev/null @@ -1,775 +0,0 @@ -#!/usr/bin/env python -# Copyright 2018 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Implement a class to simulate GCS buckets.""" - -import base64 -import error_response -import flask -import gcs_object -import json -import re -import testbench_utils -import time - - -class GcsBucket(object): - """Represent a GCS Bucket.""" - - def __init__(self, gcs_url, name): - self.name = name - self.gcs_url = gcs_url - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - self.metadata = { - "timeCreated": timestamp, - "updated": timestamp, - "metageneration": "0", - "name": self.name, - "location": "US", - "storageClass": "STANDARD", - "etag": "XYZ=", - "labels": {"foo": "bar", "baz": "qux"}, - "owner": {"entity": "project-owners-123456789", "entityId": ""}, - } - self.notification_id = "1" - self.notifications = {} - self.iam_version = 1 - self.counter = 1 - self.iam_bindings = [] - self.resumable_uploads = {} - # Update the derived metadata attributes (e.g.: id, kind, selfLink) - self.update_from_metadata({}) - self.insert_acl( - testbench_utils.canonical_entity_name("project-owners-123456789"), "OWNER" - ) - self.insert_acl( - testbench_utils.canonical_entity_name("project-editors-123456789"), "OWNER" - ) - self.insert_acl( - testbench_utils.canonical_entity_name("project-viewers-123456789"), "READER" - ) - self.insert_default_object_acl( - testbench_utils.canonical_entity_name("project-owners-123456789"), "OWNER" - ) - self.insert_default_object_acl( - testbench_utils.canonical_entity_name("project-editors-123456789"), "OWNER" - ) - self.insert_default_object_acl( - testbench_utils.canonical_entity_name("project-viewers-123456789"), "READER" - ) - - def increase_metageneration(self): - """Increase the current metageneration number.""" - new = str(int(self.metadata.get("metageneration", "0")) + 1) - self.metadata["metageneration"] = new - - def versioning_enabled(self): - """Return True if versioning is enabled for this Bucket.""" - v = self.metadata.get("versioning", None) - if v is None: - return False - return 
v.get("enabled", False) - - @classmethod - def _remove_non_writable_keys(cls, metadata): - """Remove the keys from metadata (an update or patch) that are not - writable. - - Both `Buckets: patch` and `Buckets: update` either ignore non-writable - keys or return 400 if the key does not match the current value. In - the testbench we simply always ignore them, to make life easier. - - :param metadata:dict a dictionary representing a patch or - update to the metadata. - :return metadata but with only any non-writable keys removed. - :rtype: dict - """ - writeable_keys = { - "acl", - "billing", - "cors", - "defaultObjectAcl", - "encryption", - "labels", - "lifecycle", - "location", - "logging", - "retentionPolicy", - "storageClass", - "versioning", - "website", - "iamConfiguration", - } - non_writeable_keys = [] - for key in metadata.keys(): - if key not in writeable_keys: - non_writeable_keys.append(key) - for key in non_writeable_keys: - metadata.pop(key, None) - return metadata - - def _adjust_field_patch(self, patch, field): - """Add missing fields (such as lockedTme) to a UniformBucketLevelAccess - or BucketPolicyOnly patch. - - :param patch:dict a dictionary of metadata values. - :param field: one of 'uniformBucketLevelAccess' or 'bucketPolicyOnly' - """ - field_was_enabled = False - if self.metadata.get("iamConfiguration"): - field_value = self.metadata.get("iamConfiguration").get(field) - if field_value: - field_was_enabled = field_value.get("enabled") - config = patch.get("iamConfiguration") - if config is not None: - if config.get(field): - field_enabled = config.get(field).get("enabled") - if not field_was_enabled and field_enabled: - # Set the locked time (arbitrarily) to 7 days from now. 
- locked_time = time.gmtime(time.time() + 7 * 24 * 3600) - modified_field = { - "lockedTime": time.strftime("%Y-%m-%dT%H:%M:%SZ", locked_time), - "enabled": field_enabled, - } - config[field] = modified_field - - def update_from_metadata(self, metadata): - """Update from a metadata dictionary. - - :param metadata:dict a dictionary with new metadata values. - """ - retention_policy = metadata.get("retentionPolicy") - if retention_policy: - # Ignore any values set for 'isLocked' or 'effectiveTime'. - retention_policy.pop("isLocked", None) - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - retention_policy["effectiveTime"] = timestamp - metadata["retentionPolicy"] = retention_policy - self._adjust_field_patch(self.metadata, "uniformBucketLevelAccess") - self._adjust_field_patch(self.metadata, "bucketPolicyOnly") - tmp = self.metadata.copy() - metadata = GcsBucket._remove_non_writable_keys(metadata) - tmp.update(metadata) - tmp["name"] = tmp.get("name", self.name) - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - tmp.update( - { - "id": self.name, - "kind": "storage#bucket", - "selfLink": self.gcs_url + self.name, - "projectNumber": "123456789", - "updated": timestamp, - } - ) - self.metadata = tmp - self.increase_metageneration() - - def apply_patch(self, patch): - """Update from a metadata dictionary. - - :param patch:dict a dictionary with metadata changes. - """ - patch = GcsBucket._remove_non_writable_keys(patch) - retention_policy = patch.get("retentionPolicy") - if retention_policy: - # Ignore any values set for 'isLocked' or 'effectiveTime'. 
- retention_policy.pop("isLocked", None) - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - retention_policy["effectiveTime"] = timestamp - patch["retentionPolicy"] = retention_policy - self._adjust_field_patch(self.metadata, "uniformBucketLevelAccess") - self._adjust_field_patch(self.metadata, "bucketPolicyOnly") - patched = testbench_utils.json_api_patch( - self.metadata, patch, recurse_on={"labels"} - ) - self.metadata = patched - self.increase_metageneration() - - def check_preconditions(self, request): - """Verify that the preconditions in request are met. - - :param request:flask.Request the contents of the HTTP request. - :rtype:NoneType - :raises:ErrorResponse if the request does not pass the preconditions, - for example, the request has a `ifMetagenerationMatch` restriction - that is not met. - """ - - metageneration_match = request.args.get("ifMetagenerationMatch") - metageneration_not_match = request.args.get("ifMetagenerationNotMatch") - metageneration = self.metadata.get("metageneration") - - if ( - metageneration_not_match is not None - and metageneration_not_match == metageneration - ): - raise error_response.ErrorResponse( - "Precondition Failed (metageneration = %s)" % metageneration, - status_code=412, - ) - - if metageneration_match is not None and metageneration_match != metageneration: - raise error_response.ErrorResponse( - "Precondition Failed (metageneration = %s)" % metageneration, - status_code=412, - ) - - def create_acl_entry(self, entity, role): - """Return an ACL entry for the given entity and role. - - :param entity: str the user, group or email granted permissions. - :param role: str the name of the permissions (READER, WRITER, OWNER). - :return: the canonical entity name and the ACL entry. 
- :rtype: (str,dict) - """ - entity = testbench_utils.canonical_entity_name(entity) - email = "" - if entity.startswith("user-"): - email = entity.replace("user-", "", 1) - return ( - entity, - { - "bucket": self.name, - "email": email, - "entity": entity, - "etag": self.metadata.get("etag", "XYZ="), - "id": self.metadata.get("id", "") + "/" + entity, - "kind": "storage#bucketAccessControl", - "role": role, - "selfLink": self.metadata.get("selfLink") + "/acl/" + entity, - }, - ) - - def insert_acl(self, entity, role): - """Insert (or update) a new BucketAccessControl entry for this bucket. - - :param entity:str the name of the entity to insert. - :param role:str the new role - :return: the dictionary representing the new AccessControl metadata. - :rtype: dict - """ - entity, entry = self.create_acl_entry(entity, role) - # Replace or insert the entry. - indexed = testbench_utils.index_acl(self.metadata.get("acl", [])) - indexed[entity] = entry - self.metadata["acl"] = list(indexed.values()) - return entry - - def delete_acl(self, entity): - """ - Delete a single BucketAccessControl entry from this bucket. - - :param entity:str the name of the entity. - :rtype:NoneType - """ - entity = testbench_utils.canonical_entity_name(entity) - indexed = testbench_utils.index_acl(self.metadata.get("acl", [])) - indexed.pop(entity) - self.metadata["acl"] = list(indexed.values()) - - def get_acl(self, entity): - """Get a single BucketAccessControl entry from this bucket. - - :param entity:str the name of the entity. - :return: with the contents of the BucketAccessControl. - :rtype: dict - """ - entity = testbench_utils.canonical_entity_name(entity) - for acl in self.metadata.get("acl", []): - if acl.get("entity", "") == entity: - return acl - raise error_response.ErrorResponse( - "Entity %s not found in object %s" % (entity, self.name) - ) - - def update_acl(self, entity, role): - """Update a single BucketAccessControl entry in this bucket. 
- - :param entity:str the name of the entity. - :param role:str the new role for the entity. - :return: with the contents of the BucketAccessControl. - :rtype: dict - """ - return self.insert_acl(entity, role) - - def insert_default_object_acl(self, entity, role): - """Insert (or update) a new default ObjectAccessControl entry for this - bucket. - - :param entity:str the name of the entity to insert. - :param role:str the new role - :return: the dictionary representing the new ObjectAccessControl. - :rtype: dict - """ - entity = testbench_utils.canonical_entity_name(entity) - email = "" - if entity.startswith("user-"): - email = email.replace("user-", "", 1) - # Replace or insert the entry. - indexed = testbench_utils.index_acl(self.metadata.get("defaultObjectAcl", [])) - indexed[entity] = { - "bucket": self.name, - "email": email, - "entity": entity, - "etag": self.metadata.get("etag", "XYZ="), - "id": self.metadata.get("id", "") + "/" + entity, - "kind": "storage#objectAccessControl", - "role": role, - "selfLink": self.metadata.get("selfLink") + "/acl/" + entity, - } - self.metadata["defaultObjectAcl"] = list(indexed.values()) - return indexed[entity] - - def delete_default_object_acl(self, entity): - """Delete a single default ObjectAccessControl entry from this bucket. - - :param entity:str the name of the entity. - :rtype:NoneType - """ - entity = testbench_utils.canonical_entity_name(entity) - indexed = testbench_utils.index_acl(self.metadata.get("defaultObjectAcl", [])) - indexed.pop(entity) - self.metadata["defaultObjectAcl"] = list(indexed.values()) - - def get_default_object_acl(self, entity): - """Get a single default ObjectAccessControl entry from this Bucket. - - :param entity:str the name of the entity. - :return: with the contents of the BucketAccessControl. 
- :rtype: dict - """ - entity = testbench_utils.canonical_entity_name(entity) - for acl in self.metadata.get("defaultObjectAcl", []): - if acl.get("entity", "") == entity: - return acl - raise error_response.ErrorResponse( - "Entity %s not found in object %s" % (entity, self.name) - ) - - def update_default_object_acl(self, entity, role): - """Update a single default ObjectAccessControl entry in this Bucket. - - :param entity:str the name of the entity. - :param role:str the new role for the entity. - :return: with the contents of the ObjectAccessControl. - :rtype: dict - """ - return self.insert_default_object_acl(entity, role) - - def list_notifications(self): - """List the notifications associated with this Bucket. - - :return: with the notification definitions. - :rtype: list[dict] - """ - return list(self.notifications.values()) - - def insert_notification(self, request): - """ - Insert a new notification into this Bucket. - - :param request:flask.Request the HTTP request contents. - :return: the new notification value. - :rtype:dict - """ - notification_id = "notification-%s" % self.notification_id - link = "%s/b/%s/notificationConfigs/%s" % ( - self.gcs_url, - self.name, - notification_id, - ) - self.notification_id = str(int(self.notification_id) + 1) - notification = json.loads(request.data) - notification.update( - { - "id": notification_id, - "selfLink": link, - "etag": "XYZ=", - "kind": "storage#notification", - } - ) - self.notifications[notification_id] = notification - return notification - - def delete_notification(self, notification_id): - """Delete a notification in this Bucket. - - :param notification_id:str the id of the notification. 
- :rtype:NoneType - """ - if self.notifications.get(notification_id) is None: - raise error_response.ErrorResponse( - "Notification %d not found in %s" % (notification_id, self.name), - status_code=404, - ) - del self.notifications[notification_id] - - def get_notification(self, notification_id): - """ - Get the details of a given notification in this Bucket. - - :param notification_id:str the id of the notification. - :return: the details of the notification. - :rtype: dict - """ - details = self.notifications.get(notification_id) - if details is None: - raise error_response.ErrorResponse( - "Notification %d not found in %s" % (notification_id, self.name), - status_code=404, - ) - return details - - @classmethod - def _append_acl_members_to_binding(cls, role, members, bindings): - """Add ACL members into IAM bindings.""" - found = False - for binding in bindings: - if binding.get("role") == role and not binding.get("condition"): - found = True - binding.setdefault("members", []) - for member in members: - binding["members"].append(member) - break - if not found: - bindings.append({"role": role, "members": members}) - return bindings - - def iam_policy_as_json(self): - """Get the current IamPolicy in the right format for JSON.""" - role_mapping = { - "READER": "roles/storage.legacyBucketReader", - "WRITER": "roles/storage.legacyBucketWriter", - "OWNER": "roles/storage.legacyBucketOwner", - } - copy_of_bindings = self.iam_bindings.copy() - if self.metadata.get("acl") is not None: - # Store the ACLs as IamBindings - for entry in self.metadata.get("acl", []): - legacy_role = entry.get("role") - if legacy_role is None or entry.get("entity") is None: - raise error_response.ErrorResponse( - "Invalid ACL entry", status_code=500 - ) - role = role_mapping.get(legacy_role) - if role is None: - raise error_response.ErrorResponse( - "Invalid legacy role %s" % legacy_role, status_code=500 - ) - copy_of_bindings = GcsBucket._append_acl_members_to_binding( - role, 
[entry.get("entity")], copy_of_bindings - ) - policy = { - "kind": "storage#policy", - "resourceId": "projects/_/buckets/%s" % self.name, - "bindings": copy_of_bindings, - "etag": base64.b64encode(bytearray(str(self.counter), "utf-8")).decode( - "utf-8" - ), - "version": self.iam_version, - } - return policy - - def get_iam_policy(self, request): - """Get the IamPolicy associated with this Bucket. - - :param request: flask.Request the http request. - :return: the IamPolicy as a dictionary, ready for JSON encoding. - :rtype: dict - """ - self.check_preconditions(request) - return self.iam_policy_as_json() - - def set_iam_policy(self, request): - """Set the IamPolicy associated with this Bucket. - - :param request: flask.Request the original http request. - :return: the IamPolicy as a dictionary, ready for JSON encoding. - :rtype: dict - """ - self.check_preconditions(request) - current_etag = base64.b64encode(bytearray(str(self.counter), "utf-8")).decode( - "utf-8" - ) - if request.headers.get( - "if-match" - ) is not None and current_etag != request.headers.get("if-match"): - raise error_response.ErrorResponse( - "Mismatched ETag has %s" % current_etag, status_code=412 - ) - if request.headers.get( - "if-none-match" - ) is not None and current_etag == request.headers.get("if-none-match"): - raise error_response.ErrorResponse( - "Mismatched ETag has %s" % current_etag, status_code=412 - ) - - policy = json.loads(request.data) - if policy.get("bindings") is None: - raise error_response.ErrorResponse('Missing "bindings" field') - - new_acl = [] - new_iam_bindings = [] - role_mapping = { - "roles/storage.legacyBucketReader": "READER", - "roles/storage.legacyBucketWriter": "WRITER", - "roles/storage.legacyBucketOwner": "OWNER", - } - for binding in policy.get("bindings"): - role = binding.get("role") - members = binding.get("members") - condition = binding.get("condition") - if role is None or members is None: - raise error_response.ErrorResponse('Missing "role" or 
"members" fields') - if role_mapping.get(role) is None: - new_binding = {"role": role, "members": members} - if condition: - new_binding["condition"] = condition - new_iam_bindings.append(new_binding) - else: - for m in members: - legacy_role = role_mapping.get(role) - _, entry = self.create_acl_entry(entity=m, role=legacy_role) - new_acl.append(entry) - self.metadata["acl"] = new_acl - self.iam_bindings = new_iam_bindings - if policy.get("version") is None: - self.iam_version = 1 - else: - self.iam_version = policy.get("version") - self.counter = self.counter + 1 - return self.iam_policy_as_json() - - def test_iam_permissions(self, request): - """Test the IAM permissions for the current user. - - Because we do not want to implement a full simulator for IAM, we simply - return the permissions matching 'storage.*' - - :param request: flask.Request the current http request. - :return: formatted for `Buckets: testIamPermissions` - :rtype: dict - """ - result = {"kind": "storage#testIamPermissionsResponse", "permissions": []} - for p in request.args.getlist("permissions"): - if p.startswith("storage."): - result["permissions"].append(p) - return result - - def lock_retention_policy(self, request): - """Set the IamPolicy associated with this Bucket. - - :param request: flask.Request the current http request. 
- :return: None - """ - metageneration = request.args.get("ifMetagenerationMatch") - if metageneration is None: - raise error_response.ErrorResponse( - "Missing ifMetagenerationMatch parameter", status_code=400 - ) - if metageneration != self.metadata.get("metageneration"): - raise error_response.ErrorResponse( - "Precondition Failed (metageneration = %s)" % metageneration, - status_code=412, - ) - retention_policy = self.metadata.get("retentionPolicy") - if retention_policy is None: - raise error_response.ErrorResponse( - "Precondition Failed, bucket does not have a retention policy to lock", - status_code=412, - ) - retention_policy["isLocked"] = True - self.metadata["retentionPolicy"] = retention_policy - self.increase_metageneration() - - def create_resumable_upload(self, upload_url, request): - """Capture the details for a resumable upload. - - :param upload_url: str the base URL for uploads. - :param request: flask.Request the original http request. - :return: the HTTP response to send back. 
- """ - x_upload_content_type = request.headers.get( - "x-upload-content-type", "application/octet-stream" - ) - x_upload_content_length = request.headers.get("x-upload-content-length") - expected_bytes = None - if x_upload_content_length: - expected_bytes = int(x_upload_content_length) - - if request.args.get("name") is not None and len(request.data): - raise error_response.ErrorResponse( - "The name argument is only supported for empty payloads", - status_code=400, - ) - if len(request.data): - metadata = json.loads(request.data) - else: - metadata = {"name": request.args.get("name")} - - if metadata.get("name") is None: - raise error_response.ErrorResponse( - "Missing object name argument", status_code=400 - ) - metadata.setdefault("contentType", x_upload_content_type) - upload = { - "metadata": metadata, - "instructions": request.headers.get("x-goog-testbench-instructions"), - "fields": request.args.get("fields"), - "next_byte": 0, - "expected_bytes": expected_bytes, - "object_name": metadata.get("name"), - "media": b"", - "transfer": set(), - "done": False, - } - # Capture the preconditions, including those that are None. - for precondition in [ - "ifGenerationMatch", - "ifGenerationNotMatch", - "ifMetagenerationMatch", - "ifMetagenerationNotMatch", - ]: - upload[precondition] = request.args.get(precondition) - upload_id = base64.b64encode(bytearray(metadata.get("name"), "utf-8")).decode( - "utf-8" - ) - self.resumable_uploads[upload_id] = upload - location = "%s?uploadType=resumable&upload_id=%s" % (upload_url, upload_id) - response = flask.make_response("") - response.headers["Location"] = location - return response - - def receive_upload_chunk(self, gcs_url, request): - """Receive a new upload chunk. - - :param gcs_url: str the base URL for the service. - :param request: flask.Request the original http request. - :return: the HTTP response. 
- """ - upload_id = request.args.get("upload_id") - if upload_id is None: - raise error_response.ErrorResponse( - "Missing upload_id in resumable_upload_chunk", status_code=400 - ) - upload = self.resumable_uploads.get(upload_id) - if upload is None: - raise error_response.ErrorResponse( - "Cannot find resumable upload %s" % upload_id, status_code=404 - ) - # Be gracious in what you accept, if the Content-Range header is not - # set we assume it is a good header and it is the end of the file. - next_byte = upload["next_byte"] - upload["transfer"].add(request.environ.get("HTTP_TRANSFER_ENCODING", "")) - end = next_byte + len(request.data) - total = end - final_chunk = False - payload = testbench_utils.extract_media(request) - content_range = request.headers.get("content-range") - if content_range is not None: - if content_range.startswith("bytes */*"): - # This is just a query to resume an upload, if it is done, return - # the completed upload payload and an empty range header. - response = flask.make_response(upload.get("payload", "")) - if next_byte > 1 and not upload["done"]: - response.headers["Range"] = "bytes=0-%d" % (next_byte - 1) - response.status_code = 200 if upload["done"] else 308 - return response - match = re.match("bytes \*/(\\*|[0-9]+)", content_range) - if match: - if match.group(1) == "*": - total = 0 - else: - total = int(match.group(1)) - final_chunk = True - else: - match = re.match("bytes ([0-9]+)-([0-9]+)/(\\*|[0-9]+)", content_range) - if not match: - raise error_response.ErrorResponse( - "Invalid Content-Range in upload %s" % content_range, - status_code=400, - ) - begin = int(match.group(1)) - end = int(match.group(2)) - if match.group(3) == "*": - total = 0 - else: - total = int(match.group(3)) - final_chunk = True - - if begin != next_byte: - raise error_response.ErrorResponse( - "Mismatched data range, expected data at %d, got %d" - % (next_byte, begin), - status_code=400, - ) - if len(payload) != end - begin + 1: - raise 
error_response.ErrorResponse( - "Mismatched data range (%d) vs. received data (%d)" - % (end - begin + 1, len(payload)), - status_code=400, - ) - - upload["media"] = upload.get("media", b"") + payload - next_byte = len(upload.get("media", "")) - upload["next_byte"] = next_byte - response_payload = "" - if final_chunk and next_byte >= total: - expected_bytes = upload["expected_bytes"] - if expected_bytes is not None and expected_bytes != total: - raise error_response.ErrorResponse( - "X-Upload-Content-Length" - "validation failed. Expected=%d, got %d." % (expected_bytes, total) - ) - upload["done"] = True - object_name = upload.get("object_name") - object_path, blob = testbench_utils.get_object( - self.name, object_name, gcs_object.GcsObject(self.name, object_name) - ) - # Release a few resources to control memory usage. - original_metadata = upload.pop("metadata", None) - media = upload.pop("media", None) - blob.check_preconditions_by_value( - upload.get("ifGenerationMatch"), - upload.get("ifGenerationNotMatch"), - upload.get("ifMetagenerationMatch"), - upload.get("ifMetagenerationNotMatch"), - ) - if upload.pop("instructions", None) == "inject-upload-data-error": - media = testbench_utils.corrupt_media(media) - revision = blob.insert_resumable(gcs_url, request, media, original_metadata) - revision.metadata.setdefault("metadata", {}) - revision.metadata["metadata"]["x_testbench_transfer_encoding"] = ":".join( - upload["transfer"] - ) - response_payload = testbench_utils.filter_fields_from_response( - upload.get("fields"), revision.metadata - ) - upload["payload"] = response_payload - testbench_utils.insert_object(object_path, blob) - - response = flask.make_response(response_payload) - if next_byte == 0: - response.headers["Range"] = "bytes=0-0" - else: - response.headers["Range"] = "bytes=0-%d" % (next_byte - 1) - if upload.get("done", False): - response.status_code = 200 - else: - response.status_code = 308 - return response diff --git 
a/google/cloud/storage/testbench/gcs_iam.py b/google/cloud/storage/testbench/gcs_iam.py deleted file mode 100644 index 71d2bd6f6dbc8..0000000000000 --- a/google/cloud/storage/testbench/gcs_iam.py +++ /dev/null @@ -1,50 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Simulate IAM operations.""" - -import base64 -import error_response -import flask -import json - -IAM_HANDLER_PATH = "/iamapi" -iam = flask.Flask(__name__) -iam.debug = True - - -@iam.route("/projects/-/serviceAccounts/:signBlob", methods=["POST"]) -def sign_blob(service_account): - """Implement the `projects.serviceAccounts.signBlob` API.""" - payload = json.loads(flask.request.data) - if payload.get("payload") is None: - raise error_response.ErrorResponse( - "Missing payload in the payload", status_code=400 - ) - try: - blob = base64.b64decode(payload.get("payload")) - except TypeError: - raise error_response.ErrorResponse( - "payload must be base64-encoded", status_code=400 - ) - blob = b"signed: " + blob - response = { - "keyId": "fake-key-id-123", - "signedBlob": base64.b64encode(blob).decode("utf-8"), - } - return json.dumps(response) - - -def get_iam_app(): - return IAM_HANDLER_PATH, iam diff --git a/google/cloud/storage/testbench/gcs_object.py b/google/cloud/storage/testbench/gcs_object.py deleted file mode 100644 index c4995a7b2d827..0000000000000 --- a/google/cloud/storage/testbench/gcs_object.py +++ /dev/null @@ -1,977 +0,0 @@ 
-#!/usr/bin/env python -# Copyright 2018 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Implement a class to simulate GCS objects.""" - -import base64 -import crc32c -import error_response -import hashlib -import json -import struct -import testbench_utils -import time - - -class GcsObjectVersion(object): - """Represent a single revision of a GCS Object.""" - - def __init__(self, gcs_url, bucket_name, name, generation, request, media): - """Initialize a new object revision. - - :param gcs_url:str the base URL for the GCS service. - :param bucket_name:str the name of the bucket that contains the object. - :param name:str the name of the object. - :param generation:int the generation number for this object. - :param request:flask.Request the contents of the HTTP request. - :param media:str the contents of the object. 
- """ - self.gcs_url = gcs_url - self.bucket_name = bucket_name - self.name = name - self.generation = str(generation) - self.object_id = bucket_name + "/o/" + name + "/" + str(generation) - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - self.media = media - instructions = request.headers.get("x-goog-testbench-instructions") - if instructions == "inject-upload-data-error": - self.media = testbench_utils.corrupt_media(media) - - self.metadata = { - "timeCreated": timestamp, - "updated": timestamp, - "metageneration": "0", - "generation": str(generation), - "location": "US", - "storageClass": "STANDARD", - "size": str(len(self.media)), - "etag": "XYZ=", - "owner": {"entity": "project-owners-123456789", "entityId": ""}, - "md5Hash": base64.b64encode(hashlib.md5(self.media).digest()).decode( - "utf-8" - ), - "crc32c": base64.b64encode( - struct.pack(">I", crc32c.crc32(self.media)) - ).decode("utf-8"), - } - if request.headers.get("content-type") is not None: - self.metadata["contentType"] = request.headers.get("content-type") - # Update the derived metadata attributes (e.g.: id, kind, selfLink) - self.update_from_metadata({}) - # Capture any encryption key headers. - self._capture_customer_encryption(request) - self._update_predefined_acl(request.args.get("predefinedAcl")) - acl2json_mapping = { - "authenticated-read": "authenticatedRead", - "bucket-owner-full-control": "bucketOwnerFullControl", - "bucket-owner-read": "bucketOwnerRead", - "private": "private", - "project-private": "projectPrivate", - "public-read": "publicRead", - } - if request.headers.get("x-goog-acl") is not None: - acl = request.headers.get("x-goog-acl") - predefined = acl2json_mapping.get(acl) - if predefined is not None: - self._update_predefined_acl(predefined) - else: - raise error_response.ErrorResponse( - "Invalid predefinedAcl value %s" % acl, status_code=400 - ) - - def update_from_metadata(self, metadata): - """Update from a metadata dictionary. 
- - :param metadata:dict a dictionary with new metadata values. - :rtype:NoneType - """ - tmp = self.metadata.copy() - tmp.update(metadata) - tmp["bucket"] = tmp.get("bucket", self.name) - tmp["name"] = tmp.get("name", self.name) - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - # Some values cannot be changed via updates, so we always reset them. - tmp.update( - { - "kind": "storage#object", - "bucket": self.bucket_name, - "name": self.name, - "id": self.object_id, - "selfLink": self.gcs_url + self.name, - "projectNumber": "123456789", - "updated": timestamp, - } - ) - tmp["metageneration"] = str(int(tmp.get("metageneration", "0")) + 1) - self.metadata = tmp - self._validate_hashes() - - def _validate_hashes(self): - """Validate the md5Hash and crc32c fields against the stored media.""" - self._validate_md5_hash() - self._validate_crc32c() - - def _validate_md5_hash(self): - """Validate the md5Hash field against the stored media.""" - actual = self.metadata.get("md5Hash", "") - expected = base64.b64encode(hashlib.md5(self.media).digest()).decode("utf-8") - if actual != expected: - raise error_response.ErrorResponse( - "Mismatched MD5 hash expected=%s, actual=%s" % (expected, actual) - ) - - def _validate_crc32c(self): - """Validate the crc32c field against the stored media.""" - actual = self.metadata.get("crc32c", "") - expected = base64.b64encode(struct.pack(">I", crc32c.crc32(self.media))).decode( - "utf-8" - ) - if actual != expected: - raise error_response.ErrorResponse( - "Mismatched CRC32C checksum expected=%s, actual=%s" % (expected, actual) - ) - - def validate_encryption_for_read(self, request, prefix="x-goog-encryption"): - """Verify that the request includes the correct encryption keys. - - :param request:flask.Request the http request. - :param prefix: str the prefix shared by the encryption headers, - typically 'x-goog-encryption', but for rewrite requests it can be - 'x-goog-copy-source-encryption'. 
- :rtype:NoneType - """ - key_header = prefix + "-key" - hash_header = prefix + "-key-sha256" - algo_header = prefix + "-algorithm" - encryption = self.metadata.get("customerEncryption") - if encryption is None: - # The object is not encrypted, no key is needed. - if request.headers.get(key_header) is None: - return - else: - # The data is not encrypted, sending an encryption key is an - # error. - testbench_utils.raise_csek_error() - # The data is encrypted, the key must be present, match, and match its - # hash. - key_header_value = request.headers.get(key_header) - hash_header_value = request.headers.get(hash_header) - algo_header_value = request.headers.get(algo_header) - testbench_utils.validate_customer_encryption_headers( - key_header_value, hash_header_value, algo_header_value - ) - if encryption.get("keySha256") != hash_header_value: - testbench_utils.raise_csek_error() - - def _capture_customer_encryption(self, request): - """Capture the customer-supplied encryption key, if any. - - :param request:flask.Request the http request. 
- :rtype:NoneType - """ - if request.headers.get("x-goog-encryption-key") is None: - return - prefix = "x-goog-encryption" - key_header = prefix + "-key" - hash_header = prefix + "-key-sha256" - algo_header = prefix + "-algorithm" - key_header_value = request.headers.get(key_header) - hash_header_value = request.headers.get(hash_header) - algo_header_value = request.headers.get(algo_header) - testbench_utils.validate_customer_encryption_headers( - key_header_value, hash_header_value, algo_header_value - ) - self.metadata["customerEncryption"] = { - "encryptionAlgorithm": algo_header_value, - "keySha256": hash_header_value, - } - - def _update_predefined_acl(self, predefined_acl): - """Update the ACL based on the given request parameter value.""" - if predefined_acl is None: - predefined_acl = "projectPrivate" - self.insert_acl( - testbench_utils.canonical_entity_name("project-owners-123456789"), "OWNER" - ) - bucket = testbench_utils.lookup_bucket(self.bucket_name) - owner = bucket.metadata.get("owner") - if owner is None: - owner_entity = "project-owners-123456789" - else: - owner_entity = owner.get("entity") - if predefined_acl == "authenticatedRead": - self.insert_acl("allAuthenticatedUsers", "READER") - elif predefined_acl == "bucketOwnerFullControl": - self.insert_acl(owner_entity, "OWNER") - elif predefined_acl == "bucketOwnerRead": - self.insert_acl(owner_entity, "READER") - elif predefined_acl == "private": - self.insert_acl("project-owners", "OWNER") - elif predefined_acl == "projectPrivate": - self.insert_acl( - testbench_utils.canonical_entity_name("project-editors-123456789"), - "OWNER", - ) - self.insert_acl( - testbench_utils.canonical_entity_name("project-viewers-123456789"), - "READER", - ) - elif predefined_acl == "publicRead": - self.insert_acl(testbench_utils.canonical_entity_name("allUsers"), "READER") - else: - raise error_response.ErrorResponse( - "Invalid predefinedAcl value", status_code=400 - ) - - def reset_predefined_acl(self, 
predefined_acl): - """Reset the ACL based on the given request parameter value.""" - self.metadata["acl"] = [] - self._update_predefined_acl(predefined_acl) - - def insert_acl(self, entity, role): - """Insert (or update) a new AccessControl entry for this object. - - :param entity:str the name of the entity to insert. - :param role:str the new role - :return: the dictionary representing the new AccessControl metadata. - :rtype:dict - """ - entity = testbench_utils.canonical_entity_name(entity) - email = "" - if entity.startswith("user-"): - email = entity - # Replace or insert the entry. - indexed = testbench_utils.index_acl(self.metadata.get("acl", [])) - indexed[entity] = { - "bucket": self.bucket_name, - "email": email, - "entity": entity, - "entity_id": "", - "etag": self.metadata.get("etag", "XYZ="), - "generation": str(self.generation), - "id": self.metadata.get("id", "") + "/" + entity, - "kind": "storage#objectAccessControl", - "object": self.name, - "role": role, - "selfLink": self.metadata.get("selfLink") + "/acl/" + entity, - } - self.metadata["acl"] = list(indexed.values()) - return indexed[entity] - - def delete_acl(self, entity): - """Delete a single AccessControl entry from the Object revision. - - :param entity:str the name of the entity. - :rtype:NoneType - """ - entity = testbench_utils.canonical_entity_name(entity) - indexed = testbench_utils.index_acl(self.metadata.get("acl", [])) - indexed.pop(entity) - self.metadata["acl"] = list(indexed.values()) - - def get_acl(self, entity): - """Get a single AccessControl entry from the Object revision. - - :param entity:str the name of the entity. - :return: with the contents of the ObjectAccessControl. 
- :rtype:dict - """ - entity = testbench_utils.canonical_entity_name(entity) - for acl in self.metadata.get("acl", []): - if acl.get("entity", "") == entity: - return acl - raise error_response.ErrorResponse( - "Entity %s not found in object %s" % (entity, self.name) - ) - - def update_acl(self, entity, role): - """Update a single AccessControl entry in this Object revision. - - :param entity:str the name of the entity. - :param role:str the new role for the entity. - :return: with the contents of the ObjectAccessControl. - :rtype: dict - """ - return self.insert_acl(entity, role) - - def patch_acl(self, entity, request): - """Patch a single AccessControl entry in this Object revision. - - :param entity:str the name of the entity. - :param request:flask.Request the parameters for this request. - :return: with the contents of the ObjectAccessControl. - :rtype: dict - """ - acl = self.get_acl(entity) - payload = json.loads(request.data) - request_entity = payload.get("entity") - if request_entity is not None and request_entity != entity: - raise error_response.ErrorResponse( - "Entity mismatch in ObjectAccessControls: patch, expected=%s, got=%s" - % (entity, request_entity) - ) - etag_match = request.headers.get("if-match") - if etag_match is not None and etag_match != acl.get("etag"): - raise error_response.ErrorResponse("Precondition Failed", status_code=412) - etag_none_match = request.headers.get("if-none-match") - if etag_none_match is not None and etag_none_match != acl.get("etag"): - raise error_response.ErrorResponse("Precondition Failed", status_code=412) - role = payload.get("role") - if role is None: - raise error_response.ErrorResponse("Missing role value") - return self.insert_acl(entity, role) - - def x_goog_hash_header(self): - """Return the value for the x-goog-hash header.""" - hashes = { - "md5": self.metadata.get("md5Hash", ""), - "crc32c": self.metadata.get("crc32c", ""), - } - hashes = ["%s=%s" % (key, val) for key, val in hashes.items() if val] 
- return ",".join(hashes) - - -class GcsObject(object): - """Represent a GCS Object, including all its revisions.""" - - def __init__(self, bucket_name, name): - """Initialize a fake GCS Blob. - - :param bucket_name:str the bucket that will contain the new object. - :param name:str the name of the new object. - """ - self.bucket_name = bucket_name - self.name = name - # A counter to create new generation numbers for the object revisions. - # Note that 0 is an invalid generation number. The application can use - # ifGenerationMatch=0 as a pre-condition that means "object does not - # exist". - self.generation_generator = 0 - self.current_generation = None - self.revisions = {} - self.rewrite_token_generator = 0 - self.rewrite_operations = {} - - def get_revision(self, request, version_field_name="generation"): - """Get the information about a particular object revision or raise. - - :param request:flask.Request the contents of the http request. - :param version_field_name:str the name of the generation - parameter, typically 'generation', but sometimes 'sourceGeneration'. - :return: the object revision. - :rtype: GcsObjectVersion - :raises:ErrorResponse if the request contains an invalid generation - number. - """ - generation = request.args.get(version_field_name) - if generation is None: - return self.get_latest() - version = self.revisions.get(generation) - if version is None: - raise error_response.ErrorResponse( - "Precondition Failed: generation %s not found" % generation - ) - return version - - def del_revision(self, request): - """Delete a version of a fake GCS Blob. - - :param request:flask.Request the contents of the HTTP request. - :return: True if the object entry in the Bucket should be deleted. 
- :rtype: bool - """ - generation = request.args.get("generation") or self.current_generation - if generation is None: - return True - self.revisions.pop(generation) - if len(self.revisions) == 0: - self.current_generation = None - return True - self.current_generation = sorted(self.revisions.keys())[-1] - return False - - @classmethod - def _remove_non_writable_keys(cls, metadata): - """Remove the keys from metadata (an update or patch) that are not - writable. - - Both `Objects: patch` and `Objects: update` either ignore non-writable - keys or return 400 if the key does not match the current value. In - the testbench we simply always ignore them, to make life easier. - - :param metadata:dict a dictionary representing a patch or - update to the metadata. - :return metadata but with only any non-writable keys removed. - :rtype: dict - """ - writeable_keys = { - "acl", - "cacheControl", - "contentDisposition", - "contentEncoding", - "contentLanguage", - "contentType", - "eventBasedHold", - "metadata", - "temporaryHold", - "storageClass", - "customTime", - } - # Cannot change `metadata` while we are iterating over it, so we make - # a copy - keys = [key for key in metadata.keys()] - for key in keys: - if key not in writeable_keys: - metadata.pop(key, None) - return metadata - - def update_revision(self, request): - """Update the metadata of particular object revision or raise. - - :param request:flask.Request - :return: the object revision updated revision. - :rtype: GcsObjectVersion - :raises:ErrorResponse if the request contains an invalid generation - number. 
- """ - generation = request.args.get("generation") - if generation is None: - version = self.get_latest() - else: - version = self.revisions.get(generation) - if version is None: - raise error_response.ErrorResponse( - "Precondition Failed: generation %s not found" % generation - ) - metadata = GcsObject._remove_non_writable_keys(json.loads(request.data)) - version.update_from_metadata(metadata) - return version - - def patch_revision(self, request): - """Patch the metadata of particular object revision or raise. - - :param request:flask.Request - :return: the object revision. - :rtype:GcsObjectRevision - :raises:ErrorResponse if the request contains an invalid generation - number. - """ - generation = request.args.get("generation") - - if generation is None: - version = self.get_latest() - else: - version = self.revisions.get(generation) - if version is None: - raise error_response.ErrorResponse( - "Precondition Failed: generation %s not found" % generation - ) - patch = GcsObject._remove_non_writable_keys(json.loads(request.data)) - patched = testbench_utils.json_api_patch( - version.metadata, patch, recurse_on={"metadata"} - ) - patched["metageneration"] = str(int(patched.get("metageneration", "0")) + 1) - version.metadata = patched - return version - - def get_revision_by_generation(self, generation): - """Get object revision by generation or None if not found. - - :param generation:int - :return: the object revision by generation or None. 
- :rtype:GcsObjectRevision - """ - return self.revisions.get(str(generation), None) - - def get_latest(self): - return self.revisions.get(self.current_generation, None) - - def check_preconditions_by_value( - self, - generation_match, - generation_not_match, - metageneration_match, - metageneration_not_match, - ): - """Verify that the given precondition values are met.""" - current_generation = self.current_generation or "0" - if generation_match is not None and generation_match != current_generation: - raise error_response.ErrorResponse("Precondition Failed", status_code=412) - # This object does not exist (yet), testing in this case is special. - if ( - generation_not_match is not None - and generation_not_match == current_generation - ): - raise error_response.ErrorResponse("Precondition Failed", status_code=412) - - if self.current_generation is None: - if metageneration_match is not None or metageneration_not_match is not None: - raise error_response.ErrorResponse( - "Precondition Failed", status_code=412 - ) - return - - current = self.revisions.get(current_generation) - if current is None: - raise error_response.ErrorResponse("Object not found", status_code=404) - metageneration = current.metadata.get("metageneration") - if ( - metageneration_not_match is not None - and metageneration_not_match == metageneration - ): - raise error_response.ErrorResponse("Precondition Failed", status_code=412) - if metageneration_match is not None and metageneration_match != metageneration: - raise error_response.ErrorResponse("Precondition Failed", status_code=412) - - def check_preconditions( - self, - request, - if_generation_match="ifGenerationMatch", - if_generation_not_match="ifGenerationNotMatch", - if_metageneration_match="ifMetagenerationMatch", - if_metageneration_not_match="ifMetagenerationNotMatch", - ): - """Verify that the preconditions in request are met. - - :param request:flask.Request the http request. 
- :param if_generation_match:str the name of the generation match - parameter name, typically 'ifGenerationMatch', but sometimes - 'ifSourceGenerationMatch'. - :param if_generation_not_match:str the name of the generation not-match - parameter name, typically 'ifGenerationNotMatch', but sometimes - 'ifSourceGenerationNotMatch'. - :param if_metageneration_match:str the name of the metageneration match - parameter name, typically 'ifMetagenerationMatch', but sometimes - 'ifSourceMetagenerationMatch'. - :param if_metageneration_not_match:str the name of the metageneration - not-match parameter name, typically 'ifMetagenerationNotMatch', but - sometimes 'ifSourceMetagenerationNotMatch'. - :rtype:NoneType - """ - generation_match = request.args.get(if_generation_match) - generation_not_match = request.args.get(if_generation_not_match) - metageneration_match = request.args.get(if_metageneration_match) - metageneration_not_match = request.args.get(if_metageneration_not_match) - self.check_preconditions_by_value( - generation_match, - generation_not_match, - metageneration_match, - metageneration_not_match, - ) - - def _insert_revision(self, revision): - """Insert a new revision that has been initialized and checked. - - :param revision: GcsObjectVersion the new revision to insert. - :rtype:NoneType - """ - update = {str(self.generation_generator): revision} - bucket = testbench_utils.lookup_bucket(self.bucket_name) - if not bucket.versioning_enabled(): - self.revisions = update - else: - self.revisions.update(update) - self.current_generation = str(self.generation_generator) - - def insert(self, gcs_url, request): - """Insert a new revision based on the give flask request. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :return: the newly created object version. 
- :rtype: GcsObjectVersion - """ - media = testbench_utils.extract_media(request) - self.generation_generator += 1 - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - media, - ) - meta = revision.metadata.setdefault("metadata", {}) - meta["x_testbench_upload"] = "simple" - self._insert_revision(revision) - return revision - - def insert_multipart(self, gcs_url, request, resource, media_headers, media_body): - """Insert a new revision based on the give flask request. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :param resource:dict JSON resource with object metadata. - :param media_headers:dict media headers in a multi-part upload. - :param media_body:str object data in a multi-part upload. - :return: the newly created object version. - :rtype: GcsObjectVersion - """ - # There are two ways to specify the content-type, the 'content-type' - # header and the resource['contentType'] field. They must be consistent, - # and the service generates an error when they are not. - if ( - resource.get("contentType") is not None - and media_headers.get("content-type") is not None - and resource.get("contentType") != media_headers.get("content-type") - ): - raise error_response.ErrorResponse( - ( - "Content-Type specified in the upload (%s) does not match" - + "contentType specified in the metadata (%s)." - ) - % (media_headers.get("content-type"), resource.get("contentType")), - status_code=400, - ) - # Set the contentType in the resource from the header. Note that if both - # are set they have the same value. 
- resource.setdefault("contentType", media_headers.get("content-type")) - self.generation_generator += 1 - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - media_body, - ) - meta = revision.metadata.setdefault("metadata", {}) - meta["x_testbench_upload"] = "multipart" - if "md5Hash" in resource: - # We should return `x_testbench_md5` only when the user enables - # `MD5Hash` computations. - meta["x_testbench_md5"] = resource.get("md5Hash") - meta["x_testbench_crc32c"] = resource.get("crc32c", "") - # Apply any overrides from the resource object part. - revision.update_from_metadata(resource) - self._insert_revision(revision) - return revision - - def insert_resumable(self, gcs_url, request, media, resource): - """Implement the final insert for a resumable upload. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :param media:str the media for the object. - :param resource:dict the metadata for the object. - :return: the newly created object version. - :rtype: GcsObjectVersion - """ - self.generation_generator += 1 - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - media, - ) - meta = revision.metadata.setdefault("metadata", {}) - meta["x_testbench_upload"] = "resumable" - meta["x_testbench_md5"] = resource.get("md5Hash", "") - meta["x_testbench_crc32c"] = resource.get("crc32c", "") - # Apply any overrides from the resource object part. - revision.update_from_metadata(resource) - self._insert_revision(revision) - return revision - - def insert_xml(self, gcs_url, request): - """Implement the insert operation using the XML API. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :return: the newly created object version. 
- :rtype: GcsObjectVersion - """ - media = testbench_utils.extract_media(request) - self.generation_generator += 1 - goog_hash = request.headers.get("x-goog-hash") - md5hash = None - crc32c = None - if goog_hash is not None: - for hash in goog_hash.split(","): - if hash.startswith("md5="): - md5hash = hash[4:] - if hash.startswith("crc32c="): - crc32c = hash[7:] - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - media, - ) - meta = revision.metadata.setdefault("metadata", {}) - meta["x_testbench_upload"] = "xml" - if md5hash is not None: - meta["x_testbench_md5"] = md5hash - revision.update_from_metadata({"md5Hash": md5hash}) - if crc32c is not None: - meta["x_testbench_crc32c"] = crc32c - revision.update_from_metadata({"crc32c": crc32c}) - self._insert_revision(revision) - return revision - - def copy_from(self, gcs_url, request, source_revision): - """Insert a new revision based on the give flask request. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :param source_revision:GcsObjectVersion the source object version to - copy from. - :return: the newly created object version. - :rtype: GcsObjectVersion - """ - self.generation_generator += 1 - source_revision.validate_encryption_for_read(request) - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - source_revision.media, - ) - revision.reset_predefined_acl(request.args.get("destinationPredefinedAcl")) - metadata = json.loads(request.data) - revision.update_from_metadata(metadata) - self._insert_revision(revision) - return revision - - def compose_from(self, gcs_url, request, composed_media): - """Compose a new revision based on the give flask request. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. 
- :param composed_media:str contents of the composed object - :return: the newly created object version. - :rtype: GcsObjectVersion - """ - self.generation_generator += 1 - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - composed_media, - ) - revision.reset_predefined_acl(request.args.get("destinationPredefinedAcl")) - payload = json.loads(request.data) - if payload.get("destination") is not None: - revision.update_from_metadata(payload.get("destination")) - # The server often discards the MD5 Hash when composing objects, we can - # easily maintain them in the testbench, but dropping them helps us - # detect bugs sooner. - revision.metadata.pop("md5Hash") - self._insert_revision(revision) - return revision - - @classmethod - def rewrite_fixed_args(cls): - """The arguments that should not change between requests for the same - rewrite operation.""" - return [ - "destinationKmsKeyName", - "destinationPredefinedAcl", - "ifGenerationMatch", - "ifGenerationNotMatch", - "ifMetagenerationMatch", - "ifMetagenerationNotMatch", - "ifSourceGenerationMatch", - "ifSourceGenerationNotMatch", - "ifSourceMetagenerationMatch", - "ifSourceMetagenerationNotMatch", - "maxBytesRewrittenPerCall", - "projection", - "sourceGeneration", - "userProject", - ] - - @classmethod - def capture_rewrite_operation_arguments( - cls, request, destination_bucket, destination_object - ): - """Captures the arguments used to validate related rewrite calls. 
- - :rtype:dict - """ - original_arguments = {} - for arg in GcsObject.rewrite_fixed_args(): - original_arguments[arg] = request.args.get(arg) - original_arguments.update( - { - "destination_bucket": destination_bucket, - "destination_object": destination_object, - } - ) - return original_arguments - - @classmethod - def make_rewrite_token( - cls, operation, destination_bucket, destination_object, generation - ): - """Create a new rewrite token for the given operation.""" - return base64.b64encode( - bytearray( - "/".join( - [ - str(operation.get("id")), - destination_bucket, - destination_object, - str(generation), - str(operation.get("bytes_rewritten")), - ] - ), - "utf-8", - ) - ).decode("utf-8") - - def make_rewrite_operation(self, request, destination_bucket, destination_object): - """Create a new rewrite token for `Objects: rewrite`.""" - generation = request.args.get("sourceGeneration") - if generation is None: - generation = str(self.generation_generator) - else: - generation = generation - - self.rewrite_token_generator = self.rewrite_token_generator + 1 - body = json.loads(request.data) - original_arguments = self.capture_rewrite_operation_arguments( - request, destination_object, destination_object - ) - operation = { - "id": self.rewrite_token_generator, - "original_arguments": original_arguments, - "actual_generation": generation, - "bytes_rewritten": 0, - "body": body, - } - token = GcsObject.make_rewrite_token( - operation, destination_bucket, destination_object, generation - ) - return token, operation - - def rewrite_finish(self, gcs_url, request, body, source): - """Complete a rewrite from `source` into this object. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :param body:dict the HTTP payload, parsed via json.loads() - :param source:GcsObjectVersion the source object version. - :return: the newly created object version. 
- :rtype: GcsObjectVersion - """ - media = source.media - self.check_preconditions(request) - self.generation_generator += 1 - revision = GcsObjectVersion( - gcs_url, - self.bucket_name, - self.name, - self.generation_generator, - request, - media, - ) - revision.update_from_metadata(body) - self._insert_revision(revision) - return revision - - def rewrite_step(self, gcs_url, request, destination_bucket, destination_object): - """Execute an iteration of `Objects: rewrite. - - Objects: rewrite may need to be called multiple times before it - succeeds. Only objects in the same location, with the same encryption, - are guaranteed to complete in a single request. - - The implementation simulates some, but not all, the behaviors of the - server, in particular, only rewrites within the same bucket and smaller - than 1MiB complete immediately. - - :param gcs_url:str the root URL for the fake GCS service. - :param request:flask.Request the contents of the HTTP request. - :param destination_bucket:str where will the object be placed after the - rewrite operation completes. - :param destination_object:str the name of the object when the rewrite - operation completes. - :return: a dictionary prepared for JSON encoding of a - `Objects: rewrite` response. - :rtype:dict - """ - body = json.loads(request.data) - rewrite_token = request.args.get("rewriteToken") - if rewrite_token is not None and rewrite_token != "": - # Note that we remove the rewrite operation, not just look it up. - # That way if the operation completes in this call, and/or fails, - # it is already removed. We need to insert it with a new token - # anyway, so this makes sense. 
- rewrite = self.rewrite_operations.pop(rewrite_token, None) - if rewrite is None: - raise error_response.ErrorResponse( - "Invalid or expired token in rewrite", status_code=410 - ) - else: - rewrite_token, rewrite = self.make_rewrite_operation( - request, destination_bucket, destination_bucket - ) - - # Compare the difference to the original arguments, on the first call - # this is a waste, but the code is easier to follow. - current_arguments = self.capture_rewrite_operation_arguments( - request, destination_bucket, destination_object - ) - diff = set(current_arguments) ^ set(rewrite.get("original_arguments")) - if len(diff) != 0: - raise error_response.ErrorResponse( - "Mismatched arguments to rewrite", status_code=412 - ) - - # This will raise if the version is deleted while the operation is in - # progress. - source = self.get_revision_by_generation(rewrite.get("actual_generation")) - source.validate_encryption_for_read( - request, prefix="x-goog-copy-source-encryption" - ) - bytes_rewritten = rewrite.get("bytes_rewritten") - bytes_rewritten += 1024 * 1024 - result = {"kind": "storage#rewriteResponse", "objectSize": len(source.media)} - if bytes_rewritten >= len(source.media): - bytes_rewritten = len(source.media) - rewrite["bytes_rewritten"] = bytes_rewritten - # Success, the operation completed. 
Return the new object: - object_path, destination = testbench_utils.get_object( - destination_bucket, - destination_object, - GcsObject(destination_bucket, destination_object), - ) - revision = destination.rewrite_finish(gcs_url, request, body, source) - testbench_utils.insert_object(object_path, destination) - result["done"] = True - result["resource"] = revision.metadata - rewrite_token = "" - else: - rewrite["bytes_rewritten"] = bytes_rewritten - rewrite_token = GcsObject.make_rewrite_token( - rewrite, destination_bucket, destination_object, source.generation - ) - self.rewrite_operations[rewrite_token] = rewrite - result["done"] = False - - result.update( - {"totalBytesRewritten": bytes_rewritten, "rewriteToken": rewrite_token} - ) - return result diff --git a/google/cloud/storage/testbench/gcs_project.py b/google/cloud/storage/testbench/gcs_project.py deleted file mode 100644 index c60d16a204bdc..0000000000000 --- a/google/cloud/storage/testbench/gcs_project.py +++ /dev/null @@ -1,315 +0,0 @@ -#!/usr/bin/env python -# Copyright 2019 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-"""Implement a class to simulate projects (service accounts and HMAC keys).""" - -import base64 -import error_response -import flask -import json -import random -import testbench_utils -import time - - -class ServiceAccount(object): - """Represent a service account and its HMAC keys.""" - - key_id_generator = 20000 - - @classmethod - def next_key_id(cls): - cls.key_id_generator += 1 - return "key-id-%d" % cls.key_id_generator - - def __init__(self, email): - self.email = email - self.keys = {} - - def insert_key(self, project_id): - """Insert a new HMAC key to the service account.""" - key_id = ServiceAccount.next_key_id() - secret = "".join( - [random.choice("abcdefghijklmnopqrstuvwxyz0123456789") for _ in range(40)] - ) - now = time.gmtime(time.time()) - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - return self.keys.setdefault( - key_id, - { - "kind": "storage#hmacKeyCreate", - "secret": base64.b64encode(bytearray(secret, "utf-8")).decode("utf-8"), - "generator": 1, - "metadata": { - "accessId": "%s:%s" % (self.email, key_id), - "etag": base64.b64encode(bytearray("%d" % 1, "utf-8")).decode( - "utf-8" - ), - "id": key_id, - "kind": "storage#hmacKey", - "projectId": project_id, - "serviceAccountEmail": self.email, - "state": "ACTIVE", - "timeCreated": timestamp, - "updated": timestamp, - }, - }, - ) - - def key_items(self): - """Return the keys in this service account as a list of JSON objects.""" - return [k.get("metadata") for k in self.keys.values()] - - def delete_key(self, key_id): - """Delete an existing HMAC key from the service account.""" - key = self.keys.get(key_id) - if key is None: - raise error_response.ErrorResponse( - "Cannot find key for key %s" % key_id, status_code=404 - ) - resource = key.get("metadata") - if resource is None: - raise error_response.ErrorResponse( - "Missing resource for HMAC key %s" % key_id, status_code=500 - ) - if resource.get("state") == "ACTIVE": - raise error_response.ErrorResponse( - "Cannot delete ACTIVE key 
%s" % key_id, status_code=400 - ) - resource["state"] = "DELETED" - self.keys.pop(key_id) - return resource - - def get_key(self, key_id): - """Get an existing HMAC key from the service account.""" - key = self.keys.get(key_id) - if key is None: - raise error_response.ErrorResponse( - "Cannot find key for key %s" % key_id, status_code=404 - ) - metadata = key.get("metadata") - if metadata is None: - raise error_response.ErrorResponse( - "Missing resource for HMAC key %s" % key_id, status_code=500 - ) - return metadata - - def _check_etag(self, key_resource, etag, where): - """Verify that ETag values match the current ETag.""" - expected = key_resource.get("etag") - if etag is None or etag == expected: - return - raise error_response.ErrorResponse( - "Mismatched ETag for `HmacKeys: update` in %s expected %s, got %s" - % (where, expected, etag), - status_code=400, - ) - - def update_key(self, key_id, payload): - """Get an existing HMAC key from the service account.""" - key = self.keys.get(key_id) - if key is None: - raise error_response.ErrorResponse( - "Cannot find key for key %s" % key_id, status_code=404 - ) - metadata = key.get("metadata") - if metadata is None: - raise error_response.ErrorResponse( - "Missing metadata for HMAC key %s" % key_id, status_code=500 - ) - self._check_etag(metadata, payload.get("etag"), "payload") - self._check_etag(metadata, flask.request.headers.get("if-match-etag"), "header") - - state = payload.get("state") - if state not in ("ACTIVE", "INACTIVE"): - raise error_response.ErrorResponse( - "Invalid state `HmacKeys: update` request %s" % key_id, status_code=400 - ) - if metadata.get("state") == "DELETED": - raise error_response.ErrorResponse( - "Cannot restore DELETED key in `HmacKeys: update` request %s" % key_id, - status_code=400, - ) - key["generator"] += 1 - metadata["state"] = state - metadata["etag"] = base64.b64encode( - bytearray("%d" % key["generator"], "utf-8") - ).decode("utf-8") - now = time.gmtime(time.time()) - 
metadata["updated"] = time.strftime("%Y-%m-%dT%H:%M:%SZ", now) - return metadata - - -class GcsProject(object): - """Represent a GCS project.""" - - project_number_generator = 100000 - - @classmethod - def next_project_number(cls): - cls.project_number_generator += 1 - return cls.project_number_generator - - def __init__(self, project_id): - self.project_id = project_id - self.project_number = GcsProject.next_project_number() - self.service_accounts = {} - - def service_account_email(self): - """Return the GCS service account email for this project.""" - username = "service-%d" % self.project_number - domain = "gs-project-accounts.iam.gserviceaccount.com" - return "%s@%s" % (username, domain) - - def insert_hmac_key(self, service_account): - """Insert a new HMAC key (or an error).""" - sa = self.service_accounts.setdefault( - service_account, ServiceAccount(service_account) - ) - return sa.insert_key(self.project_id) - - def service_account(self, service_account_email): - """Return a ServiceAccount object given its email.""" - return self.service_accounts.get(service_account_email) - - def delete_hmac_key(self, access_id): - """Remove a key from the project.""" - (service_account, key_id) = access_id.split(":", 2) - sa = self.service_accounts.get(service_account) - if sa is None: - raise error_response.ErrorResponse( - "Cannot find service account for key=%s" % access_id, status_code=404 - ) - return sa.delete_key(key_id) - - def get_hmac_key(self, access_id): - """Get an existing key in the project.""" - (service_account, key_id) = access_id.split(":", 2) - sa = self.service_accounts.get(service_account) - if sa is None: - raise error_response.ErrorResponse( - "Cannot find service account for key=%s" % access_id, status_code=404 - ) - return sa.get_key(key_id) - - def update_hmac_key(self, access_id, payload): - """Update an existing key in the project.""" - (service_account, key_id) = access_id.split(":", 2) - sa = self.service_accounts.get(service_account) - if 
sa is None: - raise error_response.ErrorResponse( - "Cannot find service account for key=%s" % access_id, status_code=404 - ) - return sa.update_key(key_id, payload) - - -PROJECTS_HANDLER_PATH = "/storage/v1/projects" -projects = flask.Flask(__name__) -projects.debug = True - -VALID_PROJECTS = {} - - -def get_project(project_id): - """Find a project and return the GcsProject object.""" - # Dynamically create the projects. The GCS testbench does not have functions - # to create projects, nor do we want to create such functions. The point is - # to test the GCS client library, not the IAM client library. - return VALID_PROJECTS.setdefault(project_id, GcsProject(project_id)) - - -@projects.errorhandler(error_response.ErrorResponse) -def handle_error(error): - return error.as_response() - - -@projects.route("//serviceAccount") -def projects_get(project_id): - """Implement the `Projects.serviceAccount: get` API.""" - project = get_project(project_id) - email = project.service_account_email() - return testbench_utils.filtered_response( - flask.request, {"kind": "storage#serviceAccount", "email_address": email} - ) - - -@projects.route("//hmacKeys", methods=["POST"]) -def hmac_keys_insert(project_id): - """Implement the `HmacKeys: insert` API.""" - project = get_project(project_id) - service_account = flask.request.args.get("serviceAccountEmail") - if service_account is None: - raise error_response.ErrorResponse( - "serviceAccount is a required parameter", status_code=400 - ) - return testbench_utils.filtered_response( - flask.request, project.insert_hmac_key(service_account) - ) - - -@projects.route("//hmacKeys") -def hmac_keys_list(project_id): - """Implement the 'HmacKeys: list' API: return the HMAC keys in a project.""" - # Lookup the bucket, if this fails the bucket does not exist, and this - # function should return an error. 
- project = get_project(project_id) - result = {"kind": "storage#hmacKeysMetadata", "next_page_token": "", "items": []} - - state_filter = lambda x: x.get("state") != "DELETED" - if flask.request.args.get("deleted") == "true": - state_filter = lambda x: True - - items = [] - if flask.request.args.get("serviceAccountEmail"): - sa = flask.request.args.get("serviceAccountEmail") - service_account = project.service_account(sa) - if service_account: - items = service_account.key_items() - else: - for sa in project.service_accounts.values(): - items.extend(sa.key_items()) - - result["items"] = [i for i in items if state_filter(i)] - return testbench_utils.filtered_response(flask.request, result) - - -@projects.route("//hmacKeys/", methods=["DELETE"]) -def hmac_keys_delete(project_id, access_id): - """Implement the `HmacKeys: delete` API.""" - project = get_project(project_id) - project.delete_hmac_key(access_id) - return testbench_utils.filtered_response(flask.request, {}) - - -@projects.route("//hmacKeys/") -def hmac_keys_get(project_id, access_id): - """Implement the `HmacKeys: delete` API.""" - project = get_project(project_id) - return testbench_utils.filtered_response( - flask.request, project.get_hmac_key(access_id) - ) - - -@projects.route("//hmacKeys/", methods=["PUT"]) -def hmac_keys_update(project_id, access_id): - """Implement the `HmacKeys: delete` API.""" - project = get_project(project_id) - payload = json.loads(flask.request.data) - return testbench_utils.filtered_response( - flask.request, project.update_hmac_key(access_id, payload) - ) - - -def get_projects_app(): - return PROJECTS_HANDLER_PATH, projects diff --git a/google/cloud/storage/testbench/requirements.txt b/google/cloud/storage/testbench/requirements.txt deleted file mode 100644 index ec0fc2d7a8c00..0000000000000 --- a/google/cloud/storage/testbench/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -crc32c==2.1 -flask==1.1.2 -httpbin==0.7.0 - diff --git a/google/cloud/storage/testbench/testbench.py 
b/google/cloud/storage/testbench/testbench.py deleted file mode 100644 index 10dc2d0e74398..0000000000000 --- a/google/cloud/storage/testbench/testbench.py +++ /dev/null @@ -1,968 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2018 Google Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""A test bench for the Google Cloud Storage C++ Client Library.""" - -import argparse -import error_response -import flask -import gcs_bucket -import gcs_iam -import gcs_object -import gcs_project -import httpbin -import json -import os -import re -import testbench_utils -import time -import sys -from werkzeug import serving -from werkzeug.middleware.dispatcher import DispatcherMiddleware - - -@httpbin.app.errorhandler(error_response.ErrorResponse) -def httpbin_error(error): - return error.as_response() - - -root = flask.Flask(__name__, subdomain_matching=True) -root.debug = True - - -@root.route("/") -def index(): - """Default handler for the test bench.""" - return "OK" - - -@root.route("/", subdomain="") -def root_get_object(bucket_name, object_name): - return xml_get_object(bucket_name, object_name) - - -@root.route("//", subdomain="") -def root_get_object_with_bucket(bucket_name, object_name): - return xml_get_object(bucket_name, object_name) - - -@root.route("/", subdomain="", methods=["PUT"]) -def root_put_object(bucket_name, object_name): - return xml_put_object(flask.request.host_url, bucket_name, object_name) - - -@root.route("//", subdomain="", methods=["PUT"]) -def 
root_put_object_with_bucket(bucket_name, object_name): - return xml_put_object(flask.request.host_url, bucket_name, object_name) - - -@root.errorhandler(error_response.ErrorResponse) -def root_error(error): - return error.as_response() - - -# Define the WSGI application to handle bucket requests. -GCS_HANDLER_PATH = "/storage/v1" -gcs = flask.Flask(__name__) -gcs.debug = True - - -def insert_magic_bucket(base_url): - if len(testbench_utils.all_buckets()) == 0: - bucket_name = os.environ.get( - "GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME", "test-bucket" - ) - bucket = gcs_bucket.GcsBucket(base_url, bucket_name) - # Enable versioning in the Bucket, the integration tests expect this to - # be the case, this brings the metageneration number to 2. - bucket.update_from_metadata({"versioning": {"enabled": True}}) - # Perform trivial updates that bring the metageneration to 4, the value - # expected by the integration tests. - bucket.update_from_metadata({}) - bucket.update_from_metadata({}) - testbench_utils.insert_bucket(bucket_name, bucket) - - -@gcs.route("/") -def gcs_index(): - """The default handler for GCS requests.""" - return "OK" - - -@gcs.errorhandler(error_response.ErrorResponse) -def gcs_error(error): - return error.as_response() - - -@gcs.route("/b") -def buckets_list(): - """Implement the 'Buckets: list' API: return the Buckets in a project.""" - base_url = flask.url_for("gcs_index", _external=True) - project = flask.request.args.get("project") - if project is None or project.endswith("-"): - raise error_response.ErrorResponse( - "Invalid or missing project id in `Buckets: list`" - ) - insert_magic_bucket(base_url) - result = {"next_page_token": "", "items": []} - for name, b in testbench_utils.all_buckets(): - result["items"].append(b.metadata) - return testbench_utils.filtered_response(flask.request, result) - - -@gcs.route("/b", methods=["POST"]) -def buckets_insert(): - """Implement the 'Buckets: insert' API: create a new Bucket.""" - base_url = 
flask.url_for("gcs_index", _external=True) - insert_magic_bucket(base_url) - payload = json.loads(flask.request.data) - bucket_name = payload.get("name") - if bucket_name is None: - raise error_response.ErrorResponse( - "Missing bucket name in `Buckets: insert`", status_code=412 - ) - if not testbench_utils.validate_bucket_name(bucket_name): - raise error_response.ErrorResponse("Invalid bucket name in `Buckets: insert`") - if testbench_utils.has_bucket(bucket_name): - raise error_response.ErrorResponse( - "Bucket %s already exists" % bucket_name, status_code=400 - ) - bucket = gcs_bucket.GcsBucket(base_url, bucket_name) - bucket.update_from_metadata(payload) - testbench_utils.insert_bucket(bucket_name, bucket) - return testbench_utils.filtered_response(flask.request, bucket.metadata) - - -@gcs.route("/b/", methods=["PUT"]) -def buckets_update(bucket_name): - """Implement the 'Buckets: update' API: update an existing Bucket.""" - base_url = flask.url_for("gcs_index", _external=True) - insert_magic_bucket(base_url) - payload = json.loads(flask.request.data) - name = payload.get("name") - if name is None: - raise error_response.ErrorResponse( - "Missing bucket name in `Buckets: update`", status_code=412 - ) - if name != bucket_name: - raise error_response.ErrorResponse( - "Mismatched bucket name parameter in `Buckets: update`", status_code=400 - ) - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - bucket.update_from_metadata(payload) - return testbench_utils.filtered_response(flask.request, bucket.metadata) - - -@gcs.route("/b/") -def buckets_get(bucket_name): - """Implement the 'Buckets: get' API: return the metadata for a bucket.""" - base_url = flask.url_for("gcs_index", _external=True) - insert_magic_bucket(base_url) - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - return testbench_utils.filtered_response(flask.request, bucket.metadata) - - -@gcs.route("/b/", 
methods=["DELETE"]) -def buckets_delete(bucket_name): - """Implement the 'Buckets: delete' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - testbench_utils.delete_bucket(bucket_name) - return testbench_utils.filtered_response(flask.request, {}) - - -@gcs.route("/b/", methods=["PATCH"]) -def buckets_patch(bucket_name): - """Implement the 'Buckets: patch' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - patch = json.loads(flask.request.data) - bucket.apply_patch(patch) - return testbench_utils.filtered_response(flask.request, bucket.metadata) - - -@gcs.route("/b//acl") -def bucket_acl_list(bucket_name): - """Implement the 'BucketAccessControls: list' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - result = {"items": bucket.metadata.get("acl", [])} - return testbench_utils.filtered_response(flask.request, result) - - -@gcs.route("/b//acl", methods=["POST"]) -def bucket_acl_create(bucket_name): - """Implement the 'BucketAccessControls: create' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - payload = json.loads(flask.request.data) - return testbench_utils.filtered_response( - flask.request, - bucket.insert_acl(payload.get("entity", ""), payload.get("role", "")), - ) - - -@gcs.route("/b//acl/", methods=["DELETE"]) -def bucket_acl_delete(bucket_name, entity): - """Implement the 'BucketAccessControls: delete' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - bucket.delete_acl(entity) - return testbench_utils.filtered_response(flask.request, {}) - - -@gcs.route("/b//acl/") -def bucket_acl_get(bucket_name, entity): - """Implement the 'BucketAccessControls: get' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - acl = 
bucket.get_acl(entity) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//acl/", methods=["PUT"]) -def bucket_acl_update(bucket_name, entity): - """Implement the 'BucketAccessControls: update' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - payload = json.loads(flask.request.data) - acl = bucket.update_acl(entity, payload.get("role", "")) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//acl/", methods=["PATCH"]) -def bucket_acl_patch(bucket_name, entity): - """Implement the 'BucketAccessControls: patch' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - payload = json.loads(flask.request.data) - acl = bucket.update_acl(entity, payload.get("role", "")) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//defaultObjectAcl") -def bucket_default_object_acl_list(bucket_name): - """Implement the 'BucketAccessControls: list' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - result = {"items": bucket.metadata.get("defaultObjectAcl", [])} - return testbench_utils.filtered_response(flask.request, result) - - -@gcs.route("/b//defaultObjectAcl", methods=["POST"]) -def bucket_default_object_acl_create(bucket_name): - """Implement the 'BucketAccessControls: create' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - payload = json.loads(flask.request.data) - return testbench_utils.filtered_response( - flask.request, - bucket.insert_default_object_acl( - payload.get("entity", ""), payload.get("role", "") - ), - ) - - -@gcs.route("/b//defaultObjectAcl/", methods=["DELETE"]) -def bucket_default_object_acl_delete(bucket_name, entity): - """Implement the 'BucketAccessControls: delete' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - 
bucket.check_preconditions(flask.request) - bucket.delete_default_object_acl(entity) - return testbench_utils.filtered_response(flask.request, {}) - - -@gcs.route("/b//defaultObjectAcl/") -def bucket_default_object_acl_get(bucket_name, entity): - """Implement the 'BucketAccessControls: get' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - acl = bucket.get_default_object_acl(entity) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//defaultObjectAcl/", methods=["PUT"]) -def bucket_default_object_acl_update(bucket_name, entity): - """Implement the 'DefaultObjectAccessControls: update' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - payload = json.loads(flask.request.data) - acl = bucket.update_default_object_acl(entity, payload.get("role", "")) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//defaultObjectAcl/", methods=["PATCH"]) -def bucket_default_object_acl_patch(bucket_name, entity): - """Implement the 'DefaultObjectAccessControls: patch' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - payload = json.loads(flask.request.data) - acl = bucket.update_default_object_acl(entity, payload.get("role", "")) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//notificationConfigs") -def bucket_notification_list(bucket_name): - """Implement the 'Notifications: list' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - return testbench_utils.filtered_response( - flask.request, - {"kind": "storage#notifications", "items": bucket.list_notifications()}, - ) - - -@gcs.route("/b//notificationConfigs", methods=["POST"]) -def bucket_notification_create(bucket_name): - """Implement the 'Notifications: insert' API.""" - bucket = 
testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - notification = bucket.insert_notification(flask.request) - return testbench_utils.filtered_response(flask.request, notification) - - -@gcs.route("/b//notificationConfigs/", methods=["DELETE"]) -def bucket_notification_delete(bucket_name, notification_id): - """Implement the 'Notifications: delete' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - bucket.delete_notification(notification_id) - return testbench_utils.filtered_response(flask.request, {}) - - -@gcs.route("/b//notificationConfigs/") -def bucket_notification_get(bucket_name, notification_id): - """Implement the 'Notifications: get' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - notification = bucket.get_notification(notification_id) - return testbench_utils.filtered_response(flask.request, notification) - - -@gcs.route("/b//iam") -def bucket_get_iam_policy(bucket_name): - """Implement the 'Buckets: getIamPolicy' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - return testbench_utils.filtered_response( - flask.request, bucket.get_iam_policy(flask.request) - ) - - -@gcs.route("/b//iam", methods=["PUT"]) -def bucket_set_iam_policy(bucket_name): - """Implement the 'Buckets: setIamPolicy' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - return testbench_utils.filtered_response( - flask.request, bucket.set_iam_policy(flask.request) - ) - - -@gcs.route("/b//iam/testPermissions") -def bucket_test_iam_permissions(bucket_name): - """Implement the 'Buckets: testIamPermissions' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.check_preconditions(flask.request) - return testbench_utils.filtered_response( - flask.request, bucket.test_iam_permissions(flask.request) - ) - - 
-@gcs.route("/b//lockRetentionPolicy", methods=["POST"]) -def bucket_lock_retention_policy(bucket_name): - """Implement the 'Buckets: lockRetentionPolicy' API.""" - bucket = testbench_utils.lookup_bucket(bucket_name) - bucket.lock_retention_policy(flask.request) - return testbench_utils.filtered_response(flask.request, bucket.metadata) - - -@gcs.route("/b//o") -def objects_list(bucket_name): - """Implement the 'Objects: list' API: return the objects in a bucket.""" - # Lookup the bucket, if this fails the bucket does not exist, and this - # function should return an error. - base_url = flask.url_for("gcs_index", _external=True) - insert_magic_bucket(base_url) - _ = testbench_utils.lookup_bucket(bucket_name) - result = {"next_page_token": "", "items": [], "prefixes:": []} - versions_parameter = flask.request.args.get("versions") - all_versions = versions_parameter is not None and bool(versions_parameter) - prefixes = set() - prefix = flask.request.args.get("prefix", "", type("")) - delimiter = flask.request.args.get("delimiter", "", type("")) - start_offset = flask.request.args.get("startOffset", "", type("")) - end_offset = flask.request.args.get("endOffset", "", type("")) - bucket_link = bucket_name + "/o/" - for name, o in testbench_utils.all_objects(): - if name.find(bucket_link + prefix) != 0: - continue - if o.get_latest() is None: - continue - # We assume `delimiter` has only one character. - if name[len(bucket_link) :] < start_offset: - continue - if end_offset != "" and name[len(bucket_link) :] >= end_offset: - continue - delimiter_index = name.find(delimiter, len(bucket_link + prefix)) - if delimiter != "" and delimiter_index > 0: - # We don't want to include `bucket_link` in the returned prefix. 
- prefixes.add(name[len(bucket_link) : delimiter_index + 1]) - continue - if all_versions: - for object_version in o.revisions.values(): - result["items"].append(object_version.metadata) - else: - result["items"].append(o.get_latest().metadata) - result["prefixes"] = list(prefixes) - return testbench_utils.filtered_response(flask.request, result) - - -@gcs.route( - "/b//o//copyTo/b//o/", - methods=["POST"], -) -def objects_copy(source_bucket, source_object, destination_bucket, destination_object): - """Implement the 'Objects: copy' API, copy an object.""" - object_path, blob = testbench_utils.lookup_object(source_bucket, source_object) - blob.check_preconditions( - flask.request, - if_generation_match="ifSourceGenerationMatch", - if_generation_not_match="ifSourceGenerationNotMatch", - if_metageneration_match="ifSourceMetagenerationMatch", - if_metageneration_not_match="ifSourceMetagenerationNotMatch", - ) - source_revision = blob.get_revision(flask.request, "sourceGeneration") - if source_revision is None: - raise error_response.ErrorResponse( - "Revision not found %s" % object_path, status_code=404 - ) - - destination_path, destination = testbench_utils.get_object( - destination_bucket, - destination_object, - gcs_object.GcsObject(destination_bucket, destination_object), - ) - base_url = flask.url_for("gcs_index", _external=True) - current_version = destination.copy_from(base_url, flask.request, source_revision) - testbench_utils.insert_object(destination_path, destination) - return testbench_utils.filtered_response(flask.request, current_version.metadata) - - -@gcs.route( - "/b//o//rewriteTo/b//o/", - methods=["POST"], -) -def objects_rewrite( - source_bucket, source_object, destination_bucket, destination_object -): - """Implement the 'Objects: rewrite' API.""" - base_url = flask.url_for("gcs_index", _external=True) - insert_magic_bucket(base_url) - object_path, blob = testbench_utils.lookup_object(source_bucket, source_object) - blob.check_preconditions( - 
flask.request, - if_generation_match="ifSourceGenerationMatch", - if_generation_not_match="ifSourceGenerationNotMatch", - if_metageneration_match="ifSourceMetagenerationMatch", - if_metageneration_not_match="ifSourceMetagenerationNotMatch", - ) - response = blob.rewrite_step( - base_url, flask.request, destination_bucket, destination_object - ) - return testbench_utils.filtered_response(flask.request, response) - - -def objects_get_common(bucket_name, object_name, revision): - # Respect the Range: header, if present. - range_header = flask.request.headers.get("range") - response_payload = revision.media - begin = 0 - end = len(response_payload) - if range_header is not None: - m = re.match("bytes=([0-9]+)-([0-9]+)", range_header) - if m: - begin = int(m.group(1)) - end = int(m.group(2)) - response_payload = response_payload[begin : end + 1] - m = re.match("bytes=([0-9]+)-$", range_header) - if m: - begin = int(m.group(1)) - response_payload = response_payload[begin:] - m = re.match("bytes=-([0-9]+)$", range_header) - if m: - last = int(m.group(1)) - response_payload = response_payload[-last:] - # Process custom headers to test error conditions. 
- instructions = flask.request.headers.get("x-goog-testbench-instructions") - if instructions == "return-broken-stream": - - def streamer(): - chunk_size = 64 * 1024 - for r in range(0, len(response_payload), chunk_size): - if r > 1024 * 1024: - print("\n\n###### EXIT to simulate crash\n") - sys.exit(1) - time.sleep(0.1) - chunk_end = min(r + chunk_size, len(response_payload)) - yield response_payload[r:chunk_end] - - length = len(response_payload) - content_range = "bytes %d-%d/%d" % (begin, end - 1, length) - headers = { - "Content-Range": content_range, - "Content-Length": length, - "x-goog-hash": revision.x_goog_hash_header(), - "x-goog-generation": revision.generation, - } - return flask.Response(streamer(), status=200, headers=headers) - - if instructions == "return-corrupted-data": - response_payload = testbench_utils.corrupt_media(response_payload) - - if instructions is not None and instructions.startswith(u"stall-always"): - length = len(response_payload) - content_range = "bytes %d-%d/%d" % (begin, end - 1, length) - - def streamer(): - chunk_size = 16 * 1024 - for r in range(begin, end, chunk_size): - chunk_end = min(r + chunk_size, end) - if r == begin: - time.sleep(10) - yield response_payload[r:chunk_end] - - headers = { - "Content-Range": content_range, - "x-goog-hash": revision.x_goog_hash_header(), - "x-goog-generation": revision.generation, - } - return flask.Response(streamer(), status=200, headers=headers) - - if instructions == "stall-at-256KiB" and begin == 0: - length = len(response_payload) - content_range = "bytes %d-%d/%d" % (begin, end - 1, length) - - def streamer(): - chunk_size = 16 * 1024 - for r in range(begin, end, chunk_size): - chunk_end = min(r + chunk_size, end) - if r == 256 * 1024: - time.sleep(10) - yield response_payload[r:chunk_end] - - headers = { - "Content-Range": content_range, - "x-goog-hash": revision.x_goog_hash_header(), - "x-goog-generation": revision.generation, - } - return flask.Response(streamer(), status=200, 
headers=headers) - - if instructions is not None and instructions.startswith(u"return-503-after-256K"): - length = len(response_payload) - headers = { - "Content-Range": "bytes %d-%d/%d" % (begin, end - 1, length), - "x-goog-hash": revision.x_goog_hash_header(), - "x-goog-generation": revision.generation, - } - if begin == 0: - - def streamer(): - chunk_size = 4 * 1024 - for r in range(0, len(response_payload), chunk_size): - if r >= 256 * 1024: - print("\n\n###### EXIT to simulate crash\n") - sys.exit(1) - time.sleep(0.01) - chunk_end = min(r + chunk_size, len(response_payload)) - yield response_payload[r:chunk_end] - - return flask.Response(streamer(), status=200, headers=headers) - if instructions.endswith(u"/retry-1"): - print("## Return error for retry 1") - return flask.Response("Service Unavailable", status=503) - if instructions.endswith(u"/retry-2"): - print("## Return error for retry 2") - return flask.Response("Service Unavailable", status=503) - print("## Return success for %s" % instructions) - return flask.Response(response_payload, status=200, headers=headers) - - response = flask.make_response(response_payload) - length = len(response_payload) - content_range = "bytes %d-%d/%d" % (begin, end - 1, length) - response.headers["Content-Range"] = content_range - response.headers["x-goog-hash"] = revision.x_goog_hash_header() - response.headers["x-goog-generation"] = revision.generation - return response - - -@gcs.route("/b//o/", methods=["DELETE"]) -def objects_delete(bucket_name, object_name): - """Implement the 'Objects: delete' API. 
Delete objects.""" - object_path, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - remove = blob.del_revision(flask.request) - if remove: - testbench_utils.delete_object(object_path) - return testbench_utils.filtered_response(flask.request, {}) - - -@gcs.route("/b//o/", methods=["PUT"]) -def objects_update(bucket_name, object_name): - """Implement the 'Objects: update' API: update an existing Object.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.update_revision(flask.request) - return json.dumps(revision.metadata) - - -@gcs.route("/b//o//compose", methods=["POST"]) -def objects_compose(bucket_name, object_name): - """Implement the 'Objects: compose' API: concatenate Objects.""" - payload = json.loads(flask.request.data) - source_objects = payload["sourceObjects"] - if source_objects is None: - raise error_response.ErrorResponse( - "You must provide at least one source component.", status_code=400 - ) - if len(source_objects) > 32: - raise error_response.ErrorResponse( - "The number of source components provided" - " (%d) exceeds the maximum (32)" % len(source_objects), - status_code=400, - ) - composed_media = b"" - for source_object in source_objects: - source_object_name = source_object.get("name") - if source_object_name is None: - raise error_response.ErrorResponse("Required.", status_code=400) - source_object_path, source_blob = testbench_utils.lookup_object( - bucket_name, source_object_name - ) - source_revision = source_blob.get_latest() - generation = source_object.get("generation") - if generation is not None: - source_revision = source_blob.get_revision_by_generation(generation) - if source_revision is None: - raise error_response.ErrorResponse( - "No such object: %s" % source_object_path, status_code=404 - ) - object_preconditions = source_object.get("objectPreconditions") - if object_preconditions is not None: - 
if_generation_match = object_preconditions.get("ifGenerationMatch") - source_blob.check_preconditions_by_value( - if_generation_match, None, None, None - ) - composed_media += source_revision.media - composed_object_path, composed_object = testbench_utils.get_object( - bucket_name, object_name, gcs_object.GcsObject(bucket_name, object_name) - ) - composed_object.check_preconditions(flask.request) - base_url = flask.url_for("gcs_index", _external=True) - current_version = composed_object.compose_from( - base_url, flask.request, composed_media - ) - testbench_utils.insert_object(composed_object_path, composed_object) - return testbench_utils.filtered_response(flask.request, current_version.metadata) - - -@gcs.route("/b//o/", methods=["PATCH"]) -def objects_patch(bucket_name, object_name): - """Implement the 'Objects: patch' API: update an existing Object.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.patch_revision(flask.request) - return json.dumps(revision.metadata) - - -@gcs.route("/b//o//acl") -def objects_acl_list(bucket_name, object_name): - """Implement the 'ObjectAccessControls: list' API.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - result = {"items": revision.metadata.get("acl", [])} - return testbench_utils.filtered_response(flask.request, result) - - -@gcs.route("/b//o//acl", methods=["POST"]) -def objects_acl_create(bucket_name, object_name): - """Implement the 'ObjectAccessControls: create' API.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - payload = json.loads(flask.request.data) - return testbench_utils.filtered_response( - flask.request, - revision.insert_acl(payload.get("entity", ""), payload.get("role", "")), - ) - - 
-@gcs.route("/b//o//acl/", methods=["DELETE"]) -def objects_acl_delete(bucket_name, object_name, entity): - """Implement the 'ObjectAccessControls: delete' API.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - revision.delete_acl(entity) - return testbench_utils.filtered_response(flask.request, {}) - - -@gcs.route("/b//o//acl/") -def objects_acl_get(bucket_name, object_name, entity): - """Implement the 'ObjectAccessControls: get' API.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - acl = revision.get_acl(entity) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//o//acl/", methods=["PUT"]) -def objects_acl_update(bucket_name, object_name, entity): - """Implement the 'ObjectAccessControls: update' API.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - payload = json.loads(flask.request.data) - acl = revision.update_acl(entity, payload.get("role", "")) - return testbench_utils.filtered_response(flask.request, acl) - - -@gcs.route("/b//o//acl/", methods=["PATCH"]) -def objects_acl_patch(bucket_name, object_name, entity): - """Implement the 'ObjectAccessControls: patch' API.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - acl = revision.patch_acl(entity, flask.request) - return testbench_utils.filtered_response(flask.request, acl) - - -# Define the WSGI application to handle bucket requests. 
-DOWNLOAD_HANDLER_PATH = "/download/storage/v1" -download = flask.Flask(__name__) -download.debug = True - - -@download.errorhandler(error_response.ErrorResponse) -def download_error(error): - return error.as_response() - - -@gcs.route("/b//o/") -@download.route("/b//o/") -def objects_get(bucket_name, object_name): - """Implement the 'Objects: get' API. Read objects or their metadata.""" - _, blob = testbench_utils.lookup_object(bucket_name, object_name) - blob.check_preconditions(flask.request) - revision = blob.get_revision(flask.request) - - media = flask.request.args.get("alt", None) - if media is None or media == "json": - return testbench_utils.filtered_response(flask.request, revision.metadata) - if media != "media": - raise error_response.ErrorResponse("Invalid alt=%s parameter" % media) - revision.validate_encryption_for_read(flask.request) - return objects_get_common(bucket_name, object_name, revision) - - -# Define the WSGI application to handle bucket requests. -UPLOAD_HANDLER_PATH = "/upload/storage/v1" -upload = flask.Flask(__name__) -upload.debug = True - - -@upload.errorhandler(error_response.ErrorResponse) -def upload_error(error): - return error.as_response() - - -@upload.route("/b//o", methods=["POST"]) -def objects_insert(bucket_name): - """Implement the 'Objects: insert' API. 
Insert a new GCS Object.""" - gcs_url = flask.url_for( - "objects_insert", bucket_name=bucket_name, _external=True - ).replace("/upload/", "/") - insert_magic_bucket(gcs_url) - - upload_type = flask.request.args.get("uploadType") - if upload_type is None: - raise error_response.ErrorResponse( - "uploadType not set in Objects: insert", status_code=400 - ) - if upload_type not in {"multipart", "media", "resumable"}: - raise error_response.ErrorResponse( - "testbench does not support %s uploadType" % upload_type, status_code=400 - ) - - if upload_type == "resumable": - bucket = testbench_utils.lookup_bucket(bucket_name) - upload_url = flask.url_for( - "objects_insert", bucket_name=bucket_name, _external=True - ) - return bucket.create_resumable_upload(upload_url, flask.request) - - object_path = None - blob = None - current_version = None - if upload_type == "media": - object_name = flask.request.args.get("name", None) - if object_name is None: - raise error_response.ErrorResponse( - "name not set in Objects: insert", status_code=412 - ) - object_path, blob = testbench_utils.get_object( - bucket_name, object_name, gcs_object.GcsObject(bucket_name, object_name) - ) - blob.check_preconditions(flask.request) - current_version = blob.insert(gcs_url, flask.request) - else: - resource, media_headers, media_body = testbench_utils.parse_multi_part( - flask.request - ) - object_name = flask.request.args.get("name", resource.get("name", None)) - if object_name is None: - raise error_response.ErrorResponse( - "name not set in Objects: insert", status_code=412 - ) - object_path, blob = testbench_utils.get_object( - bucket_name, object_name, gcs_object.GcsObject(bucket_name, object_name) - ) - blob.check_preconditions(flask.request) - current_version = blob.insert_multipart( - gcs_url, flask.request, resource, media_headers, media_body - ) - testbench_utils.insert_object(object_path, blob) - return testbench_utils.filtered_response(flask.request, current_version.metadata) - - 
-@upload.route("/b//o", methods=["PUT"]) -def resumable_upload_chunk(bucket_name): - """Receive a chunk for a resumable upload.""" - gcs_url = flask.url_for( - "objects_insert", bucket_name=bucket_name, _external=True - ).replace("/upload/", "/") - bucket = testbench_utils.lookup_bucket(bucket_name) - return bucket.receive_upload_chunk(gcs_url, flask.request) - - -@upload.route("/b//o", methods=["DELETE"]) -def delete_resumable_upload(bucket_name): - upload_type = flask.request.args.get("uploadType") - if upload_type != "resumable": - raise error_response.ErrorResponse( - "testbench can delete resumable uploadType only", status_code=400 - ) - upload_id = flask.request.args.get("upload_id") - if upload_id is None: - raise error_response.ErrorResponse( - "missing upload_id in delete_resumable_upload", status_code=400 - ) - bucket = testbench_utils.lookup_bucket(bucket_name) - if upload_id not in bucket.resumable_uploads: - raise error_response.ErrorResponse("upload_id does not exist", status_code=404) - bucket.resumable_uploads.pop(upload_id) - return testbench_utils.filtered_response(flask.request, {}) - - -def xml_put_object(gcs_url, bucket_name, object_name): - """Implement PUT for the XML API.""" - insert_magic_bucket(gcs_url) - object_path, blob = testbench_utils.get_object( - bucket_name, object_name, gcs_object.GcsObject(bucket_name, object_name) - ) - generation_match = flask.request.headers.get("x-goog-if-generation-match") - metageneration_match = flask.request.headers.get("x-goog-if-metageneration-match") - blob.check_preconditions_by_value( - generation_match, None, metageneration_match, None - ) - revision = blob.insert_xml(gcs_url, flask.request) - testbench_utils.insert_object(object_path, blob) - response = flask.make_response("") - response.headers["x-goog-hash"] = revision.x_goog_hash_header() - return response - - -def xml_get_object(bucket_name, object_name): - """Implement the 'Objects: insert' API. 
Insert a new GCS Object.""" - object_path, blob = testbench_utils.lookup_object(bucket_name, object_name) - if flask.request.args.get("acl") is not None: - raise error_response.ErrorResponse( - "ACL query not supported in XML API", status_code=500 - ) - if flask.request.args.get("encryption") is not None: - raise error_response.ErrorResponse( - "Encryption query not supported in XML API", status_code=500 - ) - generation_match = flask.request.headers.get("if-generation-match") - metageneration_match = flask.request.headers.get("if-metageneration-match") - blob.check_preconditions_by_value( - generation_match, None, metageneration_match, None - ) - revision = blob.get_revision(flask.request) - return objects_get_common(bucket_name, object_name, revision) - - -# Define the WSGI application to handle HMAC key requests -(PROJECTS_HANDLER_PATH, projects_app) = gcs_project.get_projects_app() - -# Define the WSGI application to handle IAM requests -(IAM_HANDLER_PATH, iam_app) = gcs_iam.get_iam_app() - -application = DispatcherMiddleware( - root, - { - "/httpbin": httpbin.app, - GCS_HANDLER_PATH: gcs, - UPLOAD_HANDLER_PATH: upload, - DOWNLOAD_HANDLER_PATH: download, - PROJECTS_HANDLER_PATH: projects_app, - IAM_HANDLER_PATH: iam_app, - }, -) - - -def main(): - """Parse the arguments and run the test bench application.""" - parser = argparse.ArgumentParser( - description="A testbench for the Google Cloud C++ Client Library" - ) - parser.add_argument("--host", default="localhost", help="The listening address") - parser.add_argument("--port", help="The listening port") - # By default we do not turn on the debugging. This typically runs inside a - # Docker image, with a uid that has not entry in /etc/passwd, and the - # werkzeug debugger crashes in that environment (as it should probably). 
- parser.add_argument( - "--debug", help="Use the WSGI debugger", default=False, action="store_true" - ) - arguments = parser.parse_args() - - root.config.update(SERVER_NAME=arguments.host) - - # Compose the different WSGI applications. - serving.run_simple( - arguments.host, - int(arguments.port), - application, - use_reloader=True, - use_debugger=arguments.debug, - use_evalex=True, - ) - - -if __name__ == "__main__": - main() diff --git a/google/cloud/storage/testbench/testbench_utils.py b/google/cloud/storage/testbench/testbench_utils.py deleted file mode 100644 index 3eb194bfb3029..0000000000000 --- a/google/cloud/storage/testbench/testbench_utils.py +++ /dev/null @@ -1,413 +0,0 @@ -#!/usr/bin/env python -# Copyright 2018 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""Standalone helpers for the Google Cloud Storage test bench.""" - -import base64 -import error_response -import hashlib -import json -import random -import re - - -def validate_bucket_name(bucket_name): - """Return True if bucket_name is a valid bucket name. - - Bucket naming requirements are described in: - - https://cloud.google.com/storage/docs/naming - - Note that this function does not verify domain bucket names: - - https://cloud.google.com/storage/docs/domain-name-verification - - :param bucket_name:str the name to validate. - :rtype: bool - """ - valid = True - if "." 
in bucket_name: - valid &= len(bucket_name) <= 222 - valid &= all([len(part) <= 63 for part in bucket_name.split(".")]) - else: - valid &= len(bucket_name) <= 63 - valid &= re.match("^[a-z0-9][a-z0-9._\\-]+[a-z0-9]$", bucket_name) is not None - valid &= not bucket_name.startswith("goog") - valid &= re.search("g[0o][0o]g[1l][e3]", bucket_name) is None - valid &= ( - re.match("^[0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}[.][0-9]{1,3}$", bucket_name) - is None - ) - return valid - - -def canonical_entity_name(entity): - """Convert entity names to their canonical form. - - Some entities (notably project--) have more than one name, for - example the project-owners- entities are called - project-owners- internally. This function - :param entity:str convert this entity to its canonical name. - :return: the name in canonical form. - :rtype:str - """ - if entity == "allUsers" or entity == "allAuthenticatedUsers": - return entity - if entity.startswith("project-owners-"): - entity = "project-owners-123456789" - if entity.startswith("project-editors-"): - entity = "project-editors-123456789" - if entity.startswith("project-viewers-"): - entity = "project-viewers-123456789" - return entity.lower() - - -def index_acl(acl): - """Return a ACL as a dictionary indexed by the 'entity' values of the ACL. - - We represent ACLs as lists of dictionaries, that makes it easy to convert - them to JSON objects. When changing them though, we need to make sure there - is a single element in the list for each `entity` value, so it is convenient - to convert the list to a dictionary (indexed by `entity`) of dictionaries. - This function performs that conversion. - - :param acl:list of dict - :return: the ACL indexed by the entity of each entry. - :rtype:dict - """ - # This can be expressed by a comprehension but turns out to be less - # readable in that form. 
- indexed = dict() - for e in acl: - indexed[e["entity"]] = e - return indexed - - -def filter_fields_from_response(fields, response): - """Format the response as a JSON string, using any filtering included in - the request. - - :param fields:str the value of the `fields` parameter in the original - request. - :param response:dict a dictionary to be formatted as a JSON string. - :return: the response formatted as a string. - :rtype:str - """ - if fields is None: - return json.dumps(response) - tmp = {} - # TODO(#1037) - support full filter expressions - for key in fields.split(","): - if key in response: - tmp[key] = response[key] - return json.dumps(tmp) - - -def filtered_response(request, response): - """Format the response as a JSON string, using any filtering included in - the request. - - :param request:flask.Request the original HTTP request. - :param response:dict a dictionary to be formatted as a JSON string. - :return: the response formatted as a string. - :rtype:str - """ - fields = request.args.get("fields") - return filter_fields_from_response(fields, response) - - -def raise_csek_error(code=400): - msg = "Missing a SHA256 hash of the encryption key, or it is not" - msg += " base64 encoded, or it does not match the encryption key." - link = "https://cloud.google.com/storage/docs/encryption#customer-supplied_encryption_keys" - error = { - "error": { - "errors": [ - { - "domain": "global", - "reason": "customerEncryptionKeySha256IsInvalid", - "message": msg, - "extendedHelp": link, - } - ], - "code": code, - "message": msg, - } - } - raise error_response.ErrorResponse(json.dumps(error), status_code=code) - - -def validate_customer_encryption_headers( - key_header_value, hash_header_value, algo_header_value -): - """Verify that the encryption headers are internally consistent. 
- - :param key_header_value: str the value of the x-goog-*-key header - :param hash_header_value: str the value of the x-goog-*-key-sha256 header - :param algo_header_value: str the value of the x-goog-*-key-algorithm header - :rtype: NoneType - """ - try: - if algo_header_value is None or algo_header_value != "AES256": - raise error_response.ErrorResponse( - "Invalid or missing algorithm %s for CSEK" % algo_header_value, - status_code=400, - ) - - key = base64.standard_b64decode(key_header_value) - if key is None or len(key) != 256 / 8: - raise_csek_error() - - h = hashlib.sha256() - h.update(key) - expected = base64.standard_b64encode(h.digest()).decode("utf-8") - if hash_header_value is None or expected != hash_header_value: - raise_csek_error() - except error_response.ErrorResponse: - # error_response.ErrorResponse indicates that the request was invalid, just pass - # that exception through. - raise - except Exception: - # Many of the functions above may raise, convert those to an - # error_response.ErrorResponse with the right format. - raise_csek_error() - - -def json_api_patch(original, patch, recurse_on=set({})): - """Patch a dictionary using the JSON API semantics. - - Patches are applied using the following algorithm: - - patch is a dictionary representing a JSON object. JSON `null` values are - represented by None). - - For fields that are not in `recursive_fields`: - - If patch contains {field: None} the field is erased from `original`. - - Otherwise `patch[field]` replaces `original[field]`. - - For fields that are in `recursive_fields`: - - If patch contains {field: None} the field is erased from `original`. - - If patch contains {field: {}} the field is left untouched in `original`, - note that if the field does not exist in original this means it is not - created. - - Otherwise patch[field] is treated as a patch and applied to - `original[field]`, potentially creating the new field. 
- - :param original:dict the dictionary to patch - :param patch:dict the patch to apply. Elements pointing to None are removed, - other elements are replaced. - :param recurse_on:set of strings, the names of fields for which the patch - is applied recursively. - :return: the updated dictionary - :rtype:dict - """ - tmp = original.copy() - for key, value in patch.items(): - if value is None: - tmp.pop(key, None) - elif key not in recurse_on: - tmp[key] = value - elif len(value) != 0: - tmp[key] = json_api_patch(original.get(key, {}), value) - return tmp - - -def extract_media(request): - """Extract the media from a flask Request. - - To avoid race conditions when using greenlets we cannot perform I/O in the - constructor of GcsObjectVersion, or in any of the operations that modify - the state of the service. Because sometimes the media is uploaded with - chunked encoding, we need to do I/O before finishing the GcsObjectVersion - creation. If we do this I/O after the GcsObjectVersion creation started, - the the state of the application may change due to other I/O. - - :param request:flask.Request the HTTP request. - :return: the full media of the request. - :rtype: str - """ - if request.environ.get("HTTP_TRANSFER_ENCODING", "") == "chunked": - return request.environ.get("wsgi.input").read() - return request.data - - -def corrupt_media(media): - """Return a randomly modified version of a string. - - :param media:bytes a string (typically some object media) to be modified. - :return: a string that is slightly different than media. - :rtype: str - """ - # Deal with the boundary condition. - if not media: - return bytearray(random.sample("abcdefghijklmnopqrstuvwxyz", 1), "utf-8") - return b"B" + media[1:] if media[0:1] == b"A" else b"A" + media[1:] - - -# Define the collection of Buckets indexed by -GCS_BUCKETS = dict() - - -def lookup_bucket(bucket_name): - """Lookup a bucket by name in the global collection. - - :param bucket_name:str the name of the Bucket. 
- :return: the bucket matching the name. - :rtype:GcsBucket - :raises:ErrorResponse if the bucket is not found. - """ - bucket = GCS_BUCKETS.get(bucket_name) - if bucket is None: - raise error_response.ErrorResponse( - "Bucket %s not found" % bucket_name, status_code=404 - ) - return bucket - - -def has_bucket(bucket_name): - """Return True if the bucket already exists in the global collection.""" - return GCS_BUCKETS.get(bucket_name) is not None - - -def insert_bucket(bucket_name, bucket): - """Insert (or replace) a new bucket into the global collection. - - :param bucket_name:str the name of the bucket. - :param bucket:GcsBucket the bucket to insert. - """ - GCS_BUCKETS[bucket_name] = bucket - - -def delete_bucket(bucket_name): - """Delete a bucket from the global collection.""" - GCS_BUCKETS.pop(bucket_name) - - -def all_buckets(): - """Return a key,value iterator for all the buckets in the global collection. - - :rtype:dict[str, GcsBucket] - """ - return GCS_BUCKETS.items() - - -# Define the collection of GcsObjects indexed by /o/ -GCS_OBJECTS = dict() - - -def lookup_object(bucket_name, object_name): - """Lookup an object by name in the global collection. - - :param bucket_name:str the name of the Bucket that contains the object. - :param object_name:str the name of the Object. - :return: tuple the object path and the object. - :rtype: (str,GcsObject) - :raises:ErrorResponse if the object is not found. - """ - object_path, gcs_object = get_object(bucket_name, object_name, None) - if gcs_object is None: - raise error_response.ErrorResponse( - "Object %s in %s not found" % (object_name, bucket_name), status_code=404 - ) - return object_path, gcs_object - - -def get_object(bucket_name, object_name, default_value): - """Find an object in the global collection, return a default value if not - found. - - :param bucket_name:str the name of the Bucket that contains the object. - :param object_name:str the name of the Object. 
- :param default_value:GcsObject the default value returned if the object is - not found. - :return: tuple the object path and the object. - :rtype: (str,GcsObject) - """ - object_path = bucket_name + "/o/" + object_name - return object_path, GCS_OBJECTS.get(object_path, default_value) - - -def insert_object(object_path, value): - """Insert an object to the global collection.""" - GCS_OBJECTS[object_path] = value - - -def delete_object(object_path): - """Delete an object from the global collection.""" - GCS_OBJECTS.pop(object_path) - - -def all_objects(): - """Return a key,value iterator for all the objects in the global collection. - - :rtype:dict[str, GcsBucket] - """ - return GCS_OBJECTS.items() - - -def parse_part(multipart_upload_part): - """Parse a portion of a multipart breaking out the headers and payload. - - :param multipart_upload_part:str a portion of the multipart upload body. - :return: a tuple with the headers and the payload. - :rtype: (dict, str) - """ - headers = dict() - index = 0 - next_line = multipart_upload_part.find(b"\r\n", index) - while next_line != index: - header_line = multipart_upload_part[index:next_line] - key, value = header_line.split(b": ", 2) - # This does not work for repeated headers, but we do not expect - # those in the testbench. - headers[key.decode("utf-8")] = value.decode("utf-8") - index = next_line + 2 - next_line = multipart_upload_part.find(b"\r\n", index) - return headers, multipart_upload_part[next_line + 2 :] - - -def parse_multi_part(request): - """Parse a multi-part request - - :param request:flask.Request multipart request. - :return: a tuple with the resource, media_headers and the media_body. 
- :rtype: (dict, dict, str) - """ - content_type = request.headers.get("content-type") - if content_type is None or not content_type.startswith("multipart/related"): - raise error_response.ErrorResponse( - "Missing or invalid content-type header in multipart upload" - ) - _, _, boundary = content_type.partition("boundary=") - if boundary is None: - raise error_response.ErrorResponse( - "Missing boundary (%s) in content-type header in multipart upload" - % boundary - ) - - boundary = bytearray(boundary, "utf-8") - marker = b"--" + boundary + b"\r\n" - body = extract_media(request) - parts = body.split(marker) - # parts[0] is the empty string, `multipart` should start with the boundary - # parts[1] is the JSON resource object part, with some headers - resource_headers, resource_body = parse_part(parts[1]) - # parts[2] is the media, with some headers - media_headers, media_body = parse_part(parts[2]) - end = media_body.find(b"\r\n--" + boundary + b"--\r\n") - if end == -1: - raise error_response.ErrorResponse( - "Missing end marker (--%s--) in media body" % boundary - ) - media_body = media_body[:end] - resource = json.loads(resource_body) - - return resource, media_headers, media_body From 64c9343786f74566ccc467063c1c91aaf5f487f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 14:33:46 +0700 Subject: [PATCH 03/17] rename `CLOUD_STORAGE_TESTBENCH_ENDPOINT` to `CLOUD_STORAGE_EMULATOR_ENDPOINT` --- .../ci/run_integration_tests_emulator_bazel.sh | 2 +- google/cloud/storage/client_options.cc | 12 +++++------- google/cloud/storage/client_options.h | 4 ++-- google/cloud/storage/client_options_test.cc | 8 ++++---- google/cloud/storage/doc/storage-main.dox | 2 +- google/cloud/storage/emulator/README.md | 2 +- .../storage/examples/storage_examples_common.cc | 2 +- .../storage/examples/storage_examples_common_test.cc | 6 +++--- google/cloud/storage/internal/curl_client_test.cc | 4 ++-- 
.../storage/internal/grpc_client_failures_test.cc | 3 +-- .../storage/testing/storage_integration_test.cc | 2 +- .../tests/object_basic_crud_integration_test.cc | 4 ++-- .../storage/tests/object_media_integration_test.cc | 12 ++++++------ google/cloud/storage/tools/run_testbench_utils.sh | 4 ++-- 14 files changed, 32 insertions(+), 35 deletions(-) diff --git a/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh b/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh index ef4a86db624ff..9d80d693b0307 100755 --- a/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh +++ b/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh @@ -79,7 +79,7 @@ done # are missing too. EMULATOR_SHA=$(git ls-files google/cloud/storage/emulator | sort | cat | sha256sum) testbench_args=( - "--test_env=CLOUD_STORAGE_TESTBENCH_ENDPOINT=${CLOUD_STORAGE_TESTBENCH_ENDPOINT}" + "--test_env=CLOUD_STORAGE_EMULATOR_ENDPOINT=${CLOUD_STORAGE_EMULATOR_ENDPOINT}" "--test_env=CLOUD_STORAGE_GRPC_ENDPOINT=${CLOUD_STORAGE_GRPC_ENDPOINT}" "--test_env=HTTPBIN_ENDPOINT=${HTTPBIN_ENDPOINT}" "--test_env=GOOGLE_CLOUD_CPP_STORAGE_TEST_HMAC_SERVICE_ACCOUNT=fake-service-account-sign@example.com" diff --git a/google/cloud/storage/client_options.cc b/google/cloud/storage/client_options.cc index b3c83de0b79c4..8b0a0a12f7fc2 100644 --- a/google/cloud/storage/client_options.cc +++ b/google/cloud/storage/client_options.cc @@ -33,23 +33,21 @@ using ::google::cloud::internal::GetEnv; namespace internal { std::string JsonEndpoint(ClientOptions const& options) { - return GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") - .value_or(options.endpoint_) + + return GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT").value_or(options.endpoint_) + "/storage/" + options.version(); } std::string JsonUploadEndpoint(ClientOptions const& options) { - return GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") - .value_or(options.endpoint_) + + return GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT").value_or(options.endpoint_) 
+ "/upload/storage/" + options.version(); } std::string XmlEndpoint(ClientOptions const& options) { - return GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT").value_or(options.endpoint_); + return GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT").value_or(options.endpoint_); } std::string IamEndpoint(ClientOptions const& options) { - auto testbench = GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT"); + auto testbench = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); if (testbench) return *testbench + "/iamapi"; return options.iam_endpoint(); } @@ -126,7 +124,7 @@ ClientOptions::ClientOptions(std::shared_ptr credentials, download_stall_timeout_( GOOGLE_CLOUD_CPP_STORAGE_DEFAULT_DOWNLOAD_STALL_TIMEOUT), channel_options_(std::move(channel_options)) { - auto emulator = GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT"); + auto emulator = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); if (emulator.has_value()) { endpoint_ = *emulator; iam_endpoint_ = *emulator + "/iamapi"; diff --git a/google/cloud/storage/client_options.h b/google/cloud/storage/client_options.h index 89a05bd4ea8e8..b69c8b70f523a 100644 --- a/google/cloud/storage/client_options.h +++ b/google/cloud/storage/client_options.h @@ -60,7 +60,7 @@ class ChannelOptions { * * By default, several environment variables are read to configure the client: * - * - `CLOUD_STORAGE_TESTBENCH_ENDPOINT`: if set, use this http endpoint to + * - `CLOUD_STORAGE_EMULATOR_ENDPOINT`: if set, use this http endpoint to * make all http requests instead of the production GCS service. Also, * if set, the `CreateDefaultClientOptions()` function will use an * `AnonymousCredentials` object instead of loading Application Default @@ -82,7 +82,7 @@ class ClientOptions { * Creates a `ClientOptions` with Google Application Default %Credentials. * * If Application Default %Credentials could not be loaded, this returns a - * `Status` with failure details. If the `CLOUD_STORAGE_TESTBENCH_ENDPOINT` + * `Status` with failure details. 
If the `CLOUD_STORAGE_EMULATOR_ENDPOINT` * environment variable is set, this function instead uses an * `AnonymousCredentials` to configure the client. */ diff --git a/google/cloud/storage/client_options_test.cc b/google/cloud/storage/client_options_test.cc index 1231da0bd9634..0045b8543cec2 100644 --- a/google/cloud/storage/client_options_test.cc +++ b/google/cloud/storage/client_options_test.cc @@ -31,7 +31,7 @@ class ClientOptionsTest : public ::testing::Test { public: ClientOptionsTest() : enable_tracing_("CLOUD_STORAGE_ENABLE_TRACING", {}), - endpoint_("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}), + endpoint_("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}), generator_(std::random_device{}()) {} std::string CreateRandomFileName() { @@ -124,7 +124,7 @@ TEST_F(ClientOptionsTest, EnableHttp) { } TEST_F(ClientOptionsTest, EndpointsDefault) { - testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_TESTBENCH_ENDPOINT", + testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); ClientOptions options(oauth2::CreateAnonymousCredentials()); EXPECT_EQ("https://storage.googleapis.com", options.endpoint()); @@ -137,7 +137,7 @@ TEST_F(ClientOptionsTest, EndpointsDefault) { } TEST_F(ClientOptionsTest, EndpointsOverride) { - testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_TESTBENCH_ENDPOINT", + testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); ClientOptions options(oauth2::CreateAnonymousCredentials()); options.set_endpoint("http://127.0.0.1.nip.io:1234"); @@ -152,7 +152,7 @@ TEST_F(ClientOptionsTest, EndpointsOverride) { } TEST_F(ClientOptionsTest, EndpointsTestBench) { - testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_TESTBENCH_ENDPOINT", + testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:1234"); ClientOptions options(oauth2::CreateAnonymousCredentials()); EXPECT_EQ("http://localhost:1234", options.endpoint()); diff --git a/google/cloud/storage/doc/storage-main.dox 
b/google/cloud/storage/doc/storage-main.dox index cdcae33034a8f..af54837d3f362 100644 --- a/google/cloud/storage/doc/storage-main.dox +++ b/google/cloud/storage/doc/storage-main.dox @@ -58,7 +58,7 @@ which should give you a taste of the Cloud Storage C++ client library API. - `CLOUD_STORAGE_ENABLE_TRACING=raw-client,http` enables all logging. -- `CLOUD_STORAGE_TESTBENCH_ENDPOINT=...` override the default endpoint used by +- `CLOUD_STORAGE_EMULATOR_ENDPOINT=...` override the default endpoint used by the library, intended for testing only. ### Experimental diff --git a/google/cloud/storage/emulator/README.md b/google/cloud/storage/emulator/README.md index 4890dceb0fcb9..845f912f48cab 100644 --- a/google/cloud/storage/emulator/README.md +++ b/google/cloud/storage/emulator/README.md @@ -32,7 +32,7 @@ curl "http://localhost:9000/start_grpc?port=8000" For `google-cloud-cpp`, please set the following enviroment variable ```bash -CLOUD_STORAGE_TESTBENCH_ENDPOINT=http://localhost:9000 # For JSON and XML API +CLOUD_STORAGE_EMULATOR_ENDPOINT=http://localhost:9000 # For JSON and XML API CLOUD_STORAGE_GRPC_ENDPOINT=localhost:8000 # For gRPC API ``` diff --git a/google/cloud/storage/examples/storage_examples_common.cc b/google/cloud/storage/examples/storage_examples_common.cc index 3748cf44a79e1..f16cd414dca0e 100644 --- a/google/cloud/storage/examples/storage_examples_common.cc +++ b/google/cloud/storage/examples/storage_examples_common.cc @@ -24,7 +24,7 @@ namespace storage { namespace examples { bool UsingEmulator() { - return !google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") + return !google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT") .value_or("") .empty(); } diff --git a/google/cloud/storage/examples/storage_examples_common_test.cc b/google/cloud/storage/examples/storage_examples_common_test.cc index 0ba6f0f909cc9..1edde0b5ab73e 100644 --- a/google/cloud/storage/examples/storage_examples_common_test.cc +++ 
b/google/cloud/storage/examples/storage_examples_common_test.cc @@ -46,7 +46,7 @@ TEST(StorageExamplesCommon, CreateCommandEntryUsage) { // Set the client to use the testbench, this avoids any problems trying to // find and load the default credentials file. google::cloud::testing_util::ScopedEnvironment env( - "CLOUD_STORAGE_TESTBENCH_ENDPOINT", "http://localhost:9090"); + "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9090"); int call_count = 0; auto command = [&call_count](google::cloud::storage::Client const&, @@ -75,7 +75,7 @@ TEST(StorageExamplesCommon, CreateCommandEntryNoArguments) { // Set the client to use the testbench, this avoids any problems trying to // find and load the default credentials file. google::cloud::testing_util::ScopedEnvironment env( - "CLOUD_STORAGE_TESTBENCH_ENDPOINT", "http://localhost:9090"); + "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9090"); int call_count = 0; auto command = [&call_count](google::cloud::storage::Client const&, @@ -115,7 +115,7 @@ TEST(StorageExamplesCommon, CreateCommandEntryVarargs) { // Set the client to use the testbench, this avoids any problems trying to // find and load the default credentials file. 
google::cloud::testing_util::ScopedEnvironment env( - "CLOUD_STORAGE_TESTBENCH_ENDPOINT", "http://localhost:9090"); + "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9090"); int call_count = 0; auto command = [&call_count](google::cloud::storage::Client const&, diff --git a/google/cloud/storage/internal/curl_client_test.cc b/google/cloud/storage/internal/curl_client_test.cc index 9e3da601b1562..2a69884cab53c 100644 --- a/google/cloud/storage/internal/curl_client_test.cc +++ b/google/cloud/storage/internal/curl_client_test.cc @@ -52,7 +52,7 @@ class FailingCredentials : public Credentials { class CurlClientTest : public ::testing::Test, public ::testing::WithParamInterface { protected: - CurlClientTest() : endpoint_("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}) {} + CurlClientTest() : endpoint_("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}) {} void SetUp() override { std::string const error_type = GetParam(); @@ -65,7 +65,7 @@ class CurlClientTest : public ::testing::Test, EXPECT_THAT(actual.message(), HasSubstr(kStatusErrorMsg)); }; } else if (error_type == "libcurl-failure") { - google::cloud::internal::SetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT", + google::cloud::internal::SetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:1"); client_ = CurlClient::Create(ClientOptions(oauth2::CreateAnonymousCredentials()) diff --git a/google/cloud/storage/internal/grpc_client_failures_test.cc b/google/cloud/storage/internal/grpc_client_failures_test.cc index 53fea57a46b5d..cbbff1ad697f3 100644 --- a/google/cloud/storage/internal/grpc_client_failures_test.cc +++ b/google/cloud/storage/internal/grpc_client_failures_test.cc @@ -42,8 +42,7 @@ class GrpcClientFailuresTest protected: GrpcClientFailuresTest() : grpc_config_("GOOGLE_CLOUD_CPP_STORAGE_GRPC_CONFIG", {}), - rest_endpoint_("CLOUD_STORAGE_TESTBENCH_ENDPOINT", - "http://localhost:1"), + rest_endpoint_("CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:1"), grpc_endpoint_("CLOUD_STORAGE_GRPC_ENDPOINT", "localhost:1") {} void 
SetUp() override { diff --git a/google/cloud/storage/testing/storage_integration_test.cc b/google/cloud/storage/testing/storage_integration_test.cc index 242b44b78026d..d08232b11e2fd 100644 --- a/google/cloud/storage/testing/storage_integration_test.cc +++ b/google/cloud/storage/testing/storage_integration_test.cc @@ -160,7 +160,7 @@ EncryptionKeyData StorageIntegrationTest::MakeEncryptionKeyData() { } bool StorageIntegrationTest::UsingEmulator() { - return google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") + return google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT") .has_value(); } diff --git a/google/cloud/storage/tests/object_basic_crud_integration_test.cc b/google/cloud/storage/tests/object_basic_crud_integration_test.cc index d85b5e76852af..b1395c4255630 100644 --- a/google/cloud/storage/tests/object_basic_crud_integration_test.cc +++ b/google/cloud/storage/tests/object_basic_crud_integration_test.cc @@ -152,9 +152,9 @@ TEST_F(ObjectBasicCRUDIntegrationTest, BasicCRUD) { StatusOr CreateNonDefaultClient() { auto testbench = - google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT"); + google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); google::cloud::testing_util::ScopedEnvironment env( - "CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}); + "CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); auto options = ClientOptions(oauth2::CreateAnonymousCredentials()); if (!testbench) { // Use a different spelling of the default endpoint. 
This disables the diff --git a/google/cloud/storage/tests/object_media_integration_test.cc b/google/cloud/storage/tests/object_media_integration_test.cc index deee576609fbe..381e89c058210 100644 --- a/google/cloud/storage/tests/object_media_integration_test.cc +++ b/google/cloud/storage/tests/object_media_integration_test.cc @@ -531,7 +531,7 @@ TEST_F(ObjectMediaIntegrationTest, ReadByChunk) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadJSON) { - ScopedEnvironment disable_testbench("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}); + ScopedEnvironment disable_testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -552,7 +552,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadJSON) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadXML) { - ScopedEnvironment testbench("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}); + ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -569,7 +569,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadXML) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteJSON) { - ScopedEnvironment testbench("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}); + ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -588,7 +588,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteJSON) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteXML) { - ScopedEnvironment testbench("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}); + ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) 
.set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -605,7 +605,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteXML) { TEST_F(ObjectMediaIntegrationTest, ConnectionFailureDownloadFile) { google::cloud::testing_util::ScopedEnvironment endpoint( - "CLOUD_STORAGE_TESTBENCH_ENDPOINT", "http://localhost:1"); + "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:1"); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -619,7 +619,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureDownloadFile) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureUploadFile) { - ScopedEnvironment testbench("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}); + ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; diff --git a/google/cloud/storage/tools/run_testbench_utils.sh b/google/cloud/storage/tools/run_testbench_utils.sh index feadec1d6f30c..e135dbe45d11b 100644 --- a/google/cloud/storage/tools/run_testbench_utils.sh +++ b/google/cloud/storage/tools/run_testbench_utils.sh @@ -44,7 +44,7 @@ kill_testbench() { # TESTBENCH_PORT: the listening port for the test bench, 8000 if not set. # HTTPBIN_ENDPOINT: the httpbin endpoint on the test bench. # TESTBENCH_PID: the process id for the test bench. -# CLOUD_STORAGE_TESTBENCH_ENDPOINT: the google cloud storage endpoint for the +# CLOUD_STORAGE_EMULATOR_ENDPOINT: the google cloud storage endpoint for the # test bench. 
# IO_COLOR_*: colorize output messages, defined in lib/io.sh # Arguments: @@ -85,7 +85,7 @@ start_testbench() { fi export HTTPBIN_ENDPOINT="http://localhost:${testbench_port}/httpbin" - export CLOUD_STORAGE_TESTBENCH_ENDPOINT="http://localhost:${testbench_port}" + export CLOUD_STORAGE_EMULATOR_ENDPOINT="http://localhost:${testbench_port}" delay=1 connected=no From 3f75ea6b87efb8e52373cfba392e33d3f9c617d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 14:35:03 +0700 Subject: [PATCH 04/17] rename `testbench` to `emulator` in Dockerfile.* --- ci/kokoro/docker/Dockerfile.fedora | 2 +- ci/kokoro/docker/Dockerfile.fedora-install | 2 +- ci/kokoro/docker/Dockerfile.fedora-libcxx-msan | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ci/kokoro/docker/Dockerfile.fedora b/ci/kokoro/docker/Dockerfile.fedora index 87eb2f101527c..f427618d444ca 100644 --- a/ci/kokoro/docker/Dockerfile.fedora +++ b/ci/kokoro/docker/Dockerfile.fedora @@ -29,7 +29,7 @@ RUN dnf makecache && \ # the container's /etc/passwd file. RUN echo 'root:' | chpasswd -# Install the Python modules needed to run the storage testbench +# Install the Python modules needed to run the storage emulator RUN pip3 install --upgrade pip RUN pip3 install setuptools wheel RUN pip3 install git+git://github.com/googleapis/python-storage@8cf6c62a96ba3fff7e5028d931231e28e5029f1c diff --git a/ci/kokoro/docker/Dockerfile.fedora-install b/ci/kokoro/docker/Dockerfile.fedora-install index bdb9f23d785c1..5ce87de18398d 100644 --- a/ci/kokoro/docker/Dockerfile.fedora-install +++ b/ci/kokoro/docker/Dockerfile.fedora-install @@ -52,7 +52,7 @@ RUN pip3 install cmake_format==0.6.8 # Install black to automatically format the Python files. 
RUN pip3 install black==19.3b0 -# Install the Python modules needed to run the storage testbench +# Install the Python modules needed to run the storage emulator RUN dnf makecache && dnf install -y python3-devel RUN pip3 install setuptools wheel RUN pip3 install git+git://github.com/googleapis/python-storage@8cf6c62a96ba3fff7e5028d931231e28e5029f1c diff --git a/ci/kokoro/docker/Dockerfile.fedora-libcxx-msan b/ci/kokoro/docker/Dockerfile.fedora-libcxx-msan index 8e19d24309069..53c9d5521c714 100644 --- a/ci/kokoro/docker/Dockerfile.fedora-libcxx-msan +++ b/ci/kokoro/docker/Dockerfile.fedora-libcxx-msan @@ -29,7 +29,7 @@ RUN dnf makecache && \ # the container's /etc/passwd file. RUN echo 'root:' | chpasswd -# Install the Python modules needed to run the storage testbench +# Install the Python modules needed to run the storage emulator RUN pip3 install --upgrade pip RUN pip3 install setuptools wheel RUN pip3 install git+git://github.com/googleapis/python-storage@8cf6c62a96ba3fff7e5028d931231e28e5029f1c From 5c7b1398c7062a6ef35917cfd5c4e722be678bb9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 14:45:26 +0700 Subject: [PATCH 05/17] rename `x_testbench_transfer_encoding` to `x_emulator_transfer_encoding` --- google/cloud/storage/emulator/emulator.py | 4 ++-- .../storage/tests/object_resumable_write_integration_test.cc | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/google/cloud/storage/emulator/emulator.py b/google/cloud/storage/emulator/emulator.py index 1ee6813e3d46c..8d28b9fb247e6 100644 --- a/google/cloud/storage/emulator/emulator.py +++ b/google/cloud/storage/emulator/emulator.py @@ -686,7 +686,7 @@ def resumable_upload_chunk(bucket_name): False, None, ) - blob.metadata.metadata["x_testbench_transfer_encoding"] = ":".join( + blob.metadata.metadata["x_emulator_transfer_encoding"] = ":".join( upload.transfer ) blob.metadata.metadata["x_testbench_upload"] = "resumable" @@ -734,7 +734,7 @@ def 
resumable_upload_chunk(bucket_name): None, upload.rest_only, ) - blob.metadata.metadata["x_testbench_transfer_encoding"] = ":".join( + blob.metadata.metadata["x_emulator_transfer_encoding"] = ":".join( upload.transfer ) blob.metadata.metadata["x_testbench_upload"] = "resumable" diff --git a/google/cloud/storage/tests/object_resumable_write_integration_test.cc b/google/cloud/storage/tests/object_resumable_write_integration_test.cc index 7a46bb83acc79..7bc5a7f7b1cc3 100644 --- a/google/cloud/storage/tests/object_resumable_write_integration_test.cc +++ b/google/cloud/storage/tests/object_resumable_write_integration_test.cc @@ -190,8 +190,8 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteNotChunked) { if (meta.has_metadata("x_testbench_upload")) { EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); } - if (meta.has_metadata("x_testbench_transfer_encoding")) { - EXPECT_THAT(meta.metadata("x_testbench_transfer_encoding"), + if (meta.has_metadata("x_emulator_transfer_encoding")) { + EXPECT_THAT(meta.metadata("x_emulator_transfer_encoding"), Not(HasSubstr("chunked"))); } From ab7dce2ebec5678eeddd325e6946b58f3d7260f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 14:58:16 +0700 Subject: [PATCH 06/17] rename `x-goog-testbench-instructions` to `x-goog-emulator-instructions` --- google/cloud/storage/emulator/README.md | 14 +++++++------- google/cloud/storage/emulator/gcs/object.py | 4 ++-- .../storage/internal/retry_object_read_source.cc | 4 ++-- .../tests/object_checksum_integration_test.cc | 11 +++++------ .../storage/tests/object_hash_integration_test.cc | 11 +++++------ .../storage/tests/object_media_integration_test.cc | 6 +++--- .../tests/slow_reader_chunk_integration_test.cc | 2 +- .../tests/slow_reader_stream_integration_test.cc | 2 +- google/cloud/storage/well_known_headers_test.cc | 4 ++-- 9 files changed, 28 insertions(+), 30 deletions(-) diff --git a/google/cloud/storage/emulator/README.md 
b/google/cloud/storage/emulator/README.md index 845f912f48cab..8c3ab921ac316 100644 --- a/google/cloud/storage/emulator/README.md +++ b/google/cloud/storage/emulator/README.md @@ -38,36 +38,36 @@ CLOUD_STORAGE_GRPC_ENDPOINT=localhost:8000 # For gRPC API ## Force Failures -You can force the following failures by using the `x-goog-testbench-instructions` header. +You can force the following failures by using the `x-goog-emulator-instructions` header. ### return-broken-stream -Set request headers with `x-goog-testbench-instructions: return-broken-stream`. +Set request headers with `x-goog-emulator-instructions: return-broken-stream`. Emulator will fail after sending 1024*1024 bytes. ### return-corrupted-data -Set request headers with `x-goog-testbench-instructions: return-corrupted-data`. +Set request headers with `x-goog-emulator-instructions: return-corrupted-data`. Emulator will return corrupted data. ### stall-always -Set request headers with `x-goog-testbench-instructions: stall-always`. +Set request headers with `x-goog-emulator-instructions: stall-always`. Emulator will stall at the beginning. ### stall-at-256KiB -Set request headers with `x-goog-testbench-instructions: stall-at-256KiB`. +Set request headers with `x-goog-emulator-instructions: stall-at-256KiB`. Emulator will stall at 256KiB bytes. ### return-503-after-256K -Set request headers with `x-goog-testbench-instructions: return-503-after-256K`. +Set request headers with `x-goog-emulator-instructions: return-503-after-256K`. Emulator will return a `HTTP 503` after sending 256KiB bytes. ### return-503-after-256K/retry-N -Set request headers with `x-goog-testbench-instructions: return-503-after-256K/retry-1` up to `x-goog-testbench-instructions: return-503-after-256K/retry-N`. +Set request headers with `x-goog-emulator-instructions: return-503-after-256K/retry-1` up to `x-goog-emulator-instructions: return-503-after-256K/retry-N`. 
For N==1 and N==2 behave like `return-305-after-256K`, for `N>=3` ignore the failure instruction and return successfully. This is used to test failures during diff --git a/google/cloud/storage/emulator/gcs/object.py b/google/cloud/storage/emulator/gcs/object.py index f1269dac0f994..2d4516431caae 100644 --- a/google/cloud/storage/emulator/gcs/object.py +++ b/google/cloud/storage/emulator/gcs/object.py @@ -90,7 +90,7 @@ def init( cls, request, metadata, media, bucket, is_destination, context, rest_only=None ): if context is None: - instruction = request.headers.get("x-goog-testbench-instructions") + instruction = request.headers.get("x-goog-emulator-instructions") if instruction == "inject-upload-data-error": media = utils.common.corrupt_media(media) timestamp = datetime.datetime.now(datetime.timezone.utc) @@ -417,7 +417,7 @@ def rest_media(self, request): streamer, length, headers = None, len(response_payload), {} content_range = "bytes %d-%d/%d" % (begin, end - 1, length) - instructions = request.headers.get("x-goog-testbench-instructions") + instructions = request.headers.get("x-goog-emulator-instructions") if instructions == "return-broken-stream": headers["Content-Length"] = length diff --git a/google/cloud/storage/internal/retry_object_read_source.cc b/google/cloud/storage/internal/retry_object_read_source.cc index 95fc9e7d72314..fca3c60ce3001 100644 --- a/google/cloud/storage/internal/retry_object_read_source.cc +++ b/google/cloud/storage/internal/retry_object_read_source.cc @@ -75,7 +75,7 @@ StatusOr RetryObjectReadSource::Read(char* buf, std::string instructions; if (request_.HasOption()) { auto name = request_.GetOption().custom_header_name(); - has_testbench_instructions = (name == "x-goog-testbench-instructions"); + has_testbench_instructions = (name == "x-goog-emulator-instructions"); instructions = request_.GetOption().value(); } @@ -94,7 +94,7 @@ StatusOr RetryObjectReadSource::Read(char* buf, if (has_testbench_instructions) { 
request_.set_multiple_options( - CustomHeader("x-goog-testbench-instructions", + CustomHeader("x-goog-emulator-instructions", instructions + "/retry-" + std::to_string(++counter))); } diff --git a/google/cloud/storage/tests/object_checksum_integration_test.cc b/google/cloud/storage/tests/object_checksum_integration_test.cc index 76c8896413025..317764ff99931 100644 --- a/google/cloud/storage/tests/object_checksum_integration_test.cc +++ b/google/cloud/storage/tests/object_checksum_integration_test.cc @@ -312,7 +312,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadXML) { auto stream = client->ReadObject( bucket_name_, object_name, - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); #if GOOGLE_CLOUD_CPP_HAVE_EXCEPTIONS EXPECT_THROW( @@ -357,7 +357,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadJSON) { auto stream = client->ReadObject( bucket_name_, object_name, DisableMD5Hash(true), IfMetagenerationNotMatch(0), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); #if GOOGLE_CLOUD_CPP_HAVE_EXCEPTIONS EXPECT_THROW( @@ -401,7 +401,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadXMLRead) { auto stream = client->ReadObject( bucket_name_, object_name, DisableMD5Hash(true), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); // Create a buffer large enough to hold the results and read pas EOF. 
std::vector buffer(2 * contents.size()); @@ -438,7 +438,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadJSONRead) { auto stream = client->ReadObject( bucket_name_, object_name, DisableMD5Hash(true), IfMetagenerationNotMatch(0), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); // Create a buffer large enough to hold the results and read pas EOF. std::vector buffer(2 * contents.size()); @@ -468,8 +468,7 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingWriteJSON) { // Create a stream to upload an object. ObjectWriteStream stream = client->WriteObject( bucket_name_, object_name, DisableMD5Hash(true), IfGenerationMatch(0), - CustomHeader("x-goog-testbench-instructions", - "inject-upload-data-error")); + CustomHeader("x-goog-emulator-instructions", "inject-upload-data-error")); stream << LoremIpsum() << "\n"; stream << LoremIpsum(); diff --git a/google/cloud/storage/tests/object_hash_integration_test.cc b/google/cloud/storage/tests/object_hash_integration_test.cc index ac1c6045be6d7..ef4086970b98b 100644 --- a/google/cloud/storage/tests/object_hash_integration_test.cc +++ b/google/cloud/storage/tests/object_hash_integration_test.cc @@ -412,7 +412,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXML) { auto stream = client->ReadObject( bucket_name_, object_name, DisableCrc32cChecksum(true), EnableMD5Hash(), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); #if GOOGLE_CLOUD_CPP_HAVE_EXCEPTIONS EXPECT_THROW( @@ -455,7 +455,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSON) { auto stream = client->ReadObject( bucket_name_, object_name, DisableCrc32cChecksum(true), EnableMD5Hash(), IfMetagenerationNotMatch(0), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + 
CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); #if GOOGLE_CLOUD_CPP_HAVE_EXCEPTIONS EXPECT_THROW( @@ -498,7 +498,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXMLRead) { auto stream = client->ReadObject( bucket_name_, object_name, DisableCrc32cChecksum(true), EnableMD5Hash(), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); // Create a buffer large enough to hold the results and read pas EOF. std::vector buffer(2 * contents.size()); @@ -534,7 +534,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSONRead) { auto stream = client->ReadObject( bucket_name_, object_name, DisableCrc32cChecksum(true), EnableMD5Hash(), IfMetagenerationNotMatch(0), - CustomHeader("x-goog-testbench-instructions", "return-corrupted-data")); + CustomHeader("x-goog-emulator-instructions", "return-corrupted-data")); // Create a buffer large enough to hold the results and read pas EOF. 
std::vector buffer(2 * contents.size()); @@ -564,8 +564,7 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingWriteJSON) { ObjectWriteStream stream = client->WriteObject( bucket_name_, object_name, DisableCrc32cChecksum(true), EnableMD5Hash(), IfGenerationMatch(0), - CustomHeader("x-goog-testbench-instructions", - "inject-upload-data-error")); + CustomHeader("x-goog-emulator-instructions", "inject-upload-data-error")); stream << LoremIpsum() << "\n"; stream << LoremIpsum(); diff --git a/google/cloud/storage/tests/object_media_integration_test.cc b/google/cloud/storage/tests/object_media_integration_test.cc index 381e89c058210..0446965f5b62c 100644 --- a/google/cloud/storage/tests/object_media_integration_test.cc +++ b/google/cloud/storage/tests/object_media_integration_test.cc @@ -660,7 +660,7 @@ TEST_F(ObjectMediaIntegrationTest, StreamingReadTimeout) { auto stream = client.ReadObject( bucket_name_, object_name, - CustomHeader("x-goog-testbench-instructions", "stall-always")); + CustomHeader("x-goog-emulator-instructions", "stall-always")); std::vector buffer(kObjectSize); stream.read(buffer.data(), kObjectSize); @@ -694,7 +694,7 @@ TEST_F(ObjectMediaIntegrationTest, StreamingReadTimeoutContinues) { auto stream = client.ReadObject( bucket_name_, object_name, - CustomHeader("x-goog-testbench-instructions", "stall-at-256KiB")); + CustomHeader("x-goog-emulator-instructions", "stall-at-256KiB")); std::vector buffer(kObjectSize); stream.read(buffer.data(), kObjectSize); @@ -727,7 +727,7 @@ TEST_F(ObjectMediaIntegrationTest, StreamingReadInternalError) { auto stream = client.ReadObject( bucket_name_, object_name, - CustomHeader("x-goog-testbench-instructions", "return-503-after-256K")); + CustomHeader("x-goog-emulator-instructions", "return-503-after-256K")); std::vector actual(64 * 1024); for (std::size_t offset = 0; offset < contents.size() && !stream.bad() && !stream.eof(); diff --git a/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc 
b/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc index db7a9bfc8bc9a..4874544fcf24b 100644 --- a/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc +++ b/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc @@ -62,7 +62,7 @@ TEST_F(SlowReaderChunkIntegrationTest, LongPauses) { if (UsingEmulator()) { return client->ReadObject( bucket_name_, object_name, - CustomHeader("x-goog-testbench-instructions", "return-broken-stream"), + CustomHeader("x-goog-emulator-instructions", "return-broken-stream"), ReadFromOffset(offset)); } return client->ReadObject(bucket_name_, object_name, diff --git a/google/cloud/storage/tests/slow_reader_stream_integration_test.cc b/google/cloud/storage/tests/slow_reader_stream_integration_test.cc index d62c7aeb89ea9..a8c96fd597378 100644 --- a/google/cloud/storage/tests/slow_reader_stream_integration_test.cc +++ b/google/cloud/storage/tests/slow_reader_stream_integration_test.cc @@ -63,7 +63,7 @@ TEST_F(SlowReaderStreamIntegrationTest, LongPauses) { if (UsingEmulator()) { stream = client->ReadObject( bucket_name_, object_name, - CustomHeader("x-goog-testbench-instructions", "return-broken-stream")); + CustomHeader("x-goog-emulator-instructions", "return-broken-stream")); } else { stream = client->ReadObject(bucket_name_, object_name); } diff --git a/google/cloud/storage/well_known_headers_test.cc b/google/cloud/storage/well_known_headers_test.cc index 2baed6926ed0e..d8e63f42edb1d 100644 --- a/google/cloud/storage/well_known_headers_test.cc +++ b/google/cloud/storage/well_known_headers_test.cc @@ -25,11 +25,11 @@ using ::testing::HasSubstr; /// @test Verify that CustomHeader works as expected. 
TEST(WellKnownHeader, CustomHeader) { - CustomHeader header("x-goog-testbench-instructions", "do-stuff"); + CustomHeader header("x-goog-emulator-instructions", "do-stuff"); std::ostringstream os; os << header; EXPECT_THAT(os.str(), HasSubstr("do-stuff")); - EXPECT_THAT(os.str(), HasSubstr("x-goog-testbench-instructions")); + EXPECT_THAT(os.str(), HasSubstr("x-goog-emulator-instructions")); } /// @test Verify that EncryptionKey streaming works as expected. From 5e59a51a535632828ecc696eec95b3e06dc752f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 15:06:53 +0700 Subject: [PATCH 07/17] rename `x_testbench_upload` to `x_emulator_upload` --- google/cloud/storage/emulator/emulator.py | 4 ++-- google/cloud/storage/emulator/gcs/object.py | 6 +++--- google/cloud/storage/emulator/grpc_server.py | 2 +- .../storage/tests/grpc_integration_test.cc | 4 ++-- .../tests/object_checksum_integration_test.cc | 4 ++-- .../tests/object_file_integration_test.cc | 8 ++++---- .../tests/object_hash_integration_test.cc | 8 ++++---- ...object_resumable_write_integration_test.cc | 20 +++++++++---------- 8 files changed, 28 insertions(+), 28 deletions(-) diff --git a/google/cloud/storage/emulator/emulator.py b/google/cloud/storage/emulator/emulator.py index 8d28b9fb247e6..169a8912c1555 100644 --- a/google/cloud/storage/emulator/emulator.py +++ b/google/cloud/storage/emulator/emulator.py @@ -689,7 +689,7 @@ def resumable_upload_chunk(bucket_name): blob.metadata.metadata["x_emulator_transfer_encoding"] = ":".join( upload.transfer ) - blob.metadata.metadata["x_testbench_upload"] = "resumable" + blob.metadata.metadata["x_emulator_upload"] = "resumable" db.insert_object(upload.request, bucket_name, blob, None) projection = utils.common.extract_projection( upload.request, CommonEnums.Projection.NO_ACL, None @@ -737,7 +737,7 @@ def resumable_upload_chunk(bucket_name): blob.metadata.metadata["x_emulator_transfer_encoding"] = ":".join( upload.transfer ) 
- blob.metadata.metadata["x_testbench_upload"] = "resumable" + blob.metadata.metadata["x_emulator_upload"] = "resumable" db.insert_object(upload.request, bucket_name, blob, None) projection = utils.common.extract_projection( upload.request, CommonEnums.Projection.NO_ACL, None diff --git a/google/cloud/storage/emulator/gcs/object.py b/google/cloud/storage/emulator/gcs/object.py index 2d4516431caae..4fa7548b6484f 100644 --- a/google/cloud/storage/emulator/gcs/object.py +++ b/google/cloud/storage/emulator/gcs/object.py @@ -172,7 +172,7 @@ def init_media(cls, request, bucket): metadata = { "bucket": bucket.name, "name": object_name, - "metadata": {"x_testbench_upload": "simple"}, + "metadata": {"x_emulator_upload": "simple"}, } return cls.init_dict(request, metadata, media, bucket, False) @@ -198,7 +198,7 @@ def init_multipart(cls, request, bucket): metadata["metadata"] = ( {} if "metadata" not in metadata else metadata["metadata"] ) - metadata["metadata"]["x_testbench_upload"] = "multipart" + metadata["metadata"]["x_emulator_upload"] = "multipart" if "md5Hash" in metadata: metadata["metadata"]["x_testbench_md5"] = metadata["md5Hash"] metadata["md5Hash"] = metadata["md5Hash"] @@ -215,7 +215,7 @@ def init_xml(cls, request, bucket, name): metadata = { "bucket": bucket.name, "name": name, - "metadata": {"x_testbench_upload": "xml"}, + "metadata": {"x_emulator_upload": "xml"}, } if "content-type" in request.headers: metadata["contentType"] = request.headers["content-type"] diff --git a/google/cloud/storage/emulator/grpc_server.py b/google/cloud/storage/emulator/grpc_server.py index 8519f42976f9a..5a8b9a84a1b00 100644 --- a/google/cloud/storage/emulator/grpc_server.py +++ b/google/cloud/storage/emulator/grpc_server.py @@ -136,7 +136,7 @@ def StartResumableWrite(self, request, context): upload = gcs_type.holder.DataHolder.init_resumable_grpc( request, bucket, context ) - upload.metadata.metadata["x_testbench_upload"] = "resumable" + 
upload.metadata.metadata["x_emulator_upload"] = "resumable" db.insert_upload(upload) return storage_pb2.StartResumableWriteResponse(upload_id=upload.upload_id) diff --git a/google/cloud/storage/tests/grpc_integration_test.cc b/google/cloud/storage/tests/grpc_integration_test.cc index a08615880c542..cd5c555931f52 100644 --- a/google/cloud/storage/tests/grpc_integration_test.cc +++ b/google/cloud/storage/tests/grpc_integration_test.cc @@ -166,8 +166,8 @@ TEST_P(GrpcIntegrationTest, WriteResume) { EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name, meta.bucket()); if (UsingEmulator()) { - EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); + EXPECT_TRUE(meta.has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta.metadata("x_emulator_upload")); } auto status = client->DeleteObject(bucket_name, object_name); diff --git a/google/cloud/storage/tests/object_checksum_integration_test.cc b/google/cloud/storage/tests/object_checksum_integration_test.cc index 317764ff99931..7bc59e742c727 100644 --- a/google/cloud/storage/tests/object_checksum_integration_test.cc +++ b/google/cloud/storage/tests/object_checksum_integration_test.cc @@ -203,10 +203,10 @@ TEST_F(ObjectChecksumIntegrationTest, DefaultCrc32cInsertJSON) { backend->ClearLogLines(), Contains(StartsWith("content-type: multipart/related; boundary="))); - if (insert_meta->has_metadata("x_testbench_upload")) { + if (insert_meta->has_metadata("x_emulator_upload")) { // When running against the testbench, we have some more information to // verify the right upload type and contents were sent. 
- EXPECT_EQ("multipart", insert_meta->metadata("x_testbench_upload")); + EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); ASSERT_TRUE(insert_meta->has_metadata("x_testbench_crc32c")); auto expected_crc32c = ComputeCrc32cChecksum(LoremIpsum()); EXPECT_EQ(expected_crc32c, insert_meta->metadata("x_testbench_crc32c")); diff --git a/google/cloud/storage/tests/object_file_integration_test.cc b/google/cloud/storage/tests/object_file_integration_test.cc index 8b867ad7f03b0..bc5238aab336e 100644 --- a/google/cloud/storage/tests/object_file_integration_test.cc +++ b/google/cloud/storage/tests/object_file_integration_test.cc @@ -435,8 +435,8 @@ TEST_F(ObjectFileIntegrationTest, UploadFileResumableBySize) { ASSERT_EQ(expected_str.size(), meta->size()); if (UsingEmulator()) { - ASSERT_TRUE(meta->has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta->metadata("x_testbench_upload")); + ASSERT_TRUE(meta->has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta->metadata("x_emulator_upload")); } // Create an iostream to read the object back. @@ -475,8 +475,8 @@ TEST_F(ObjectFileIntegrationTest, UploadFileResumableByOption) { ASSERT_EQ(expected_str.size(), meta->size()); if (UsingEmulator()) { - ASSERT_TRUE(meta->has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta->metadata("x_testbench_upload")); + ASSERT_TRUE(meta->has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta->metadata("x_emulator_upload")); } // Create an iostream to read the object back. 
diff --git a/google/cloud/storage/tests/object_hash_integration_test.cc b/google/cloud/storage/tests/object_hash_integration_test.cc index ef4086970b98b..6adbde603af17 100644 --- a/google/cloud/storage/tests/object_hash_integration_test.cc +++ b/google/cloud/storage/tests/object_hash_integration_test.cc @@ -95,10 +95,10 @@ TEST_F(ObjectHashIntegrationTest, DefaultMD5HashJSON) { backend->ClearLogLines(), Contains(StartsWith("content-type: multipart/related; boundary="))); - if (insert_meta->has_metadata("x_testbench_upload")) { + if (insert_meta->has_metadata("x_emulator_upload")) { // When running against the testbench, we have some more information to // verify the right upload type and contents were sent. - EXPECT_EQ("multipart", insert_meta->metadata("x_testbench_upload")); + EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); ASSERT_FALSE(insert_meta->has_metadata("x_testbench_md5")); } @@ -157,10 +157,10 @@ TEST_F(ObjectHashIntegrationTest, DisableMD5HashJSON) { backend->ClearLogLines(), Contains(StartsWith("content-type: multipart/related; boundary="))); - if (insert_meta->has_metadata("x_testbench_upload")) { + if (insert_meta->has_metadata("x_emulator_upload")) { // When running against the testbench, we have some more information to // verify the right upload type and contents were sent. 
- EXPECT_EQ("multipart", insert_meta->metadata("x_testbench_upload")); + EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); ASSERT_FALSE(insert_meta->has_metadata("x_testbench_md5")); } diff --git a/google/cloud/storage/tests/object_resumable_write_integration_test.cc b/google/cloud/storage/tests/object_resumable_write_integration_test.cc index 7bc5a7f7b1cc3..641364cb878cd 100644 --- a/google/cloud/storage/tests/object_resumable_write_integration_test.cc +++ b/google/cloud/storage/tests/object_resumable_write_integration_test.cc @@ -67,8 +67,8 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteWithContentType) { EXPECT_EQ(bucket_name_, meta.bucket()); EXPECT_EQ("text/plain", meta.content_type()); if (UsingEmulator()) { - EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); + EXPECT_TRUE(meta.has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta.metadata("x_emulator_upload")); } auto status = client->DeleteObject(bucket_name_, object_name); @@ -115,8 +115,8 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteWithUseResumable) { EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); if (UsingEmulator()) { - EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); + EXPECT_TRUE(meta.has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta.metadata("x_emulator_upload")); } auto status = client->DeleteObject(bucket_name_, object_name); @@ -154,8 +154,8 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteResume) { EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); if (UsingEmulator()) { - EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); + EXPECT_TRUE(meta.has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta.metadata("x_emulator_upload")); } auto status = 
client->DeleteObject(bucket_name_, object_name); @@ -187,8 +187,8 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteNotChunked) { os.Close(); ASSERT_STATUS_OK(os.metadata()); ObjectMetadata meta = os.metadata().value(); - if (meta.has_metadata("x_testbench_upload")) { - EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); + if (meta.has_metadata("x_emulator_upload")) { + EXPECT_EQ("resumable", meta.metadata("x_emulator_upload")); } if (meta.has_metadata("x_emulator_transfer_encoding")) { EXPECT_THAT(meta.metadata("x_emulator_transfer_encoding"), @@ -230,8 +230,8 @@ TEST_F(ObjectResumableWriteIntegrationTest, WriteResumeFinalizedUpload) { EXPECT_EQ(object_name, meta.name()); EXPECT_EQ(bucket_name_, meta.bucket()); if (UsingEmulator()) { - EXPECT_TRUE(meta.has_metadata("x_testbench_upload")); - EXPECT_EQ("resumable", meta.metadata("x_testbench_upload")); + EXPECT_TRUE(meta.has_metadata("x_emulator_upload")); + EXPECT_EQ("resumable", meta.metadata("x_emulator_upload")); } } From 391dc073f4fd1bf57e8111ff5e36cf18253cc963 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 15:15:35 +0700 Subject: [PATCH 08/17] rename `x_testbench_md5` to `x_emulator_md5` --- google/cloud/storage/emulator/gcs/object.py | 6 +++--- google/cloud/storage/tests/object_hash_integration_test.cc | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/google/cloud/storage/emulator/gcs/object.py b/google/cloud/storage/emulator/gcs/object.py index 4fa7548b6484f..14f25dc589f61 100644 --- a/google/cloud/storage/emulator/gcs/object.py +++ b/google/cloud/storage/emulator/gcs/object.py @@ -200,7 +200,7 @@ def init_multipart(cls, request, bucket): ) metadata["metadata"]["x_emulator_upload"] = "multipart" if "md5Hash" in metadata: - metadata["metadata"]["x_testbench_md5"] = metadata["md5Hash"] + metadata["metadata"]["x_emulator_md5"] = metadata["md5Hash"] metadata["md5Hash"] = metadata["md5Hash"] if "crc32c" in metadata: 
metadata["metadata"]["x_testbench_crc32c"] = metadata["crc32c"] @@ -388,10 +388,10 @@ def x_goog_hash_header(self): header = "" if "x_testbench_crc32c" in self.metadata.metadata: header += "crc32c=" + self.metadata.metadata["x_testbench_crc32c"] - if "x_testbench_md5" in self.metadata.metadata: + if "x_emulator_md5" in self.metadata.metadata: if header != "": header += "," - header += "md5=" + self.metadata.metadata["x_testbench_md5"] + header += "md5=" + self.metadata.metadata["x_emulator_md5"] return header if header != "" else None def rest_media(self, request): diff --git a/google/cloud/storage/tests/object_hash_integration_test.cc b/google/cloud/storage/tests/object_hash_integration_test.cc index 6adbde603af17..ab7c5549d0db2 100644 --- a/google/cloud/storage/tests/object_hash_integration_test.cc +++ b/google/cloud/storage/tests/object_hash_integration_test.cc @@ -99,7 +99,7 @@ TEST_F(ObjectHashIntegrationTest, DefaultMD5HashJSON) { // When running against the testbench, we have some more information to // verify the right upload type and contents were sent. EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); - ASSERT_FALSE(insert_meta->has_metadata("x_testbench_md5")); + ASSERT_FALSE(insert_meta->has_metadata("x_emulator_md5")); } auto status = client.DeleteObject(bucket_name_, object_name); @@ -161,7 +161,7 @@ TEST_F(ObjectHashIntegrationTest, DisableMD5HashJSON) { // When running against the testbench, we have some more information to // verify the right upload type and contents were sent. 
EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); - ASSERT_FALSE(insert_meta->has_metadata("x_testbench_md5")); + ASSERT_FALSE(insert_meta->has_metadata("x_emulator_md5")); } auto status = client.DeleteObject(bucket_name_, object_name); From 0ec46afc492e17059b87c565946033abed9f7f8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 15:19:30 +0700 Subject: [PATCH 09/17] rename `x_testbench_crc32c` to `x_emulator_crc32c` --- google/cloud/storage/emulator/gcs/object.py | 6 +++--- .../cloud/storage/tests/object_checksum_integration_test.cc | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/google/cloud/storage/emulator/gcs/object.py b/google/cloud/storage/emulator/gcs/object.py index 14f25dc589f61..ea864ca3873b4 100644 --- a/google/cloud/storage/emulator/gcs/object.py +++ b/google/cloud/storage/emulator/gcs/object.py @@ -203,7 +203,7 @@ def init_multipart(cls, request, bucket): metadata["metadata"]["x_emulator_md5"] = metadata["md5Hash"] metadata["md5Hash"] = metadata["md5Hash"] if "crc32c" in metadata: - metadata["metadata"]["x_testbench_crc32c"] = metadata["crc32c"] + metadata["metadata"]["x_emulator_crc32c"] = metadata["crc32c"] metadata["crc32c"] = struct.unpack( ">I", base64.b64decode(metadata["crc32c"].encode("utf-8")) )[0] @@ -386,8 +386,8 @@ def rest_metadata(self): def x_goog_hash_header(self): header = "" - if "x_testbench_crc32c" in self.metadata.metadata: - header += "crc32c=" + self.metadata.metadata["x_testbench_crc32c"] + if "x_emulator_crc32c" in self.metadata.metadata: + header += "crc32c=" + self.metadata.metadata["x_emulator_crc32c"] if "x_emulator_md5" in self.metadata.metadata: if header != "": header += "," diff --git a/google/cloud/storage/tests/object_checksum_integration_test.cc b/google/cloud/storage/tests/object_checksum_integration_test.cc index 7bc59e742c727..0fbc5bcaf60be 100644 --- a/google/cloud/storage/tests/object_checksum_integration_test.cc +++ 
b/google/cloud/storage/tests/object_checksum_integration_test.cc @@ -207,9 +207,9 @@ TEST_F(ObjectChecksumIntegrationTest, DefaultCrc32cInsertJSON) { // When running against the testbench, we have some more information to // verify the right upload type and contents were sent. EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); - ASSERT_TRUE(insert_meta->has_metadata("x_testbench_crc32c")); + ASSERT_TRUE(insert_meta->has_metadata("x_emulator_crc32c")); auto expected_crc32c = ComputeCrc32cChecksum(LoremIpsum()); - EXPECT_EQ(expected_crc32c, insert_meta->metadata("x_testbench_crc32c")); + EXPECT_EQ(expected_crc32c, insert_meta->metadata("x_emulator_crc32c")); } auto status = client.DeleteObject(bucket_name_, object_name); From 7435705651ff457a502056a064497b02e7fbb9d1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 15:40:09 +0700 Subject: [PATCH 10/17] rename `testbench` to `emulator` in *.[cc/h] enable test against emulator for `TEST_F(ObjectFileIntegrationTest, UploadFileBinary)` --- google/cloud/storage/client_options.cc | 4 ++-- google/cloud/storage/client_options_test.cc | 2 +- .../examples/storage_examples_common_test.cc | 6 ++--- .../internal/retry_object_read_source.cc | 6 ++--- .../testing/storage_integration_test.cc | 4 ++-- .../testing/storage_integration_test.h | 2 +- .../storage/tests/bucket_integration_test.cc | 2 +- .../tests/key_file_integration_test.cc | 2 +- .../object_basic_crud_integration_test.cc | 8 +++---- .../tests/object_checksum_integration_test.cc | 22 ++++++++--------- .../tests/object_hash_integration_test.cc | 24 +++++++++---------- ..._list_objects_versions_integration_test.cc | 2 +- .../tests/object_media_integration_test.cc | 20 ++++++++-------- .../tests/object_rewrite_integration_test.cc | 2 +- .../tests/signed_url_integration_test.cc | 8 +++---- .../slow_reader_chunk_integration_test.cc | 2 +- .../slow_reader_stream_integration_test.cc | 2 +- 17 files changed, 59 
insertions(+), 59 deletions(-) diff --git a/google/cloud/storage/client_options.cc b/google/cloud/storage/client_options.cc index 8b0a0a12f7fc2..30845a63c78e4 100644 --- a/google/cloud/storage/client_options.cc +++ b/google/cloud/storage/client_options.cc @@ -47,8 +47,8 @@ std::string XmlEndpoint(ClientOptions const& options) { } std::string IamEndpoint(ClientOptions const& options) { - auto testbench = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); - if (testbench) return *testbench + "/iamapi"; + auto emulator = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + if (emulator) return *emulator + "/iamapi"; return options.iam_endpoint(); } diff --git a/google/cloud/storage/client_options_test.cc b/google/cloud/storage/client_options_test.cc index 0045b8543cec2..ed978b833f3d2 100644 --- a/google/cloud/storage/client_options_test.cc +++ b/google/cloud/storage/client_options_test.cc @@ -151,7 +151,7 @@ TEST_F(ClientOptionsTest, EndpointsOverride) { internal::IamEndpoint(options)); } -TEST_F(ClientOptionsTest, EndpointsTestBench) { +TEST_F(ClientOptionsTest, EndpointsEmulator) { testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:1234"); ClientOptions options(oauth2::CreateAnonymousCredentials()); diff --git a/google/cloud/storage/examples/storage_examples_common_test.cc b/google/cloud/storage/examples/storage_examples_common_test.cc index 1edde0b5ab73e..e2c7bc25993f6 100644 --- a/google/cloud/storage/examples/storage_examples_common_test.cc +++ b/google/cloud/storage/examples/storage_examples_common_test.cc @@ -43,7 +43,7 @@ TEST(StorageExamplesCommon, RandomObject) { } TEST(StorageExamplesCommon, CreateCommandEntryUsage) { - // Set the client to use the testbench, this avoids any problems trying to + // Set the client to use the emulator, this avoids any problems trying to // find and load the default credentials file. 
google::cloud::testing_util::ScopedEnvironment env( "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9090"); @@ -72,7 +72,7 @@ TEST(StorageExamplesCommon, CreateCommandEntryUsage) { } TEST(StorageExamplesCommon, CreateCommandEntryNoArguments) { - // Set the client to use the testbench, this avoids any problems trying to + // Set the client to use the emulator, this avoids any problems trying to // find and load the default credentials file. google::cloud::testing_util::ScopedEnvironment env( "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9090"); @@ -112,7 +112,7 @@ TEST(StorageExamplesCommon, CreateCommandEntryNoArguments) { } TEST(StorageExamplesCommon, CreateCommandEntryVarargs) { - // Set the client to use the testbench, this avoids any problems trying to + // Set the client to use the emulator, this avoids any problems trying to // find and load the default credentials file. google::cloud::testing_util::ScopedEnvironment env( "CLOUD_STORAGE_EMULATOR_ENDPOINT", "http://localhost:9090"); diff --git a/google/cloud/storage/internal/retry_object_read_source.cc b/google/cloud/storage/internal/retry_object_read_source.cc index fca3c60ce3001..9697faca93a7d 100644 --- a/google/cloud/storage/internal/retry_object_read_source.cc +++ b/google/cloud/storage/internal/retry_object_read_source.cc @@ -71,11 +71,11 @@ StatusOr RetryObjectReadSource::Read(char* buf, if (handle_result(result)) { return result; } - bool has_testbench_instructions = false; + bool has_emulator_instructions = false; std::string instructions; if (request_.HasOption()) { auto name = request_.GetOption().custom_header_name(); - has_testbench_instructions = (name == "x-goog-emulator-instructions"); + has_emulator_instructions = (name == "x-goog-emulator-instructions"); instructions = request_.GetOption().value(); } @@ -92,7 +92,7 @@ StatusOr RetryObjectReadSource::Read(char* buf, // already be exhausted, so we should fail this operation too. 
child_.reset(); - if (has_testbench_instructions) { + if (has_emulator_instructions) { request_.set_multiple_options( CustomHeader("x-goog-emulator-instructions", instructions + "/retry-" + std::to_string(++counter))); diff --git a/google/cloud/storage/testing/storage_integration_test.cc b/google/cloud/storage/testing/storage_integration_test.cc index d08232b11e2fd..918e1aedac907 100644 --- a/google/cloud/storage/testing/storage_integration_test.cc +++ b/google/cloud/storage/testing/storage_integration_test.cc @@ -104,9 +104,9 @@ StorageIntegrationTest::MakeIntegrationTestClient( std::unique_ptr StorageIntegrationTest::TestBackoffPolicy() { std::chrono::milliseconds initial_delay(std::chrono::seconds(1)); - auto constexpr kShortDelayForTestbench = std::chrono::milliseconds(10); + auto constexpr kShortDelayForEmulator = std::chrono::milliseconds(10); if (UsingEmulator()) { - initial_delay = kShortDelayForTestbench; + initial_delay = kShortDelayForEmulator; } auto constexpr kMaximumBackoffDelay = std::chrono::minutes(5); diff --git a/google/cloud/storage/testing/storage_integration_test.h b/google/cloud/storage/testing/storage_integration_test.h index c3b3217ca3d0d..c2c3cd2f49934 100644 --- a/google/cloud/storage/testing/storage_integration_test.h +++ b/google/cloud/storage/testing/storage_integration_test.h @@ -37,7 +37,7 @@ class StorageIntegrationTest : public ::testing::Test { /** * Return a client suitable for most integration tests. * - * Most integration tests, particularly when running against the testbench, + * Most integration tests, particularly when running against the emulator, * should use short backoff and retry periods. This returns a client so * configured. 
*/ diff --git a/google/cloud/storage/tests/bucket_integration_test.cc b/google/cloud/storage/tests/bucket_integration_test.cc index 5f48113893826..5e8cdf7186562 100644 --- a/google/cloud/storage/tests/bucket_integration_test.cc +++ b/google/cloud/storage/tests/bucket_integration_test.cc @@ -889,7 +889,7 @@ TEST_F(BucketIntegrationTest, CreateFailure) { ASSERT_STATUS_OK(client); // Try to create an invalid bucket (the name should not start with an - // uppercase letter), the service (or testbench) will reject the request and + // uppercase letter), the service (or emulator) will reject the request and // we should report that error correctly. For good measure, make the project // id invalid too. StatusOr meta = client->CreateBucketForProject( diff --git a/google/cloud/storage/tests/key_file_integration_test.cc b/google/cloud/storage/tests/key_file_integration_test.cc index 04cab8ae3aea7..8b9beb6ad362d 100644 --- a/google/cloud/storage/tests/key_file_integration_test.cc +++ b/google/cloud/storage/tests/key_file_integration_test.cc @@ -32,7 +32,7 @@ class KeyFileIntegrationTest public ::testing::WithParamInterface { protected: void SetUp() override { - // The testbench does not implement signed URLs. + // The emulator does not implement signed URLs. 
if (UsingEmulator()) GTEST_SKIP(); std::string const key_file_envvar = GetParam(); diff --git a/google/cloud/storage/tests/object_basic_crud_integration_test.cc b/google/cloud/storage/tests/object_basic_crud_integration_test.cc index b1395c4255630..f7fe2ac6f9aa3 100644 --- a/google/cloud/storage/tests/object_basic_crud_integration_test.cc +++ b/google/cloud/storage/tests/object_basic_crud_integration_test.cc @@ -151,12 +151,12 @@ TEST_F(ObjectBasicCRUDIntegrationTest, BasicCRUD) { } StatusOr CreateNonDefaultClient() { - auto testbench = + auto emulator = google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); google::cloud::testing_util::ScopedEnvironment env( "CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); auto options = ClientOptions(oauth2::CreateAnonymousCredentials()); - if (!testbench) { + if (!emulator) { // Use a different spelling of the default endpoint. This disables the // allegedly "slightly faster" XML endpoints, but should continue to work. options.set_endpoint("https://storage.googleapis.com:443"); @@ -164,8 +164,8 @@ StatusOr CreateNonDefaultClient() { if (!creds) return std::move(creds).status(); options.set_credentials(*std::move(creds)); } else { - // Use the testbench endpoint, but not through the environment variable - options.set_endpoint(*testbench); + // Use the emulator endpoint, but not through the environment variable + options.set_endpoint(*emulator); options.set_credentials(oauth2::CreateAnonymousCredentials()); } return Client(options); diff --git a/google/cloud/storage/tests/object_checksum_integration_test.cc b/google/cloud/storage/tests/object_checksum_integration_test.cc index 0fbc5bcaf60be..054a23b606321 100644 --- a/google/cloud/storage/tests/object_checksum_integration_test.cc +++ b/google/cloud/storage/tests/object_checksum_integration_test.cc @@ -204,7 +204,7 @@ TEST_F(ObjectChecksumIntegrationTest, DefaultCrc32cInsertJSON) { Contains(StartsWith("content-type: multipart/related; boundary="))); if 
(insert_meta->has_metadata("x_emulator_upload")) { - // When running against the testbench, we have some more information to + // When running against the emulator, we have some more information to // verify the right upload type and contents were sent. EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); ASSERT_TRUE(insert_meta->has_metadata("x_emulator_crc32c")); @@ -296,8 +296,8 @@ TEST_F(ObjectChecksumIntegrationTest, DefaultCrc32cStreamingWriteJSON) { /// @test Verify that CRC32C checksum mismatches are reported by default on /// downloads. TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadXML) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -340,8 +340,8 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadXML) { /// @test Verify that CRC32C checksum mismatches are reported by default on /// downloads. TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadJSON) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -384,8 +384,8 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingReadJSON) { /// @test Verify that CRC32C checksum mismatches are reported when using /// .read(). TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadXMLRead) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. 
+ // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -420,8 +420,8 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadXMLRead) { /// @test Verify that CRC32C checksum mismatches are reported when using /// .read(). TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadJSONRead) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -457,8 +457,8 @@ TEST_F(ObjectChecksumIntegrationTest, MismatchedMD5StreamingReadJSONRead) { /// @test Verify that CRC32C checksum mismatches are reported by default on /// downloads. TEST_F(ObjectChecksumIntegrationTest, MismatchedCrc32cStreamingWriteJSON) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. 
if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/object_hash_integration_test.cc b/google/cloud/storage/tests/object_hash_integration_test.cc index ab7c5549d0db2..9d74734b465cd 100644 --- a/google/cloud/storage/tests/object_hash_integration_test.cc +++ b/google/cloud/storage/tests/object_hash_integration_test.cc @@ -96,7 +96,7 @@ TEST_F(ObjectHashIntegrationTest, DefaultMD5HashJSON) { Contains(StartsWith("content-type: multipart/related; boundary="))); if (insert_meta->has_metadata("x_emulator_upload")) { - // When running against the testbench, we have some more information to + // When running against the emulator, we have some more information to // verify the right upload type and contents were sent. EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); ASSERT_FALSE(insert_meta->has_metadata("x_emulator_md5")); @@ -158,7 +158,7 @@ TEST_F(ObjectHashIntegrationTest, DisableMD5HashJSON) { Contains(StartsWith("content-type: multipart/related; boundary="))); if (insert_meta->has_metadata("x_emulator_upload")) { - // When running against the testbench, we have some more information to + // When running against the emulator, we have some more information to // verify the right upload type and contents were sent. EXPECT_EQ("multipart", insert_meta->metadata("x_emulator_upload")); ASSERT_FALSE(insert_meta->has_metadata("x_emulator_md5")); @@ -396,8 +396,8 @@ TEST_F(ObjectHashIntegrationTest, DisableHashesStreamingWriteJSON) { /// @test Verify that MD5 hash mismatches are reported by default on downloads. TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXML) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. 
if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -438,8 +438,8 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXML) { /// @test Verify that MD5 hash mismatches are reported by default on downloads. TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSON) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -481,8 +481,8 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSON) { /// @test Verify that MD5 hash mismatches are reported when using .read(). TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXMLRead) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -516,8 +516,8 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadXMLRead) { /// @test Verify that MD5 hash mismatches are reported when using .read(). TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSONRead) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -552,8 +552,8 @@ TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingReadJSONRead) { /// @test Verify that MD5 hash mismatches are reported by default on downloads. 
TEST_F(ObjectHashIntegrationTest, MismatchedMD5StreamingWriteJSON) { - // This test is disabled when not using the testbench as it relies on the - // testbench to inject faults. + // This test is disabled when not using the emulator as it relies on the + // emulator to inject faults. if (!UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/object_list_objects_versions_integration_test.cc b/google/cloud/storage/tests/object_list_objects_versions_integration_test.cc index a0784cbdbdb96..162edb5165993 100644 --- a/google/cloud/storage/tests/object_list_objects_versions_integration_test.cc +++ b/google/cloud/storage/tests/object_list_objects_versions_integration_test.cc @@ -42,7 +42,7 @@ TEST_F(ObjectListObjectsVersionsIntegrationTest, ListObjectsVersions) { // This test requires the bucket to be configured with versioning. The buckets // used by the CI build are already configured with versioning enabled. The - // bucket created in the testbench also has versioning. Regardless, set the + // bucket created in the emulator also has versioning. Regardless, set the // bucket to the desired state, which will produce a better error message if // there is a configuration problem. auto bucket_meta = client->GetBucketMetadata(bucket_name_); diff --git a/google/cloud/storage/tests/object_media_integration_test.cc b/google/cloud/storage/tests/object_media_integration_test.cc index 0446965f5b62c..1d52d1b12acb8 100644 --- a/google/cloud/storage/tests/object_media_integration_test.cc +++ b/google/cloud/storage/tests/object_media_integration_test.cc @@ -84,7 +84,7 @@ TEST_F(ObjectMediaIntegrationTest, StreamingReadClose) { /// @test Read a portion of a relatively large object using the JSON API. TEST_F(ObjectMediaIntegrationTest, ReadRangeJSON) { - // The testbench always requires multiple iterations to copy this object. 
+ // The emulator always requires multiple iterations to copy this object. StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -125,7 +125,7 @@ TEST_F(ObjectMediaIntegrationTest, ReadRangeJSON) { /// @test Read a portion of a relatively large object using the XML API. TEST_F(ObjectMediaIntegrationTest, ReadRangeXml) { - // The testbench always requires multiple iterations to copy this object. + // The emulator always requires multiple iterations to copy this object. StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -165,7 +165,7 @@ TEST_F(ObjectMediaIntegrationTest, ReadRangeXml) { /// @test Read a portion of a relatively large object using the JSON API. TEST_F(ObjectMediaIntegrationTest, ReadFromOffsetJSON) { - // The testbench always requires multiple iterations to copy this object. + // The emulator always requires multiple iterations to copy this object. StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -206,7 +206,7 @@ TEST_F(ObjectMediaIntegrationTest, ReadFromOffsetJSON) { /// @test Read a portion of a relatively large object using the XML API. TEST_F(ObjectMediaIntegrationTest, ReadFromOffsetXml) { - // The testbench always requires multiple iterations to copy this object. + // The emulator always requires multiple iterations to copy this object. StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -246,7 +246,7 @@ TEST_F(ObjectMediaIntegrationTest, ReadFromOffsetXml) { /// @test Read a relatively large object using chunks of different sizes. TEST_F(ObjectMediaIntegrationTest, ReadMixedChunks) { - // The testbench always requires multiple iterations to copy this object. + // The emulator always requires multiple iterations to copy this object. 
StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -531,7 +531,7 @@ TEST_F(ObjectMediaIntegrationTest, ReadByChunk) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadJSON) { - ScopedEnvironment disable_testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); + ScopedEnvironment disable_emulator("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -552,7 +552,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadJSON) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadXML) { - ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); + ScopedEnvironment emulator("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -569,7 +569,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureReadXML) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteJSON) { - ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); + ScopedEnvironment emulator("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -588,7 +588,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteJSON) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureWriteXML) { - ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); + ScopedEnvironment emulator("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; @@ -619,7 +619,7 @@ TEST_F(ObjectMediaIntegrationTest, ConnectionFailureDownloadFile) { } TEST_F(ObjectMediaIntegrationTest, ConnectionFailureUploadFile) { - ScopedEnvironment testbench("CLOUD_STORAGE_EMULATOR_ENDPOINT", 
{}); + ScopedEnvironment emulator("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}); Client client{ClientOptions(oauth2::CreateAnonymousCredentials()) .set_endpoint("http://localhost:1"), LimitedErrorCountRetryPolicy(2)}; diff --git a/google/cloud/storage/tests/object_rewrite_integration_test.cc b/google/cloud/storage/tests/object_rewrite_integration_test.cc index 9ff15dbcf6d26..a5a8717f2a96a 100644 --- a/google/cloud/storage/tests/object_rewrite_integration_test.cc +++ b/google/cloud/storage/tests/object_rewrite_integration_test.cc @@ -383,7 +383,7 @@ TEST_F(ObjectRewriteIntegrationTest, RewriteEncrypted) { } TEST_F(ObjectRewriteIntegrationTest, RewriteLarge) { - // The testbench always requires multiple iterations to copy this object. + // The emulator always requires multiple iterations to copy this object. StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/signed_url_integration_test.cc b/google/cloud/storage/tests/signed_url_integration_test.cc index 36a88db50b2eb..7fe5e1a6c41e5 100644 --- a/google/cloud/storage/tests/signed_url_integration_test.cc +++ b/google/cloud/storage/tests/signed_url_integration_test.cc @@ -48,7 +48,7 @@ class SignedUrlIntegrationTest }; TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlGet) { - // The testbench does not implement signed URLs. + // The emulator does not implement signed URLs. if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -81,7 +81,7 @@ TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlGet) { } TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlPut) { - // The testbench does not implement signed URLs. + // The emulator does not implement signed URLs. 
if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -114,7 +114,7 @@ TEST_F(SignedUrlIntegrationTest, CreateV2SignedUrlPut) { } TEST_F(SignedUrlIntegrationTest, CreateV4SignedUrlGet) { - // The testbench does not implement signed URLs. + // The emulator does not implement signed URLs. if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); @@ -147,7 +147,7 @@ TEST_F(SignedUrlIntegrationTest, CreateV4SignedUrlGet) { } TEST_F(SignedUrlIntegrationTest, CreateV4SignedUrlPut) { - // The testbench does not implement signed URLs. + // The emulator does not implement signed URLs. if (UsingEmulator()) GTEST_SKIP(); StatusOr client = MakeIntegrationTestClient(); ASSERT_STATUS_OK(client); diff --git a/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc b/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc index 4874544fcf24b..fed0c840f3871 100644 --- a/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc +++ b/google/cloud/storage/tests/slow_reader_chunk_integration_test.cc @@ -56,7 +56,7 @@ TEST_F(SlowReaderChunkIntegrationTest, LongPauses) { ASSERT_STATUS_OK(source_meta); // Create an iostream to read the object back. When running against the - // testbench we can fail quickly by asking the testbench to break the stream + // emulator we can fail quickly by asking the emulator to break the stream // in the middle. 
auto make_reader = [this, object_name, &client](int64_t offset) { if (UsingEmulator()) { diff --git a/google/cloud/storage/tests/slow_reader_stream_integration_test.cc b/google/cloud/storage/tests/slow_reader_stream_integration_test.cc index a8c96fd597378..b0ea4a0b46cd9 100644 --- a/google/cloud/storage/tests/slow_reader_stream_integration_test.cc +++ b/google/cloud/storage/tests/slow_reader_stream_integration_test.cc @@ -56,7 +56,7 @@ TEST_F(SlowReaderStreamIntegrationTest, LongPauses) { ASSERT_STATUS_OK(source_meta); // Create an iostream to read the object back. When running against the - // testbench we can fail quickly by asking the testbench to break the stream + // emulator we can fail quickly by asking the emulator to break the stream // in the middle. ObjectReadStream stream; From 558708cbca23a9f7634a16d684949a32941c3a4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 16:08:02 +0700 Subject: [PATCH 11/17] rename `testbench` to `emulator` in *.sh --- .../run_integration_tests_emulator_bazel.sh | 20 +++---- .../run_integration_tests_emulator_cmake.sh | 8 +-- google/cloud/storage/emulator/gcs/project.py | 2 +- .../storage/tools/run_testbench_utils.sh | 54 +++++++++---------- 4 files changed, 42 insertions(+), 42 deletions(-) diff --git a/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh b/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh index 9d80d693b0307..704cc289284cc 100755 --- a/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh +++ b/google/cloud/storage/ci/run_integration_tests_emulator_bazel.sh @@ -39,8 +39,8 @@ if [[ "$exit_status" -ne 0 ]]; then exit "${exit_status}" fi -# Configure run_testbench_utils.sh to run the GCS testbench. -source "${PROJECT_ROOT}/google/cloud/storage/tools/run_testbench_utils.sh" +# Configure run_emulator_utils.sh to run the GCS emulator. 
+source "${PROJECT_ROOT}/google/cloud/storage/tools/run_emulator_utils.sh" # These can only run against production production_only_targets=( @@ -54,13 +54,13 @@ production_only_targets=( --test_tag_filters="integration-test" -- \ "${production_only_targets[@]}" -# `start_testbench` creates unsightly *.log files in the current directory +# `start_emulator` creates unsightly *.log files in the current directory # (which is ${PROJECT_ROOT}) and we cannot use a subshell because we want the # environment variables that it sets. pushd "${HOME}" >/dev/null -# Start the testbench on a fixed port, otherwise the Bazel cache gets +# Start the emulator on a fixed port, otherwise the Bazel cache gets # invalidated on each run. -start_testbench 8585 8000 +start_emulator 8585 8000 popd >/dev/null excluded_targets=( @@ -78,7 +78,7 @@ done # `storage_bucket_samples` binary is missing the examples that use said bucket # are missing too. EMULATOR_SHA=$(git ls-files google/cloud/storage/emulator | sort | cat | sha256sum) -testbench_args=( +emulator_args=( "--test_env=CLOUD_STORAGE_EMULATOR_ENDPOINT=${CLOUD_STORAGE_EMULATOR_ENDPOINT}" "--test_env=CLOUD_STORAGE_GRPC_ENDPOINT=${CLOUD_STORAGE_GRPC_ENDPOINT}" "--test_env=HTTPBIN_ENDPOINT=${HTTPBIN_ENDPOINT}" @@ -86,16 +86,16 @@ testbench_args=( "--test_env=GOOGLE_CLOUD_CPP_AUTO_RUN_EXAMPLES=yes" "--test_env=EMULATOR_SHA=${EMULATOR_SHA}" ) -"${BAZEL_BIN}" run "${bazel_test_args[@]}" "${testbench_args[@]}" \ +"${BAZEL_BIN}" run "${bazel_test_args[@]}" "${emulator_args[@]}" \ "//google/cloud/storage/examples:storage_bucket_samples" \ -- create-bucket-for-project \ "${GOOGLE_CLOUD_CPP_STORAGE_TEST_DESTINATION_BUCKET_NAME}" \ "${GOOGLE_CLOUD_PROJECT}" >/dev/null # We need to forward some environment variables suitable for running against -# the testbench. Note that the HMAC service account is completely invalid and +# the emulator. 
Note that the HMAC service account is completely invalid and # it is not unique to each test, neither is a problem when using the emulator. -"${BAZEL_BIN}" "${BAZEL_VERB}" "${bazel_test_args[@]}" "${testbench_args[@]}" \ +"${BAZEL_BIN}" "${BAZEL_VERB}" "${bazel_test_args[@]}" "${emulator_args[@]}" \ --test_tag_filters="integration-test" -- \ "//google/cloud/storage/...:all" \ "${excluded_targets[@]}" @@ -103,7 +103,7 @@ exit_status=$? if [[ "$exit_status" -ne 0 ]]; then source "${PROJECT_ROOT}/ci/define-dump-log.sh" - dump_log "${HOME}/testbench.log" + dump_log "${HOME}/gcs_emulator.log" fi exit "${exit_status}" diff --git a/google/cloud/storage/ci/run_integration_tests_emulator_cmake.sh b/google/cloud/storage/ci/run_integration_tests_emulator_cmake.sh index 3f88d05b5e28d..a5fb12f05485d 100755 --- a/google/cloud/storage/ci/run_integration_tests_emulator_cmake.sh +++ b/google/cloud/storage/ci/run_integration_tests_emulator_cmake.sh @@ -32,7 +32,7 @@ shift ctest_args=("$@") # Configure run_emulators_utils.sh to find the instance admin emulator. -source "${PROJECT_ROOT}/google/cloud/storage/tools/run_testbench_utils.sh" +source "${PROJECT_ROOT}/google/cloud/storage/tools/run_emulator_utils.sh" # Use the same configuration parameters as we use for testing against # production. Easier to maintain just one copy. @@ -43,7 +43,7 @@ export GOOGLE_CLOUD_CPP_STORAGE_TEST_SIGNING_KEYFILE="${PROJECT_ROOT}/google/clo export GOOGLE_CLOUD_CPP_STORAGE_TEST_SIGNING_CONFORMANCE_FILENAME="${PROJECT_ROOT}/google/cloud/storage/tests/v4_signatures.json" cd "${BINARY_DIR}" -start_testbench +start_emulator # GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME is automatically created, but we # need to create the *DESTINATION_BUCKET_NAME too. Note that when the @@ -59,12 +59,12 @@ fi ctest -R "^storage_" "${ctest_args[@]}" exit_status=$? 
-kill_testbench +kill_emulator trap '' EXIT if [[ "$exit_status" -ne 0 ]]; then source "${PROJECT_ROOT}/ci/define-dump-log.sh" - dump_log "${HOME}/testbench.log" + dump_log "${HOME}/emulator.log" fi exit "${exit_status}" diff --git a/google/cloud/storage/emulator/gcs/project.py b/google/cloud/storage/emulator/gcs/project.py index e22780211519f..699252b8c26a6 100644 --- a/google/cloud/storage/emulator/gcs/project.py +++ b/google/cloud/storage/emulator/gcs/project.py @@ -198,7 +198,7 @@ def update_hmac_key(self, access_id, payload): def get_project(project_id): """Find a project and return the GcsProject object.""" - # Dynamically create the projects. The GCS testbench does not have functions + # Dynamically create the projects. The GCS emulator does not have functions # to create projects, nor do we want to create such functions. The point is # to test the GCS client library, not the IAM client library. return VALID_PROJECTS.setdefault(project_id, GcsProject(project_id)) diff --git a/google/cloud/storage/tools/run_testbench_utils.sh b/google/cloud/storage/tools/run_testbench_utils.sh index e135dbe45d11b..4c43836f777c9 100644 --- a/google/cloud/storage/tools/run_testbench_utils.sh +++ b/google/cloud/storage/tools/run_testbench_utils.sh @@ -16,23 +16,23 @@ source module lib/io.sh -TESTBENCH_PID=0 +EMULATOR_PID=0 ################################################ # Terminate the Google Cloud Storage test bench # Globals: -# TESTBENCH_PID: the process id for the test bench +# EMULATOR_PID: the process id for the test bench # IO_COLOR_*: colorize output messages, defined in ci/lib/io.sh # Arguments: # None # Returns: # None ################################################ -kill_testbench() { +kill_emulator() { echo "${IO_COLOR_GREEN}[ -------- ]${IO_COLOR_RESET} Integration test environment tear-down." - echo -n "Killing testbench server [${TESTBENCH_PID}] ... 
" - kill "${TESTBENCH_PID}" - wait "${TESTBENCH_PID}" >/dev/null 2>&1 + echo -n "Killing emulator server [${EMULATOR_PID}] ... " + kill "${EMULATOR_PID}" + wait "${EMULATOR_PID}" >/dev/null 2>&1 echo "done." echo "${IO_COLOR_GREEN}[ ======== ]${IO_COLOR_RESET} Integration test environment tear-down." @@ -41,9 +41,9 @@ kill_testbench() { ################################################ # Start the Google Cloud Storage test bench # Globals: -# TESTBENCH_PORT: the listening port for the test bench, 8000 if not set. +# EMULATOR_PORT: the listening port for the test bench, 8000 if not set. # HTTPBIN_ENDPOINT: the httpbin endpoint on the test bench. -# TESTBENCH_PID: the process id for the test bench. +# EMULATOR_PID: the process id for the test bench. # CLOUD_STORAGE_EMULATOR_ENDPOINT: the google cloud storage endpoint for the # test bench. # IO_COLOR_*: colorize output messages, defined in lib/io.sh @@ -52,12 +52,12 @@ kill_testbench() { # Returns: # None ################################################ -start_testbench() { +start_emulator() { local port="${1:-0}" echo "${IO_COLOR_GREEN}[ -------- ]${IO_COLOR_RESET} Integration test environment set-up" - echo "Launching testbench emulator in the background" - trap kill_testbench EXIT + echo "Launching emulator emulator in the background" + trap kill_emulator EXIT gunicorn --bind "0.0.0.0:${port}" \ --worker-class sync \ @@ -65,27 +65,27 @@ start_testbench() { --access-logfile - \ --chdir "${PROJECT_ROOT}/google/cloud/storage/emulator" \ "emulator:run()" \ - >testbench.log 2>&1 gcs_emulator.log 2>&1 &2 - cat testbench.log + if [[ -z "${emulator_port}" ]]; then + echo "${IO_COLOR_RED}Cannot find listening port for emulator.${IO_COLOR_RESET}" >&2 + cat gcs_emulator.log exit 1 fi - export HTTPBIN_ENDPOINT="http://localhost:${testbench_port}/httpbin" - export CLOUD_STORAGE_EMULATOR_ENDPOINT="http://localhost:${testbench_port}" + export HTTPBIN_ENDPOINT="http://localhost:${emulator_port}/httpbin" + export 
CLOUD_STORAGE_EMULATOR_ENDPOINT="http://localhost:${emulator_port}" delay=1 connected=no @@ -99,23 +99,23 @@ start_testbench() { done if [[ "${connected}" = "no" ]]; then - echo "${IO_COLOR_RED}Cannot connect to testbench; aborting test.${IO_COLOR_RESET}" >&2 - cat testbench.log + echo "${IO_COLOR_RED}Cannot connect to emulator; aborting test.${IO_COLOR_RESET}" >&2 + cat gcs_emulator.log exit 1 else - echo "Successfully connected to testbench [${TESTBENCH_PID}]" + echo "Successfully connected to emulator [${EMULATOR_PID}]" fi port="${2:-0}" local grpc_port="" - grpc_port=$(curl -s --retry 5 --retry-max-time 40 "http://localhost:${testbench_port}/start_grpc?port=${port}") + grpc_port=$(curl -s --retry 5 --retry-max-time 40 "http://localhost:${emulator_port}/start_grpc?port=${port}") if [ "${grpc_port}" -eq "${grpc_port}" ] 2>/dev/null; then echo "Successfully connected to gRPC server at port ${grpc_port}" else echo "${IO_COLOR_RED}${grpc_port} must be an integer" >&2 echo "${IO_COLOR_RED}Cannot connect to gRPC server; aborting test.${IO_COLOR_RESET}" >&2 - cat testbench.log + cat gcs_emulator.log exit 1 fi export CLOUD_STORAGE_GRPC_ENDPOINT="localhost:${grpc_port}" From 20946d46da8b02956ac1cb4536da96c57a77f00d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 18 Nov 2020 20:56:00 +0700 Subject: [PATCH 12/17] rename `run_testbench_utils.sh` to `run_emulator_utils.sh` --- .../tools/{run_testbench_utils.sh => run_emulator_utils.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename google/cloud/storage/tools/{run_testbench_utils.sh => run_emulator_utils.sh} (100%) diff --git a/google/cloud/storage/tools/run_testbench_utils.sh b/google/cloud/storage/tools/run_emulator_utils.sh similarity index 100% rename from google/cloud/storage/tools/run_testbench_utils.sh rename to google/cloud/storage/tools/run_emulator_utils.sh From 6f779e65595fddf6ff962f2aa912f708d433a905 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Tue, 24 Nov 2020 22:22:48 +0700 Subject: [PATCH 13/17] Add `GetEmulator()` --- google/cloud/storage/client_options.cc | 22 +++++++++++++-------- google/cloud/storage/client_options_test.cc | 16 +++++++++++++++ 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/google/cloud/storage/client_options.cc b/google/cloud/storage/client_options.cc index 30845a63c78e4..cd069c7a725b8 100644 --- a/google/cloud/storage/client_options.cc +++ b/google/cloud/storage/client_options.cc @@ -32,22 +32,28 @@ using ::google::cloud::internal::GetEnv; namespace internal { +absl::optional GetEmulator() { + auto emulator = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + if (emulator) return emulator; + return GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT"); +} + std::string JsonEndpoint(ClientOptions const& options) { - return GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT").value_or(options.endpoint_) + - "/storage/" + options.version(); + return GetEmulator().value_or(options.endpoint_) + "/storage/" + + options.version(); } std::string JsonUploadEndpoint(ClientOptions const& options) { - return GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT").value_or(options.endpoint_) + - "/upload/storage/" + options.version(); + return GetEmulator().value_or(options.endpoint_) + "/upload/storage/" + + options.version(); } std::string XmlEndpoint(ClientOptions const& options) { - return GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT").value_or(options.endpoint_); + return GetEmulator().value_or(options.endpoint_); } std::string IamEndpoint(ClientOptions const& options) { - auto emulator = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + auto emulator = GetEmulator(); if (emulator) return *emulator + "/iamapi"; return options.iam_endpoint(); } @@ -57,7 +63,7 @@ std::string IamEndpoint(ClientOptions const& options) { namespace { StatusOr> StorageDefaultCredentials( ChannelOptions const& channel_options) { - auto emulator = 
cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT"); + auto emulator = internal::GetEmulator(); if (emulator.has_value()) { return StatusOr>( oauth2::CreateAnonymousCredentials()); @@ -124,7 +130,7 @@ ClientOptions::ClientOptions(std::shared_ptr credentials, download_stall_timeout_( GOOGLE_CLOUD_CPP_STORAGE_DEFAULT_DOWNLOAD_STALL_TIMEOUT), channel_options_(std::move(channel_options)) { - auto emulator = GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + auto emulator = internal::GetEmulator(); if (emulator.has_value()) { endpoint_ = *emulator; iam_endpoint_ = *emulator + "/iamapi"; diff --git a/google/cloud/storage/client_options_test.cc b/google/cloud/storage/client_options_test.cc index ed978b833f3d2..f6d92cf9332d6 100644 --- a/google/cloud/storage/client_options_test.cc +++ b/google/cloud/storage/client_options_test.cc @@ -32,6 +32,7 @@ class ClientOptionsTest : public ::testing::Test { ClientOptionsTest() : enable_tracing_("CLOUD_STORAGE_ENABLE_TRACING", {}), endpoint_("CLOUD_STORAGE_EMULATOR_ENDPOINT", {}), + old_endpoint_("CLOUD_STORAGE_TESTBENCH_ENDPOINT", {}), generator_(std::random_device{}()) {} std::string CreateRandomFileName() { @@ -46,6 +47,7 @@ class ClientOptionsTest : public ::testing::Test { protected: testing_util::ScopedEnvironment enable_tracing_; testing_util::ScopedEnvironment endpoint_; + testing_util::ScopedEnvironment old_endpoint_; google::cloud::internal::DefaultPRNG generator_; }; @@ -164,6 +166,20 @@ TEST_F(ClientOptionsTest, EndpointsEmulator) { EXPECT_EQ("http://localhost:1234/iamapi", internal::IamEndpoint(options)); } +TEST_F(ClientOptionsTest, OldEndpointsEmulator) { + google::cloud::internal::UnsetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + testing_util::ScopedEnvironment endpoint("CLOUD_STORAGE_TESTBENCH_ENDPOINT", + "http://localhost:1234"); + ClientOptions options(oauth2::CreateAnonymousCredentials()); + EXPECT_EQ("http://localhost:1234", options.endpoint()); + EXPECT_EQ("http://localhost:1234/storage/v1", + 
internal::JsonEndpoint(options)); + EXPECT_EQ("http://localhost:1234/upload/storage/v1", + internal::JsonUploadEndpoint(options)); + EXPECT_EQ("http://localhost:1234", internal::XmlEndpoint(options)); + EXPECT_EQ("http://localhost:1234/iamapi", internal::IamEndpoint(options)); +} + TEST_F(ClientOptionsTest, SetVersion) { ClientOptions options(oauth2::CreateAnonymousCredentials()); options.set_version("vTest"); From 948663c955df309ee9d74dcc92513e3ad5fd8ca5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Tue, 24 Nov 2020 23:03:30 +0700 Subject: [PATCH 14/17] Fix `UsingEmulator` --- google/cloud/storage/examples/storage_examples_common.cc | 8 +++++--- google/cloud/storage/testing/storage_integration_test.cc | 5 ++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/google/cloud/storage/examples/storage_examples_common.cc b/google/cloud/storage/examples/storage_examples_common.cc index f16cd414dca0e..df9f9cc7b5eda 100644 --- a/google/cloud/storage/examples/storage_examples_common.cc +++ b/google/cloud/storage/examples/storage_examples_common.cc @@ -24,9 +24,11 @@ namespace storage { namespace examples { bool UsingEmulator() { - return !google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT") - .value_or("") - .empty(); + auto emulator = + google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + if (emulator) return true; + return google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") + .has_value(); } std::string MakeRandomBucketName(google::cloud::internal::DefaultPRNG& gen) { diff --git a/google/cloud/storage/testing/storage_integration_test.cc b/google/cloud/storage/testing/storage_integration_test.cc index 918e1aedac907..417b2d424fe8d 100644 --- a/google/cloud/storage/testing/storage_integration_test.cc +++ b/google/cloud/storage/testing/storage_integration_test.cc @@ -160,7 +160,10 @@ EncryptionKeyData StorageIntegrationTest::MakeEncryptionKeyData() { } bool 
StorageIntegrationTest::UsingEmulator() { - return google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT") + auto emulator = + google::cloud::internal::GetEnv("CLOUD_STORAGE_EMULATOR_ENDPOINT"); + if (emulator) return true; + return google::cloud::internal::GetEnv("CLOUD_STORAGE_TESTBENCH_ENDPOINT") .has_value(); } From 7e9d62ebe03af09fe8daac4f9aeb6a850dd59730 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Tue, 24 Nov 2020 23:41:37 +0700 Subject: [PATCH 15/17] add `extract_instructions` --- google/cloud/storage/emulator/gcs/object.py | 9 ++++----- google/cloud/storage/emulator/utils/common.py | 17 +++++++++++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/google/cloud/storage/emulator/gcs/object.py b/google/cloud/storage/emulator/gcs/object.py index ea864ca3873b4..00686ee721913 100644 --- a/google/cloud/storage/emulator/gcs/object.py +++ b/google/cloud/storage/emulator/gcs/object.py @@ -89,10 +89,9 @@ def __insert_predefined_acl(cls, metadata, bucket, predefined_acl, context): def init( cls, request, metadata, media, bucket, is_destination, context, rest_only=None ): - if context is None: - instruction = request.headers.get("x-goog-emulator-instructions") - if instruction == "inject-upload-data-error": - media = utils.common.corrupt_media(media) + instruction = utils.common.extract_instruction(request, context) + if instruction == "inject-upload-data-error": + media = utils.common.corrupt_media(media) timestamp = datetime.datetime.now(datetime.timezone.utc) metadata.bucket = bucket.name metadata.generation = random.getrandbits(63) @@ -417,7 +416,7 @@ def rest_media(self, request): streamer, length, headers = None, len(response_payload), {} content_range = "bytes %d-%d/%d" % (begin, end - 1, length) - instructions = request.headers.get("x-goog-emulator-instructions") + instructions = utils.common.extract_instruction(request, None) if instructions == "return-broken-stream": headers["Content-Length"] = 
length diff --git a/google/cloud/storage/emulator/utils/common.py b/google/cloud/storage/emulator/utils/common.py index 2a6f174b4a3da..a4cab12993364 100644 --- a/google/cloud/storage/emulator/utils/common.py +++ b/google/cloud/storage/emulator/utils/common.py @@ -309,3 +309,20 @@ def corrupt_media(media): if not media: return bytearray(random.sample("abcdefghijklmnopqrstuvwxyz", 1), "utf-8") return b"B" + media[1:] if media[0:1] == b"A" else b"A" + media[1:] + + +# === HEADERS === # + + +def extract_instruction(request, context): + instruction = None + if context is not None: + if hasattr(context, "invocation_metadata"): + for key, value in context.invocation_metadata(): + if key == "x-goog-emulator-instructions": + instruction = value + else: + instruction = request.headers.get("x-goog-emulator-instructions") + if instruction is None: + instruction = request.headers.get("x-goog-testbench-instructions") + return instruction From 52383284cd36b5aaa652738a66b5d8628875c929 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 25 Nov 2020 00:01:35 +0700 Subject: [PATCH 16/17] add *testbench*` to response. 
--- google/cloud/storage/emulator/gcs/object.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/google/cloud/storage/emulator/gcs/object.py b/google/cloud/storage/emulator/gcs/object.py index 00686ee721913..2839a85afdd56 100644 --- a/google/cloud/storage/emulator/gcs/object.py +++ b/google/cloud/storage/emulator/gcs/object.py @@ -378,6 +378,13 @@ def rest(cls, metadata, rest_only): struct.pack(">I", response["crc32c"]) ).decode("utf-8") response.update(rest_only) + old_metadata = {} + if "metadata" in response: + for key, value in response["metadata"].items(): + if "emulator" in key: + old_key = key.replace("emulator", "testbench") + old_metadata[old_key] = value + response["metadata"].update(old_metadata) return response def rest_metadata(self): From 71b0cd938c8d2a1fd2a373c5296ac1c8a42ea561 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=B5=20V=C4=83n=20Ngh=C4=A9a?= Date: Wed, 25 Nov 2020 00:30:17 +0700 Subject: [PATCH 17/17] Document --- google/cloud/storage/doc/storage-main.dox | 3 +++ google/cloud/storage/emulator/README.md | 3 +++ 2 files changed, 6 insertions(+) diff --git a/google/cloud/storage/doc/storage-main.dox b/google/cloud/storage/doc/storage-main.dox index af54837d3f362..718e2291e3c3c 100644 --- a/google/cloud/storage/doc/storage-main.dox +++ b/google/cloud/storage/doc/storage-main.dox @@ -61,6 +61,9 @@ which should give you a taste of the Cloud Storage C++ client library API. - `CLOUD_STORAGE_EMULATOR_ENDPOINT=...` override the default endpoint used by the library, intended for testing only. +- `CLOUD_STORAGE_TESTBENCH_ENDPOINT=...` **DEPRECATED** + please use `CLOUD_STORAGE_EMULATOR_ENDPOINT` instead. 
+ ### Experimental - `GOOGLE_CLOUD_CPP_STORAGE_REST_CONFIG=...` configuration for the REST diff --git a/google/cloud/storage/emulator/README.md b/google/cloud/storage/emulator/README.md index 8c3ab921ac316..0766ac48b12e8 100644 --- a/google/cloud/storage/emulator/README.md +++ b/google/cloud/storage/emulator/README.md @@ -39,6 +39,9 @@ CLOUD_STORAGE_GRPC_ENDPOINT=localhost:8000 # For gRPC API ## Force Failures You can force the following failures by using the `x-goog-emulator-instructions` header. +The `x-goog-testbench-instructions` header is deprecated, but supported for +backwards compatibility and provides the same functionality as +`x-goog-emulator-instructions`. Please change your code to use `x-goog-emulator-instructions` instead. ### return-broken-stream