diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml
index ae6c57fa..cb89b2e3 100644
--- a/.github/.OwlBot.lock.yaml
+++ b/.github/.OwlBot.lock.yaml
@@ -1,3 +1,3 @@
docker:
image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest
- digest: sha256:82b12321da4446a73cb11bcb6812fbec8c105abda3946d46e6394e5fbfb64c0f
+ digest: sha256:ec49167c606648a063d1222220b48119c912562849a0528f35bfb592a9f72737
diff --git a/.kokoro/docs/common.cfg b/.kokoro/docs/common.cfg
index 41eeb4d7..25cb338f 100644
--- a/.kokoro/docs/common.cfg
+++ b/.kokoro/docs/common.cfg
@@ -30,6 +30,7 @@ env_vars: {
env_vars: {
key: "V2_STAGING_BUCKET"
+ # Push google cloud library docs to the Cloud RAD bucket `docs-staging-v2`
value: "docs-staging-v2"
}
diff --git a/.kokoro/samples/lint/common.cfg b/.kokoro/samples/lint/common.cfg
index 2a372d92..936dd962 100644
--- a/.kokoro/samples/lint/common.cfg
+++ b/.kokoro/samples/lint/common.cfg
@@ -31,4 +31,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-dataproc/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-dataproc/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.10/common.cfg b/.kokoro/samples/python3.10/common.cfg
new file mode 100644
index 00000000..552f3dde
--- /dev/null
+++ b/.kokoro/samples/python3.10/common.cfg
@@ -0,0 +1,40 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+# Build logs will be here
+action {
+ define_artifacts {
+ regex: "**/*sponge_log.xml"
+ }
+}
+
+# Specify which tests to run
+env_vars: {
+ key: "RUN_TESTS_SESSION"
+ value: "py-3.10"
+}
+
+# Declare build specific Cloud project.
+env_vars: {
+ key: "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ value: "python-docs-samples-tests-310"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-dataproc/.kokoro/test-samples.sh"
+}
+
+# Configure the docker image for kokoro-trampoline.
+env_vars: {
+ key: "TRAMPOLINE_IMAGE"
+ value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker"
+}
+
+# Download secrets for samples
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
+
+# Download trampoline resources.
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
+
+# Use the trampoline script to run in docker.
+build_file: "python-dataproc/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.10/continuous.cfg b/.kokoro/samples/python3.10/continuous.cfg
new file mode 100644
index 00000000..a1c8d975
--- /dev/null
+++ b/.kokoro/samples/python3.10/continuous.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.10/periodic-head.cfg b/.kokoro/samples/python3.10/periodic-head.cfg
new file mode 100644
index 00000000..07ff9c8c
--- /dev/null
+++ b/.kokoro/samples/python3.10/periodic-head.cfg
@@ -0,0 +1,11 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
+
+env_vars: {
+ key: "TRAMPOLINE_BUILD_FILE"
+ value: "github/python-dataproc/.kokoro/test-samples-against-head.sh"
+}
diff --git a/.kokoro/samples/python3.10/periodic.cfg b/.kokoro/samples/python3.10/periodic.cfg
new file mode 100644
index 00000000..71cd1e59
--- /dev/null
+++ b/.kokoro/samples/python3.10/periodic.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "False"
+}
diff --git a/.kokoro/samples/python3.10/presubmit.cfg b/.kokoro/samples/python3.10/presubmit.cfg
new file mode 100644
index 00000000..a1c8d975
--- /dev/null
+++ b/.kokoro/samples/python3.10/presubmit.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+ key: "INSTALL_LIBRARY_FROM_SOURCE"
+ value: "True"
+}
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/common.cfg b/.kokoro/samples/python3.6/common.cfg
index c0dd20b7..af20dd24 100644
--- a/.kokoro/samples/python3.6/common.cfg
+++ b/.kokoro/samples/python3.6/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-dataproc/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-dataproc/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.6/periodic.cfg b/.kokoro/samples/python3.6/periodic.cfg
index 50fec964..71cd1e59 100644
--- a/.kokoro/samples/python3.6/periodic.cfg
+++ b/.kokoro/samples/python3.6/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.7/common.cfg b/.kokoro/samples/python3.7/common.cfg
index e6abf059..5a2751ef 100644
--- a/.kokoro/samples/python3.7/common.cfg
+++ b/.kokoro/samples/python3.7/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-dataproc/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-dataproc/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.7/periodic.cfg b/.kokoro/samples/python3.7/periodic.cfg
index 50fec964..71cd1e59 100644
--- a/.kokoro/samples/python3.7/periodic.cfg
+++ b/.kokoro/samples/python3.7/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.8/common.cfg b/.kokoro/samples/python3.8/common.cfg
index 2ede3de9..0143c95b 100644
--- a/.kokoro/samples/python3.8/common.cfg
+++ b/.kokoro/samples/python3.8/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-dataproc/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-dataproc/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.8/periodic.cfg b/.kokoro/samples/python3.8/periodic.cfg
index 50fec964..71cd1e59 100644
--- a/.kokoro/samples/python3.8/periodic.cfg
+++ b/.kokoro/samples/python3.8/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/samples/python3.9/common.cfg b/.kokoro/samples/python3.9/common.cfg
index 1d37eb32..ab97a74d 100644
--- a/.kokoro/samples/python3.9/common.cfg
+++ b/.kokoro/samples/python3.9/common.cfg
@@ -37,4 +37,4 @@ gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples"
gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline"
# Use the trampoline script to run in docker.
-build_file: "python-dataproc/.kokoro/trampoline.sh"
\ No newline at end of file
+build_file: "python-dataproc/.kokoro/trampoline_v2.sh"
\ No newline at end of file
diff --git a/.kokoro/samples/python3.9/periodic.cfg b/.kokoro/samples/python3.9/periodic.cfg
index 50fec964..71cd1e59 100644
--- a/.kokoro/samples/python3.9/periodic.cfg
+++ b/.kokoro/samples/python3.9/periodic.cfg
@@ -3,4 +3,4 @@
env_vars: {
key: "INSTALL_LIBRARY_FROM_SOURCE"
value: "False"
-}
\ No newline at end of file
+}
diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh
index 68eddb57..ba3a707b 100755
--- a/.kokoro/test-samples-against-head.sh
+++ b/.kokoro/test-samples-against-head.sh
@@ -23,6 +23,4 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
-cd github/python-dataproc
-
exec .kokoro/test-samples-impl.sh
diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh
index fbde31d4..11c042d3 100755
--- a/.kokoro/test-samples.sh
+++ b/.kokoro/test-samples.sh
@@ -24,8 +24,6 @@ set -eo pipefail
# Enables `**` to include files nested inside sub-folders
shopt -s globstar
-cd github/python-dataproc
-
# Run periodic samples tests at latest release
if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"periodic"* ]]; then
# preserving the test runner implementation.
diff --git a/.trampolinerc b/.trampolinerc
index 383b6ec8..0eee72ab 100644
--- a/.trampolinerc
+++ b/.trampolinerc
@@ -16,15 +16,26 @@
# Add required env vars here.
required_envvars+=(
- "STAGING_BUCKET"
- "V2_STAGING_BUCKET"
)
# Add env vars which are passed down into the container here.
pass_down_envvars+=(
+ "NOX_SESSION"
+ ###############
+ # Docs builds
+ ###############
"STAGING_BUCKET"
"V2_STAGING_BUCKET"
- "NOX_SESSION"
+ ##################
+ # Samples builds
+ ##################
+ "INSTALL_LIBRARY_FROM_SOURCE"
+ "RUN_TESTS_SESSION"
+ "BUILD_SPECIFIC_GCLOUD_PROJECT"
+ # Target directories.
+ "RUN_TESTS_DIRS"
+ # The nox session to run.
+ "RUN_TESTS_SESSION"
)
# Prevent unintentional override on the default image.
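
To make the new pass-down list concrete, here is an illustrative Python sketch of how a samples session inside the container might read these variables; the variable names come from the config above, while the defaults and the logic are assumptions, not the real test runner:

```python
import os

# Illustrative only: read the samples-build variables that .trampolinerc
# now passes into the Docker container. The defaults are assumptions.
install_from_source = os.getenv("INSTALL_LIBRARY_FROM_SOURCE", "False") == "True"
nox_session = os.getenv("RUN_TESTS_SESSION", "")
gcloud_project = os.getenv("BUILD_SPECIFIC_GCLOUD_PROJECT", "")

print(
    f"session={nox_session!r} project={gcloud_project!r} "
    f"from_source={install_from_source}"
)
```
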
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 502a793e..47b5ae91 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,16 @@
[1]: https://pypi.org/project/google-cloud-dataproc/#history
+## [3.1.0](https://www.github.com/googleapis/python-dataproc/compare/v3.0.0...v3.1.0) (2021-10-26)
+
+
+### Features
+
+* add context manager support in client ([#285](https://www.github.com/googleapis/python-dataproc/issues/285)) ([b54fb76](https://www.github.com/googleapis/python-dataproc/commit/b54fb7647deaea64fe6ad553514c9d0ad62a0cbc))
+* add Dataproc Serverless for Spark Batches API ([#290](https://www.github.com/googleapis/python-dataproc/issues/290)) ([f0ed26c](https://www.github.com/googleapis/python-dataproc/commit/f0ed26c6ccd2e9f438d1d5f31c5512761b0e20b9))
+* Add support for dataproc BatchController service ([#291](https://www.github.com/googleapis/python-dataproc/issues/291)) ([24a6f7d](https://www.github.com/googleapis/python-dataproc/commit/24a6f7defee1e0fd2d195f934c004769d8f1a2b7))
+* add support for python 3.10 ([#289](https://www.github.com/googleapis/python-dataproc/issues/289)) ([229f919](https://www.github.com/googleapis/python-dataproc/commit/229f919e31c39bc028cd2e6062437b0a8d061556))
+
## [3.0.0](https://www.github.com/googleapis/python-dataproc/compare/v2.6.0...v3.0.0) (2021-10-04)
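
A minimal sketch of two of the 3.1.0 features listed above, context-manager support and the new Batches surface, assuming default application credentials; the project and region are hypothetical placeholders:

```python
from google.cloud import dataproc_v1

# Clients can now be used as context managers, which closes the
# underlying transport on exit, and the new BatchController covers
# Dataproc Serverless batch workloads.
with dataproc_v1.BatchControllerClient() as client:
    for batch in client.list_batches(
        parent="projects/my-project/locations/us-central1"  # hypothetical
    ):
        print(batch.name, batch.state)
```
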
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index d4180de8..6e4192b9 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -22,7 +22,7 @@ In order to add a feature:
documentation.
- The feature must work fully on the following CPython versions:
- 3.6, 3.7, 3.8 and 3.9 on both UNIX and Windows.
+ 3.6, 3.7, 3.8, 3.9 and 3.10 on both UNIX and Windows.
- The feature must not add unnecessary dependencies (where
"unnecessary" is of course subjective, but new dependencies should
@@ -72,7 +72,7 @@ We use `nox <https://nox.readthedocs.io/en/latest/>`__ to instrument our tests.
- To run a single unit test::
- $ nox -s unit-3.9 -- -k <name of test>
+ $ nox -s unit-3.10 -- -k <name of test>
.. note::
@@ -225,11 +225,13 @@ We support:
- `Python 3.7`_
- `Python 3.8`_
- `Python 3.9`_
+- `Python 3.10`_
.. _Python 3.6: https://docs.python.org/3.6/
.. _Python 3.7: https://docs.python.org/3.7/
.. _Python 3.8: https://docs.python.org/3.8/
.. _Python 3.9: https://docs.python.org/3.9/
+.. _Python 3.10: https://docs.python.org/3.10/
Supported versions can be found in our ``noxfile.py`` `config`_.
diff --git a/docs/dataproc_v1/batch_controller.rst b/docs/dataproc_v1/batch_controller.rst
new file mode 100644
index 00000000..e28563d2
--- /dev/null
+++ b/docs/dataproc_v1/batch_controller.rst
@@ -0,0 +1,10 @@
+BatchController
+---------------------------------
+
+.. automodule:: google.cloud.dataproc_v1.services.batch_controller
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.dataproc_v1.services.batch_controller.pagers
+ :members:
+ :inherited-members:
diff --git a/docs/dataproc_v1/services.rst b/docs/dataproc_v1/services.rst
index 9d91e7ce..ae0031d1 100644
--- a/docs/dataproc_v1/services.rst
+++ b/docs/dataproc_v1/services.rst
@@ -4,6 +4,7 @@ Services for Google Cloud Dataproc v1 API
:maxdepth: 2
autoscaling_policy_service
+ batch_controller
cluster_controller
job_controller
workflow_template_service
diff --git a/google/cloud/dataproc/__init__.py b/google/cloud/dataproc/__init__.py
index 90d84a64..5ea01691 100644
--- a/google/cloud/dataproc/__init__.py
+++ b/google/cloud/dataproc/__init__.py
@@ -20,6 +20,12 @@
from google.cloud.dataproc_v1.services.autoscaling_policy_service.async_client import (
AutoscalingPolicyServiceAsyncClient,
)
+from google.cloud.dataproc_v1.services.batch_controller.client import (
+ BatchControllerClient,
+)
+from google.cloud.dataproc_v1.services.batch_controller.async_client import (
+ BatchControllerAsyncClient,
+)
from google.cloud.dataproc_v1.services.cluster_controller.client import (
ClusterControllerClient,
)
@@ -65,12 +71,23 @@
from google.cloud.dataproc_v1.types.autoscaling_policies import (
UpdateAutoscalingPolicyRequest,
)
+from google.cloud.dataproc_v1.types.batches import Batch
+from google.cloud.dataproc_v1.types.batches import CreateBatchRequest
+from google.cloud.dataproc_v1.types.batches import DeleteBatchRequest
+from google.cloud.dataproc_v1.types.batches import GetBatchRequest
+from google.cloud.dataproc_v1.types.batches import ListBatchesRequest
+from google.cloud.dataproc_v1.types.batches import ListBatchesResponse
+from google.cloud.dataproc_v1.types.batches import PySparkBatch
+from google.cloud.dataproc_v1.types.batches import SparkBatch
+from google.cloud.dataproc_v1.types.batches import SparkRBatch
+from google.cloud.dataproc_v1.types.batches import SparkSqlBatch
from google.cloud.dataproc_v1.types.clusters import AcceleratorConfig
from google.cloud.dataproc_v1.types.clusters import AutoscalingConfig
from google.cloud.dataproc_v1.types.clusters import Cluster
from google.cloud.dataproc_v1.types.clusters import ClusterConfig
from google.cloud.dataproc_v1.types.clusters import ClusterMetrics
from google.cloud.dataproc_v1.types.clusters import ClusterStatus
+from google.cloud.dataproc_v1.types.clusters import ConfidentialInstanceConfig
from google.cloud.dataproc_v1.types.clusters import CreateClusterRequest
from google.cloud.dataproc_v1.types.clusters import DeleteClusterRequest
from google.cloud.dataproc_v1.types.clusters import DiagnoseClusterRequest
@@ -122,9 +139,17 @@
from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest
from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest
from google.cloud.dataproc_v1.types.jobs import YarnApplication
+from google.cloud.dataproc_v1.types.operations import BatchOperationMetadata
from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata
from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus
+from google.cloud.dataproc_v1.types.shared import EnvironmentConfig
+from google.cloud.dataproc_v1.types.shared import ExecutionConfig
+from google.cloud.dataproc_v1.types.shared import PeripheralsConfig
+from google.cloud.dataproc_v1.types.shared import RuntimeConfig
+from google.cloud.dataproc_v1.types.shared import RuntimeInfo
+from google.cloud.dataproc_v1.types.shared import SparkHistoryServerConfig
from google.cloud.dataproc_v1.types.shared import Component
+from google.cloud.dataproc_v1.types.shared import FailureAction
from google.cloud.dataproc_v1.types.workflow_templates import ClusterOperation
from google.cloud.dataproc_v1.types.workflow_templates import ClusterSelector
from google.cloud.dataproc_v1.types.workflow_templates import (
@@ -164,6 +189,8 @@
__all__ = (
"AutoscalingPolicyServiceClient",
"AutoscalingPolicyServiceAsyncClient",
+ "BatchControllerClient",
+ "BatchControllerAsyncClient",
"ClusterControllerClient",
"ClusterControllerAsyncClient",
"JobControllerClient",
@@ -180,12 +207,23 @@
"ListAutoscalingPoliciesRequest",
"ListAutoscalingPoliciesResponse",
"UpdateAutoscalingPolicyRequest",
+ "Batch",
+ "CreateBatchRequest",
+ "DeleteBatchRequest",
+ "GetBatchRequest",
+ "ListBatchesRequest",
+ "ListBatchesResponse",
+ "PySparkBatch",
+ "SparkBatch",
+ "SparkRBatch",
+ "SparkSqlBatch",
"AcceleratorConfig",
"AutoscalingConfig",
"Cluster",
"ClusterConfig",
"ClusterMetrics",
"ClusterStatus",
+ "ConfidentialInstanceConfig",
"CreateClusterRequest",
"DeleteClusterRequest",
"DiagnoseClusterRequest",
@@ -237,9 +275,17 @@
"SubmitJobRequest",
"UpdateJobRequest",
"YarnApplication",
+ "BatchOperationMetadata",
"ClusterOperationMetadata",
"ClusterOperationStatus",
+ "EnvironmentConfig",
+ "ExecutionConfig",
+ "PeripheralsConfig",
+ "RuntimeConfig",
+ "RuntimeInfo",
+ "SparkHistoryServerConfig",
"Component",
+ "FailureAction",
"ClusterOperation",
"ClusterSelector",
"CreateWorkflowTemplateRequest",
diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py
index 22af0899..2c9ea7da 100644
--- a/google/cloud/dataproc_v1/__init__.py
+++ b/google/cloud/dataproc_v1/__init__.py
@@ -16,6 +16,8 @@
from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient
from .services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient
+from .services.batch_controller import BatchControllerClient
+from .services.batch_controller import BatchControllerAsyncClient
from .services.cluster_controller import ClusterControllerClient
from .services.cluster_controller import ClusterControllerAsyncClient
from .services.job_controller import JobControllerClient
@@ -33,12 +35,23 @@
from .types.autoscaling_policies import ListAutoscalingPoliciesRequest
from .types.autoscaling_policies import ListAutoscalingPoliciesResponse
from .types.autoscaling_policies import UpdateAutoscalingPolicyRequest
+from .types.batches import Batch
+from .types.batches import CreateBatchRequest
+from .types.batches import DeleteBatchRequest
+from .types.batches import GetBatchRequest
+from .types.batches import ListBatchesRequest
+from .types.batches import ListBatchesResponse
+from .types.batches import PySparkBatch
+from .types.batches import SparkBatch
+from .types.batches import SparkRBatch
+from .types.batches import SparkSqlBatch
from .types.clusters import AcceleratorConfig
from .types.clusters import AutoscalingConfig
from .types.clusters import Cluster
from .types.clusters import ClusterConfig
from .types.clusters import ClusterMetrics
from .types.clusters import ClusterStatus
+from .types.clusters import ConfidentialInstanceConfig
from .types.clusters import CreateClusterRequest
from .types.clusters import DeleteClusterRequest
from .types.clusters import DiagnoseClusterRequest
@@ -90,9 +103,17 @@
from .types.jobs import SubmitJobRequest
from .types.jobs import UpdateJobRequest
from .types.jobs import YarnApplication
+from .types.operations import BatchOperationMetadata
from .types.operations import ClusterOperationMetadata
from .types.operations import ClusterOperationStatus
+from .types.shared import EnvironmentConfig
+from .types.shared import ExecutionConfig
+from .types.shared import PeripheralsConfig
+from .types.shared import RuntimeConfig
+from .types.shared import RuntimeInfo
+from .types.shared import SparkHistoryServerConfig
from .types.shared import Component
+from .types.shared import FailureAction
from .types.workflow_templates import ClusterOperation
from .types.workflow_templates import ClusterSelector
from .types.workflow_templates import CreateWorkflowTemplateRequest
@@ -117,6 +138,7 @@
__all__ = (
"AutoscalingPolicyServiceAsyncClient",
+ "BatchControllerAsyncClient",
"ClusterControllerAsyncClient",
"JobControllerAsyncClient",
"WorkflowTemplateServiceAsyncClient",
@@ -126,6 +148,9 @@
"AutoscalingPolicyServiceClient",
"BasicAutoscalingAlgorithm",
"BasicYarnAutoscalingConfig",
+ "Batch",
+ "BatchControllerClient",
+ "BatchOperationMetadata",
"CancelJobRequest",
"Cluster",
"ClusterConfig",
@@ -137,10 +162,13 @@
"ClusterSelector",
"ClusterStatus",
"Component",
+ "ConfidentialInstanceConfig",
"CreateAutoscalingPolicyRequest",
+ "CreateBatchRequest",
"CreateClusterRequest",
"CreateWorkflowTemplateRequest",
"DeleteAutoscalingPolicyRequest",
+ "DeleteBatchRequest",
"DeleteClusterRequest",
"DeleteJobRequest",
"DeleteWorkflowTemplateRequest",
@@ -149,8 +177,12 @@
"DiskConfig",
"EncryptionConfig",
"EndpointConfig",
+ "EnvironmentConfig",
+ "ExecutionConfig",
+ "FailureAction",
"GceClusterConfig",
"GetAutoscalingPolicyRequest",
+ "GetBatchRequest",
"GetClusterRequest",
"GetJobRequest",
"GetWorkflowTemplateRequest",
@@ -173,6 +205,8 @@
"LifecycleConfig",
"ListAutoscalingPoliciesRequest",
"ListAutoscalingPoliciesResponse",
+ "ListBatchesRequest",
+ "ListBatchesResponse",
"ListClustersRequest",
"ListClustersResponse",
"ListJobsRequest",
@@ -187,17 +221,25 @@
"NodeInitializationAction",
"OrderedJob",
"ParameterValidation",
+ "PeripheralsConfig",
"PigJob",
"PrestoJob",
+ "PySparkBatch",
"PySparkJob",
"QueryList",
"RegexValidation",
"ReservationAffinity",
+ "RuntimeConfig",
+ "RuntimeInfo",
"SecurityConfig",
"ShieldedInstanceConfig",
"SoftwareConfig",
+ "SparkBatch",
+ "SparkHistoryServerConfig",
"SparkJob",
+ "SparkRBatch",
"SparkRJob",
+ "SparkSqlBatch",
"SparkSqlJob",
"StartClusterRequest",
"StopClusterRequest",
diff --git a/google/cloud/dataproc_v1/gapic_metadata.json b/google/cloud/dataproc_v1/gapic_metadata.json
index 2d068a45..8e050e14 100644
--- a/google/cloud/dataproc_v1/gapic_metadata.json
+++ b/google/cloud/dataproc_v1/gapic_metadata.json
@@ -69,6 +69,60 @@
}
}
},
+ "BatchController": {
+ "clients": {
+ "grpc": {
+ "libraryClient": "BatchControllerClient",
+ "rpcs": {
+ "CreateBatch": {
+ "methods": [
+ "create_batch"
+ ]
+ },
+ "DeleteBatch": {
+ "methods": [
+ "delete_batch"
+ ]
+ },
+ "GetBatch": {
+ "methods": [
+ "get_batch"
+ ]
+ },
+ "ListBatches": {
+ "methods": [
+ "list_batches"
+ ]
+ }
+ }
+ },
+ "grpc-async": {
+ "libraryClient": "BatchControllerAsyncClient",
+ "rpcs": {
+ "CreateBatch": {
+ "methods": [
+ "create_batch"
+ ]
+ },
+ "DeleteBatch": {
+ "methods": [
+ "delete_batch"
+ ]
+ },
+ "GetBatch": {
+ "methods": [
+ "get_batch"
+ ]
+ },
+ "ListBatches": {
+ "methods": [
+ "list_batches"
+ ]
+ }
+ }
+ }
+ }
+ },
"ClusterController": {
"clients": {
"grpc": {
diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py
index f2c5718c..76e7ff5c 100644
--- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py
+++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py
@@ -621,6 +621,12 @@ async def delete_autoscaling_policy(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
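
The new `__aenter__`/`__aexit__` hooks make the async client usable with `async with`; a minimal sketch, assuming default application credentials and a hypothetical parent path:

```python
import asyncio

from google.cloud import dataproc_v1


async def main():
    # __aexit__ awaits transport.close(), so the channel is released
    # when the block exits.
    async with dataproc_v1.AutoscalingPolicyServiceAsyncClient() as client:
        pager = await client.list_autoscaling_policies(
            parent="projects/my-project/regions/us-central1"  # hypothetical
        )
        async for policy in pager:
            print(policy.name)


asyncio.run(main())
```
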
diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py
index 4d3e5aad..43a042b8 100644
--- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py
+++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py
@@ -350,10 +350,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
def create_autoscaling_policy(
@@ -791,6 +788,19 @@ def delete_autoscaling_policy(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
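
The sync client gains the same hooks, with the caveat spelled out in the `__exit__` docstring above; a minimal sketch with a hypothetical policy name:

```python
from google.cloud import dataproc_v1

# Safe: this client owns its transport, so closing it on __exit__
# affects nobody else. Do NOT use the context manager form if the
# transport is shared with other clients, since exiting the block
# closes the shared channel and breaks them.
with dataproc_v1.AutoscalingPolicyServiceClient() as client:
    policy = client.get_autoscaling_policy(
        name=(
            "projects/my-project/regions/us-central1/"
            "autoscalingPolicies/my-policy"  # hypothetical
        )
    )
    print(policy.name)
```
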
diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py
index 9c988099..bcaddf6c 100644
--- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py
+++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py
@@ -210,6 +210,15 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
def create_autoscaling_policy(
self,
diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py
index 83931784..4d5eefcd 100644
--- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py
+++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py
@@ -376,5 +376,8 @@ def delete_autoscaling_policy(
)
return self._stubs["delete_autoscaling_policy"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("AutoscalingPolicyServiceGrpcTransport",)
diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py
index ae024621..07c82123 100644
--- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py
+++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py
@@ -380,5 +380,8 @@ def delete_autoscaling_policy(
)
return self._stubs["delete_autoscaling_policy"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("AutoscalingPolicyServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/__init__.py b/google/cloud/dataproc_v1/services/batch_controller/__init__.py
new file mode 100644
index 00000000..284eb0a3
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/__init__.py
@@ -0,0 +1,22 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from .client import BatchControllerClient
+from .async_client import BatchControllerAsyncClient
+
+__all__ = (
+ "BatchControllerClient",
+ "BatchControllerAsyncClient",
+)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/async_client.py b/google/cloud/dataproc_v1/services/batch_controller/async_client.py
new file mode 100644
index 00000000..d4bcbe43
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/async_client.py
@@ -0,0 +1,506 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+import functools
+import re
+from typing import Dict, Sequence, Tuple, Type, Union
+import pkg_resources
+
+import google.api_core.client_options as ClientOptions # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.dataproc_v1.services.batch_controller import pagers
+from google.cloud.dataproc_v1.types import batches
+from google.cloud.dataproc_v1.types import operations
+from google.cloud.dataproc_v1.types import shared
+from google.protobuf import timestamp_pb2 # type: ignore
+from .transports.base import BatchControllerTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc_asyncio import BatchControllerGrpcAsyncIOTransport
+from .client import BatchControllerClient
+
+
+class BatchControllerAsyncClient:
+ """The BatchController provides methods to manage batch
+ workloads.
+ """
+
+ _client: BatchControllerClient
+
+ DEFAULT_ENDPOINT = BatchControllerClient.DEFAULT_ENDPOINT
+ DEFAULT_MTLS_ENDPOINT = BatchControllerClient.DEFAULT_MTLS_ENDPOINT
+
+ batch_path = staticmethod(BatchControllerClient.batch_path)
+ parse_batch_path = staticmethod(BatchControllerClient.parse_batch_path)
+ common_billing_account_path = staticmethod(
+ BatchControllerClient.common_billing_account_path
+ )
+ parse_common_billing_account_path = staticmethod(
+ BatchControllerClient.parse_common_billing_account_path
+ )
+ common_folder_path = staticmethod(BatchControllerClient.common_folder_path)
+ parse_common_folder_path = staticmethod(
+ BatchControllerClient.parse_common_folder_path
+ )
+ common_organization_path = staticmethod(
+ BatchControllerClient.common_organization_path
+ )
+ parse_common_organization_path = staticmethod(
+ BatchControllerClient.parse_common_organization_path
+ )
+ common_project_path = staticmethod(BatchControllerClient.common_project_path)
+ parse_common_project_path = staticmethod(
+ BatchControllerClient.parse_common_project_path
+ )
+ common_location_path = staticmethod(BatchControllerClient.common_location_path)
+ parse_common_location_path = staticmethod(
+ BatchControllerClient.parse_common_location_path
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BatchControllerAsyncClient: The constructed client.
+ """
+ return BatchControllerClient.from_service_account_info.__func__(BatchControllerAsyncClient, info, *args, **kwargs) # type: ignore
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BatchControllerAsyncClient: The constructed client.
+ """
+ return BatchControllerClient.from_service_account_file.__func__(BatchControllerAsyncClient, filename, *args, **kwargs) # type: ignore
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> BatchControllerTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ BatchControllerTransport: The transport used by the client instance.
+ """
+ return self._client.transport
+
+ get_transport_class = functools.partial(
+ type(BatchControllerClient).get_transport_class, type(BatchControllerClient)
+ )
+
+ def __init__(
+ self,
+ *,
+ credentials: ga_credentials.Credentials = None,
+ transport: Union[str, BatchControllerTransport] = "grpc_asyncio",
+ client_options: ClientOptions = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the batch controller client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, ~.BatchControllerTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (ClientOptions): Custom options for the client. It
+ won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ self._client = BatchControllerClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+ )
+
+ async def create_batch(
+ self,
+ request: batches.CreateBatchRequest = None,
+ *,
+ parent: str = None,
+ batch: batches.Batch = None,
+ batch_id: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a batch workload that executes
+ asynchronously.
+
+ Args:
+ request (:class:`google.cloud.dataproc_v1.types.CreateBatchRequest`):
+ The request object. A request to create a batch
+ workload.
+ parent (:class:`str`):
+ Required. The parent resource where
+ this batch will be created.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch (:class:`google.cloud.dataproc_v1.types.Batch`):
+ Required. The batch to create.
+ This corresponds to the ``batch`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_id (:class:`str`):
+ Optional. The ID to use for the batch, which will become
+ the final component of the batch's resource name.
+
+ This value must be 4-63 characters. Valid characters are
+ ``/[a-z][0-9]-/``.
+
+ This corresponds to the ``batch_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.dataproc_v1.types.Batch` A
+ representation of a batch workload in the service.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, batch, batch_id])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = batches.CreateBatchRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if batch is not None:
+ request.batch = batch
+ if batch_id is not None:
+ request.batch_id = batch_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.create_batch,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ batches.Batch,
+ metadata_type=operations.BatchOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_batch(
+ self,
+ request: batches.GetBatchRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batches.Batch:
+ r"""Gets the batch workload resource representation.
+
+ Args:
+ request (:class:`google.cloud.dataproc_v1.types.GetBatchRequest`):
+ The request object. A request to get the resource
+ representation for a batch workload.
+ name (:class:`str`):
+ Required. The name of the batch to
+ retrieve.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.dataproc_v1.types.Batch:
+ A representation of a batch workload
+ in the service.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = batches.GetBatchRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_batch,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ async def list_batches(
+ self,
+ request: batches.ListBatchesRequest = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListBatchesAsyncPager:
+ r"""Lists batch workloads.
+
+ Args:
+ request (:class:`google.cloud.dataproc_v1.types.ListBatchesRequest`):
+ The request object. A request to list batch workloads in
+ a project.
+ parent (:class:`str`):
+ Required. The parent, which owns this
+ collection of batches.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.dataproc_v1.services.batch_controller.pagers.ListBatchesAsyncPager:
+ A list of batch workloads.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = batches.ListBatchesRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_batches,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = await rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListBatchesAsyncPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_batch(
+ self,
+ request: batches.DeleteBatchRequest = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Deletes the batch workload resource. If the batch is not in
+ terminal state, the delete fails and the response returns
+ ``FAILED_PRECONDITION``.
+
+ Args:
+ request (:class:`google.cloud.dataproc_v1.types.DeleteBatchRequest`):
+ The request object. A request to delete a batch
+ workload.
+ name (:class:`str`):
+ Required. The name of the batch
+ resource to delete.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ request = batches.DeleteBatchRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_batch,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ await rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("BatchControllerAsyncClient",)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/client.py b/google/cloud/dataproc_v1/services/batch_controller/client.py
new file mode 100644
index 00000000..6556c4a8
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/client.py
@@ -0,0 +1,699 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from distutils import util
+import os
+import re
+from typing import Dict, Optional, Sequence, Tuple, Type, Union
+import pkg_resources
+
+from google.api_core import client_options as client_options_lib # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.api_core import operation # type: ignore
+from google.api_core import operation_async # type: ignore
+from google.cloud.dataproc_v1.services.batch_controller import pagers
+from google.cloud.dataproc_v1.types import batches
+from google.cloud.dataproc_v1.types import operations
+from google.cloud.dataproc_v1.types import shared
+from google.protobuf import timestamp_pb2 # type: ignore
+from .transports.base import BatchControllerTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import BatchControllerGrpcTransport
+from .transports.grpc_asyncio import BatchControllerGrpcAsyncIOTransport
+
+
+class BatchControllerClientMeta(type):
+ """Metaclass for the BatchController client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+
+ _transport_registry = (
+ OrderedDict()
+ ) # type: Dict[str, Type[BatchControllerTransport]]
+ _transport_registry["grpc"] = BatchControllerGrpcTransport
+ _transport_registry["grpc_asyncio"] = BatchControllerGrpcAsyncIOTransport
+
+ def get_transport_class(cls, label: str = None,) -> Type[BatchControllerTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class BatchControllerClient(metaclass=BatchControllerClientMeta):
+ """The BatchController provides methods to manage batch
+ workloads.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "dataproc.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BatchControllerClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BatchControllerClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> BatchControllerTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ BatchControllerTransport: The transport used by the client
+ instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def batch_path(project: str, location: str, batch: str,) -> str:
+ """Returns a fully-qualified batch string."""
+ return "projects/{project}/locations/{location}/batches/{batch}".format(
+ project=project, location=location, batch=batch,
+ )
+
+ @staticmethod
+ def parse_batch_path(path: str) -> Dict[str, str]:
+ """Parses a batch path into its component segments."""
+ m = re.match(
+ r"^projects/(?P.+?)/locations/(?P.+?)/batches/(?P.+?)$",
+ path,
+ )
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str,) -> str:
+ """Returns a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str,) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder,)
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str, str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str,) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization,)
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str, str]:
+ """Parse a organization path into its component segments."""
+ m = re.match(r"^organizations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str,) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(project=project,)
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str, str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str,) -> str:
+ """Returns a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str, str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path)
+ return m.groupdict() if m else {}
+
+ def __init__(
+ self,
+ *,
+ credentials: Optional[ga_credentials.Credentials] = None,
+ transport: Union[str, BatchControllerTransport, None] = None,
+ client_options: Optional[client_options_lib.ClientOptions] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ ) -> None:
+ """Instantiates the batch controller client.
+
+ Args:
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ transport (Union[str, BatchControllerTransport]): The
+ transport to use. If set to None, a transport is chosen
+ automatically.
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. It won't take effect if a ``transport`` instance is provided.
+ (1) The ``api_endpoint`` property can be used to override the
+ default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+ environment variable can also be used to override the endpoint:
+ "always" (always use the default mTLS endpoint), "never" (always
+ use the default regular endpoint) and "auto" (auto switch to the
+ default mTLS endpoint if client certificate is present, this is
+ the default value). However, the ``api_endpoint`` property takes
+ precedence if provided.
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+
+ # Create SSL credentials for mutual TLS if needed.
+ use_client_cert = bool(
+ util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
+ )
+
+ client_cert_source_func = None
+ is_mtls = False
+ if use_client_cert:
+ if client_options.client_cert_source:
+ is_mtls = True
+ client_cert_source_func = client_options.client_cert_source
+ else:
+ is_mtls = mtls.has_default_client_cert_source()
+ if is_mtls:
+ client_cert_source_func = mtls.default_client_cert_source()
+ else:
+ client_cert_source_func = None
+
+ # Figure out which api endpoint to use.
+ if client_options.api_endpoint is not None:
+ api_endpoint = client_options.api_endpoint
+ else:
+ use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+ if use_mtls_env == "never":
+ api_endpoint = self.DEFAULT_ENDPOINT
+ elif use_mtls_env == "always":
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ elif use_mtls_env == "auto":
+ if is_mtls:
+ api_endpoint = self.DEFAULT_MTLS_ENDPOINT
+ else:
+ api_endpoint = self.DEFAULT_ENDPOINT
+ else:
+ raise MutualTLSChannelError(
+ "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
+ "values: never, auto, always"
+ )
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, BatchControllerTransport):
+ # transport is a BatchControllerTransport instance.
+ if credentials or client_options.credentials_file:
+ raise ValueError(
+ "When providing a transport instance, "
+ "provide its credentials directly."
+ )
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, provide its scopes "
+ "directly."
+ )
+ self._transport = transport
+ else:
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=True,
+ )
+
+ def create_batch(
+ self,
+ request: Union[batches.CreateBatchRequest, dict] = None,
+ *,
+ parent: str = None,
+ batch: batches.Batch = None,
+ batch_id: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation.Operation:
+ r"""Creates a batch workload that executes
+ asynchronously.
+
+ Args:
+ request (Union[google.cloud.dataproc_v1.types.CreateBatchRequest, dict]):
+ The request object. A request to create a batch
+ workload.
+ parent (str):
+ Required. The parent resource where
+ this batch will be created.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch (google.cloud.dataproc_v1.types.Batch):
+ Required. The batch to create.
+ This corresponds to the ``batch`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ batch_id (str):
+ Optional. The ID to use for the batch, which will become
+ the final component of the batch's resource name.
+
+ This value must be 4-63 characters. Valid characters are
+ ``/[a-z][0-9]-/``.
+
+ This corresponds to the ``batch_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation.Operation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.dataproc_v1.types.Batch` A
+ representation of a batch workload in the service.
+
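+        Example:
+            A minimal usage sketch, assuming a ``BatchControllerClient``
+            named ``client``. The parent, bucket path, and batch ID are
+            illustrative placeholders; ``pyspark_batch`` is one of the
+            batch workload types:
+
+            .. code-block:: python
+
+                batch = dataproc_v1.Batch()
+                batch.pyspark_batch.main_python_file_uri = "gs://my-bucket/main.py"
+
+                operation = client.create_batch(
+                    parent="projects/my-project/locations/us-central1",
+                    batch=batch,
+                    batch_id="example-batch",
+                )
+                # Block until the batch resource is returned.
+                response = operation.result()
+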
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, batch, batch_id])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a batches.CreateBatchRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, batches.CreateBatchRequest):
+ request = batches.CreateBatchRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if batch is not None:
+ request.batch = batch
+ if batch_id is not None:
+ request.batch_id = batch_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.create_batch]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Wrap the response in an operation future.
+ response = operation.from_gapic(
+ response,
+ self._transport.operations_client,
+ batches.Batch,
+ metadata_type=operations.BatchOperationMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def get_batch(
+ self,
+ request: Union[batches.GetBatchRequest, dict] = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> batches.Batch:
+ r"""Gets the batch workload resource representation.
+
+ Args:
+ request (Union[google.cloud.dataproc_v1.types.GetBatchRequest, dict]):
+ The request object. A request to get the resource
+ representation for a batch workload.
+ name (str):
+ Required. The name of the batch to
+ retrieve.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.dataproc_v1.types.Batch:
+ A representation of a batch workload
+ in the service.
+
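+        Example:
+            A minimal call sketch, assuming a ``BatchControllerClient``
+            named ``client`` (the resource name is a placeholder):
+
+            .. code-block:: python
+
+                batch = client.get_batch(
+                    name="projects/my-project/locations/us-central1/batches/example-batch"
+                )
+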
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a batches.GetBatchRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, batches.GetBatchRequest):
+ request = batches.GetBatchRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.get_batch]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # Done; return the response.
+ return response
+
+ def list_batches(
+ self,
+ request: Union[batches.ListBatchesRequest, dict] = None,
+ *,
+ parent: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> pagers.ListBatchesPager:
+ r"""Lists batch workloads.
+
+ Args:
+ request (Union[google.cloud.dataproc_v1.types.ListBatchesRequest, dict]):
+ The request object. A request to list batch workloads in
+ a project.
+ parent (str):
+ Required. The parent, which owns this
+ collection of batches.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.dataproc_v1.services.batch_controller.pagers.ListBatchesPager:
+ A list of batch workloads.
+ Iterating over this object will yield
+ results and resolve additional pages
+ automatically.
+
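+        Example:
+            A minimal iteration sketch, assuming a
+            ``BatchControllerClient`` named ``client``; the returned
+            pager fetches further pages transparently (the parent value
+            is a placeholder):
+
+            .. code-block:: python
+
+                for batch in client.list_batches(
+                    parent="projects/my-project/locations/us-central1"
+                ):
+                    print(batch.name)
+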
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a batches.ListBatchesRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, batches.ListBatchesRequest):
+ request = batches.ListBatchesRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.list_batches]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
+ )
+
+ # Send the request.
+ response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__iter__` convenience method.
+ response = pagers.ListBatchesPager(
+ method=rpc, request=request, response=response, metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def delete_batch(
+ self,
+ request: Union[batches.DeleteBatchRequest, dict] = None,
+ *,
+ name: str = None,
+ retry: retries.Retry = gapic_v1.method.DEFAULT,
+ timeout: float = None,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Deletes the batch workload resource. If the batch is not in
+ terminal state, the delete fails and the response returns
+ ``FAILED_PRECONDITION``.
+
+ Args:
+ request (Union[google.cloud.dataproc_v1.types.DeleteBatchRequest, dict]):
+ The request object. A request to delete a batch
+ workload.
+ name (str):
+ Required. The name of the batch
+ resource to delete.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
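+
+        Example:
+            A minimal call sketch, assuming a ``BatchControllerClient``
+            named ``client`` (the resource name is a placeholder); the
+            batch must already be in a terminal state:
+
+            .. code-block:: python
+
+                client.delete_batch(
+                    name="projects/my-project/locations/us-central1/batches/example-batch"
+                )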
+ """
+ # Create or coerce a protobuf request object.
+ # Sanity check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError(
+ "If the `request` argument is set, then none of "
+ "the individual field arguments should be set."
+ )
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a batches.DeleteBatchRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, batches.DeleteBatchRequest):
+ request = batches.DeleteBatchRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.delete_batch]
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
+ )
+
+ # Send the request.
+ rpc(
+ request, retry=retry, timeout=timeout, metadata=metadata,
+ )
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
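+
+        A usage sketch with a dedicated, unshared transport (the
+        resource name is a placeholder):
+
+        .. code-block:: python
+
+            with BatchControllerClient() as client:
+                client.get_batch(
+                    name="projects/my-project/locations/us-central1/batches/example-batch"
+                )
+            # The transport is closed here; the client can no longer be used.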
+ """
+ self.transport.close()
+
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+
+__all__ = ("BatchControllerClient",)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/pagers.py b/google/cloud/dataproc_v1/services/batch_controller/pagers.py
new file mode 100644
index 00000000..fce8bc99
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/pagers.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from typing import (
+ Any,
+ AsyncIterator,
+ Awaitable,
+ Callable,
+ Sequence,
+ Tuple,
+ Optional,
+ Iterator,
+)
+
+from google.cloud.dataproc_v1.types import batches
+
+
+class ListBatchesPager:
+ """A pager for iterating through ``list_batches`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.dataproc_v1.types.ListBatchesResponse` object, and
+ provides an ``__iter__`` method to iterate through its
+ ``batches`` field.
+
+ If there are more pages, the ``__iter__`` method will make additional
+ ``ListBatches`` requests and continue to iterate
+ through the ``batches`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.dataproc_v1.types.ListBatchesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
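+
+    A minimal page-level iteration sketch (the ``client`` instance and
+    parent value are illustrative assumptions):
+
+    .. code-block:: python
+
+        pager = client.list_batches(parent="projects/my-project/locations/us-central1")
+        for page in pager.pages:
+            for batch in page.batches:
+                print(batch.name)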
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., batches.ListBatchesResponse],
+ request: batches.ListBatchesRequest,
+ response: batches.ListBatchesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiate the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.dataproc_v1.types.ListBatchesRequest):
+ The initial request object.
+ response (google.cloud.dataproc_v1.types.ListBatchesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = batches.ListBatchesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ def pages(self) -> Iterator[batches.ListBatchesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __iter__(self) -> Iterator[batches.Batch]:
+ for page in self.pages:
+ yield from page.batches
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
+
+
+class ListBatchesAsyncPager:
+ """A pager for iterating through ``list_batches`` requests.
+
+ This class thinly wraps an initial
+ :class:`google.cloud.dataproc_v1.types.ListBatchesResponse` object, and
+ provides an ``__aiter__`` method to iterate through its
+ ``batches`` field.
+
+ If there are more pages, the ``__aiter__`` method will make additional
+ ``ListBatches`` requests and continue to iterate
+ through the ``batches`` field on the
+ corresponding responses.
+
+ All the usual :class:`google.cloud.dataproc_v1.types.ListBatchesResponse`
+ attributes are available on the pager. If multiple requests are made, only
+ the most recent response is retained, and thus used for attribute lookup.
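+
+    A minimal async iteration sketch (the ``async_client`` instance and
+    parent value are illustrative assumptions):
+
+    .. code-block:: python
+
+        pager = await async_client.list_batches(
+            parent="projects/my-project/locations/us-central1"
+        )
+        async for batch in pager:
+            print(batch.name)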
+ """
+
+ def __init__(
+ self,
+ method: Callable[..., Awaitable[batches.ListBatchesResponse]],
+ request: batches.ListBatchesRequest,
+ response: batches.ListBatchesResponse,
+ *,
+ metadata: Sequence[Tuple[str, str]] = ()
+ ):
+ """Instantiates the pager.
+
+ Args:
+ method (Callable): The method that was originally called, and
+ which instantiated this pager.
+ request (google.cloud.dataproc_v1.types.ListBatchesRequest):
+ The initial request object.
+ response (google.cloud.dataproc_v1.types.ListBatchesResponse):
+ The initial response object.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ self._method = method
+ self._request = batches.ListBatchesRequest(request)
+ self._response = response
+ self._metadata = metadata
+
+ def __getattr__(self, name: str) -> Any:
+ return getattr(self._response, name)
+
+ @property
+ async def pages(self) -> AsyncIterator[batches.ListBatchesResponse]:
+ yield self._response
+ while self._response.next_page_token:
+ self._request.page_token = self._response.next_page_token
+ self._response = await self._method(self._request, metadata=self._metadata)
+ yield self._response
+
+ def __aiter__(self) -> AsyncIterator[batches.Batch]:
+ async def async_generator():
+ async for page in self.pages:
+ for response in page.batches:
+ yield response
+
+ return async_generator()
+
+ def __repr__(self) -> str:
+ return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/__init__.py b/google/cloud/dataproc_v1/services/batch_controller/transports/__init__.py
new file mode 100644
index 00000000..4771da8c
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/transports/__init__.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+from typing import Dict, Type
+
+from .base import BatchControllerTransport
+from .grpc import BatchControllerGrpcTransport
+from .grpc_asyncio import BatchControllerGrpcAsyncIOTransport
+
+
+# Compile a registry of transports.
+_transport_registry = OrderedDict() # type: Dict[str, Type[BatchControllerTransport]]
+_transport_registry["grpc"] = BatchControllerGrpcTransport
+_transport_registry["grpc_asyncio"] = BatchControllerGrpcAsyncIOTransport
+
+__all__ = (
+ "BatchControllerTransport",
+ "BatchControllerGrpcTransport",
+ "BatchControllerGrpcAsyncIOTransport",
+)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/base.py b/google/cloud/dataproc_v1/services/batch_controller/transports/base.py
new file mode 100644
index 00000000..e3368260
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/transports/base.py
@@ -0,0 +1,221 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import abc
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
+import packaging.version
+import pkg_resources
+
+import google.auth # type: ignore
+import google.api_core # type: ignore
+from google.api_core import exceptions as core_exceptions # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import retry as retries # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+from google.cloud.dataproc_v1.types import batches
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+
+try:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
+ gapic_version=pkg_resources.get_distribution("google-cloud-dataproc",).version,
+ )
+except pkg_resources.DistributionNotFound:
+ DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
+
+try:
+ # google.auth.__version__ was added in 1.26.0
+ _GOOGLE_AUTH_VERSION = google.auth.__version__
+except AttributeError:
+ try: # try pkg_resources if it is available
+ _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version
+ except pkg_resources.DistributionNotFound: # pragma: NO COVER
+ _GOOGLE_AUTH_VERSION = None
+
+
+class BatchControllerTransport(abc.ABC):
+ """Abstract transport class for BatchController."""
+
+ AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
+
+ DEFAULT_HOST: str = "dataproc.googleapis.com"
+
+ def __init__(
+ self,
+ *,
+ host: str = DEFAULT_HOST,
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ **kwargs,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+ scopes (Optional[Sequence[str]]): A list of scopes.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+ """
+ # Save the hostname. Default to port 443 (HTTPS) if none is specified.
+ if ":" not in host:
+ host += ":443"
+ self._host = host
+
+ scopes_kwargs = self._get_scopes_kwargs(self._host, scopes)
+
+ # Save the scopes.
+ self._scopes = scopes
+
+ # If no credentials are provided, then determine the appropriate
+ # defaults.
+ if credentials and credentials_file:
+ raise core_exceptions.DuplicateCredentialArgs(
+ "'credentials_file' and 'credentials' are mutually exclusive"
+ )
+
+ if credentials_file is not None:
+ credentials, _ = google.auth.load_credentials_from_file(
+ credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
+ )
+
+ elif credentials is None:
+ credentials, _ = google.auth.default(
+ **scopes_kwargs, quota_project_id=quota_project_id
+ )
+
+ # If the credentials are service account credentials, then always try to use self signed JWT.
+ if (
+ always_use_jwt_access
+ and isinstance(credentials, service_account.Credentials)
+ and hasattr(service_account.Credentials, "with_always_use_jwt_access")
+ ):
+ credentials = credentials.with_always_use_jwt_access(True)
+
+ # Save the credentials.
+ self._credentials = credentials
+
+ # TODO(busunkim): This method is in the base transport
+ # to avoid duplicating code across the transport classes. These functions
+ # should be deleted once the minimum required versions of google-auth is increased.
+
+ # TODO: Remove this function once google-auth >= 1.25.0 is required
+ @classmethod
+ def _get_scopes_kwargs(
+ cls, host: str, scopes: Optional[Sequence[str]]
+ ) -> Dict[str, Optional[Sequence[str]]]:
+ """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version"""
+
+ scopes_kwargs = {}
+
+ if _GOOGLE_AUTH_VERSION and (
+ packaging.version.parse(_GOOGLE_AUTH_VERSION)
+ >= packaging.version.parse("1.25.0")
+ ):
+ scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES}
+ else:
+ scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES}
+
+ return scopes_kwargs
+
+ def _prep_wrapped_messages(self, client_info):
+ # Precompute the wrapped methods.
+ self._wrapped_methods = {
+ self.create_batch: gapic_v1.method.wrap_method(
+ self.create_batch, default_timeout=None, client_info=client_info,
+ ),
+ self.get_batch: gapic_v1.method.wrap_method(
+ self.get_batch, default_timeout=None, client_info=client_info,
+ ),
+ self.list_batches: gapic_v1.method.wrap_method(
+ self.list_batches, default_timeout=None, client_info=client_info,
+ ),
+ self.delete_batch: gapic_v1.method.wrap_method(
+ self.delete_batch, default_timeout=None, client_info=client_info,
+ ),
+ }
+
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Return the client designed to process long-running operations."""
+ raise NotImplementedError()
+
+ @property
+ def create_batch(
+ self,
+ ) -> Callable[
+ [batches.CreateBatchRequest],
+ Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def get_batch(
+ self,
+ ) -> Callable[
+ [batches.GetBatchRequest], Union[batches.Batch, Awaitable[batches.Batch]]
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def list_batches(
+ self,
+ ) -> Callable[
+ [batches.ListBatchesRequest],
+ Union[batches.ListBatchesResponse, Awaitable[batches.ListBatchesResponse]],
+ ]:
+ raise NotImplementedError()
+
+ @property
+ def delete_batch(
+ self,
+ ) -> Callable[
+ [batches.DeleteBatchRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
+ ]:
+ raise NotImplementedError()
+
+
+__all__ = ("BatchControllerTransport",)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py
new file mode 100644
index 00000000..3fe81565
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py
@@ -0,0 +1,354 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import grpc_helpers # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.api_core import gapic_v1 # type: ignore
+import google.auth # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+
+import grpc # type: ignore
+
+from google.cloud.dataproc_v1.types import batches
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from .base import BatchControllerTransport, DEFAULT_CLIENT_INFO
+
+
+class BatchControllerGrpcTransport(BatchControllerTransport):
+ """gRPC backend transport for BatchController.
+
+ The BatchController provides methods to manage batch
+ workloads.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
+ """
+
+ _stubs: Dict[str, Callable]
+
+ def __init__(
+ self,
+ *,
+ host: str = "dataproc.googleapis.com",
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Sequence[str] = None,
+ channel: grpc.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id: Optional[str] = None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+ ignored if ``channel`` is provided.
+ channel (Optional[grpc.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for the grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client = None
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists
+ self._prep_wrapped_messages(client_info)
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "dataproc.googleapis.com",
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: str = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> grpc.Channel:
+ """Create and return a gRPC channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ grpc.Channel: A gRPC channel object.
+
+ Raises:
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+
+ return grpc_helpers.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs,
+ )
+
+ @property
+ def grpc_channel(self) -> grpc.Channel:
+ """Return the channel designed to connect to this service.
+ """
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_batch(
+ self,
+ ) -> Callable[[batches.CreateBatchRequest], operations_pb2.Operation]:
+ r"""Return a callable for the create batch method over gRPC.
+
+ Creates a batch workload that executes
+ asynchronously.
+
+ Returns:
+ Callable[[~.CreateBatchRequest],
+ ~.Operation]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_batch" not in self._stubs:
+ self._stubs["create_batch"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/CreateBatch",
+ request_serializer=batches.CreateBatchRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["create_batch"]
+
+ @property
+ def get_batch(self) -> Callable[[batches.GetBatchRequest], batches.Batch]:
+ r"""Return a callable for the get batch method over gRPC.
+
+ Gets the batch workload resource representation.
+
+ Returns:
+ Callable[[~.GetBatchRequest],
+ ~.Batch]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_batch" not in self._stubs:
+ self._stubs["get_batch"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/GetBatch",
+ request_serializer=batches.GetBatchRequest.serialize,
+ response_deserializer=batches.Batch.deserialize,
+ )
+ return self._stubs["get_batch"]
+
+ @property
+ def list_batches(
+ self,
+ ) -> Callable[[batches.ListBatchesRequest], batches.ListBatchesResponse]:
+ r"""Return a callable for the list batches method over gRPC.
+
+ Lists batch workloads.
+
+ Returns:
+ Callable[[~.ListBatchesRequest],
+ ~.ListBatchesResponse]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_batches" not in self._stubs:
+ self._stubs["list_batches"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/ListBatches",
+ request_serializer=batches.ListBatchesRequest.serialize,
+ response_deserializer=batches.ListBatchesResponse.deserialize,
+ )
+ return self._stubs["list_batches"]
+
+ @property
+ def delete_batch(self) -> Callable[[batches.DeleteBatchRequest], empty_pb2.Empty]:
+ r"""Return a callable for the delete batch method over gRPC.
+
+ Deletes the batch workload resource. If the batch is not in
+ terminal state, the delete fails and the response returns
+ ``FAILED_PRECONDITION``.
+
+ Returns:
+ Callable[[~.DeleteBatchRequest],
+ ~.Empty]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_batch" not in self._stubs:
+ self._stubs["delete_batch"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/DeleteBatch",
+ request_serializer=batches.DeleteBatchRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_batch"]
+
+ def close(self):
+ self.grpc_channel.close()
+
+
+__all__ = ("BatchControllerGrpcTransport",)
diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py
new file mode 100644
index 00000000..d531aca6
--- /dev/null
+++ b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py
@@ -0,0 +1,363 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1 # type: ignore
+from google.api_core import grpc_helpers_async # type: ignore
+from google.api_core import operations_v1 # type: ignore
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+import packaging.version
+
+import grpc # type: ignore
+from grpc.experimental import aio # type: ignore
+
+from google.cloud.dataproc_v1.types import batches
+from google.longrunning import operations_pb2 # type: ignore
+from google.protobuf import empty_pb2 # type: ignore
+from .base import BatchControllerTransport, DEFAULT_CLIENT_INFO
+from .grpc import BatchControllerGrpcTransport
+
+
+class BatchControllerGrpcAsyncIOTransport(BatchControllerTransport):
+ """gRPC AsyncIO backend transport for BatchController.
+
+ The BatchController provides methods to manage batch
+ workloads.
+
+ This class defines the same methods as the primary client, so the
+ primary client can load the underlying transport implementation
+ and call it.
+
+ It sends protocol buffers over the wire using gRPC (which is built on
+ top of HTTP/2); the ``grpcio`` package must be installed.
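+
+    A selection sketch (assuming an async client,
+    ``BatchControllerAsyncClient``, is exported alongside the sync one,
+    as is conventional):
+
+    .. code-block:: python
+
+        client = dataproc_v1.BatchControllerAsyncClient(
+            transport="grpc_asyncio"  # the default for the async client
+        )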
+ """
+
+ _grpc_channel: aio.Channel
+ _stubs: Dict[str, Callable] = {}
+
+ @classmethod
+ def create_channel(
+ cls,
+ host: str = "dataproc.googleapis.com",
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ quota_project_id: Optional[str] = None,
+ **kwargs,
+ ) -> aio.Channel:
+ """Create and return a gRPC AsyncIO channel object.
+ Args:
+ host (Optional[str]): The host for the channel to use.
+ credentials (Optional[~.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify this application to the service. If
+ none are specified, the client will attempt to ascertain
+ the credentials from the environment.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is mutually exclusive with credentials.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ kwargs (Optional[dict]): Keyword arguments, which are passed to the
+ channel creation.
+ Returns:
+ aio.Channel: A gRPC AsyncIO channel object.
+ """
+
+ return grpc_helpers_async.create_channel(
+ host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ quota_project_id=quota_project_id,
+ default_scopes=cls.AUTH_SCOPES,
+ scopes=scopes,
+ default_host=cls.DEFAULT_HOST,
+ **kwargs,
+ )
+
+ def __init__(
+ self,
+ *,
+ host: str = "dataproc.googleapis.com",
+ credentials: ga_credentials.Credentials = None,
+ credentials_file: Optional[str] = None,
+ scopes: Optional[Sequence[str]] = None,
+ channel: aio.Channel = None,
+ api_mtls_endpoint: str = None,
+ client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
+ ssl_channel_credentials: grpc.ChannelCredentials = None,
+ client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
+ quota_project_id=None,
+ client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+ always_use_jwt_access: Optional[bool] = False,
+ ) -> None:
+ """Instantiate the transport.
+
+ Args:
+ host (Optional[str]):
+ The hostname to connect to.
+ credentials (Optional[google.auth.credentials.Credentials]): The
+ authorization credentials to attach to requests. These
+ credentials identify the application to the service; if none
+ are specified, the client will attempt to ascertain the
+ credentials from the environment.
+ This argument is ignored if ``channel`` is provided.
+ credentials_file (Optional[str]): A file with credentials that can
+ be loaded with :func:`google.auth.load_credentials_from_file`.
+ This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+ service. These are only used when credentials are not specified and
+ are passed to :func:`google.auth.default`.
+ channel (Optional[aio.Channel]): A ``Channel`` instance through
+ which to make calls.
+ api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+ If provided, it overrides the ``host`` argument and tries to create
+ a mutual TLS channel with client SSL credentials from
+ ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ Deprecated. A callback to provide client SSL certificate bytes and
+ private key bytes, both in PEM format. It is ignored if
+ ``api_mtls_endpoint`` is None.
+ ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+ for the grpc channel. It is ignored if ``channel`` is provided.
+ client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+ A callback to provide client certificate bytes and private key bytes,
+ both in PEM format. It is used to configure a mutual TLS channel. It is
+ ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing
+ and quota.
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+ The client info used to send a user-agent string along with
+ API requests. If ``None``, then default info will be used.
+ Generally, you only need to set this if you're developing
+ your own client library.
+ always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+ be used for service account credentials.
+
+ Raises:
+            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
+ and ``credentials_file`` are passed.
+ """
+ self._grpc_channel = None
+ self._ssl_channel_credentials = ssl_channel_credentials
+ self._stubs: Dict[str, Callable] = {}
+ self._operations_client = None
+
+ if api_mtls_endpoint:
+ warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
+ if client_cert_source:
+ warnings.warn("client_cert_source is deprecated", DeprecationWarning)
+
+ if channel:
+ # Ignore credentials if a channel was passed.
+ credentials = False
+ # If a channel was explicitly provided, set it.
+ self._grpc_channel = channel
+ self._ssl_channel_credentials = None
+ else:
+ if api_mtls_endpoint:
+ host = api_mtls_endpoint
+
+ # Create SSL credentials with client_cert_source or application
+ # default SSL credentials.
+ if client_cert_source:
+ cert, key = client_cert_source()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+ else:
+ self._ssl_channel_credentials = SslCredentials().ssl_credentials
+
+ else:
+ if client_cert_source_for_mtls and not ssl_channel_credentials:
+ cert, key = client_cert_source_for_mtls()
+ self._ssl_channel_credentials = grpc.ssl_channel_credentials(
+ certificate_chain=cert, private_key=key
+ )
+
+ # The base transport sets the host, credentials and scopes
+ super().__init__(
+ host=host,
+ credentials=credentials,
+ credentials_file=credentials_file,
+ scopes=scopes,
+ quota_project_id=quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=always_use_jwt_access,
+ )
+
+ if not self._grpc_channel:
+ self._grpc_channel = type(self).create_channel(
+ self._host,
+ credentials=self._credentials,
+ credentials_file=credentials_file,
+ scopes=self._scopes,
+ ssl_credentials=self._ssl_channel_credentials,
+ quota_project_id=quota_project_id,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Wrap messages. This must be done after self._grpc_channel exists
+ self._prep_wrapped_messages(client_info)
+
+ @property
+ def grpc_channel(self) -> aio.Channel:
+ """Create the channel designed to connect to this service.
+
+ This property caches on the instance; repeated calls return
+ the same channel.
+ """
+ # Return the channel from cache.
+ return self._grpc_channel
+
+ @property
+ def operations_client(self) -> operations_v1.OperationsAsyncClient:
+ """Create the client designed to process long-running operations.
+
+ This property caches on the instance; repeated calls return the same
+ client.
+ """
+ # Sanity check: Only create a new client if we do not already have one.
+ if self._operations_client is None:
+ self._operations_client = operations_v1.OperationsAsyncClient(
+ self.grpc_channel
+ )
+
+ # Return the client from cache.
+ return self._operations_client
+
+ @property
+ def create_batch(
+ self,
+ ) -> Callable[[batches.CreateBatchRequest], Awaitable[operations_pb2.Operation]]:
+ r"""Return a callable for the create batch method over gRPC.
+
+ Creates a batch workload that executes
+ asynchronously.
+
+ Returns:
+ Callable[[~.CreateBatchRequest],
+ Awaitable[~.Operation]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "create_batch" not in self._stubs:
+ self._stubs["create_batch"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/CreateBatch",
+ request_serializer=batches.CreateBatchRequest.serialize,
+ response_deserializer=operations_pb2.Operation.FromString,
+ )
+ return self._stubs["create_batch"]
+
+ @property
+ def get_batch(
+ self,
+ ) -> Callable[[batches.GetBatchRequest], Awaitable[batches.Batch]]:
+ r"""Return a callable for the get batch method over gRPC.
+
+ Gets the batch workload resource representation.
+
+ Returns:
+ Callable[[~.GetBatchRequest],
+ Awaitable[~.Batch]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "get_batch" not in self._stubs:
+ self._stubs["get_batch"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/GetBatch",
+ request_serializer=batches.GetBatchRequest.serialize,
+ response_deserializer=batches.Batch.deserialize,
+ )
+ return self._stubs["get_batch"]
+
+ @property
+ def list_batches(
+ self,
+ ) -> Callable[[batches.ListBatchesRequest], Awaitable[batches.ListBatchesResponse]]:
+ r"""Return a callable for the list batches method over gRPC.
+
+ Lists batch workloads.
+
+ Returns:
+ Callable[[~.ListBatchesRequest],
+ Awaitable[~.ListBatchesResponse]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "list_batches" not in self._stubs:
+ self._stubs["list_batches"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/ListBatches",
+ request_serializer=batches.ListBatchesRequest.serialize,
+ response_deserializer=batches.ListBatchesResponse.deserialize,
+ )
+ return self._stubs["list_batches"]
+
+ @property
+ def delete_batch(
+ self,
+ ) -> Callable[[batches.DeleteBatchRequest], Awaitable[empty_pb2.Empty]]:
+ r"""Return a callable for the delete batch method over gRPC.
+
+ Deletes the batch workload resource. If the batch is not in
+ terminal state, the delete fails and the response returns
+ ``FAILED_PRECONDITION``.
+
+ Returns:
+ Callable[[~.DeleteBatchRequest],
+ Awaitable[~.Empty]]:
+ A function that, when called, will call the underlying RPC
+ on the server.
+ """
+ # Generate a "stub function" on-the-fly which will actually make
+ # the request.
+ # gRPC handles serialization and deserialization, so we just need
+ # to pass in the functions for each.
+ if "delete_batch" not in self._stubs:
+ self._stubs["delete_batch"] = self.grpc_channel.unary_unary(
+ "/google.cloud.dataproc.v1.BatchController/DeleteBatch",
+ request_serializer=batches.DeleteBatchRequest.serialize,
+ response_deserializer=empty_pb2.Empty.FromString,
+ )
+ return self._stubs["delete_batch"]
+
+ def close(self):
+ return self.grpc_channel.close()
+
+
+__all__ = ("BatchControllerGrpcAsyncIOTransport",)
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py
index 67dc8a57..36ab734a 100644
--- a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py
+++ b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py
@@ -220,7 +220,7 @@ async def create_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -292,6 +292,9 @@ async def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata <https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata>`__.
+ The cluster must be in a
+ [``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
+ state or an error is returned.
Args:
request (:class:`google.cloud.dataproc_v1.types.UpdateClusterRequest`):
@@ -398,7 +401,7 @@ async def update_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -483,7 +486,7 @@ async def stop_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -535,7 +538,7 @@ async def start_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -727,7 +730,7 @@ async def get_cluster(
google.cloud.dataproc_v1.types.Cluster:
Describes the identifying
information, config, and status of a
- cluster of Compute Engine instances.
+ Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -1015,6 +1018,12 @@ async def diagnose_cluster(
# Done; return the response.
return response
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/client.py b/google/cloud/dataproc_v1/services/cluster_controller/client.py
index 870eb9e9..adc835bb 100644
--- a/google/cloud/dataproc_v1/services/cluster_controller/client.py
+++ b/google/cloud/dataproc_v1/services/cluster_controller/client.py
@@ -369,10 +369,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
def create_cluster(
@@ -425,7 +422,7 @@ def create_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -488,6 +485,9 @@ def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata `__.
+ The cluster must be in a
+ [``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
+ state or an error is returned.
Args:
request (Union[google.cloud.dataproc_v1.types.UpdateClusterRequest, dict]):
@@ -594,7 +594,7 @@ def update_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -670,7 +670,7 @@ def stop_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -723,7 +723,7 @@ def start_cluster(
An object representing a long-running operation.
The result type for the operation will be :class:`google.cloud.dataproc_v1.types.Cluster` Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -907,7 +907,7 @@ def get_cluster(
google.cloud.dataproc_v1.types.Cluster:
Describes the identifying
information, config, and status of a
- cluster of Compute Engine instances.
+ Dataproc cluster.
"""
# Create or coerce a protobuf request object.
@@ -1164,6 +1164,19 @@ def diagnose_cluster(
# Done; return the response.
return response
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
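
The synchronous client gets the equivalent `with` support. Per the warning in `__exit__`, this is only safe when the client owns its transport; a sketch with placeholder identifiers:

    from google.cloud import dataproc_v1

    # Safe: this client created its own transport, so closing it on exit
    # cannot break any other client.
    with dataproc_v1.ClusterControllerClient() as client:
        cluster = client.get_cluster(
            project_id="my-project",  # placeholder
            region="us-central1",
            cluster_name="my-cluster",
        )
        print(cluster.status.state)

If a transport instance is shared across several clients, skip the `with` block and manage the transport's lifetime explicitly instead.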
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py
index 508158ed..880dc1db 100644
--- a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py
+++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py
@@ -250,6 +250,15 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py
index 67d97c46..9c6f2b9a 100644
--- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py
+++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py
@@ -282,6 +282,9 @@ def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata `__.
+ The cluster must be in a
+ [``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
+ state or an error is returned.
Returns:
Callable[[~.UpdateClusterRequest],
@@ -467,5 +470,8 @@ def diagnose_cluster(
)
return self._stubs["diagnose_cluster"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("ClusterControllerGrpcTransport",)
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py
index e19d261b..555486fc 100644
--- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py
+++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py
@@ -287,6 +287,9 @@ def update_cluster(
[Operation.metadata][google.longrunning.Operation.metadata] will
be
`ClusterOperationMetadata `__.
+ The cluster must be in a
+ [``RUNNING``][google.cloud.dataproc.v1.ClusterStatus.State]
+ state or an error is returned.
Returns:
Callable[[~.UpdateClusterRequest],
@@ -478,5 +481,8 @@ def diagnose_cluster(
)
return self._stubs["diagnose_cluster"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("ClusterControllerGrpcAsyncIOTransport",)
diff --git a/google/cloud/dataproc_v1/services/job_controller/async_client.py b/google/cloud/dataproc_v1/services/job_controller/async_client.py
index 809b1178..cb82f02e 100644
--- a/google/cloud/dataproc_v1/services/job_controller/async_client.py
+++ b/google/cloud/dataproc_v1/services/job_controller/async_client.py
@@ -798,6 +798,12 @@ async def delete_job(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/dataproc_v1/services/job_controller/client.py b/google/cloud/dataproc_v1/services/job_controller/client.py
index 1648b69c..e02935e6 100644
--- a/google/cloud/dataproc_v1/services/job_controller/client.py
+++ b/google/cloud/dataproc_v1/services/job_controller/client.py
@@ -328,10 +328,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
def submit_job(
@@ -903,6 +900,19 @@ def delete_job(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/google/cloud/dataproc_v1/services/job_controller/transports/base.py
index de2683cd..60fe2244 100644
--- a/google/cloud/dataproc_v1/services/job_controller/transports/base.py
+++ b/google/cloud/dataproc_v1/services/job_controller/transports/base.py
@@ -261,6 +261,15 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py
index e20d4103..b69f64b4 100644
--- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py
+++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py
@@ -419,5 +419,8 @@ def delete_job(self) -> Callable[[jobs.DeleteJobRequest], empty_pb2.Empty]:
)
return self._stubs["delete_job"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("JobControllerGrpcTransport",)
diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py
index 59de6e20..4c9f9d3a 100644
--- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py
+++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py
@@ -428,5 +428,8 @@ def delete_job(
)
return self._stubs["delete_job"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("JobControllerGrpcAsyncIOTransport",)
diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py
index 6e895241..294b27eb 100644
--- a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py
+++ b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py
@@ -948,6 +948,12 @@ async def delete_workflow_template(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ async def __aenter__(self):
+ return self
+
+ async def __aexit__(self, exc_type, exc, tb):
+ await self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/google/cloud/dataproc_v1/services/workflow_template_service/client.py
index 7725b1ad..c11b5499 100644
--- a/google/cloud/dataproc_v1/services/workflow_template_service/client.py
+++ b/google/cloud/dataproc_v1/services/workflow_template_service/client.py
@@ -387,10 +387,7 @@ def __init__(
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
- always_use_jwt_access=(
- Transport == type(self).get_transport_class("grpc")
- or Transport == type(self).get_transport_class("grpc_asyncio")
- ),
+ always_use_jwt_access=True,
)
def create_workflow_template(
@@ -1106,6 +1103,19 @@ def delete_workflow_template(
request, retry=retry, timeout=timeout, metadata=metadata,
)
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, traceback):
+ """Releases underlying transport's resources.
+
+ .. warning::
+ ONLY use as a context manager if the transport is NOT shared
+ with other clients! Exiting the with block will CLOSE the transport
+ and may cause errors in other clients!
+ """
+ self.transport.close()
+
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py
index 6991402f..343b96c2 100644
--- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py
+++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py
@@ -259,6 +259,15 @@ def _prep_wrapped_messages(self, client_info):
),
}
+ def close(self):
+ """Closes resources associated with the transport.
+
+ .. warning::
+ Only call this method if the transport is NOT shared
+ with other clients - this may cause errors in other clients!
+ """
+ raise NotImplementedError()
+
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Return the client designed to process long-running operations."""
diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py
index d609525c..fd20ffed 100644
--- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py
+++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py
@@ -500,5 +500,8 @@ def delete_workflow_template(
)
return self._stubs["delete_workflow_template"]
+ def close(self):
+ self.grpc_channel.close()
+
__all__ = ("WorkflowTemplateServiceGrpcTransport",)
diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py
index c9a93367..729d8f41 100644
--- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py
+++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py
@@ -507,5 +507,8 @@ def delete_workflow_template(
)
return self._stubs["delete_workflow_template"]
+ def close(self):
+ return self.grpc_channel.close()
+
__all__ = ("WorkflowTemplateServiceGrpcAsyncIOTransport",)
diff --git a/google/cloud/dataproc_v1/types/__init__.py b/google/cloud/dataproc_v1/types/__init__.py
index 814a8c19..2ac23800 100644
--- a/google/cloud/dataproc_v1/types/__init__.py
+++ b/google/cloud/dataproc_v1/types/__init__.py
@@ -25,6 +25,18 @@
ListAutoscalingPoliciesResponse,
UpdateAutoscalingPolicyRequest,
)
+from .batches import (
+ Batch,
+ CreateBatchRequest,
+ DeleteBatchRequest,
+ GetBatchRequest,
+ ListBatchesRequest,
+ ListBatchesResponse,
+ PySparkBatch,
+ SparkBatch,
+ SparkRBatch,
+ SparkSqlBatch,
+)
from .clusters import (
AcceleratorConfig,
AutoscalingConfig,
@@ -32,6 +44,7 @@
ClusterConfig,
ClusterMetrics,
ClusterStatus,
+ ConfidentialInstanceConfig,
CreateClusterRequest,
DeleteClusterRequest,
DiagnoseClusterRequest,
@@ -87,9 +100,20 @@
YarnApplication,
)
from .operations import (
+ BatchOperationMetadata,
ClusterOperationMetadata,
ClusterOperationStatus,
)
+from .shared import (
+ EnvironmentConfig,
+ ExecutionConfig,
+ PeripheralsConfig,
+ RuntimeConfig,
+ RuntimeInfo,
+ SparkHistoryServerConfig,
+ Component,
+ FailureAction,
+)
from .workflow_templates import (
ClusterOperation,
ClusterSelector,
@@ -125,12 +149,23 @@
"ListAutoscalingPoliciesRequest",
"ListAutoscalingPoliciesResponse",
"UpdateAutoscalingPolicyRequest",
+ "Batch",
+ "CreateBatchRequest",
+ "DeleteBatchRequest",
+ "GetBatchRequest",
+ "ListBatchesRequest",
+ "ListBatchesResponse",
+ "PySparkBatch",
+ "SparkBatch",
+ "SparkRBatch",
+ "SparkSqlBatch",
"AcceleratorConfig",
"AutoscalingConfig",
"Cluster",
"ClusterConfig",
"ClusterMetrics",
"ClusterStatus",
+ "ConfidentialInstanceConfig",
"CreateClusterRequest",
"DeleteClusterRequest",
"DiagnoseClusterRequest",
@@ -182,9 +217,17 @@
"SubmitJobRequest",
"UpdateJobRequest",
"YarnApplication",
+ "BatchOperationMetadata",
"ClusterOperationMetadata",
"ClusterOperationStatus",
+ "EnvironmentConfig",
+ "ExecutionConfig",
+ "PeripheralsConfig",
+ "RuntimeConfig",
+ "RuntimeInfo",
+ "SparkHistoryServerConfig",
"Component",
+ "FailureAction",
"ClusterOperation",
"ClusterSelector",
"CreateWorkflowTemplateRequest",
diff --git a/google/cloud/dataproc_v1/types/autoscaling_policies.py b/google/cloud/dataproc_v1/types/autoscaling_policies.py
index ae9afa52..0b590fe1 100644
--- a/google/cloud/dataproc_v1/types/autoscaling_policies.py
+++ b/google/cloud/dataproc_v1/types/autoscaling_policies.py
@@ -67,6 +67,15 @@ class AutoscalingPolicy(proto.Message):
secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig):
Optional. Describes how the autoscaler will
operate for secondary workers.
+ labels (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy.LabelsEntry]):
+ Optional. The labels to associate with this autoscaling
+ policy. Label **keys** must contain 1 to 63 characters, and
+ must conform to `RFC
+ 1035 `__. Label
+ **values** may be empty, but, if present, must contain 1 to
+ 63 characters, and must conform to `RFC
+ 1035 `__. No more than
+ 32 labels can be associated with an autoscaling policy.
"""
id = proto.Field(proto.STRING, number=1,)
@@ -80,10 +89,12 @@ class AutoscalingPolicy(proto.Message):
secondary_worker_config = proto.Field(
proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig",
)
+ labels = proto.MapField(proto.STRING, proto.STRING, number=6,)
class BasicAutoscalingAlgorithm(proto.Message):
r"""Basic algorithm for autoscaling.
+
Attributes:
yarn_config (google.cloud.dataproc_v1.types.BasicYarnAutoscalingConfig):
Required. YARN autoscaling configuration.
@@ -105,6 +116,7 @@ class BasicAutoscalingAlgorithm(proto.Message):
class BasicYarnAutoscalingConfig(proto.Message):
r"""Basic autoscaling configurations for YARN.
+
Attributes:
graceful_decommission_timeout (google.protobuf.duration_pb2.Duration):
Required. Timeout for YARN graceful decommissioning of Node
@@ -216,6 +228,7 @@ class InstanceGroupAutoscalingPolicyConfig(proto.Message):
class CreateAutoscalingPolicyRequest(proto.Message):
r"""A request to create an autoscaling policy.
+
Attributes:
parent (str):
Required. The "resource name" of the region or location, as
@@ -239,6 +252,7 @@ class CreateAutoscalingPolicyRequest(proto.Message):
class GetAutoscalingPolicyRequest(proto.Message):
r"""A request to fetch an autoscaling policy.
+
Attributes:
name (str):
Required. The "resource name" of the autoscaling policy, as
@@ -259,6 +273,7 @@ class GetAutoscalingPolicyRequest(proto.Message):
class UpdateAutoscalingPolicyRequest(proto.Message):
r"""A request to update an autoscaling policy.
+
Attributes:
policy (google.cloud.dataproc_v1.types.AutoscalingPolicy):
Required. The updated autoscaling policy.
@@ -292,6 +307,7 @@ class DeleteAutoscalingPolicyRequest(proto.Message):
class ListAutoscalingPoliciesRequest(proto.Message):
r"""A request to list autoscaling policies in a project.
+
Attributes:
parent (str):
Required. The "resource name" of the region or location, as
diff --git a/google/cloud/dataproc_v1/types/batches.py b/google/cloud/dataproc_v1/types/batches.py
new file mode 100644
index 00000000..1a6cef8b
--- /dev/null
+++ b/google/cloud/dataproc_v1/types/batches.py
@@ -0,0 +1,389 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import proto # type: ignore
+
+from google.cloud.dataproc_v1.types import shared
+from google.protobuf import timestamp_pb2 # type: ignore
+
+
+__protobuf__ = proto.module(
+ package="google.cloud.dataproc.v1",
+ manifest={
+ "CreateBatchRequest",
+ "GetBatchRequest",
+ "ListBatchesRequest",
+ "ListBatchesResponse",
+ "DeleteBatchRequest",
+ "Batch",
+ "PySparkBatch",
+ "SparkBatch",
+ "SparkRBatch",
+ "SparkSqlBatch",
+ },
+)
+
+
+class CreateBatchRequest(proto.Message):
+ r"""A request to create a batch workload.
+
+ Attributes:
+ parent (str):
+ Required. The parent resource where this
+ batch will be created.
+ batch (google.cloud.dataproc_v1.types.Batch):
+ Required. The batch to create.
+ batch_id (str):
+ Optional. The ID to use for the batch, which will become the
+ final component of the batch's resource name.
+
+ This value must be 4-63 characters. Valid characters are
+ ``/[a-z][0-9]-/``.
+ request_id (str):
+ Optional. A unique ID used to identify the request. If the
+ service receives two
+ `CreateBatchRequest `__\ s
+ with the same request_id, the second request is ignored and
+ the Operation that corresponds to the first Batch created
+ and stored in the backend is returned.
+
+ Recommendation: Set this value to a
+ `UUID `__.
+
+ The value must contain only letters (a-z, A-Z), numbers
+ (0-9), underscores (_), and hyphens (-). The maximum length
+ is 40 characters.
+ """
+
+ parent = proto.Field(proto.STRING, number=1,)
+ batch = proto.Field(proto.MESSAGE, number=2, message="Batch",)
+ batch_id = proto.Field(proto.STRING, number=3,)
+ request_id = proto.Field(proto.STRING, number=4,)
+
+
+class GetBatchRequest(proto.Message):
+ r"""A request to get the resource representation for a batch
+ workload.
+
+ Attributes:
+ name (str):
+ Required. The name of the batch to retrieve.
+ """
+
+ name = proto.Field(proto.STRING, number=1,)
+
+
+class ListBatchesRequest(proto.Message):
+ r"""A request to list batch workloads in a project.
+
+ Attributes:
+ parent (str):
+ Required. The parent, which owns this
+ collection of batches.
+ page_size (int):
+ Optional. The maximum number of batches to
+ return in each response. The service may return
+ fewer than this value. The default page size is
+ 20; the maximum page size is 1000.
+ page_token (str):
+ Optional. A page token received from a previous
+ ``ListBatches`` call. Provide this token to retrieve the
+ subsequent page.
+ """
+
+ parent = proto.Field(proto.STRING, number=1,)
+ page_size = proto.Field(proto.INT32, number=2,)
+ page_token = proto.Field(proto.STRING, number=3,)
+
+
+class ListBatchesResponse(proto.Message):
+ r"""A list of batch workloads.
+
+ Attributes:
+ batches (Sequence[google.cloud.dataproc_v1.types.Batch]):
+ The batches from the specified collection.
+ next_page_token (str):
+ A token, which can be sent as ``page_token`` to retrieve the
+ next page. If this field is omitted, there are no subsequent
+ pages.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ batches = proto.RepeatedField(proto.MESSAGE, number=1, message="Batch",)
+ next_page_token = proto.Field(proto.STRING, number=2,)
+
+
+class DeleteBatchRequest(proto.Message):
+ r"""A request to delete a batch workload.
+
+ Attributes:
+ name (str):
+ Required. The name of the batch resource to
+ delete.
+ """
+
+ name = proto.Field(proto.STRING, number=1,)
+
+
+class Batch(proto.Message):
+ r"""A representation of a batch workload in the service.
+
+ Attributes:
+ name (str):
+ Output only. The resource name of the batch.
+ uuid (str):
+ Output only. A batch UUID (Universally Unique
+ Identifier). The service generates this value
+ when it creates the batch.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time when the batch was
+ created.
+ pyspark_batch (google.cloud.dataproc_v1.types.PySparkBatch):
+ Optional. PySpark batch config.
+ spark_batch (google.cloud.dataproc_v1.types.SparkBatch):
+ Optional. Spark batch config.
+ spark_r_batch (google.cloud.dataproc_v1.types.SparkRBatch):
+ Optional. SparkR batch config.
+ spark_sql_batch (google.cloud.dataproc_v1.types.SparkSqlBatch):
+ Optional. SparkSql batch config.
+ runtime_info (google.cloud.dataproc_v1.types.RuntimeInfo):
+ Output only. Runtime information about batch
+ execution.
+ state (google.cloud.dataproc_v1.types.Batch.State):
+ Output only. The state of the batch.
+ state_message (str):
+ Output only. Batch state details, such as a failure
+ description if the state is ``FAILED``.
+ state_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time when the batch entered
+ its current state.
+ creator (str):
+ Output only. The email address of the user
+ who created the batch.
+ labels (Sequence[google.cloud.dataproc_v1.types.Batch.LabelsEntry]):
+ Optional. The labels to associate with this batch. Label
+ **keys** must contain 1 to 63 characters, and must conform
+ to `RFC 1035 `__.
+ Label **values** may be empty, but, if present, must contain
+ 1 to 63 characters, and must conform to `RFC
+ 1035 `__. No more than
+ 32 labels can be associated with a batch.
+ runtime_config (google.cloud.dataproc_v1.types.RuntimeConfig):
+ Optional. Runtime configuration for the batch
+ execution.
+ environment_config (google.cloud.dataproc_v1.types.EnvironmentConfig):
+ Optional. Environment configuration for the
+ batch execution.
+ operation (str):
+ Output only. The resource name of the
+ operation associated with this batch.
+ state_history (Sequence[google.cloud.dataproc_v1.types.Batch.StateHistory]):
+ Output only. Historical state information for
+ the batch.
+ """
+
+ class State(proto.Enum):
+ r"""The batch state."""
+ STATE_UNSPECIFIED = 0
+ PENDING = 1
+ RUNNING = 2
+ CANCELLING = 3
+ CANCELLED = 4
+ SUCCEEDED = 5
+ FAILED = 6
+
+ class StateHistory(proto.Message):
+ r"""Historical state information.
+
+ Attributes:
+ state (google.cloud.dataproc_v1.types.Batch.State):
+ Output only. The state of the batch at this
+ point in history.
+ state_message (str):
+ Output only. Details about the state at this
+ point in history.
+ state_start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time when the batch entered
+ the historical state.
+ """
+
+ state = proto.Field(proto.ENUM, number=1, enum="Batch.State",)
+ state_message = proto.Field(proto.STRING, number=2,)
+ state_start_time = proto.Field(
+ proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
+ )
+
+ name = proto.Field(proto.STRING, number=1,)
+ uuid = proto.Field(proto.STRING, number=2,)
+ create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
+ pyspark_batch = proto.Field(
+ proto.MESSAGE, number=4, oneof="batch_config", message="PySparkBatch",
+ )
+ spark_batch = proto.Field(
+ proto.MESSAGE, number=5, oneof="batch_config", message="SparkBatch",
+ )
+ spark_r_batch = proto.Field(
+ proto.MESSAGE, number=6, oneof="batch_config", message="SparkRBatch",
+ )
+ spark_sql_batch = proto.Field(
+ proto.MESSAGE, number=7, oneof="batch_config", message="SparkSqlBatch",
+ )
+ runtime_info = proto.Field(proto.MESSAGE, number=8, message=shared.RuntimeInfo,)
+ state = proto.Field(proto.ENUM, number=9, enum=State,)
+ state_message = proto.Field(proto.STRING, number=10,)
+ state_time = proto.Field(proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp,)
+ creator = proto.Field(proto.STRING, number=12,)
+ labels = proto.MapField(proto.STRING, proto.STRING, number=13,)
+ runtime_config = proto.Field(
+ proto.MESSAGE, number=14, message=shared.RuntimeConfig,
+ )
+ environment_config = proto.Field(
+ proto.MESSAGE, number=15, message=shared.EnvironmentConfig,
+ )
+ operation = proto.Field(proto.STRING, number=16,)
+ state_history = proto.RepeatedField(proto.MESSAGE, number=17, message=StateHistory,)
+
+
+class PySparkBatch(proto.Message):
+ r"""A configuration for running an `Apache
+ PySpark `__
+ batch workload.
+
+ Attributes:
+ main_python_file_uri (str):
+ Required. The HCFS URI of the main Python
+ file to use as the Spark driver. Must be a .py
+ file.
+ args (Sequence[str]):
+ Optional. The arguments to pass to the driver. Do not
+ include arguments that can be set as batch properties, such
+ as ``--conf``, since a collision can occur that causes an
+ incorrect batch submission.
+ python_file_uris (Sequence[str]):
+ Optional. HCFS file URIs of Python files to pass to the
+ PySpark framework. Supported file types: ``.py``, ``.egg``,
+ and ``.zip``.
+ jar_file_uris (Sequence[str]):
+ Optional. HCFS URIs of jar files to add to
+ the classpath of the Spark driver and tasks.
+ file_uris (Sequence[str]):
+ Optional. HCFS URIs of files to be placed in
+ the working directory of each executor.
+ archive_uris (Sequence[str]):
+ Optional. HCFS URIs of archives to be extracted into the
+ working directory of each executor. Supported file types:
+ ``.jar``, ``.tar``, ``.tar.gz``, ``.tgz``, and ``.zip``.
+ """
+
+ main_python_file_uri = proto.Field(proto.STRING, number=1,)
+ args = proto.RepeatedField(proto.STRING, number=2,)
+ python_file_uris = proto.RepeatedField(proto.STRING, number=3,)
+ jar_file_uris = proto.RepeatedField(proto.STRING, number=4,)
+ file_uris = proto.RepeatedField(proto.STRING, number=5,)
+ archive_uris = proto.RepeatedField(proto.STRING, number=6,)
+
+
+class SparkBatch(proto.Message):
+ r"""A configuration for running an `Apache
+ Spark `__ batch workload.
+
+ Attributes:
+ main_jar_file_uri (str):
+ Optional. The HCFS URI of the jar file that
+ contains the main class.
+ main_class (str):
+ Optional. The name of the driver main class. The jar file
+ that contains the class must be in the classpath or
+ specified in ``jar_file_uris``.
+ args (Sequence[str]):
+ Optional. The arguments to pass to the driver. Do not
+ include arguments that can be set as batch properties, such
+ as ``--conf``, since a collision can occur that causes an
+ incorrect batch submission.
+ jar_file_uris (Sequence[str]):
+ Optional. HCFS URIs of jar files to add to
+ the classpath of the Spark driver and tasks.
+ file_uris (Sequence[str]):
+ Optional. HCFS URIs of files to be placed in
+ the working directory of each executor.
+ archive_uris (Sequence[str]):
+ Optional. HCFS URIs of archives to be extracted into the
+ working directory of each executor. Supported file types:
+ ``.jar``, ``.tar``, ``.tar.gz``, ``.tgz``, and ``.zip``.
+ """
+
+ main_jar_file_uri = proto.Field(proto.STRING, number=1, oneof="driver",)
+ main_class = proto.Field(proto.STRING, number=2, oneof="driver",)
+ args = proto.RepeatedField(proto.STRING, number=3,)
+ jar_file_uris = proto.RepeatedField(proto.STRING, number=4,)
+ file_uris = proto.RepeatedField(proto.STRING, number=5,)
+ archive_uris = proto.RepeatedField(proto.STRING, number=6,)
+
+
+class SparkRBatch(proto.Message):
+ r"""A configuration for running an `Apache
+ SparkR `__ batch
+ workload.
+
+ Attributes:
+ main_r_file_uri (str):
+ Required. The HCFS URI of the main R file to use as the
+ driver. Must be a ``.R`` or ``.r`` file.
+ args (Sequence[str]):
+ Optional. The arguments to pass to the Spark driver. Do not
+ include arguments that can be set as batch properties, such
+ as ``--conf``, since a collision can occur that causes an
+ incorrect batch submission.
+ file_uris (Sequence[str]):
+ Optional. HCFS URIs of files to be placed in
+ the working directory of each executor.
+ archive_uris (Sequence[str]):
+ Optional. HCFS URIs of archives to be extracted into the
+ working directory of each executor. Supported file types:
+ ``.jar``, ``.tar``, ``.tar.gz``, ``.tgz``, and ``.zip``.
+ """
+
+ main_r_file_uri = proto.Field(proto.STRING, number=1,)
+ args = proto.RepeatedField(proto.STRING, number=2,)
+ file_uris = proto.RepeatedField(proto.STRING, number=3,)
+ archive_uris = proto.RepeatedField(proto.STRING, number=4,)
+
+
+class SparkSqlBatch(proto.Message):
+ r"""A configuration for running `Apache Spark
+ SQL `__ queries as a batch workload.
+
+ Attributes:
+ query_file_uri (str):
+ Required. The HCFS URI of the script that
+ contains Spark SQL queries to execute.
+ query_variables (Sequence[google.cloud.dataproc_v1.types.SparkSqlBatch.QueryVariablesEntry]):
+ Optional. Mapping of query variable names to values
+ (equivalent to the Spark SQL command:
+ ``SET name="value";``).
+ jar_file_uris (Sequence[str]):
+ Optional. HCFS URIs of jar files to be added
+ to the Spark CLASSPATH.
+ """
+
+ query_file_uri = proto.Field(proto.STRING, number=1,)
+ query_variables = proto.MapField(proto.STRING, proto.STRING, number=2,)
+ jar_file_uris = proto.RepeatedField(proto.STRING, number=3,)
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
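
To see how these messages compose, here is a minimal assumed-usage sketch (placeholder URIs and IDs) that builds a `CreateBatchRequest` around a `PySparkBatch` and uses `request_id` for retry deduplication:

    import uuid
    from google.cloud import dataproc_v1

    batch = dataproc_v1.Batch(
        pyspark_batch=dataproc_v1.PySparkBatch(
            main_python_file_uri="gs://my-bucket/job.py",  # placeholder URI
        ),
        labels={"team": "data-eng"},
    )
    request = dataproc_v1.CreateBatchRequest(
        parent="projects/my-project/locations/us-central1",  # placeholder
        batch=batch,
        batch_id="nightly-0001",
        request_id=str(uuid.uuid4()),  # a retry with the same ID is ignored
    )
    # operation = dataproc_v1.BatchControllerClient().create_batch(request=request)
    # result = operation.result()  # the finished Batch resource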
diff --git a/google/cloud/dataproc_v1/types/clusters.py b/google/cloud/dataproc_v1/types/clusters.py
index f2837680..a1634912 100644
--- a/google/cloud/dataproc_v1/types/clusters.py
+++ b/google/cloud/dataproc_v1/types/clusters.py
@@ -33,6 +33,7 @@
"GceClusterConfig",
"NodeGroupAffinity",
"ShieldedInstanceConfig",
+ "ConfidentialInstanceConfig",
"InstanceGroupConfig",
"ManagedGroupConfig",
"AcceleratorConfig",
@@ -63,7 +64,7 @@
class Cluster(proto.Message):
r"""Describes the identifying information, config, and status of
- a cluster of Compute Engine instances.
+ a Dataproc cluster.
Attributes:
project_id (str):
@@ -115,6 +116,7 @@ class Cluster(proto.Message):
class ClusterConfig(proto.Message):
r"""The cluster config.
+
Attributes:
config_bucket (str):
Optional. A Cloud Storage bucket used to stage job
@@ -124,10 +126,10 @@ class ClusterConfig(proto.Message):
your cluster's staging bucket according to the Compute
Engine zone where your cluster is deployed, and then create
and manage this project-level, per-location bucket (see
- `Dataproc staging
- bucket `__).
- **This field requires a Cloud Storage bucket name, not a URI
- to a Cloud Storage bucket.**
+ `Dataproc staging and temp
+ buckets `__).
+ **This field requires a Cloud Storage bucket name, not a
+ ``gs://...`` URI to a Cloud Storage bucket.**
temp_bucket (str):
Optional. A Cloud Storage bucket used to store ephemeral
cluster and jobs data, such as Spark and MapReduce history
@@ -137,23 +139,26 @@ class ClusterConfig(proto.Message):
zone where your cluster is deployed, and then create and
manage this project-level, per-location bucket. The default
bucket has a TTL of 90 days, but you can use any TTL (or
- none) if you specify a bucket. **This field requires a Cloud
- Storage bucket name, not a URI to a Cloud Storage bucket.**
+ none) if you specify a bucket (see `Dataproc staging and
+ temp
+ buckets `__).
+ **This field requires a Cloud Storage bucket name, not a
+ ``gs://...`` URI to a Cloud Storage bucket.**
gce_cluster_config (google.cloud.dataproc_v1.types.GceClusterConfig):
Optional. The shared Compute Engine config
settings for all instances in a cluster.
master_config (google.cloud.dataproc_v1.types.InstanceGroupConfig):
Optional. The Compute Engine config settings
- for the master instance in a cluster.
+ for the cluster's master instance.
worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig):
Optional. The Compute Engine config settings
- for worker instances in a cluster.
+ for the cluster's worker instances.
secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupConfig):
Optional. The Compute Engine config settings
- for additional worker instances in a cluster.
+ for a cluster's secondary worker instances.
software_config (google.cloud.dataproc_v1.types.SoftwareConfig):
- Optional. The config settings for software
- inside the cluster.
+ Optional. The config settings for cluster
+ software.
initialization_actions (Sequence[google.cloud.dataproc_v1.types.NodeInitializationAction]):
Optional. Commands to execute on each node after config is
completed. By default, executables are run on master and all
@@ -228,6 +233,7 @@ class ClusterConfig(proto.Message):
class GkeClusterConfig(proto.Message):
r"""The GKE config for this cluster.
+
Attributes:
namespaced_gke_deployment_target (google.cloud.dataproc_v1.types.GkeClusterConfig.NamespacedGkeDeploymentTarget):
Optional. A target for the deployment.
@@ -256,6 +262,7 @@ class NamespacedGkeDeploymentTarget(proto.Message):
class EndpointConfig(proto.Message):
r"""Endpoint config for this cluster
+
Attributes:
http_ports (Sequence[google.cloud.dataproc_v1.types.EndpointConfig.HttpPortsEntry]):
Output only. The map of port descriptions to URLs. Will only
@@ -272,6 +279,7 @@ class EndpointConfig(proto.Message):
class AutoscalingConfig(proto.Message):
r"""Autoscaling Policy config associated with the cluster.
+
Attributes:
policy_uri (str):
Optional. The autoscaling policy used by the cluster.
@@ -291,6 +299,7 @@ class AutoscalingConfig(proto.Message):
class EncryptionConfig(proto.Message):
r"""Encryption settings for the cluster.
+
Attributes:
gce_pd_kms_key_name (str):
Optional. The Cloud KMS key name to use for
@@ -399,6 +408,10 @@ class GceClusterConfig(proto.Message):
Optional. Shielded Instance Config for clusters using
`Compute Engine Shielded
VMs `__.
+ confidential_instance_config (google.cloud.dataproc_v1.types.ConfidentialInstanceConfig):
+ Optional. Confidential Instance Config for clusters using
+ `Confidential
+ VMs `__.
"""
class PrivateIpv6GoogleAccess(proto.Enum):
@@ -433,6 +446,9 @@ class PrivateIpv6GoogleAccess(proto.Enum):
shielded_instance_config = proto.Field(
proto.MESSAGE, number=14, message="ShieldedInstanceConfig",
)
+ confidential_instance_config = proto.Field(
+ proto.MESSAGE, number=15, message="ConfidentialInstanceConfig",
+ )
class NodeGroupAffinity(proto.Message):
@@ -477,6 +493,19 @@ class ShieldedInstanceConfig(proto.Message):
enable_integrity_monitoring = proto.Field(proto.BOOL, number=3,)
+class ConfidentialInstanceConfig(proto.Message):
+ r"""Confidential Instance Config for clusters using `Confidential
+ VMs `__
+
+ Attributes:
+ enable_confidential_compute (bool):
+ Optional. Defines whether the instance should
+ have confidential compute enabled.
+ """
+
+ enable_confidential_compute = proto.Field(proto.BOOL, number=1,)
+
+
class InstanceGroupConfig(proto.Message):
r"""The config settings for Compute Engine resources in
an instance group, such as a master or worker group.
@@ -687,6 +716,7 @@ class NodeInitializationAction(proto.Message):
class ClusterStatus(proto.Message):
r"""The status of a cluster and its instances.
+
Attributes:
state (google.cloud.dataproc_v1.types.ClusterStatus.State):
Output only. The cluster's state.
@@ -708,6 +738,7 @@ class State(proto.Enum):
CREATING = 1
RUNNING = 2
ERROR = 3
+ ERROR_DUE_TO_UPDATE = 9
DELETING = 4
UPDATING = 5
STOPPING = 6
@@ -747,6 +778,7 @@ class SecurityConfig(proto.Message):
class KerberosConfig(proto.Message):
r"""Specifies Kerberos related configuration.
+
Attributes:
enable_kerberos (bool):
Optional. Flag to indicate whether to
@@ -894,6 +926,7 @@ class SoftwareConfig(proto.Message):
class LifecycleConfig(proto.Message):
r"""Specifies the cluster auto-delete schedule configuration.
+
Attributes:
idle_delete_ttl (google.protobuf.duration_pb2.Duration):
Optional. The duration to keep the cluster alive while
@@ -935,6 +968,7 @@ class LifecycleConfig(proto.Message):
class MetastoreConfig(proto.Message):
r"""Specifies a Metastore configuration.
+
Attributes:
dataproc_metastore_service (str):
Required. Resource name of an existing Dataproc Metastore
@@ -967,6 +1001,7 @@ class ClusterMetrics(proto.Message):
class CreateClusterRequest(proto.Message):
r"""A request to create a cluster.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -977,7 +1012,7 @@ class CreateClusterRequest(proto.Message):
cluster (google.cloud.dataproc_v1.types.Cluster):
Required. The cluster to create.
request_id (str):
- Optional. A unique id used to identify the request. If the
+ Optional. A unique ID used to identify the request. If the
server receives two
`CreateClusterRequest `__\ s
with the same id, then the second request will be ignored
@@ -988,19 +1023,26 @@ class CreateClusterRequest(proto.Message):
It is recommended to always set this value to a
`UUID `__.
- The id must contain only letters (a-z, A-Z), numbers (0-9),
+ The ID must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40
characters.
+ action_on_failed_primary_workers (google.cloud.dataproc_v1.types.FailureAction):
+ Optional. Failure action when primary worker
+ creation fails.
"""
project_id = proto.Field(proto.STRING, number=1,)
region = proto.Field(proto.STRING, number=3,)
cluster = proto.Field(proto.MESSAGE, number=2, message="Cluster",)
request_id = proto.Field(proto.STRING, number=4,)
+ action_on_failed_primary_workers = proto.Field(
+ proto.ENUM, number=5, enum=shared.FailureAction,
+ )
class UpdateClusterRequest(proto.Message):
r"""A request to update a cluster.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -1086,7 +1128,7 @@ class UpdateClusterRequest(proto.Message):
request_id (str):
- Optional. A unique id used to identify the request. If the
+ Optional. A unique ID used to identify the request. If the
server receives two
`UpdateClusterRequest `__\ s
with the same id, then the second request will be ignored
@@ -1097,7 +1139,7 @@ class UpdateClusterRequest(proto.Message):
It is recommended to always set this value to a
`UUID `__.
- The id must contain only letters (a-z, A-Z), numbers (0-9),
+ The ID must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40
characters.
"""
@@ -1117,6 +1159,7 @@ class UpdateClusterRequest(proto.Message):
class StopClusterRequest(proto.Message):
r"""A request to stop a cluster.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -1131,7 +1174,7 @@ class StopClusterRequest(proto.Message):
fail (with error NOT_FOUND) if a cluster with the specified
UUID does not exist.
request_id (str):
- Optional. A unique id used to identify the request. If the
+ Optional. A unique ID used to identify the request. If the
server receives two
`StopClusterRequest `__\ s
with the same id, then the second request will be ignored
@@ -1142,7 +1185,7 @@ class StopClusterRequest(proto.Message):
Recommendation: Set this value to a
`UUID `__.
- The id must contain only letters (a-z, A-Z), numbers (0-9),
+ The ID must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40
characters.
"""
@@ -1156,6 +1199,7 @@ class StopClusterRequest(proto.Message):
class StartClusterRequest(proto.Message):
r"""A request to start a cluster.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -1170,7 +1214,7 @@ class StartClusterRequest(proto.Message):
fail (with error NOT_FOUND) if a cluster with the specified
UUID does not exist.
request_id (str):
- Optional. A unique id used to identify the request. If the
+ Optional. A unique ID used to identify the request. If the
server receives two
`StartClusterRequest `__\ s
with the same id, then the second request will be ignored
@@ -1181,7 +1225,7 @@ class StartClusterRequest(proto.Message):
Recommendation: Set this value to a
`UUID `__.
- The id must contain only letters (a-z, A-Z), numbers (0-9),
+ The ID must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40
characters.
"""
@@ -1195,6 +1239,7 @@ class StartClusterRequest(proto.Message):
class DeleteClusterRequest(proto.Message):
r"""A request to delete a cluster.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -1209,7 +1254,7 @@ class DeleteClusterRequest(proto.Message):
should fail (with error NOT_FOUND) if cluster with specified
UUID does not exist.
request_id (str):
- Optional. A unique id used to identify the request. If the
+ Optional. A unique ID used to identify the request. If the
server receives two
`DeleteClusterRequest `__\ s
with the same id, then the second request will be ignored
@@ -1220,7 +1265,7 @@ class DeleteClusterRequest(proto.Message):
It is recommended to always set this value to a
`UUID `__.
- The id must contain only letters (a-z, A-Z), numbers (0-9),
+ The ID must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and hyphens (-). The maximum length is 40
characters.
"""
@@ -1254,6 +1299,7 @@ class GetClusterRequest(proto.Message):
class ListClustersRequest(proto.Message):
r"""A request to list the clusters in a project.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -1298,6 +1344,7 @@ class ListClustersRequest(proto.Message):
class ListClustersResponse(proto.Message):
r"""The list of all clusters in a project.
+
Attributes:
clusters (Sequence[google.cloud.dataproc_v1.types.Cluster]):
Output only. The clusters in the project.
@@ -1318,6 +1365,7 @@ def raw_page(self):
class DiagnoseClusterRequest(proto.Message):
r"""A request to collect cluster diagnostic information.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -1336,6 +1384,7 @@ class DiagnoseClusterRequest(proto.Message):
class DiagnoseClusterResults(proto.Message):
r"""The location of diagnostic output.
+
Attributes:
output_uri (str):
Output only. The Cloud Storage URI of the
@@ -1349,6 +1398,7 @@ class DiagnoseClusterResults(proto.Message):
class ReservationAffinity(proto.Message):
r"""Reservation Affinity for consuming Zonal reservation.
+
Attributes:
consume_reservation_type (google.cloud.dataproc_v1.types.ReservationAffinity.Type):
Optional. Type of reservation to consume
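
The new `action_on_failed_primary_workers` field wires the `FailureAction` enum (added in `shared.py` below) into cluster creation. A sketch with placeholder identifiers:

    from google.cloud import dataproc_v1

    request = dataproc_v1.CreateClusterRequest(
        project_id="my-project",  # placeholder
        region="us-central1",
        cluster=dataproc_v1.Cluster(cluster_name="my-cluster"),
        # Delete the cluster instead of leaving it in the ERROR state
        # when primary worker creation fails.
        action_on_failed_primary_workers=dataproc_v1.FailureAction.DELETE,
    )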
diff --git a/google/cloud/dataproc_v1/types/jobs.py b/google/cloud/dataproc_v1/types/jobs.py
index 87fa758e..0acc8554 100644
--- a/google/cloud/dataproc_v1/types/jobs.py
+++ b/google/cloud/dataproc_v1/types/jobs.py
@@ -52,6 +52,7 @@
class LoggingConfig(proto.Message):
r"""The runtime logging config of the job.
+
Attributes:
driver_log_levels (Sequence[google.cloud.dataproc_v1.types.LoggingConfig.DriverLogLevelsEntry]):
The per-package log levels for the driver.
@@ -63,8 +64,8 @@ class LoggingConfig(proto.Message):
class Level(proto.Enum):
r"""The Log4j level for job execution. When running an `Apache
- Hive `__ job, Cloud Dataproc configures the
- Hive client to an equivalent verbosity level.
+ Hive `__ job, Cloud Dataproc configures
+ the Hive client to an equivalent verbosity level.
"""
LEVEL_UNSPECIFIED = 0
ALL = 1
@@ -243,6 +244,7 @@ class PySparkJob(proto.Message):
class QueryList(proto.Message):
r"""A list of queries to run on a cluster.
+
Attributes:
queries (Sequence[str]):
Required. The queries to execute. You do not need to end a
@@ -478,6 +480,7 @@ class PrestoJob(proto.Message):
class JobPlacement(proto.Message):
r"""Dataproc job config.
+
Attributes:
cluster_name (str):
Required. The name of the cluster where the
@@ -497,6 +500,7 @@ class JobPlacement(proto.Message):
class JobStatus(proto.Message):
r"""Dataproc job status.
+
Attributes:
state (google.cloud.dataproc_v1.types.JobStatus.State):
Output only. A state message specifying the
@@ -543,6 +547,7 @@ class Substate(proto.Enum):
class JobReference(proto.Message):
r"""Encapsulates the full scoping used to reference a job.
+
Attributes:
project_id (str):
Optional. The ID of the Google Cloud Platform
@@ -611,6 +616,7 @@ class State(proto.Enum):
class Job(proto.Message):
r"""A Dataproc job resource.
+
Attributes:
reference (google.cloud.dataproc_v1.types.JobReference):
Optional. The fully qualified reference to the job, which
@@ -716,6 +722,7 @@ class Job(proto.Message):
class JobScheduling(proto.Message):
r"""Job scheduling options.
+
Attributes:
max_failures_per_hour (int):
Optional. Maximum number of times per hour a
@@ -741,6 +748,7 @@ class JobScheduling(proto.Message):
class SubmitJobRequest(proto.Message):
r"""A request to submit a job.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -774,6 +782,7 @@ class SubmitJobRequest(proto.Message):
class JobMetadata(proto.Message):
r"""Job Operation metadata.
+
Attributes:
job_id (str):
Output only. The job id.
@@ -813,6 +822,7 @@ class GetJobRequest(proto.Message):
class ListJobsRequest(proto.Message):
r"""A request to list jobs in a project.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -873,6 +883,7 @@ class JobStateMatcher(proto.Enum):
class UpdateJobRequest(proto.Message):
r"""A request to update a job.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -903,6 +914,7 @@ class UpdateJobRequest(proto.Message):
class ListJobsResponse(proto.Message):
r"""A list of jobs in a project.
+
Attributes:
jobs (Sequence[google.cloud.dataproc_v1.types.Job]):
Output only. Jobs list.
@@ -923,6 +935,7 @@ def raw_page(self):
class CancelJobRequest(proto.Message):
r"""A request to cancel a job.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
@@ -941,6 +954,7 @@ class CancelJobRequest(proto.Message):
class DeleteJobRequest(proto.Message):
r"""A request to delete a job.
+
Attributes:
project_id (str):
Required. The ID of the Google Cloud Platform
diff --git a/google/cloud/dataproc_v1/types/operations.py b/google/cloud/dataproc_v1/types/operations.py
index 9448110a..03721bc1 100644
--- a/google/cloud/dataproc_v1/types/operations.py
+++ b/google/cloud/dataproc_v1/types/operations.py
@@ -20,12 +20,55 @@
__protobuf__ = proto.module(
package="google.cloud.dataproc.v1",
- manifest={"ClusterOperationStatus", "ClusterOperationMetadata",},
+ manifest={
+ "BatchOperationMetadata",
+ "ClusterOperationStatus",
+ "ClusterOperationMetadata",
+ },
)
+class BatchOperationMetadata(proto.Message):
+ r"""Metadata describing the Batch operation.
+
+ Attributes:
+ batch (str):
+ Name of the batch for the operation.
+ batch_uuid (str):
+ Batch UUID for the operation.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time when the operation was created.
+ done_time (google.protobuf.timestamp_pb2.Timestamp):
+ The time when the operation finished.
+ operation_type (google.cloud.dataproc_v1.types.BatchOperationMetadata.BatchOperationType):
+ The operation type.
+ description (str):
+ Short description of the operation.
+ labels (Sequence[google.cloud.dataproc_v1.types.BatchOperationMetadata.LabelsEntry]):
+ Labels associated with the operation.
+ warnings (Sequence[str]):
+ Warnings encountered during operation
+ execution.
+ """
+
+ class BatchOperationType(proto.Enum):
+ r"""Operation type for Batch resources"""
+ BATCH_OPERATION_TYPE_UNSPECIFIED = 0
+ BATCH = 1
+
+ batch = proto.Field(proto.STRING, number=1,)
+ batch_uuid = proto.Field(proto.STRING, number=2,)
+ create_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
+ done_time = proto.Field(proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp,)
+ operation_type = proto.Field(proto.ENUM, number=6, enum=BatchOperationType,)
+ description = proto.Field(proto.STRING, number=7,)
+ labels = proto.MapField(proto.STRING, proto.STRING, number=8,)
+ warnings = proto.RepeatedField(proto.STRING, number=9,)
+
+
class ClusterOperationStatus(proto.Message):
r"""The status of the operation.
+
Attributes:
state (google.cloud.dataproc_v1.types.ClusterOperationStatus.State):
Output only. A message containing the
@@ -57,6 +100,7 @@ class State(proto.Enum):
class ClusterOperationMetadata(proto.Message):
r"""Metadata describing the operation.
+
Attributes:
cluster_name (str):
Output only. Name of the cluster for the
diff --git a/google/cloud/dataproc_v1/types/shared.py b/google/cloud/dataproc_v1/types/shared.py
index 2e397004..d4d4b616 100644
--- a/google/cloud/dataproc_v1/types/shared.py
+++ b/google/cloud/dataproc_v1/types/shared.py
@@ -17,14 +17,22 @@
__protobuf__ = proto.module(
- package="google.cloud.dataproc.v1", manifest={"Component",},
+ package="google.cloud.dataproc.v1",
+ manifest={
+ "Component",
+ "FailureAction",
+ "RuntimeConfig",
+ "EnvironmentConfig",
+ "ExecutionConfig",
+ "SparkHistoryServerConfig",
+ "PeripheralsConfig",
+ "RuntimeInfo",
+ },
)
class Component(proto.Enum):
- r"""Cluster components that can be activated.
- Next ID: 16.
- """
+ r"""Cluster components that can be activated."""
COMPONENT_UNSPECIFIED = 0
ANACONDA = 5
DOCKER = 13
@@ -40,4 +48,125 @@ class Component(proto.Enum):
ZOOKEEPER = 8
+class FailureAction(proto.Enum):
+ r"""Actions in response to failure of a resource associated with
+ a cluster.
+ """
+ FAILURE_ACTION_UNSPECIFIED = 0
+ NO_ACTION = 1
+ DELETE = 2
+
+
+class RuntimeConfig(proto.Message):
+ r"""Runtime configuration for a workload.
+
+ Attributes:
+ properties (Sequence[google.cloud.dataproc_v1.types.RuntimeConfig.PropertiesEntry]):
+ Optional. A mapping of property names to
+ values, which are used to configure workload
+ execution.
+ """
+
+ properties = proto.MapField(proto.STRING, proto.STRING, number=3,)
+
+
+class EnvironmentConfig(proto.Message):
+ r"""Environment configuration for a workload.
+
+ Attributes:
+ execution_config (google.cloud.dataproc_v1.types.ExecutionConfig):
+ Optional. Execution configuration for a
+ workload.
+ peripherals_config (google.cloud.dataproc_v1.types.PeripheralsConfig):
+ Optional. Peripherals configuration that the
+ workload has access to.
+ """
+
+ execution_config = proto.Field(proto.MESSAGE, number=1, message="ExecutionConfig",)
+ peripherals_config = proto.Field(
+ proto.MESSAGE, number=2, message="PeripheralsConfig",
+ )
+
+
+class ExecutionConfig(proto.Message):
+ r"""Execution configuration for a workload.
+
+ Attributes:
+ service_account (str):
+ Optional. Service account used to execute
+ the workload.
+ network_uri (str):
+ Optional. Network URI to connect workload to.
+ subnetwork_uri (str):
+ Optional. Subnetwork URI to connect workload
+ to.
+ network_tags (Sequence[str]):
+ Optional. Tags used for network traffic
+ control.
+ kms_key (str):
+ Optional. The Cloud KMS key to use for
+ encryption.
+ """
+
+ service_account = proto.Field(proto.STRING, number=2,)
+ network_uri = proto.Field(proto.STRING, number=4, oneof="network",)
+ subnetwork_uri = proto.Field(proto.STRING, number=5, oneof="network",)
+ network_tags = proto.RepeatedField(proto.STRING, number=6,)
+ kms_key = proto.Field(proto.STRING, number=7,)
+
+
+class SparkHistoryServerConfig(proto.Message):
+ r"""Spark History Server configuration for the workload.
+
+ Attributes:
+ dataproc_cluster (str):
+ Optional. Resource name of an existing Dataproc Cluster to
+ act as a Spark History Server for the workload.
+
+ Example:
+
+ - ``projects/[project_id]/regions/[region]/clusters/[cluster_name]``
+ """
+
+ dataproc_cluster = proto.Field(proto.STRING, number=1,)
+
+
+class PeripheralsConfig(proto.Message):
+ r"""Auxiliary services configuration for a workload.
+
+ Attributes:
+ metastore_service (str):
+ Optional. Resource name of an existing Dataproc Metastore
+ service.
+
+ Example:
+
+ - ``projects/[project_id]/locations/[region]/services/[service_id]``
+ spark_history_server_config (google.cloud.dataproc_v1.types.SparkHistoryServerConfig):
+ Optional. The Spark History Server
+ configuration for the workload.
+ """
+
+ metastore_service = proto.Field(proto.STRING, number=1,)
+ spark_history_server_config = proto.Field(
+ proto.MESSAGE, number=2, message="SparkHistoryServerConfig",
+ )
+
+
+class RuntimeInfo(proto.Message):
+ r"""Runtime information about workload execution.
+
+ Attributes:
+ endpoints (Sequence[google.cloud.dataproc_v1.types.RuntimeInfo.EndpointsEntry]):
+ Output only. Map of remote access endpoints
+ (such as web interfaces and APIs) to their URIs.
+ output_uri (str):
+ Output only. A URI pointing to the location
+ of the stdout and stderr of the workload.
+ """
+
+ endpoints = proto.MapField(proto.STRING, proto.STRING, number=1,)
+ output_uri = proto.Field(proto.STRING, number=2,)
+
+
__all__ = tuple(sorted(__protobuf__.manifest))
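
These shared messages are what a `Batch` embeds through its `runtime_config` and `environment_config` fields. A composition sketch with placeholder resource names:

    from google.cloud import dataproc_v1

    env = dataproc_v1.EnvironmentConfig(
        execution_config=dataproc_v1.ExecutionConfig(
            service_account="runner@my-project.iam.gserviceaccount.com",  # placeholder
            subnetwork_uri="my-subnet",  # shares the "network" oneof with network_uri
            network_tags=["dataproc"],
        ),
        peripherals_config=dataproc_v1.PeripheralsConfig(
            spark_history_server_config=dataproc_v1.SparkHistoryServerConfig(
                dataproc_cluster="projects/my-project/regions/us-central1/clusters/phs",  # placeholder
            ),
        ),
    )
    runtime = dataproc_v1.RuntimeConfig(properties={"spark.executor.cores": "4"})
    batch = dataproc_v1.Batch(environment_config=env, runtime_config=runtime)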
diff --git a/google/cloud/dataproc_v1/types/workflow_templates.py b/google/cloud/dataproc_v1/types/workflow_templates.py
index 98987809..24556467 100644
--- a/google/cloud/dataproc_v1/types/workflow_templates.py
+++ b/google/cloud/dataproc_v1/types/workflow_templates.py
@@ -51,6 +51,7 @@
class WorkflowTemplate(proto.Message):
r"""A Dataproc workflow template resource.
+
Attributes:
id (str):
@@ -164,6 +165,7 @@ class WorkflowTemplatePlacement(proto.Message):
class ManagedCluster(proto.Message):
r"""Cluster that is managed by the workflow.
+
Attributes:
cluster_name (str):
Required. The cluster name prefix. A unique
@@ -218,6 +220,7 @@ class ClusterSelector(proto.Message):
class OrderedJob(proto.Message):
r"""A job executed by the workflow.
+
Attributes:
step_id (str):
Required. The step id. The id must be unique among all jobs
@@ -385,6 +388,7 @@ class TemplateParameter(proto.Message):
class ParameterValidation(proto.Message):
r"""Configuration for parameter validation.
+
Attributes:
regex (google.cloud.dataproc_v1.types.RegexValidation):
Validation based on regular expressions.
@@ -402,6 +406,7 @@ class ParameterValidation(proto.Message):
class RegexValidation(proto.Message):
r"""Validation based on regular expressions.
+
Attributes:
regexes (Sequence[str]):
Required. RE2 regular expressions used to
@@ -415,6 +420,7 @@ class RegexValidation(proto.Message):
class ValueValidation(proto.Message):
r"""Validation based on a list of allowed values.
+
Attributes:
values (Sequence[str]):
Required. List of allowed values for the
@@ -426,6 +432,7 @@ class ValueValidation(proto.Message):
class WorkflowMetadata(proto.Message):
r"""A Dataproc workflow template resource.
+
Attributes:
template (str):
Output only. The resource name of the workflow template as
@@ -506,6 +513,7 @@ class State(proto.Enum):
class ClusterOperation(proto.Message):
r"""The cluster operation triggered by a workflow.
+
Attributes:
operation_id (str):
Output only. The id of the cluster operation.
@@ -522,6 +530,7 @@ class ClusterOperation(proto.Message):
class WorkflowGraph(proto.Message):
r"""The workflow graph.
+
Attributes:
nodes (Sequence[google.cloud.dataproc_v1.types.WorkflowNode]):
Output only. The workflow nodes.
@@ -532,6 +541,7 @@ class WorkflowGraph(proto.Message):
class WorkflowNode(proto.Message):
r"""The workflow node.
+
Attributes:
step_id (str):
Output only. The name of the node.
@@ -564,6 +574,7 @@ class NodeState(proto.Enum):
class CreateWorkflowTemplateRequest(proto.Message):
r"""A request to create a workflow template.
+
Attributes:
parent (str):
Required. The resource name of the region or location, as
@@ -588,6 +599,7 @@ class CreateWorkflowTemplateRequest(proto.Message):
class GetWorkflowTemplateRequest(proto.Message):
r"""A request to fetch a workflow template.
+
Attributes:
name (str):
Required. The resource name of the workflow template, as
@@ -614,6 +626,7 @@ class GetWorkflowTemplateRequest(proto.Message):
class InstantiateWorkflowTemplateRequest(proto.Message):
r"""A request to instantiate a workflow template.
+
Attributes:
name (str):
Required. The resource name of the workflow template, as
@@ -661,6 +674,7 @@ class InstantiateWorkflowTemplateRequest(proto.Message):
class InstantiateInlineWorkflowTemplateRequest(proto.Message):
r"""A request to instantiate an inline workflow template.
+
Attributes:
parent (str):
Required. The resource name of the region or location, as
@@ -699,6 +713,7 @@ class InstantiateInlineWorkflowTemplateRequest(proto.Message):
class UpdateWorkflowTemplateRequest(proto.Message):
r"""A request to update a workflow template.
+
Attributes:
template (google.cloud.dataproc_v1.types.WorkflowTemplate):
Required. The updated workflow template.
@@ -712,6 +727,7 @@ class UpdateWorkflowTemplateRequest(proto.Message):
class ListWorkflowTemplatesRequest(proto.Message):
r"""A request to list workflow templates in a project.
+
Attributes:
parent (str):
Required. The resource name of the region or location, as
diff --git a/noxfile.py b/noxfile.py
index dfec6d7e..7ada6183 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -29,7 +29,7 @@
DEFAULT_PYTHON_VERSION = "3.8"
SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"]
-UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
+UNIT_TEST_PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
@@ -101,7 +101,7 @@ def default(session):
"py.test",
"--quiet",
f"--junitxml=unit_{session.python}_sponge_log.xml",
- "--cov=google/cloud",
+ "--cov=google",
"--cov=tests/unit",
"--cov-append",
"--cov-config=.coveragerc",
diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py
index 1fd8956f..93a9122c 100644
--- a/samples/snippets/noxfile.py
+++ b/samples/snippets/noxfile.py
@@ -87,7 +87,7 @@ def get_pytest_env_vars() -> Dict[str, str]:
# DO NOT EDIT - automatically generated.
# All versions used to test samples.
-ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9"]
+ALL_VERSIONS = ["3.6", "3.7", "3.8", "3.9", "3.10"]
# Any default versions that should be ignored.
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"]
diff --git a/samples/snippets/requirements.txt b/samples/snippets/requirements.txt
index 34decddd..133ac5f1 100644
--- a/samples/snippets/requirements.txt
+++ b/samples/snippets/requirements.txt
@@ -1,8 +1,8 @@
backoff==1.11.1
grpcio==1.41.0
-google-auth==2.2.1
+google-auth==2.3.0
google-auth-httplib2==0.1.0
google-cloud==0.34.0
google-cloud-storage==1.42.3
-google-cloud-dataproc==2.5.0
+google-cloud-dataproc==3.0.0
diff --git a/scripts/fixup_dataproc_v1_keywords.py b/scripts/fixup_dataproc_v1_keywords.py
index c5d79a7f..290f6db2 100644
--- a/scripts/fixup_dataproc_v1_keywords.py
+++ b/scripts/fixup_dataproc_v1_keywords.py
@@ -41,20 +41,24 @@ class dataprocCallTransformer(cst.CSTTransformer):
METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
'cancel_job': ('project_id', 'region', 'job_id', ),
'create_autoscaling_policy': ('parent', 'policy', ),
- 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', ),
+ 'create_batch': ('parent', 'batch', 'batch_id', 'request_id', ),
+ 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', 'action_on_failed_primary_workers', ),
'create_workflow_template': ('parent', 'template', ),
'delete_autoscaling_policy': ('name', ),
+ 'delete_batch': ('name', ),
'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ),
'delete_job': ('project_id', 'region', 'job_id', ),
'delete_workflow_template': ('name', 'version', ),
'diagnose_cluster': ('project_id', 'region', 'cluster_name', ),
'get_autoscaling_policy': ('name', ),
+ 'get_batch': ('name', ),
'get_cluster': ('project_id', 'region', 'cluster_name', ),
'get_job': ('project_id', 'region', 'job_id', ),
'get_workflow_template': ('name', 'version', ),
'instantiate_inline_workflow_template': ('parent', 'template', 'request_id', ),
'instantiate_workflow_template': ('name', 'version', 'request_id', 'parameters', ),
'list_autoscaling_policies': ('parent', 'page_size', 'page_token', ),
+ 'list_batches': ('parent', 'page_size', 'page_token', ),
'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ),
'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ),
'list_workflow_templates': ('parent', 'page_size', 'page_token', ),
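
These entries keep the migration script current with the new BatchController
surface: the script rewrites 2.x-style positional calls into the keyword form
the 3.x generated client expects. A hypothetical before/after (an illustrative
fragment with placeholder variable names, not output captured from the tool):

    # Before running scripts/fixup_dataproc_v1_keywords.py:
    client.create_batch(parent, batch, batch_id)

    # After:
    client.create_batch(parent=parent, batch=batch, batch_id=batch_id)
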
diff --git a/setup.py b/setup.py
index f933a258..e5ab9495 100644
--- a/setup.py
+++ b/setup.py
@@ -22,7 +22,7 @@
name = "google-cloud-dataproc"
description = "Google Cloud Dataproc API client library"
-version = "3.0.0"
+version = "3.1.0"
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
@@ -79,6 +79,8 @@
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
"Operating System :: OS Independent",
"Topic :: Internet",
],
diff --git a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py
index 035bd8d0..880f0c4a 100644
--- a/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py
+++ b/tests/unit/gapic/dataproc_v1/test_autoscaling_policy_service.py
@@ -29,6 +29,7 @@
from google.api_core import gapic_v1
from google.api_core import grpc_helpers
from google.api_core import grpc_helpers_async
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataproc_v1.services.autoscaling_policy_service import (
@@ -1987,6 +1988,9 @@ def test_autoscaling_policy_service_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
@requires_google_auth_gte_1_25_0
def test_autoscaling_policy_service_base_transport_with_credentials_file():
@@ -2475,3 +2479,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = AutoscalingPolicyServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = AutoscalingPolicyServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = AutoscalingPolicyServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
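
The three tests above pin down the new close/context-manager behavior: exiting
the client's `with` block closes the underlying transport channel. In
application code that looks roughly like this sketch (the policy name is a
placeholder):

    from google.cloud import dataproc_v1

    with dataproc_v1.AutoscalingPolicyServiceClient() as client:
        policy = client.get_autoscaling_policy(
            name="projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"
        )
    # The channel is closed on exit; further calls on `client` will fail.
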
diff --git a/tests/unit/gapic/dataproc_v1/test_batch_controller.py b/tests/unit/gapic/dataproc_v1/test_batch_controller.py
new file mode 100644
index 00000000..39eb1ab0
--- /dev/null
+++ b/tests/unit/gapic/dataproc_v1/test_batch_controller.py
@@ -0,0 +1,2135 @@
+# -*- coding: utf-8 -*-
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import mock
+import packaging.version
+
+import grpc
+from grpc.experimental import aio
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation_async # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.dataproc_v1.services.batch_controller import (
+ BatchControllerAsyncClient,
+)
+from google.cloud.dataproc_v1.services.batch_controller import BatchControllerClient
+from google.cloud.dataproc_v1.services.batch_controller import pagers
+from google.cloud.dataproc_v1.services.batch_controller import transports
+from google.cloud.dataproc_v1.services.batch_controller.transports.base import (
+ _GOOGLE_AUTH_VERSION,
+)
+from google.cloud.dataproc_v1.types import batches
+from google.cloud.dataproc_v1.types import operations
+from google.cloud.dataproc_v1.types import shared
+from google.longrunning import operations_pb2
+from google.oauth2 import service_account
+from google.protobuf import timestamp_pb2 # type: ignore
+import google.auth
+
+
+# TODO(busunkim): Once google-auth >= 1.25.0 is required transitively
+# through google-api-core:
+# - Delete the auth "less than" test cases
+# - Delete these pytest markers (Make the "greater than or equal to" tests the default).
+requires_google_auth_lt_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth < 1.25.0",
+)
+requires_google_auth_gte_1_25_0 = pytest.mark.skipif(
+ packaging.version.parse(_GOOGLE_AUTH_VERSION) < packaging.version.parse("1.25.0"),
+ reason="This test requires google-auth >= 1.25.0",
+)
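+# These markers mirror the ones already used in the other test modules:
+# decorating a test with @requires_google_auth_gte_1_25_0 (or the _lt_ variant)
+# skips it on the wrong side of the google-auth 1.25.0 boundary.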
+
+
+def client_cert_source_callback():
+ return b"cert bytes", b"key bytes"
+
+
+# If the default endpoint is localhost, then the default mtls endpoint will be
+# the same. This method modifies the default endpoint so that the client can
+# produce a different mtls endpoint for testing purposes.
+def modify_default_endpoint(client):
+ return (
+ "foo.googleapis.com"
+ if ("localhost" in client.DEFAULT_ENDPOINT)
+ else client.DEFAULT_ENDPOINT
+ )
+
+
+def test__get_default_mtls_endpoint():
+ api_endpoint = "example.googleapis.com"
+ api_mtls_endpoint = "example.mtls.googleapis.com"
+ sandbox_endpoint = "example.sandbox.googleapis.com"
+ sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com"
+ non_googleapi = "api.example.com"
+
+ assert BatchControllerClient._get_default_mtls_endpoint(None) is None
+ assert (
+ BatchControllerClient._get_default_mtls_endpoint(api_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ BatchControllerClient._get_default_mtls_endpoint(api_mtls_endpoint)
+ == api_mtls_endpoint
+ )
+ assert (
+ BatchControllerClient._get_default_mtls_endpoint(sandbox_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ BatchControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint)
+ == sandbox_mtls_endpoint
+ )
+ assert (
+ BatchControllerClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class", [BatchControllerClient, BatchControllerAsyncClient,]
+)
+def test_batch_controller_client_from_service_account_info(client_class):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_info"
+ ) as factory:
+ factory.return_value = creds
+ info = {"valid": True}
+ client = client_class.from_service_account_info(info)
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == "dataproc.googleapis.com:443"
+
+
+@pytest.mark.parametrize(
+ "transport_class,transport_name",
+ [
+ (transports.BatchControllerGrpcTransport, "grpc"),
+ (transports.BatchControllerGrpcAsyncIOTransport, "grpc_asyncio"),
+ ],
+)
+def test_batch_controller_client_service_account_always_use_jwt(
+ transport_class, transport_name
+):
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=True)
+ use_jwt.assert_called_once_with(True)
+
+ with mock.patch.object(
+ service_account.Credentials, "with_always_use_jwt_access", create=True
+ ) as use_jwt:
+ creds = service_account.Credentials(None, None, None)
+ transport = transport_class(credentials=creds, always_use_jwt_access=False)
+ use_jwt.assert_not_called()
+
+
+@pytest.mark.parametrize(
+ "client_class", [BatchControllerClient, BatchControllerAsyncClient,]
+)
+def test_batch_controller_client_from_service_account_file(client_class):
+ creds = ga_credentials.AnonymousCredentials()
+ with mock.patch.object(
+ service_account.Credentials, "from_service_account_file"
+ ) as factory:
+ factory.return_value = creds
+ client = client_class.from_service_account_file("dummy/file/path.json")
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ client = client_class.from_service_account_json("dummy/file/path.json")
+ assert client.transport._credentials == creds
+ assert isinstance(client, client_class)
+
+ assert client.transport._host == "dataproc.googleapis.com:443"
+
+
+def test_batch_controller_client_get_transport_class():
+ transport = BatchControllerClient.get_transport_class()
+ available_transports = [
+ transports.BatchControllerGrpcTransport,
+ ]
+ assert transport in available_transports
+
+ transport = BatchControllerClient.get_transport_class("grpc")
+ assert transport == transports.BatchControllerGrpcTransport
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (BatchControllerClient, transports.BatchControllerGrpcTransport, "grpc"),
+ (
+ BatchControllerAsyncClient,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+@mock.patch.object(
+ BatchControllerClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BatchControllerClient),
+)
+@mock.patch.object(
+ BatchControllerAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BatchControllerAsyncClient),
+)
+def test_batch_controller_client_client_options(
+ client_class, transport_class, transport_name
+):
+ # Check that if channel is provided we won't create a new one.
+ with mock.patch.object(BatchControllerClient, "get_transport_class") as gtc:
+ transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
+ client = client_class(transport=transport)
+ gtc.assert_not_called()
+
+ # Check that if channel is provided via str we will create a new one.
+ with mock.patch.object(BatchControllerClient, "get_transport_class") as gtc:
+ client = client_class(transport=transport_name)
+ gtc.assert_called()
+
+ # Check the case api_endpoint is provided.
+ options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "never".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
+ # "always".
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_MTLS_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
+ # unsupported value.
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
+ with pytest.raises(MutualTLSChannelError):
+ client = client_class()
+
+ # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
+ ):
+ with pytest.raises(ValueError):
+ client = client_class()
+
+ # Check the case quota_project_id is provided
+ options = client_options.ClientOptions(quota_project_id="octopus")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id="octopus",
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name,use_client_cert_env",
+ [
+ (
+ BatchControllerClient,
+ transports.BatchControllerGrpcTransport,
+ "grpc",
+ "true",
+ ),
+ (
+ BatchControllerAsyncClient,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "true",
+ ),
+ (
+ BatchControllerClient,
+ transports.BatchControllerGrpcTransport,
+ "grpc",
+ "false",
+ ),
+ (
+ BatchControllerAsyncClient,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ "false",
+ ),
+ ],
+)
+@mock.patch.object(
+ BatchControllerClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BatchControllerClient),
+)
+@mock.patch.object(
+ BatchControllerAsyncClient,
+ "DEFAULT_ENDPOINT",
+ modify_default_endpoint(BatchControllerAsyncClient),
+)
+@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
+def test_batch_controller_client_mtls_env_auto(
+ client_class, transport_class, transport_name, use_client_cert_env
+):
+ # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
+ # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
+
+ # Check the case client_cert_source is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ options = client_options.ClientOptions(
+ client_cert_source=client_cert_source_callback
+ )
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+
+ if use_client_cert_env == "false":
+ expected_client_cert_source = None
+ expected_host = client.DEFAULT_ENDPOINT
+ else:
+ expected_client_cert_source = client_cert_source_callback
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # Check the case ADC client cert is provided. Whether client cert is used depends on
+ # GOOGLE_API_USE_CLIENT_CERTIFICATE value.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=True,
+ ):
+ with mock.patch(
+ "google.auth.transport.mtls.default_client_cert_source",
+ return_value=client_cert_source_callback,
+ ):
+ if use_client_cert_env == "false":
+ expected_host = client.DEFAULT_ENDPOINT
+ expected_client_cert_source = None
+ else:
+ expected_host = client.DEFAULT_MTLS_ENDPOINT
+ expected_client_cert_source = client_cert_source_callback
+
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=expected_host,
+ scopes=None,
+ client_cert_source_for_mtls=expected_client_cert_source,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+ # Check the case client_cert_source and ADC client cert are not provided.
+ with mock.patch.dict(
+ os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
+ ):
+ with mock.patch.object(transport_class, "__init__") as patched:
+ with mock.patch(
+ "google.auth.transport.mtls.has_default_client_cert_source",
+ return_value=False,
+ ):
+ patched.return_value = None
+ client = client_class()
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (BatchControllerClient, transports.BatchControllerGrpcTransport, "grpc"),
+ (
+ BatchControllerAsyncClient,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_batch_controller_client_client_options_scopes(
+ client_class, transport_class, transport_name
+):
+ # Check the case scopes are provided.
+ options = client_options.ClientOptions(scopes=["1", "2"],)
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=["1", "2"],
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+
+@pytest.mark.parametrize(
+ "client_class,transport_class,transport_name",
+ [
+ (BatchControllerClient, transports.BatchControllerGrpcTransport, "grpc"),
+ (
+ BatchControllerAsyncClient,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ "grpc_asyncio",
+ ),
+ ],
+)
+def test_batch_controller_client_client_options_credentials_file(
+ client_class, transport_class, transport_name
+):
+ # Check the case credentials file is provided.
+ options = client_options.ClientOptions(credentials_file="credentials.json")
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=None,
+ credentials_file="credentials.json",
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+
+def test_batch_controller_client_client_options_from_dict():
+ with mock.patch(
+ "google.cloud.dataproc_v1.services.batch_controller.transports.BatchControllerGrpcTransport.__init__"
+ ) as grpc_transport:
+ grpc_transport.return_value = None
+ client = BatchControllerClient(
+ client_options={"api_endpoint": "squid.clam.whelk"}
+ )
+ grpc_transport.assert_called_once_with(
+ credentials=None,
+ credentials_file=None,
+ host="squid.clam.whelk",
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ )
+
+
+def test_create_batch(transport: str = "grpc", request_type=batches.CreateBatchRequest):
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/spam")
+ response = client.create_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.CreateBatchRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
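+# Note: create_batch is a long-running operation. The mocked transport returns
+# a raw operations_pb2.Operation, and the client wraps it in an api_core
+# future, which is what the isinstance check above verifies.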
+
+def test_create_batch_from_dict():
+ test_create_batch(request_type=dict)
+
+
+def test_create_batch_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ client.create_batch()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.CreateBatchRequest()
+
+
+@pytest.mark.asyncio
+async def test_create_batch_async(
+ transport: str = "grpc_asyncio", request_type=batches.CreateBatchRequest
+):
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ response = await client.create_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.CreateBatchRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_batch_async_from_dict():
+ await test_create_batch_async(request_type=dict)
+
+
+def test_create_batch_field_headers():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.CreateBatchRequest()
+
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ client.create_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_create_batch_field_headers_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.CreateBatchRequest()
+
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/op")
+ )
+ await client.create_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_create_batch_flattened():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = operations_pb2.Operation(name="operations/op")
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.create_batch(
+ parent="parent_value",
+ batch=batches.Batch(name="name_value"),
+ batch_id="batch_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+ assert args[0].batch == batches.Batch(name="name_value")
+ assert args[0].batch_id == "batch_id_value"
+
+
+def test_create_batch_flattened_error():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.create_batch(
+ batches.CreateBatchRequest(),
+ parent="parent_value",
+ batch=batches.Batch(name="name_value"),
+ batch_id="batch_id_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_create_batch_flattened_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.create_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ operations_pb2.Operation(name="operations/spam")
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.create_batch(
+ parent="parent_value",
+ batch=batches.Batch(name="name_value"),
+ batch_id="batch_id_value",
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+ assert args[0].batch == batches.Batch(name="name_value")
+ assert args[0].batch_id == "batch_id_value"
+
+
+@pytest.mark.asyncio
+async def test_create_batch_flattened_error_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.create_batch(
+ batches.CreateBatchRequest(),
+ parent="parent_value",
+ batch=batches.Batch(name="name_value"),
+ batch_id="batch_id_value",
+ )
+
+
+def test_get_batch(transport: str = "grpc", request_type=batches.GetBatchRequest):
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batches.Batch(
+ name="name_value",
+ uuid="uuid_value",
+ state=batches.Batch.State.PENDING,
+ state_message="state_message_value",
+ creator="creator_value",
+ operation="operation_value",
+ pyspark_batch=batches.PySparkBatch(
+ main_python_file_uri="main_python_file_uri_value"
+ ),
+ )
+ response = client.get_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.GetBatchRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, batches.Batch)
+ assert response.name == "name_value"
+ assert response.uuid == "uuid_value"
+ assert response.state == batches.Batch.State.PENDING
+ assert response.state_message == "state_message_value"
+ assert response.creator == "creator_value"
+ assert response.operation == "operation_value"
+
+
+def test_get_batch_from_dict():
+ test_get_batch(request_type=dict)
+
+
+def test_get_batch_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ client.get_batch()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.GetBatchRequest()
+
+
+@pytest.mark.asyncio
+async def test_get_batch_async(
+ transport: str = "grpc_asyncio", request_type=batches.GetBatchRequest
+):
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batches.Batch(
+ name="name_value",
+ uuid="uuid_value",
+ state=batches.Batch.State.PENDING,
+ state_message="state_message_value",
+ creator="creator_value",
+ operation="operation_value",
+ )
+ )
+ response = await client.get_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.GetBatchRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, batches.Batch)
+ assert response.name == "name_value"
+ assert response.uuid == "uuid_value"
+ assert response.state == batches.Batch.State.PENDING
+ assert response.state_message == "state_message_value"
+ assert response.creator == "creator_value"
+ assert response.operation == "operation_value"
+
+
+@pytest.mark.asyncio
+async def test_get_batch_async_from_dict():
+ await test_get_batch_async(request_type=dict)
+
+
+def test_get_batch_field_headers():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.GetBatchRequest()
+
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ call.return_value = batches.Batch()
+ client.get_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_get_batch_field_headers_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.GetBatchRequest()
+
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batches.Batch())
+ await client.get_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_get_batch_flattened():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batches.Batch()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_batch(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+def test_get_batch_flattened_error():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_batch(
+ batches.GetBatchRequest(), name="name_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_get_batch_flattened_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.get_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(batches.Batch())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_batch(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_get_batch_flattened_error_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_batch(
+ batches.GetBatchRequest(), name="name_value",
+ )
+
+
+def test_list_batches(transport: str = "grpc", request_type=batches.ListBatchesRequest):
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batches.ListBatchesResponse(
+ next_page_token="next_page_token_value",
+ )
+ response = client.list_batches(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.ListBatchesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListBatchesPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+def test_list_batches_from_dict():
+ test_list_batches(request_type=dict)
+
+
+def test_list_batches_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ client.list_batches()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.ListBatchesRequest()
+
+
+@pytest.mark.asyncio
+async def test_list_batches_async(
+ transport: str = "grpc_asyncio", request_type=batches.ListBatchesRequest
+):
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batches.ListBatchesResponse(next_page_token="next_page_token_value",)
+ )
+ response = await client.list_batches(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.ListBatchesRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListBatchesAsyncPager)
+ assert response.next_page_token == "next_page_token_value"
+
+
+@pytest.mark.asyncio
+async def test_list_batches_async_from_dict():
+ await test_list_batches_async(request_type=dict)
+
+
+def test_list_batches_field_headers():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.ListBatchesRequest()
+
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ call.return_value = batches.ListBatchesResponse()
+ client.list_batches(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_list_batches_field_headers_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.ListBatchesRequest()
+
+ request.parent = "parent/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batches.ListBatchesResponse()
+ )
+ await client.list_batches(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "parent=parent/value",) in kw["metadata"]
+
+
+def test_list_batches_flattened():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = batches.ListBatchesResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.list_batches(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+
+
+def test_list_batches_flattened_error():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.list_batches(
+ batches.ListBatchesRequest(), parent="parent_value",
+ )
+
+
+@pytest.mark.asyncio
+async def test_list_batches_flattened_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+ batches.ListBatchesResponse()
+ )
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_batches(parent="parent_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].parent == "parent_value"
+
+
+@pytest.mark.asyncio
+async def test_list_batches_flattened_error_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_batches(
+ batches.ListBatchesRequest(), parent="parent_value",
+ )
+
+
+def test_list_batches_pager():
+    client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(), batches.Batch(), batches.Batch(),],
+ next_page_token="abc",
+ ),
+ batches.ListBatchesResponse(batches=[], next_page_token="def",),
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(),], next_page_token="ghi",
+ ),
+ batches.ListBatchesResponse(batches=[batches.Batch(), batches.Batch(),],),
+ RuntimeError,
+ )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
+        )
+ pager = client.list_batches(request={})
+
+ assert pager._metadata == metadata
+
+        results = list(pager)
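+        # 3 + 0 + 1 + 2 Batch messages across the four mocked pages.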
+ assert len(results) == 6
+ assert all(isinstance(i, batches.Batch) for i in results)
+
+
+def test_list_batches_pages():
+    client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.list_batches), "__call__") as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(), batches.Batch(), batches.Batch(),],
+ next_page_token="abc",
+ ),
+ batches.ListBatchesResponse(batches=[], next_page_token="def",),
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(),], next_page_token="ghi",
+ ),
+ batches.ListBatchesResponse(batches=[batches.Batch(), batches.Batch(),],),
+ RuntimeError,
+ )
+ pages = list(client.list_batches(request={}).pages)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.asyncio
+async def test_list_batches_async_pager():
+ client = BatchControllerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batches), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(), batches.Batch(), batches.Batch(),],
+ next_page_token="abc",
+ ),
+ batches.ListBatchesResponse(batches=[], next_page_token="def",),
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(),], next_page_token="ghi",
+ ),
+ batches.ListBatchesResponse(batches=[batches.Batch(), batches.Batch(),],),
+ RuntimeError,
+ )
+ async_pager = await client.list_batches(request={},)
+ assert async_pager.next_page_token == "abc"
+ responses = []
+ async for response in async_pager:
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, batches.Batch) for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_batches_async_pages():
+ client = BatchControllerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_batches), "__call__", new_callable=mock.AsyncMock
+ ) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(), batches.Batch(), batches.Batch(),],
+ next_page_token="abc",
+ ),
+ batches.ListBatchesResponse(batches=[], next_page_token="def",),
+ batches.ListBatchesResponse(
+ batches=[batches.Batch(),], next_page_token="ghi",
+ ),
+ batches.ListBatchesResponse(batches=[batches.Batch(), batches.Batch(),],),
+ RuntimeError,
+ )
+ pages = []
+ async for page_ in (await client.list_batches(request={})).pages:
+ pages.append(page_)
+ for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
+ assert page_.raw_page.next_page_token == token
+
+
+def test_delete_batch(transport: str = "grpc", request_type=batches.DeleteBatchRequest):
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ response = client.delete_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.DeleteBatchRequest()
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+def test_delete_batch_from_dict():
+ test_delete_batch(request_type=dict)
+
+
+def test_delete_batch_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ client.delete_batch()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.DeleteBatchRequest()
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_async(
+ transport: str = "grpc_asyncio", request_type=batches.DeleteBatchRequest
+):
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ response = await client.delete_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == batches.DeleteBatchRequest()
+
+ # Establish that the response is the type that we expect.
+ assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_async_from_dict():
+ await test_delete_batch_async(request_type=dict)
+
+
+def test_delete_batch_field_headers():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.DeleteBatchRequest()
+
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ call.return_value = None
+ client.delete_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_field_headers_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = batches.DeleteBatchRequest()
+
+ request.name = "name/value"
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ await client.delete_batch(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert ("x-goog-request-params", "name=name/value",) in kw["metadata"]
+
+
+def test_delete_batch_flattened():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = None
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.delete_batch(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+def test_delete_batch_flattened_error():
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.delete_batch(
+ batches.DeleteBatchRequest(), name="name_value",
+ )
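+
+
+# A minimal sketch of the calling convention enforced above (``batch_name`` is
+# a placeholder): pass either a request object or flattened keyword arguments,
+# never both.
+#
+#     client.delete_batch(request=batches.DeleteBatchRequest(name=batch_name))
+#     client.delete_batch(name=batch_name)  # equivalent flattened form
+#     client.delete_batch(batches.DeleteBatchRequest(), name=batch_name)  # ValueError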
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_flattened_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(type(client.transport.delete_batch), "__call__") as call:
+ # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_batch(name="name_value",)
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0].name == "name_value"
+
+
+@pytest.mark.asyncio
+async def test_delete_batch_flattened_error_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_batch(
+ batches.DeleteBatchRequest(), name="name_value",
+ )
+
+
+def test_credentials_transport_error():
+ # It is an error to provide credentials and a transport instance.
+ transport = transports.BatchControllerGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport,
+ )
+
+ # It is an error to provide a credentials file and a transport instance.
+ transport = transports.BatchControllerGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = BatchControllerClient(
+ client_options={"credentials_file": "credentials.json"},
+ transport=transport,
+ )
+
+ # It is an error to provide scopes and a transport instance.
+ transport = transports.BatchControllerGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ with pytest.raises(ValueError):
+ client = BatchControllerClient(
+ client_options={"scopes": ["1", "2"]}, transport=transport,
+ )
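+
+
+# A minimal sketch of the supported pattern, given the errors asserted above:
+# put the credentials on the transport, then hand the transport to the client
+# with no extra credential arguments.
+#
+#     transport = transports.BatchControllerGrpcTransport(
+#         credentials=ga_credentials.AnonymousCredentials(),
+#     )
+#     client = BatchControllerClient(transport=transport)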
+
+
+def test_transport_instance():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.BatchControllerGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ client = BatchControllerClient(transport=transport)
+ assert client.transport is transport
+
+
+def test_transport_get_channel():
+ # A client may be instantiated with a custom transport instance.
+ transport = transports.BatchControllerGrpcTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+ transport = transports.BatchControllerGrpcAsyncIOTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ channel = transport.grpc_channel
+ assert channel
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BatchControllerGrpcTransport,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ ],
+)
+def test_transport_adc(transport_class):
+ # Test default credentials are used if not provided.
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class()
+ adc.assert_called_once()
+
+
+def test_transport_grpc_default():
+ # A client should use the gRPC transport by default.
+ client = BatchControllerClient(credentials=ga_credentials.AnonymousCredentials(),)
+ assert isinstance(client.transport, transports.BatchControllerGrpcTransport,)
+
+
+def test_batch_controller_base_transport_error():
+ # Passing both a credentials object and credentials_file should raise an error
+ with pytest.raises(core_exceptions.DuplicateCredentialArgs):
+ transport = transports.BatchControllerTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ credentials_file="credentials.json",
+ )
+
+
+def test_batch_controller_base_transport():
+ # Instantiate the base transport.
+ with mock.patch(
+ "google.cloud.dataproc_v1.services.batch_controller.transports.BatchControllerTransport.__init__"
+ ) as Transport:
+ Transport.return_value = None
+ transport = transports.BatchControllerTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Every method on the transport should just blindly
+ # raise NotImplementedError.
+ methods = (
+ "create_batch",
+ "get_batch",
+ "list_batches",
+ "delete_batch",
+ )
+ for method in methods:
+ with pytest.raises(NotImplementedError):
+ getattr(transport, method)(request=object())
+
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
+ # Additionally, the LRO client (a property) should
+ # also raise NotImplementedError
+ with pytest.raises(NotImplementedError):
+ transport.operations_client
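+
+
+# Note on the assertions above: ``BatchControllerTransport`` is the abstract
+# base class; the concrete transports exercised elsewhere in this module
+# (``BatchControllerGrpcTransport`` and ``BatchControllerGrpcAsyncIOTransport``)
+# override these stubs with channel-backed implementations.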
+
+
+@requires_google_auth_gte_1_25_0
+def test_batch_controller_base_transport_with_credentials_file():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.dataproc_v1.services.batch_controller.transports.BatchControllerTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.BatchControllerTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_batch_controller_base_transport_with_credentials_file_old_google_auth():
+ # Instantiate the base transport with a credentials file
+ with mock.patch.object(
+ google.auth, "load_credentials_from_file", autospec=True
+ ) as load_creds, mock.patch(
+ "google.cloud.dataproc_v1.services.batch_controller.transports.BatchControllerTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.BatchControllerTransport(
+ credentials_file="credentials.json", quota_project_id="octopus",
+ )
+ load_creds.assert_called_once_with(
+ "credentials.json",
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+def test_batch_controller_base_transport_with_adc():
+ # Test the default credentials are used if credentials and credentials_file are None.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
+ "google.cloud.dataproc_v1.services.batch_controller.transports.BatchControllerTransport._prep_wrapped_messages"
+ ) as Transport:
+ Transport.return_value = None
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport = transports.BatchControllerTransport()
+ adc.assert_called_once()
+
+
+@requires_google_auth_gte_1_25_0
+def test_batch_controller_auth_adc():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ BatchControllerClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
+
+
+@requires_google_auth_lt_1_25_0
+def test_batch_controller_auth_adc_old_google_auth():
+ # If no credentials are provided, we should use ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ BatchControllerClient()
+ adc.assert_called_once_with(
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id=None,
+ )
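+
+
+# The two ADC tests above encode the google-auth 1.25.0 boundary: newer
+# versions accept a separate ``default_scopes`` argument, so user-supplied
+# ``scopes`` can stay ``None``, while older versions only accept ``scopes``,
+# so the cloud-platform scope is passed there directly.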
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BatchControllerGrpcTransport,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ ],
+)
+@requires_google_auth_gte_1_25_0
+def test_batch_controller_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BatchControllerGrpcTransport,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ ],
+)
+@requires_google_auth_lt_1_25_0
+def test_batch_controller_transport_auth_adc_old_google_auth(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, "default", autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus")
+ adc.assert_called_once_with(
+ scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.BatchControllerGrpcTransport, grpc_helpers),
+ (transports.BatchControllerGrpcAsyncIOTransport, grpc_helpers_async),
+ ],
+)
+def test_batch_controller_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(
+ google.auth, "default", autospec=True
+ ) as adc, mock.patch.object(
+ grpc_helpers, "create_channel", autospec=True
+ ) as create_channel:
+ creds = ga_credentials.AnonymousCredentials()
+ adc.return_value = (creds, None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+
+ create_channel.assert_called_with(
+ "dataproc.googleapis.com:443",
+ credentials=creds,
+ credentials_file=None,
+ quota_project_id="octopus",
+ default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
+ scopes=["1", "2"],
+ default_host="dataproc.googleapis.com",
+ ssl_credentials=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
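+
+
+# The ``options`` asserted above set both gRPC message-size limits to -1,
+# which gRPC treats as unlimited, so large responses are not rejected at the
+# channel level.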
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BatchControllerGrpcTransport,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ ],
+)
+def test_batch_controller_grpc_transport_client_cert_source_for_mtls(transport_class):
+ cred = ga_credentials.AnonymousCredentials()
+
+ # Check ssl_channel_credentials is used if provided.
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds,
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+    # Check that when ssl_channel_credentials is not provided,
+    # client_cert_source_for_mtls is used instead.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback,
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert, private_key=expected_key
+ )
+
+
+def test_batch_controller_host_no_port():
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="dataproc.googleapis.com"
+ ),
+ )
+ assert client.transport._host == "dataproc.googleapis.com:443"
+
+
+def test_batch_controller_host_with_port():
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(
+ api_endpoint="dataproc.googleapis.com:8000"
+ ),
+ )
+ assert client.transport._host == "dataproc.googleapis.com:8000"
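+
+
+# A short illustration of the endpoint handling covered by the two tests
+# above: the transport appends the default port only when none is given.
+#
+#     "dataproc.googleapis.com"       -> "dataproc.googleapis.com:443"
+#     "dataproc.googleapis.com:8000"  -> "dataproc.googleapis.com:8000"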
+
+
+def test_batch_controller_grpc_transport_channel():
+ channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that the channel is used if provided.
+ transport = transports.BatchControllerGrpcTransport(
+ host="squid.clam.whelk", channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_batch_controller_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that the channel is used if provided.
+ transport = transports.BatchControllerGrpcAsyncIOTransport(
+ host="squid.clam.whelk", channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BatchControllerGrpcTransport,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ ],
+)
+def test_batch_controller_transport_channel_mtls_with_client_cert_source(
+ transport_class,
+):
+ with mock.patch(
+ "grpc.ssl_channel_credentials", autospec=True
+ ) as grpc_ssl_channel_cred:
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, "default") as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BatchControllerGrpcTransport,
+ transports.BatchControllerGrpcAsyncIOTransport,
+ ],
+)
+def test_batch_controller_transport_channel_mtls_with_adc(transport_class):
+ mock_ssl_cred = mock.Mock()
+ with mock.patch.multiple(
+ "google.auth.transport.grpc.SslCredentials",
+ __init__=mock.Mock(return_value=None),
+ ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+ ):
+ with mock.patch.object(
+ transport_class, "create_channel"
+ ) as grpc_create_channel:
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+ mock_cred = mock.Mock()
+
+ with pytest.warns(DeprecationWarning):
+ transport = transport_class(
+ host="squid.clam.whelk",
+ credentials=mock_cred,
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=None,
+ )
+
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=mock_cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_batch_controller_grpc_lro_client():
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc",
+ )
+ transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+ assert isinstance(transport.operations_client, operations_v1.OperationsClient,)
+
+    # Ensure that subsequent calls to the property return the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_batch_controller_grpc_lro_async_client():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+ assert isinstance(transport.operations_client, operations_v1.OperationsAsyncClient,)
+
+    # Ensure that subsequent calls to the property return the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_batch_path():
+ project = "squid"
+ location = "clam"
+ batch = "whelk"
+ expected = "projects/{project}/locations/{location}/batches/{batch}".format(
+ project=project, location=location, batch=batch,
+ )
+ actual = BatchControllerClient.batch_path(project, location, batch)
+ assert expected == actual
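+
+
+# With the illustrative values above, ``batch_path`` expands to
+# ``projects/squid/locations/clam/batches/whelk``; ``parse_batch_path``
+# (tested next) inverts that expansion back into its components.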
+
+
+def test_parse_batch_path():
+ expected = {
+ "project": "octopus",
+ "location": "oyster",
+ "batch": "nudibranch",
+ }
+ path = BatchControllerClient.batch_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BatchControllerClient.parse_batch_path(path)
+ assert expected == actual
+
+
+def test_common_billing_account_path():
+ billing_account = "cuttlefish"
+ expected = "billingAccounts/{billing_account}".format(
+ billing_account=billing_account,
+ )
+ actual = BatchControllerClient.common_billing_account_path(billing_account)
+ assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+ expected = {
+ "billing_account": "mussel",
+ }
+ path = BatchControllerClient.common_billing_account_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BatchControllerClient.parse_common_billing_account_path(path)
+ assert expected == actual
+
+
+def test_common_folder_path():
+ folder = "winkle"
+ expected = "folders/{folder}".format(folder=folder,)
+ actual = BatchControllerClient.common_folder_path(folder)
+ assert expected == actual
+
+
+def test_parse_common_folder_path():
+ expected = {
+ "folder": "nautilus",
+ }
+ path = BatchControllerClient.common_folder_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BatchControllerClient.parse_common_folder_path(path)
+ assert expected == actual
+
+
+def test_common_organization_path():
+ organization = "scallop"
+ expected = "organizations/{organization}".format(organization=organization,)
+ actual = BatchControllerClient.common_organization_path(organization)
+ assert expected == actual
+
+
+def test_parse_common_organization_path():
+ expected = {
+ "organization": "abalone",
+ }
+ path = BatchControllerClient.common_organization_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BatchControllerClient.parse_common_organization_path(path)
+ assert expected == actual
+
+
+def test_common_project_path():
+ project = "squid"
+ expected = "projects/{project}".format(project=project,)
+ actual = BatchControllerClient.common_project_path(project)
+ assert expected == actual
+
+
+def test_parse_common_project_path():
+ expected = {
+ "project": "clam",
+ }
+ path = BatchControllerClient.common_project_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BatchControllerClient.parse_common_project_path(path)
+ assert expected == actual
+
+
+def test_common_location_path():
+ project = "whelk"
+ location = "octopus"
+ expected = "projects/{project}/locations/{location}".format(
+ project=project, location=location,
+ )
+ actual = BatchControllerClient.common_location_path(project, location)
+ assert expected == actual
+
+
+def test_parse_common_location_path():
+ expected = {
+ "project": "oyster",
+ "location": "nudibranch",
+ }
+ path = BatchControllerClient.common_location_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BatchControllerClient.parse_common_location_path(path)
+ assert expected == actual
+
+
+def test_client_withDEFAULT_CLIENT_INFO():
+ client_info = gapic_v1.client_info.ClientInfo()
+
+ with mock.patch.object(
+ transports.BatchControllerTransport, "_prep_wrapped_messages"
+ ) as prep:
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+ with mock.patch.object(
+ transports.BatchControllerTransport, "_prep_wrapped_messages"
+ ) as prep:
+ transport_class = BatchControllerClient.get_transport_class()
+ transport = transport_class(
+ credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
+ )
+ prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = BatchControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
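+
+
+# A minimal usage sketch of what this test verifies (``creds`` and
+# ``batch_name`` are placeholders): the channel stays open inside the async
+# context manager and is closed on exit.
+#
+#     async with BatchControllerAsyncClient(credentials=creds) as client:
+#         await client.delete_batch(name=batch_name)
+#     # channel closed here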
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = BatchControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
index b57aa868..5fae118e 100644
--- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
+++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
@@ -32,6 +32,7 @@
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataproc_v1.services.cluster_controller import (
@@ -1919,6 +1920,9 @@ def test_cluster_controller_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
@@ -2456,3 +2460,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = ClusterControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = ClusterControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = ClusterControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
diff --git a/tests/unit/gapic/dataproc_v1/test_job_controller.py b/tests/unit/gapic/dataproc_v1/test_job_controller.py
index de9fcfc5..05fcd58b 100644
--- a/tests/unit/gapic/dataproc_v1/test_job_controller.py
+++ b/tests/unit/gapic/dataproc_v1/test_job_controller.py
@@ -32,6 +32,7 @@
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataproc_v1.services.job_controller import JobControllerAsyncClient
@@ -1837,6 +1838,9 @@ def test_job_controller_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
@@ -2324,3 +2328,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = JobControllerAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = JobControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = JobControllerClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py
index ed87a04e..3e220b7a 100644
--- a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py
+++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py
@@ -32,6 +32,7 @@
from google.api_core import grpc_helpers_async
from google.api_core import operation_async # type: ignore
from google.api_core import operations_v1
+from google.api_core import path_template
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.cloud.dataproc_v1.services.workflow_template_service import (
@@ -2450,6 +2451,9 @@ def test_workflow_template_service_base_transport():
with pytest.raises(NotImplementedError):
getattr(transport, method)(request=object())
+ with pytest.raises(NotImplementedError):
+ transport.close()
+
# Additionally, the LRO client (a property) should
# also raise NotImplementedError
with pytest.raises(NotImplementedError):
@@ -3017,3 +3021,49 @@ def test_client_withDEFAULT_CLIENT_INFO():
credentials=ga_credentials.AnonymousCredentials(), client_info=client_info,
)
prep.assert_called_once_with(client_info)
+
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+ client = WorkflowTemplateServiceAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport="grpc_asyncio",
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, "grpc_channel")), "close"
+ ) as close:
+ async with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_transport_close():
+ transports = {
+ "grpc": "_grpc_channel",
+ }
+
+ for transport, close_name in transports.items():
+ client = WorkflowTemplateServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ with mock.patch.object(
+ type(getattr(client.transport, close_name)), "close"
+ ) as close:
+ with client:
+ close.assert_not_called()
+ close.assert_called_once()
+
+
+def test_client_ctx():
+ transports = [
+ "grpc",
+ ]
+ for transport in transports:
+ client = WorkflowTemplateServiceClient(
+ credentials=ga_credentials.AnonymousCredentials(), transport=transport
+ )
+ # Test client calls underlying transport.
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()