@@ -146,9 +146,10 @@ def __init__(self,
146146 )
147147
148148 if client_info is None:
149- client_info = (
150- google.api_core.gapic_v1.client_info.DEFAULT_CLIENT_INFO)
151- client_info.gapic_version = _GAPIC_LIBRARY_VERSION
149+ client_info = google.api_core.gapic_v1.client_info.ClientInfo(
150+ gapic_version=_GAPIC_LIBRARY_VERSION, )
151+ else:
152+ client_info.gapic_version = _GAPIC_LIBRARY_VERSION
152153 self._client_info = client_info
153154
154155 # Parse out the default settings for retry and timeout for each RPC
@@ -180,13 +181,13 @@ def create_cluster(self,
180181 >>>
181182 >>> client = dataproc_v1.ClusterControllerClient()
182183 >>>
183- >>> # TODO: Initialize ``project_id``:
184+ >>> # TODO: Initialize `project_id`:
184185 >>> project_id = ''
185186 >>>
186- >>> # TODO: Initialize ``region``:
187+ >>> # TODO: Initialize `region`:
187188 >>> region = ''
188189 >>>
189- >>> # TODO: Initialize ``cluster``:
190+ >>> # TODO: Initialize `cluster`:
190191 >>> cluster = {}
191192 >>>
192193 >>> response = client.create_cluster(project_id, region, cluster)
@@ -205,6 +206,7 @@ def create_cluster(self,
205206 belongs to.
206207 region (str): Required. The Cloud Dataproc region in which to handle the request.
207208 cluster (Union[dict, ~google.cloud.dataproc_v1.types.Cluster]): Required. The cluster to create.
209+
208210 If a dict is provided, it must be of the same form as the protobuf
209211 message :class:`~google.cloud.dataproc_v1.types.Cluster`
210212 retry (Optional[google.api_core.retry.Retry]): A retry object used
@@ -268,19 +270,19 @@ def update_cluster(self,
268270 >>>
269271 >>> client = dataproc_v1.ClusterControllerClient()
270272 >>>
271- >>> # TODO: Initialize ``project_id``:
273+ >>> # TODO: Initialize `project_id`:
272274 >>> project_id = ''
273275 >>>
274- >>> # TODO: Initialize ``region``:
276+ >>> # TODO: Initialize `region`:
275277 >>> region = ''
276278 >>>
277- >>> # TODO: Initialize ``cluster_name``:
279+ >>> # TODO: Initialize `cluster_name`:
278280 >>> cluster_name = ''
279281 >>>
280- >>> # TODO: Initialize ``cluster``:
282+ >>> # TODO: Initialize `cluster`:
281283 >>> cluster = {}
282284 >>>
283- >>> # TODO: Initialize ``update_mask``:
285+ >>> # TODO: Initialize `update_mask`:
284286 >>> update_mask = {}
285287 >>>
286288 >>> response = client.update_cluster(project_id, region, cluster_name, cluster, update_mask)
@@ -300,51 +302,172 @@ def update_cluster(self,
300302 region (str): Required. The Cloud Dataproc region in which to handle the request.
301303 cluster_name (str): Required. The cluster name.
302304 cluster (Union[dict, ~google.cloud.dataproc_v1.types.Cluster]): Required. The changes to the cluster.
305+
303306 If a dict is provided, it must be of the same form as the protobuf
304307 message :class:`~google.cloud.dataproc_v1.types.Cluster`
305- update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of
306- the field to update. For example, to change the number of workers
307- in a cluster to 5, the ``update_mask`` parameter would be
308- specified as ``config.worker_config.num_instances``,
309- and the ``PATCH`` request body would specify the new value, as follows:
308+ update_mask (Union[dict, ~google.cloud.dataproc_v1.types.FieldMask]): Required. Specifies the path, relative to ``Cluster``, of the field to
309+ update. For example, to change the number of workers in a cluster to 5,
310+ the ``update_mask`` parameter would be specified as
311+ ``config.worker_config.num_instances``, and the ``PATCH`` request body
312+ would specify the new value, as follows:
310313
311314 ::
312315
313- {
314- \"config\":{
315- \"workerConfig\":{
316- \"numInstances\":\"5\"
317- }
318- }
319- }
316+ {
317+ "config":{
318+ "workerConfig":{
319+ "numInstances":"5"
320+ }
321+ }
322+ }
320323
321- Similarly, to change the number of preemptible workers in a cluster to 5,
322- the ``update_mask`` parameter would be
323- ``config.secondary_worker_config.num_instances``, and the ``PATCH`` request
324- body would be set as follows:
324+ Similarly, to change the number of preemptible workers in a cluster to
325+ 5, the ``update_mask`` parameter would be
326+ ``config.secondary_worker_config.num_instances``, and the ``PATCH``
327+ request body would be set as follows:
325328
326329 ::
327330
328- {
329- \"config\":{
330- \"secondaryWorkerConfig\":{
331- \"numInstances\":\"5\"
332- }
333- }
334- }
331+ {
332+ "config":{
333+ "secondaryWorkerConfig":{
334+ "numInstances":"5"
335+ }
336+ }
337+ }
338+
339+ Note: Currently, only the following fields can be updated:
340+
341+ .. raw:: html
342+
343+ <table>
344+
345+ .. raw:: html
346+
347+ <tbody>
348+
349+ .. raw:: html
350+
351+ <tr>
352+
353+ .. raw:: html
354+
355+ <td>
356+
357+ Mask
358+
359+ .. raw:: html
360+
361+ </td>
362+
363+ .. raw:: html
364+
365+ <td>
366+
367+ Purpose
368+
369+ .. raw:: html
370+
371+ </td>
372+
373+ .. raw:: html
374+
375+ </tr>
376+
377+ .. raw:: html
378+
379+ <tr>
380+
381+ .. raw:: html
382+
383+ <td>
384+
385+ labels
386+
387+ .. raw:: html
388+
389+ </td>
390+
391+ .. raw:: html
392+
393+ <td>
394+
395+ Update labels
396+
397+ .. raw:: html
335398
336- .. note::
399+ </td>
337400
338- Currently, only the following fields can be updated:
401+ .. raw:: html
339402
340- * ``labels``: Update labels
341- * ``config.worker_config.num_instances``: Resize primary
342- worker group
343- * ``config.secondary_worker_config.num_instances``: Resize
344- secondary worker group
403+ </tr>
345404
346- If a dict is provided, it must be of the same form as the protobuf
347- message :class:`~google.cloud.dataproc_v1.types.FieldMask`
405+ .. raw:: html
406+
407+ <tr>
408+
409+ .. raw:: html
410+
411+ <td>
412+
413+ config.worker\_config.num\_instances
414+
415+ .. raw:: html
416+
417+ </td>
418+
419+ .. raw:: html
420+
421+ <td>
422+
423+ Resize primary worker group
424+
425+ .. raw:: html
426+
427+ </td>
428+
429+ .. raw:: html
430+
431+ </tr>
432+
433+ .. raw:: html
434+
435+ <tr>
436+
437+ .. raw:: html
438+
439+ <td>
440+
441+ config.secondary\_worker\_config.num\_instances
442+
443+ .. raw:: html
444+
445+ </td>
446+
447+ .. raw:: html
448+
449+ <td>
450+
451+ Resize secondary worker group
452+
453+ .. raw:: html
454+
455+ </td>
456+
457+ .. raw:: html
458+
459+ </tr>
460+
461+ .. raw:: html
462+
463+ </tbody>
464+
465+ .. raw:: html
466+
467+ </table>
468+
469+ If a dict is provided, it must be of the same form as the protobuf
470+ message :class:`~google.cloud.dataproc_v1.types.FieldMask`
348471 retry (Optional[google.api_core.retry.Retry]): A retry object used
349472 to retry requests. If ``None`` is specified, requests will not
350473 be retried.
@@ -406,13 +529,13 @@ def delete_cluster(self,
406529 >>>
407530 >>> client = dataproc_v1.ClusterControllerClient()
408531 >>>
409- >>> # TODO: Initialize ``project_id``:
532+ >>> # TODO: Initialize `project_id`:
410533 >>> project_id = ''
411534 >>>
412- >>> # TODO: Initialize ``region``:
535+ >>> # TODO: Initialize `region`:
413536 >>> region = ''
414537 >>>
415- >>> # TODO: Initialize ``cluster_name``:
538+ >>> # TODO: Initialize `cluster_name`:
416539 >>> cluster_name = ''
417540 >>>
418541 >>> response = client.delete_cluster(project_id, region, cluster_name)
@@ -490,13 +613,13 @@ def get_cluster(self,
490613 >>>
491614 >>> client = dataproc_v1.ClusterControllerClient()
492615 >>>
493- >>> # TODO: Initialize ``project_id``:
616+ >>> # TODO: Initialize `project_id`:
494617 >>> project_id = ''
495618 >>>
496- >>> # TODO: Initialize ``region``:
619+ >>> # TODO: Initialize `region`:
497620 >>> region = ''
498621 >>>
499- >>> # TODO: Initialize ``cluster_name``:
622+ >>> # TODO: Initialize `cluster_name`:
500623 >>> cluster_name = ''
501624 >>>
502625 >>> response = client.get_cluster(project_id, region, cluster_name)
@@ -559,10 +682,10 @@ def list_clusters(self,
559682 >>>
560683 >>> client = dataproc_v1.ClusterControllerClient()
561684 >>>
562- >>> # TODO: Initialize ``project_id``:
685+ >>> # TODO: Initialize `project_id`:
563686 >>> project_id = ''
564687 >>>
565- >>> # TODO: Initialize ``region``:
688+ >>> # TODO: Initialize `region`:
566689 >>> region = ''
567690 >>>
568691 >>> # Iterate over all results
@@ -574,7 +697,7 @@ def list_clusters(self,
574697 >>> # Alternatively:
575698 >>>
576699 >>> # Iterate over results one page at a time
577- >>> for page in client.list_clusters(project_id, region, options=CallOptions(page_token=INITIAL_PAGE)):
700+ >>> for page in client.list_clusters(project_id, region).pages :
578701 ... for element in page:
579702 ... # process element
580703 ... pass
@@ -588,20 +711,21 @@ def list_clusters(self,
588711
589712 field = value [AND [field = value]] ...
590713
591- where **field** is one of ``status.state``, ``clusterName``, or ``labels.[KEY]``,
592- and ``[KEY]`` is a label key. **value** can be ``*`` to match all values.
593- ``status.state`` can be one of the following: ``ACTIVE``, ``INACTIVE``,
594- ``CREATING``, ``RUNNING``, ``ERROR``, ``DELETING``, or ``UPDATING``. ``ACTIVE``
595- contains the ``CREATING``, ``UPDATING``, and ``RUNNING`` states. ``INACTIVE``
596- contains the ``DELETING`` and ``ERROR`` states.
597- ``clusterName`` is the name of the cluster provided at creation time.
598- Only the logical ``AND`` operator is supported; space-separated items are
599- treated as having an implicit ``AND`` operator.
714+ where **field** is one of ``status.state``, ``clusterName``, or
715+ ``labels.[KEY]``, and ``[KEY]`` is a label key. **value** can be ``*``
716+ to match all values. ``status.state`` can be one of the following:
717+ ``ACTIVE``, ``INACTIVE``, ``CREATING``, ``RUNNING``, ``ERROR``,
718+ ``DELETING``, or ``UPDATING``. ``ACTIVE`` contains the ``CREATING``,
719+ ``UPDATING``, and ``RUNNING`` states. ``INACTIVE`` contains the
720+ ``DELETING`` and ``ERROR`` states. ``clusterName`` is the name of the
721+ cluster provided at creation time. Only the logical ``AND`` operator is
722+ supported; space-separated items are treated as having an implicit
723+ ``AND`` operator.
600724
601725 Example filter:
602726
603- status.state = ACTIVE AND clusterName = mycluster
604- AND labels.env = staging AND labels.starred = *
727+ status.state = ACTIVE AND clusterName = mycluster AND labels.env =
728+ staging AND labels.starred = \*
605729 page_size (int): The maximum number of resources contained in the
606730 underlying API response. If page streaming is performed per-
607731 resource, this parameter does not affect the return value. If page
@@ -668,22 +792,21 @@ def diagnose_cluster(self,
668792 timeout=google.api_core.gapic_v1.method.DEFAULT,
669793 metadata=None):
670794 """
671- Gets cluster diagnostic information.
672- After the operation completes, the Operation.response field
673- contains ``DiagnoseClusterOutputLocation``.
795+ Gets cluster diagnostic information. After the operation completes, the
796+ Operation.response field contains ``DiagnoseClusterOutputLocation``.
674797
675798 Example:
676799 >>> from google.cloud import dataproc_v1
677800 >>>
678801 >>> client = dataproc_v1.ClusterControllerClient()
679802 >>>
680- >>> # TODO: Initialize ``project_id``:
803+ >>> # TODO: Initialize `project_id`:
681804 >>> project_id = ''
682805 >>>
683- >>> # TODO: Initialize ``region``:
806+ >>> # TODO: Initialize `region`:
684807 >>> region = ''
685808 >>>
686- >>> # TODO: Initialize ``cluster_name``:
809+ >>> # TODO: Initialize `cluster_name`:
687810 >>> cluster_name = ''
688811 >>>
689812 >>> response = client.diagnose_cluster(project_id, region, cluster_name)
0 commit comments