diff --git a/google/cloud/dataproc_v1/types/clusters.py b/google/cloud/dataproc_v1/types/clusters.py
index 8bd5d44d..18008b00 100644
--- a/google/cloud/dataproc_v1/types/clusters.py
+++ b/google/cloud/dataproc_v1/types/clusters.py
@@ -20,6 +20,7 @@
 from google.protobuf import duration_pb2  # type: ignore
 from google.protobuf import field_mask_pb2  # type: ignore
 from google.protobuf import timestamp_pb2  # type: ignore
+from google.type import interval_pb2  # type: ignore
 import proto  # type: ignore

 from google.cloud.dataproc_v1.types import shared
@@ -837,26 +838,20 @@ class InstanceGroupConfig(proto.Message):
             Instance Group. See `Dataproc -> Minimum CPU
             Platform <https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu>`__.
         min_num_instances (int):
-            Optional. The minimum number of instances to create. If
-            min_num_instances is set, min_num_instances is used for a
-            criteria to decide the cluster. Cluster creation will be
-            failed by being an error state if the total number of
-            instances created is less than the min_num_instances. For
-            example, given that num_instances = 5 and min_num_instances
-            = 3,
-
-            -  if 4 instances are created and then registered
-               successfully but one instance is failed, the failed VM
-               will be deleted and the cluster will be resized to 4
-               instances in running state.
-            -  if 2 instances are created successfully and 3 instances
-               are failed, the cluster will be in an error state and
-               does not delete failed VMs for debugging.
-            -  if 2 instance are created and then registered
-               successfully but 3 instances are failed to initialize,
-               the cluster will be in an error state and does not delete
-               failed VMs for debugging. NB: This can only be set for
-               primary workers now.
+            Optional. The minimum number of primary worker instances to
+            create. If ``min_num_instances`` is set, cluster creation
+            will succeed if the number of primary workers created is at
+            least equal to the ``min_num_instances`` number.
+
+            Example: Cluster creation request with ``num_instances`` =
+            ``5`` and ``min_num_instances`` = ``3``:
+
+            -  If 4 VMs are created and 1 instance fails, the failed VM
+               is deleted. The cluster is resized to 4 instances and
+               placed in a ``RUNNING`` state.
+            -  If 2 instances are created and 3 instances fail, the
+               cluster is placed in an ``ERROR`` state. The failed VMs
+               are not deleted.
         instance_flexibility_policy (google.cloud.dataproc_v1.types.InstanceFlexibilityPolicy):
             Optional. Instance flexibility Policy allowing a mixture of
             VM shapes and provisioning
@@ -1251,13 +1246,13 @@ class NodeGroup(proto.Message):
     """

     class Role(proto.Enum):
-        r"""Node group roles.
+        r"""Node pool roles.

         Values:
             ROLE_UNSPECIFIED (0):
                 Required unspecified role.
             DRIVER (1):
-                Job drivers run on the node group.
+                Job drivers run on the node pool.
         """
         ROLE_UNSPECIFIED = 0
         DRIVER = 1
@@ -2359,6 +2354,22 @@ class DiagnoseClusterRequest(proto.Message):
             handle the request.
         cluster_name (str):
             Required. The cluster name.
+        tarball_gcs_dir (str):
+            Optional. The output Cloud Storage directory
+            for the diagnostic tarball. If not specified, a
+            task-specific directory in the cluster's staging
+            bucket will be used.
+        diagnosis_interval (google.type.interval_pb2.Interval):
+            Optional. Time interval in which diagnosis
+            should be carried out on the cluster.
+        jobs (MutableSequence[str]):
+            Optional. Specifies a list of jobs on which
+            diagnosis is to be performed. Format:
+            projects/{project}/regions/{region}/jobs/{job}
+        yarn_application_ids (MutableSequence[str]):
+            Optional. Specifies a list of YARN
+            applications on which diagnosis is to be
+            performed.
     """

     project_id: str = proto.Field(
@@ -2373,6 +2384,23 @@ class DiagnoseClusterRequest(proto.Message):
         proto.STRING,
         number=2,
     )
+    tarball_gcs_dir: str = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    diagnosis_interval: interval_pb2.Interval = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        message=interval_pb2.Interval,
+    )
+    jobs: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=10,
+    )
+    yarn_application_ids: MutableSequence[str] = proto.RepeatedField(
+        proto.STRING,
+        number=11,
+    )


 class DiagnoseClusterResults(proto.Message):
diff --git a/samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json
index 701c4641..f1a48076 100644
--- a/samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json
+++ b/samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json
@@ -8,7 +8,7 @@
     ],
     "language": "PYTHON",
     "name": "google-cloud-dataproc",
-    "version": "5.5.1"
+    "version": "0.1.0"
   },
   "snippets": [
     {
diff --git a/scripts/fixup_dataproc_v1_keywords.py b/scripts/fixup_dataproc_v1_keywords.py
index 593c9de3..7b4c85ab 100644
--- a/scripts/fixup_dataproc_v1_keywords.py
+++ b/scripts/fixup_dataproc_v1_keywords.py
@@ -50,7 +50,7 @@ class dataprocCallTransformer(cst.CSTTransformer):
        'delete_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ),
        'delete_job': ('project_id', 'region', 'job_id', ),
        'delete_workflow_template': ('name', 'version', ),
-       'diagnose_cluster': ('project_id', 'region', 'cluster_name', ),
+       'diagnose_cluster': ('project_id', 'region', 'cluster_name', 'tarball_gcs_dir', 'diagnosis_interval', 'jobs', 'yarn_application_ids', ),
        'get_autoscaling_policy': ('name', ),
        'get_batch': ('name', ),
        'get_cluster': ('project_id', 'region', 'cluster_name', ),
diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
index 9fb80dd4..f9c925c8 100644
--- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
+++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py
@@ -51,6 +51,7 @@
 from google.protobuf import field_mask_pb2  # type: ignore
 from google.protobuf import json_format
 from google.protobuf import timestamp_pb2  # type: ignore
+from google.type import interval_pb2  # type: ignore
 import grpc
 from grpc.experimental import aio
 from proto.marshal.rules import wrappers