diff --git a/.coveragerc b/.coveragerc index 240638d1..8f752880 100644 --- a/.coveragerc +++ b/.coveragerc @@ -10,8 +10,3 @@ exclude_lines = pragma: NO COVER # Ignore debug-only repr def __repr__ - # Ignore pkg_resources exceptions. - # This is added at the module level as a safeguard for if someone - # generates the code and tries to run it without pip installing. This - # makes it virtually impossible to test properly. - except pkg_resources.DistributionNotFound diff --git a/.github/release-please.yml b/.github/release-please.yml index e25e4be0..df590806 100644 --- a/.github/release-please.yml +++ b/.github/release-please.yml @@ -1,5 +1,6 @@ releaseType: python handleGHRelease: true +manifest: true # NOTE: this section is generated by synthtool.languages.python # See https://github.com/googleapis/synthtool/blob/master/synthtool/languages/python.py branches: diff --git a/.release-please-manifest.json b/.release-please-manifest.json new file mode 100644 index 00000000..f8d32fdd --- /dev/null +++ b/.release-please-manifest.json @@ -0,0 +1,3 @@ +{ + ".": "5.0.3" +} diff --git a/docs/dataproc_v1/node_group_controller.rst b/docs/dataproc_v1/node_group_controller.rst new file mode 100644 index 00000000..55d67f48 --- /dev/null +++ b/docs/dataproc_v1/node_group_controller.rst @@ -0,0 +1,6 @@ +NodeGroupController +------------------------------------- + +.. automodule:: google.cloud.dataproc_v1.services.node_group_controller + :members: + :inherited-members: diff --git a/docs/dataproc_v1/services.rst b/docs/dataproc_v1/services.rst index ae0031d1..aee63982 100644 --- a/docs/dataproc_v1/services.rst +++ b/docs/dataproc_v1/services.rst @@ -7,4 +7,5 @@ Services for Google Cloud Dataproc v1 API batch_controller cluster_controller job_controller + node_group_controller workflow_template_service diff --git a/docs/dataproc_v1/types.rst b/docs/dataproc_v1/types.rst index bc1a0a30..5dde0cd6 100644 --- a/docs/dataproc_v1/types.rst +++ b/docs/dataproc_v1/types.rst @@ -3,5 +3,4 @@ Types for Google Cloud Dataproc v1 API .. automodule:: google.cloud.dataproc_v1.types :members: - :undoc-members: :show-inheritance: diff --git a/docs/index.rst b/docs/index.rst index cf0a9d21..965328b5 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,23 +2,26 @@ .. include:: multiprocessing.rst + API Reference ------------- .. toctree:: - :maxdepth: 2 + :maxdepth: 2 + + dataproc_v1/services + dataproc_v1/types - dataproc_v1/services - dataproc_v1/types Migration Guide --------------- -See the guide below for instructions on migrating to the 2.x release of this library. +See the guide below for instructions on migrating to the latest version. .. toctree:: - :maxdepth: 2 + :maxdepth: 2 + +  UPGRADING - UPGRADING Changelog --------- @@ -26,6 +29,6 @@ Changelog For a list of all ``google-cloud-dataproc`` releases: .. toctree:: - :maxdepth: 2 + :maxdepth: 2 - changelog \ No newline at end of file + changelog diff --git a/google/cloud/dataproc/__init__.py b/google/cloud/dataproc/__init__.py index 214250f5..d15065d3 100644 --- a/google/cloud/dataproc/__init__.py +++ b/google/cloud/dataproc/__init__.py @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from google.cloud.dataproc import gapic_version as package_version + +__version__ = package_version.__version__ + from google.cloud.dataproc_v1.services.autoscaling_policy_service.client import ( AutoscalingPolicyServiceClient, @@ -36,6 +40,12 @@ from google.cloud.dataproc_v1.services.job_controller.async_client import ( JobControllerAsyncClient, ) +from google.cloud.dataproc_v1.services.node_group_controller.client import ( + NodeGroupControllerClient, +) +from google.cloud.dataproc_v1.services.node_group_controller.async_client import ( + NodeGroupControllerAsyncClient, +) from google.cloud.dataproc_v1.services.workflow_template_service.client import ( WorkflowTemplateServiceClient, ) @@ -83,6 +93,7 @@ from google.cloud.dataproc_v1.types.batches import SparkSqlBatch from google.cloud.dataproc_v1.types.clusters import AcceleratorConfig from google.cloud.dataproc_v1.types.clusters import AutoscalingConfig +from google.cloud.dataproc_v1.types.clusters import AuxiliaryNodeGroup from google.cloud.dataproc_v1.types.clusters import AuxiliaryServicesConfig from google.cloud.dataproc_v1.types.clusters import Cluster from google.cloud.dataproc_v1.types.clusters import ClusterConfig @@ -107,6 +118,7 @@ from google.cloud.dataproc_v1.types.clusters import ListClustersResponse from google.cloud.dataproc_v1.types.clusters import ManagedGroupConfig from google.cloud.dataproc_v1.types.clusters import MetastoreConfig +from google.cloud.dataproc_v1.types.clusters import NodeGroup from google.cloud.dataproc_v1.types.clusters import NodeGroupAffinity from google.cloud.dataproc_v1.types.clusters import NodeInitializationAction from google.cloud.dataproc_v1.types.clusters import ReservationAffinity @@ -119,6 +131,7 @@ from google.cloud.dataproc_v1.types.clusters import VirtualClusterConfig from google.cloud.dataproc_v1.types.jobs import CancelJobRequest from google.cloud.dataproc_v1.types.jobs import DeleteJobRequest +from google.cloud.dataproc_v1.types.jobs import DriverSchedulingConfig from google.cloud.dataproc_v1.types.jobs import GetJobRequest from google.cloud.dataproc_v1.types.jobs import HadoopJob from google.cloud.dataproc_v1.types.jobs import HiveJob @@ -141,9 +154,13 @@ from google.cloud.dataproc_v1.types.jobs import SubmitJobRequest from google.cloud.dataproc_v1.types.jobs import UpdateJobRequest from google.cloud.dataproc_v1.types.jobs import YarnApplication +from google.cloud.dataproc_v1.types.node_groups import CreateNodeGroupRequest +from google.cloud.dataproc_v1.types.node_groups import GetNodeGroupRequest +from google.cloud.dataproc_v1.types.node_groups import ResizeNodeGroupRequest from google.cloud.dataproc_v1.types.operations import BatchOperationMetadata from google.cloud.dataproc_v1.types.operations import ClusterOperationMetadata from google.cloud.dataproc_v1.types.operations import ClusterOperationStatus +from google.cloud.dataproc_v1.types.operations import NodeGroupOperationMetadata from google.cloud.dataproc_v1.types.shared import EnvironmentConfig from google.cloud.dataproc_v1.types.shared import ExecutionConfig from google.cloud.dataproc_v1.types.shared import GkeClusterConfig @@ -202,6 +219,8 @@ "ClusterControllerAsyncClient", "JobControllerClient", "JobControllerAsyncClient", + "NodeGroupControllerClient", + "NodeGroupControllerAsyncClient", "WorkflowTemplateServiceClient", "WorkflowTemplateServiceAsyncClient", "AutoscalingPolicy", @@ -226,6 +245,7 @@ "SparkSqlBatch", "AcceleratorConfig", "AutoscalingConfig", + "AuxiliaryNodeGroup", "AuxiliaryServicesConfig", "Cluster", 
"ClusterConfig", @@ -250,6 +270,7 @@ "ListClustersResponse", "ManagedGroupConfig", "MetastoreConfig", + "NodeGroup", "NodeGroupAffinity", "NodeInitializationAction", "ReservationAffinity", @@ -262,6 +283,7 @@ "VirtualClusterConfig", "CancelJobRequest", "DeleteJobRequest", + "DriverSchedulingConfig", "GetJobRequest", "HadoopJob", "HiveJob", @@ -284,9 +306,13 @@ "SubmitJobRequest", "UpdateJobRequest", "YarnApplication", + "CreateNodeGroupRequest", + "GetNodeGroupRequest", + "ResizeNodeGroupRequest", "BatchOperationMetadata", "ClusterOperationMetadata", "ClusterOperationStatus", + "NodeGroupOperationMetadata", "EnvironmentConfig", "ExecutionConfig", "GkeClusterConfig", diff --git a/google/cloud/dataproc/gapic_version.py b/google/cloud/dataproc/gapic_version.py new file mode 100644 index 00000000..6ed707c8 --- /dev/null +++ b/google/cloud/dataproc/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "5.0.3" # {x-release-please-version} diff --git a/google/cloud/dataproc_v1/__init__.py b/google/cloud/dataproc_v1/__init__.py index 729f5cbf..a5969782 100644 --- a/google/cloud/dataproc_v1/__init__.py +++ b/google/cloud/dataproc_v1/__init__.py @@ -13,6 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from google.cloud.dataproc import gapic_version as package_version + +__version__ = package_version.__version__ + from .services.autoscaling_policy_service import AutoscalingPolicyServiceClient from .services.autoscaling_policy_service import AutoscalingPolicyServiceAsyncClient @@ -22,6 +26,8 @@ from .services.cluster_controller import ClusterControllerAsyncClient from .services.job_controller import JobControllerClient from .services.job_controller import JobControllerAsyncClient +from .services.node_group_controller import NodeGroupControllerClient +from .services.node_group_controller import NodeGroupControllerAsyncClient from .services.workflow_template_service import WorkflowTemplateServiceClient from .services.workflow_template_service import WorkflowTemplateServiceAsyncClient @@ -47,6 +53,7 @@ from .types.batches import SparkSqlBatch from .types.clusters import AcceleratorConfig from .types.clusters import AutoscalingConfig +from .types.clusters import AuxiliaryNodeGroup from .types.clusters import AuxiliaryServicesConfig from .types.clusters import Cluster from .types.clusters import ClusterConfig @@ -71,6 +78,7 @@ from .types.clusters import ListClustersResponse from .types.clusters import ManagedGroupConfig from .types.clusters import MetastoreConfig +from .types.clusters import NodeGroup from .types.clusters import NodeGroupAffinity from .types.clusters import NodeInitializationAction from .types.clusters import ReservationAffinity @@ -83,6 +91,7 @@ from .types.clusters import VirtualClusterConfig from .types.jobs import CancelJobRequest from .types.jobs import DeleteJobRequest +from .types.jobs import DriverSchedulingConfig from .types.jobs import GetJobRequest from .types.jobs import HadoopJob from .types.jobs import HiveJob @@ -105,9 +114,13 @@ from .types.jobs import SubmitJobRequest from .types.jobs import UpdateJobRequest from .types.jobs import YarnApplication +from .types.node_groups import CreateNodeGroupRequest +from .types.node_groups import GetNodeGroupRequest +from .types.node_groups import ResizeNodeGroupRequest from .types.operations import BatchOperationMetadata from .types.operations import ClusterOperationMetadata from .types.operations import ClusterOperationStatus +from .types.operations import NodeGroupOperationMetadata from .types.shared import EnvironmentConfig from .types.shared import ExecutionConfig from .types.shared import GkeClusterConfig @@ -148,11 +161,13 @@ "BatchControllerAsyncClient", "ClusterControllerAsyncClient", "JobControllerAsyncClient", + "NodeGroupControllerAsyncClient", "WorkflowTemplateServiceAsyncClient", "AcceleratorConfig", "AutoscalingConfig", "AutoscalingPolicy", "AutoscalingPolicyServiceClient", + "AuxiliaryNodeGroup", "AuxiliaryServicesConfig", "BasicAutoscalingAlgorithm", "BasicYarnAutoscalingConfig", @@ -174,6 +189,7 @@ "CreateAutoscalingPolicyRequest", "CreateBatchRequest", "CreateClusterRequest", + "CreateNodeGroupRequest", "CreateWorkflowTemplateRequest", "DataprocMetricConfig", "DeleteAutoscalingPolicyRequest", @@ -184,6 +200,7 @@ "DiagnoseClusterRequest", "DiagnoseClusterResults", "DiskConfig", + "DriverSchedulingConfig", "EncryptionConfig", "EndpointConfig", "EnvironmentConfig", @@ -194,6 +211,7 @@ "GetBatchRequest", "GetClusterRequest", "GetJobRequest", + "GetNodeGroupRequest", "GetWorkflowTemplateRequest", "GkeClusterConfig", "GkeNodePoolConfig", @@ -230,7 +248,10 @@ "ManagedCluster", "ManagedGroupConfig", "MetastoreConfig", + "NodeGroup", "NodeGroupAffinity", + "NodeGroupControllerClient", + 
"NodeGroupOperationMetadata", "NodeInitializationAction", "OrderedJob", "ParameterValidation", @@ -242,6 +263,7 @@ "QueryList", "RegexValidation", "ReservationAffinity", + "ResizeNodeGroupRequest", "RuntimeConfig", "RuntimeInfo", "SecurityConfig", diff --git a/google/cloud/dataproc_v1/gapic_metadata.json b/google/cloud/dataproc_v1/gapic_metadata.json index 8e050e14..da52acb4 100644 --- a/google/cloud/dataproc_v1/gapic_metadata.json +++ b/google/cloud/dataproc_v1/gapic_metadata.json @@ -301,6 +301,50 @@ } } }, + "NodeGroupController": { + "clients": { + "grpc": { + "libraryClient": "NodeGroupControllerClient", + "rpcs": { + "CreateNodeGroup": { + "methods": [ + "create_node_group" + ] + }, + "GetNodeGroup": { + "methods": [ + "get_node_group" + ] + }, + "ResizeNodeGroup": { + "methods": [ + "resize_node_group" + ] + } + } + }, + "grpc-async": { + "libraryClient": "NodeGroupControllerAsyncClient", + "rpcs": { + "CreateNodeGroup": { + "methods": [ + "create_node_group" + ] + }, + "GetNodeGroup": { + "methods": [ + "get_node_group" + ] + }, + "ResizeNodeGroup": { + "methods": [ + "resize_node_group" + ] + } + } + } + } + }, "WorkflowTemplateService": { "clients": { "grpc": { diff --git a/google/cloud/dataproc_v1/gapic_version.py b/google/cloud/dataproc_v1/gapic_version.py new file mode 100644 index 00000000..6ed707c8 --- /dev/null +++ b/google/cloud/dataproc_v1/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "5.0.3" # {x-release-please-version} diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py index 2f00fb39..df07acd0 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/async_client.py @@ -16,8 +16,19 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -169,9 +180,9 @@ def transport(self) -> AutoscalingPolicyServiceTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, AutoscalingPolicyServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the autoscaling policy service client. 
@@ -215,14 +226,14 @@ def __init__( async def create_autoscaling_policy( self, - request: Union[ - autoscaling_policies.CreateAutoscalingPolicyRequest, dict + request: Optional[ + Union[autoscaling_policies.CreateAutoscalingPolicyRequest, dict] ] = None, *, - parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, + parent: Optional[str] = None, + policy: Optional[autoscaling_policies.AutoscalingPolicy] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> autoscaling_policies.AutoscalingPolicy: r"""Creates new autoscaling policy. @@ -260,7 +271,7 @@ async def sample_create_autoscaling_policy(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.CreateAutoscalingPolicyRequest, dict]]): The request object. A request to create an autoscaling policy. parent (:class:`str`): @@ -346,13 +357,13 @@ async def sample_create_autoscaling_policy(): async def update_autoscaling_policy( self, - request: Union[ - autoscaling_policies.UpdateAutoscalingPolicyRequest, dict + request: Optional[ + Union[autoscaling_policies.UpdateAutoscalingPolicyRequest, dict] ] = None, *, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy: Optional[autoscaling_policies.AutoscalingPolicy] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> autoscaling_policies.AutoscalingPolicy: r"""Updates (replaces) autoscaling policy. @@ -392,7 +403,7 @@ async def sample_update_autoscaling_policy(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.UpdateAutoscalingPolicyRequest, dict]]): The request object. A request to update an autoscaling policy. policy (:class:`google.cloud.dataproc_v1.types.AutoscalingPolicy`): @@ -470,11 +481,13 @@ async def sample_update_autoscaling_policy(): async def get_autoscaling_policy( self, - request: Union[autoscaling_policies.GetAutoscalingPolicyRequest, dict] = None, + request: Optional[ + Union[autoscaling_policies.GetAutoscalingPolicyRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> autoscaling_policies.AutoscalingPolicy: r"""Retrieves autoscaling policy. @@ -506,7 +519,7 @@ async def sample_get_autoscaling_policy(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.GetAutoscalingPolicyRequest, dict]]): The request object. A request to fetch an autoscaling policy. 
name (:class:`str`): @@ -592,13 +605,13 @@ async def sample_get_autoscaling_policy(): async def list_autoscaling_policies( self, - request: Union[ - autoscaling_policies.ListAutoscalingPoliciesRequest, dict + request: Optional[ + Union[autoscaling_policies.ListAutoscalingPoliciesRequest, dict] ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAutoscalingPoliciesAsyncPager: r"""Lists autoscaling policies in the project. @@ -631,7 +644,7 @@ async def sample_list_autoscaling_policies(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.ListAutoscalingPoliciesRequest, dict]]): The request object. A request to list autoscaling policies in a project. parent (:class:`str`): @@ -729,13 +742,13 @@ async def sample_list_autoscaling_policies(): async def delete_autoscaling_policy( self, - request: Union[ - autoscaling_policies.DeleteAutoscalingPolicyRequest, dict + request: Optional[ + Union[autoscaling_policies.DeleteAutoscalingPolicyRequest, dict] ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an autoscaling policy. It is an error to @@ -766,7 +779,7 @@ async def sample_delete_autoscaling_policy(): await client.delete_autoscaling_policy(request=request) Args: - request (Union[google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.DeleteAutoscalingPolicyRequest, dict]]): The request object. A request to delete an autoscaling policy. 
Autoscaling policies in use by one or more clusters will @@ -842,14 +855,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("AutoscalingPolicyServiceAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py index 7d458872..eefe3bd8 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/client.py @@ -16,8 +16,20 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -57,7 +69,7 @@ class AutoscalingPolicyServiceClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[AutoscalingPolicyServiceTransport]: """Returns an appropriate transport class. @@ -334,8 +346,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, AutoscalingPolicyServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, AutoscalingPolicyServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the autoscaling policy service client. @@ -349,7 +361,7 @@ def __init__( transport (Union[str, AutoscalingPolicyServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT @@ -379,6 +391,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -431,14 +444,14 @@ def __init__( def create_autoscaling_policy( self, - request: Union[ - autoscaling_policies.CreateAutoscalingPolicyRequest, dict + request: Optional[ + Union[autoscaling_policies.CreateAutoscalingPolicyRequest, dict] ] = None, *, - parent: str = None, - policy: autoscaling_policies.AutoscalingPolicy = None, + parent: Optional[str] = None, + policy: Optional[autoscaling_policies.AutoscalingPolicy] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> autoscaling_policies.AutoscalingPolicy: r"""Creates new autoscaling policy. @@ -564,13 +577,13 @@ def sample_create_autoscaling_policy(): def update_autoscaling_policy( self, - request: Union[ - autoscaling_policies.UpdateAutoscalingPolicyRequest, dict + request: Optional[ + Union[autoscaling_policies.UpdateAutoscalingPolicyRequest, dict] ] = None, *, - policy: autoscaling_policies.AutoscalingPolicy = None, + policy: Optional[autoscaling_policies.AutoscalingPolicy] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> autoscaling_policies.AutoscalingPolicy: r"""Updates (replaces) autoscaling policy. @@ -680,11 +693,13 @@ def sample_update_autoscaling_policy(): def get_autoscaling_policy( self, - request: Union[autoscaling_policies.GetAutoscalingPolicyRequest, dict] = None, + request: Optional[ + Union[autoscaling_policies.GetAutoscalingPolicyRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> autoscaling_policies.AutoscalingPolicy: r"""Retrieves autoscaling policy. @@ -792,13 +807,13 @@ def sample_get_autoscaling_policy(): def list_autoscaling_policies( self, - request: Union[ - autoscaling_policies.ListAutoscalingPoliciesRequest, dict + request: Optional[ + Union[autoscaling_policies.ListAutoscalingPoliciesRequest, dict] ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListAutoscalingPoliciesPager: r"""Lists autoscaling policies in the project. @@ -921,13 +936,13 @@ def sample_list_autoscaling_policies(): def delete_autoscaling_policy( self, - request: Union[ - autoscaling_policies.DeleteAutoscalingPolicyRequest, dict + request: Optional[ + Union[autoscaling_policies.DeleteAutoscalingPolicyRequest, dict] ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes an autoscaling policy. 
It is an error to @@ -1043,14 +1058,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("AutoscalingPolicyServiceClient",) diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py index e2bf3b59..6449d51b 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.dataproc_v1 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -28,14 +29,9 @@ from google.cloud.dataproc_v1.types import autoscaling_policies from google.protobuf import empty_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class AutoscalingPolicyServiceTransport(abc.ABC): @@ -49,7 +45,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py index 2962dd25..0da7e6c4 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc.py @@ -49,14 +49,14 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -183,8 +183,8 @@ def __init__( def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: 
ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py index 87267eae..e37ed08e 100644 --- a/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/autoscaling_policy_service/transports/grpc_asyncio.py @@ -51,7 +51,7 @@ class AutoscalingPolicyServiceGrpcAsyncIOTransport(AutoscalingPolicyServiceTrans def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -94,15 +94,15 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/batch_controller/async_client.py b/google/cloud/dataproc_v1/services/batch_controller/async_client.py index 9935a0bb..c13320d1 100644 --- a/google/cloud/dataproc_v1/services/batch_controller/async_client.py +++ b/google/cloud/dataproc_v1/services/batch_controller/async_client.py @@ -16,8 +16,19 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -165,9 +176,9 @@ def transport(self) -> BatchControllerTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, BatchControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the batch controller client. 
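
As seen in the transports and clients above, every hand-rolled `try/except pkg_resources.DistributionNotFound` block is replaced by a direct read of the vendored version module. Consolidated, the new pattern (taken verbatim from the diff) looks like this:

```python
from google.api_core import gapic_v1
from google.cloud.dataproc_v1 import gapic_version as package_version

# Previously (per the removed code above):
#   try:
#       ...gapic_version=pkg_resources.get_distribution("google-cloud-dataproc").version
#   except pkg_resources.DistributionNotFound:
#       DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
#
# The version now comes from the generated gapic_version module, so the client
# info is correct even when the generated code runs without a pip install, and
# the untestable DistributionNotFound branch (see the .coveragerc change at the
# top of this diff) goes away.
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
    gapic_version=package_version.__version__
)
```
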
@@ -211,13 +222,13 @@ def __init__( async def create_batch( self, - request: Union[batches.CreateBatchRequest, dict] = None, + request: Optional[Union[batches.CreateBatchRequest, dict]] = None, *, - parent: str = None, - batch: batches.Batch = None, - batch_id: str = None, + parent: Optional[str] = None, + batch: Optional[batches.Batch] = None, + batch_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a batch workload that executes @@ -252,13 +263,13 @@ async def sample_create_batch(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.CreateBatchRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.CreateBatchRequest, dict]]): The request object. A request to create a batch workload. parent (:class:`str`): @@ -354,11 +365,11 @@ async def sample_create_batch(): async def get_batch( self, - request: Union[batches.GetBatchRequest, dict] = None, + request: Optional[Union[batches.GetBatchRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> batches.Batch: r"""Gets the batch workload resource representation. @@ -390,7 +401,7 @@ async def sample_get_batch(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.GetBatchRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.GetBatchRequest, dict]]): The request object. A request to get the resource representation for a batch workload. name (:class:`str`): @@ -456,11 +467,11 @@ async def sample_get_batch(): async def list_batches( self, - request: Union[batches.ListBatchesRequest, dict] = None, + request: Optional[Union[batches.ListBatchesRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBatchesAsyncPager: r"""Lists batch workloads. @@ -493,7 +504,7 @@ async def sample_list_batches(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.ListBatchesRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.ListBatchesRequest, dict]]): The request object. A request to list batch workloads in a project. parent (:class:`str`): @@ -570,11 +581,11 @@ async def sample_list_batches(): async def delete_batch( self, - request: Union[batches.DeleteBatchRequest, dict] = None, + request: Optional[Union[batches.DeleteBatchRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the batch workload resource. 
If the batch is not in @@ -605,7 +616,7 @@ async def sample_delete_batch(): await client.delete_batch(request=request) Args: - request (Union[google.cloud.dataproc_v1.types.DeleteBatchRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.DeleteBatchRequest, dict]]): The request object. A request to delete a batch workload. name (:class:`str`): @@ -667,14 +678,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BatchControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/batch_controller/client.py b/google/cloud/dataproc_v1/services/batch_controller/client.py index 40b977ae..c26d5d00 100644 --- a/google/cloud/dataproc_v1/services/batch_controller/client.py +++ b/google/cloud/dataproc_v1/services/batch_controller/client.py @@ -16,8 +16,20 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -62,7 +74,7 @@ class BatchControllerClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[BatchControllerTransport]: """Returns an appropriate transport class. @@ -339,8 +351,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BatchControllerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, BatchControllerTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the batch controller client. @@ -354,7 +366,7 @@ def __init__( transport (Union[str, BatchControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT @@ -384,6 +396,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -436,13 +449,13 @@ def __init__( def create_batch( self, - request: Union[batches.CreateBatchRequest, dict] = None, + request: Optional[Union[batches.CreateBatchRequest, dict]] = None, *, - parent: str = None, - batch: batches.Batch = None, - batch_id: str = None, + parent: Optional[str] = None, + batch: Optional[batches.Batch] = None, + batch_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a batch workload that executes @@ -579,11 +592,11 @@ def sample_create_batch(): def get_batch( self, - request: Union[batches.GetBatchRequest, dict] = None, + request: Optional[Union[batches.GetBatchRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> batches.Batch: r"""Gets the batch workload resource representation. @@ -681,11 +694,11 @@ def sample_get_batch(): def list_batches( self, - request: Union[batches.ListBatchesRequest, dict] = None, + request: Optional[Union[batches.ListBatchesRequest, dict]] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListBatchesPager: r"""Lists batch workloads. @@ -795,11 +808,11 @@ def sample_list_batches(): def delete_batch( self, - request: Union[batches.DeleteBatchRequest, dict] = None, + request: Optional[Union[batches.DeleteBatchRequest, dict]] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the batch workload resource. 
If the batch is not in @@ -899,14 +912,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("BatchControllerClient",) diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/base.py b/google/cloud/dataproc_v1/services/batch_controller/transports/base.py index 6eae6f77..af56cafe 100644 --- a/google/cloud/dataproc_v1/services/batch_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/batch_controller/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.dataproc_v1 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -30,14 +31,9 @@ from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class BatchControllerTransport(abc.ABC): @@ -51,7 +47,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py index d76c0ae0..4bd4368d 100644 --- a/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc.py @@ -51,14 +51,14 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -186,8 +186,8 @@ def __init__( def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: 
Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py index 5536f3a0..73f90799 100644 --- a/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/batch_controller/transports/grpc_asyncio.py @@ -53,7 +53,7 @@ class BatchControllerGrpcAsyncIOTransport(BatchControllerTransport): def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -96,15 +96,15 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py index f81268ef..aaa45c1b 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/async_client.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/async_client.py @@ -16,8 +16,19 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -53,6 +64,8 @@ class ClusterControllerAsyncClient: DEFAULT_ENDPOINT = ClusterControllerClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = ClusterControllerClient.DEFAULT_MTLS_ENDPOINT + node_group_path = staticmethod(ClusterControllerClient.node_group_path) + parse_node_group_path = staticmethod(ClusterControllerClient.parse_node_group_path) service_path = staticmethod(ClusterControllerClient.service_path) parse_service_path = staticmethod(ClusterControllerClient.parse_service_path) common_billing_account_path = staticmethod( @@ -165,9 +178,9 @@ def transport(self) -> ClusterControllerTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = 
None, transport: Union[str, ClusterControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the cluster controller client. @@ -211,13 +224,13 @@ def __init__( async def create_cluster( self, - request: Union[clusters.CreateClusterRequest, dict] = None, + request: Optional[Union[clusters.CreateClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster: clusters.Cluster = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster: Optional[clusters.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster in a project. The returned @@ -256,13 +269,13 @@ async def sample_create_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.CreateClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.CreateClusterRequest, dict]]): The request object. A request to create a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -368,15 +381,15 @@ async def sample_create_cluster(): async def update_cluster( self, - request: Union[clusters.UpdateClusterRequest, dict] = None, + request: Optional[Union[clusters.UpdateClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - cluster: clusters.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, + cluster: Optional[clusters.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster in a project. The returned @@ -419,13 +432,13 @@ async def sample_update_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.UpdateClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.UpdateClusterRequest, dict]]): The request object. A request to update a cluster. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -609,10 +622,10 @@ async def sample_update_cluster(): async def stop_cluster( self, - request: Union[clusters.StopClusterRequest, dict] = None, + request: Optional[Union[clusters.StopClusterRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Stops a cluster in a project. 
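
The `gapic_metadata.json` entry earlier in this diff registers three RPCs for the new `NodeGroupController` service: `create_node_group`, `get_node_group` and `resize_node_group`. A usage sketch under stated assumptions: the request field names (`name`, `size`) and the long-running behaviour of `resize_node_group` are taken from the Dataproc API surface and are not shown in this diff; the resource name follows the `node_group_path` format added to `ClusterControllerClient` later in this change.

```python
from google.cloud import dataproc_v1

client = dataproc_v1.NodeGroupControllerClient()

# Resource name format matches the node_group_path helper added in this change.
name = "projects/my-project/regions/us-central1/clusters/my-cluster/nodeGroups/my-node-group"

# Field names below are assumptions (not part of this diff).
node_group = client.get_node_group(request=dataproc_v1.GetNodeGroupRequest(name=name))

operation = client.resize_node_group(  # assumed to return a long-running operation
    request=dataproc_v1.ResizeNodeGroupRequest(name=name, size=5)
)
resized = operation.result()
print(resized.name)
```
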
@@ -644,13 +657,13 @@ async def sample_stop_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.StopClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.StopClusterRequest, dict]]): The request object. A request to stop a cluster. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -710,10 +723,10 @@ async def sample_stop_cluster(): async def start_cluster( self, - request: Union[clusters.StartClusterRequest, dict] = None, + request: Optional[Union[clusters.StartClusterRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Starts a cluster in a project. @@ -745,13 +758,13 @@ async def sample_start_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.StartClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.StartClusterRequest, dict]]): The request object. A request to start a cluster. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -811,13 +824,13 @@ async def sample_start_cluster(): async def delete_cluster( self, - request: Union[clusters.DeleteClusterRequest, dict] = None, + request: Optional[Union[clusters.DeleteClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Deletes a cluster in a project. The returned @@ -852,13 +865,13 @@ async def sample_delete_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.DeleteClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.DeleteClusterRequest, dict]]): The request object. A request to delete a cluster. project_id (:class:`str`): Required. 
The ID of the Google Cloud @@ -973,13 +986,13 @@ async def sample_delete_cluster(): async def get_cluster( self, - request: Union[clusters.GetClusterRequest, dict] = None, + request: Optional[Union[clusters.GetClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> clusters.Cluster: r"""Gets the resource representation for a cluster in a @@ -1014,7 +1027,7 @@ async def sample_get_cluster(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.GetClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.GetClusterRequest, dict]]): The request object. Request to get the resource representation for a cluster in a project. project_id (:class:`str`): @@ -1115,13 +1128,13 @@ async def sample_get_cluster(): async def list_clusters( self, - request: Union[clusters.ListClustersRequest, dict] = None, + request: Optional[Union[clusters.ListClustersRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - filter: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + filter: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListClustersAsyncPager: r"""Lists all regions/{region}/clusters in a project @@ -1156,7 +1169,7 @@ async def sample_list_clusters(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.ListClustersRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.ListClustersRequest, dict]]): The request object. A request to list the clusters in a project. project_id (:class:`str`): @@ -1290,13 +1303,13 @@ async def sample_list_clusters(): async def diagnose_cluster( self, - request: Union[clusters.DiagnoseClusterRequest, dict] = None, + request: Optional[Union[clusters.DiagnoseClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Gets cluster diagnostic information. The returned @@ -1335,13 +1348,13 @@ async def sample_diagnose_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.DiagnoseClusterRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.DiagnoseClusterRequest, dict]]): The request object. A request to collect cluster diagnostic information. 
project_id (:class:`str`):
@@ -1455,14 +1468,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("ClusterControllerAsyncClient",)
diff --git a/google/cloud/dataproc_v1/services/cluster_controller/client.py b/google/cloud/dataproc_v1/services/cluster_controller/client.py index aa4eb2ae..199ceb2a 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/client.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/client.py
@@ -16,8 +16,20 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions
@@ -62,7 +74,7 @@ class ClusterControllerClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[ClusterControllerTransport]: """Returns an appropriate transport class.
@@ -169,6 +181,30 @@ def transport(self) -> ClusterControllerTransport: """ return self._transport + @staticmethod + def node_group_path( + project: str, + region: str, + cluster: str, + node_group: str, + ) -> str: + """Returns a fully-qualified node_group string.""" + return "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}".format( + project=project, + region=region, + cluster=cluster, + node_group=node_group, + ) + + @staticmethod + def parse_node_group_path(path: str) -> Dict[str, str]: + """Parses a node_group path into its component segments.""" + m = re.match( + r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/clusters/(?P<cluster>.+?)/nodeGroups/(?P<node_group>.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def service_path( project: str,
@@ -339,8 +375,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, ClusterControllerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, ClusterControllerTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the cluster controller client.
@@ -354,7 +390,7 @@ def __init__( transport (Union[str, ClusterControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT @@ -384,6 +420,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -436,13 +473,13 @@ def __init__( def create_cluster( self, - request: Union[clusters.CreateClusterRequest, dict] = None, + request: Optional[Union[clusters.CreateClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster: clusters.Cluster = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster: Optional[clusters.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Creates a cluster in a project. The returned @@ -584,15 +621,15 @@ def sample_create_cluster(): def update_cluster( self, - request: Union[clusters.UpdateClusterRequest, dict] = None, + request: Optional[Union[clusters.UpdateClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, - cluster: clusters.Cluster = None, - update_mask: field_mask_pb2.FieldMask = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, + cluster: Optional[clusters.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Updates a cluster in a project. The returned @@ -816,10 +853,10 @@ def sample_update_cluster(): def stop_cluster( self, - request: Union[clusters.StopClusterRequest, dict] = None, + request: Optional[Union[clusters.StopClusterRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Stops a cluster in a project. @@ -918,10 +955,10 @@ def sample_stop_cluster(): def start_cluster( self, - request: Union[clusters.StartClusterRequest, dict] = None, + request: Optional[Union[clusters.StartClusterRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Starts a cluster in a project. @@ -1020,13 +1057,13 @@ def sample_start_cluster(): def delete_cluster( self, - request: Union[clusters.DeleteClusterRequest, dict] = None, + request: Optional[Union[clusters.DeleteClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Deletes a cluster in a project. 
The returned @@ -1173,13 +1210,13 @@ def sample_delete_cluster(): def get_cluster( self, - request: Union[clusters.GetClusterRequest, dict] = None, + request: Optional[Union[clusters.GetClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> clusters.Cluster: r"""Gets the resource representation for a cluster in a @@ -1304,13 +1341,13 @@ def sample_get_cluster(): def list_clusters( self, - request: Union[clusters.ListClustersRequest, dict] = None, + request: Optional[Union[clusters.ListClustersRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - filter: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + filter: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListClustersPager: r"""Lists all regions/{region}/clusters in a project @@ -1468,13 +1505,13 @@ def sample_list_clusters(): def diagnose_cluster( self, - request: Union[clusters.DiagnoseClusterRequest, dict] = None, + request: Optional[Union[clusters.DiagnoseClusterRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - cluster_name: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + cluster_name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Gets cluster diagnostic information. 
The returned @@ -1631,14 +1668,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("ClusterControllerClient",) diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py index 3cc68102..d6c88f6e 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.dataproc_v1 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -29,14 +30,9 @@ from google.cloud.dataproc_v1.types import clusters from google.longrunning import operations_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class ClusterControllerTransport(abc.ABC): @@ -50,7 +46,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py index 73522293..fb9a1c1c 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc.py @@ -50,14 +50,14 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -185,8 +185,8 @@ def __init__( def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: 
Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py index 182d7278..fda9a70f 100644 --- a/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/cluster_controller/transports/grpc_asyncio.py @@ -52,7 +52,7 @@ class ClusterControllerGrpcAsyncIOTransport(ClusterControllerTransport): def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -95,15 +95,15 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/job_controller/async_client.py b/google/cloud/dataproc_v1/services/job_controller/async_client.py index f5eb0563..3c41cd27 100644 --- a/google/cloud/dataproc_v1/services/job_controller/async_client.py +++ b/google/cloud/dataproc_v1/services/job_controller/async_client.py @@ -16,8 +16,19 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -158,9 +169,9 @@ def transport(self) -> JobControllerTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, JobControllerTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the job controller client. 
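(Illustrative sketch, not part of this diff: the widened annotations above keep the existing calling pattern working. The endpoint, project, region, cluster, bucket, and jar values below are placeholders.)

import asyncio

from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1


async def submit_sample_job() -> None:
    # Placeholder regional endpoint and resource names; adjust for a real environment.
    client = dataproc_v1.JobControllerAsyncClient(
        client_options=ClientOptions(api_endpoint="us-central1-dataproc.googleapis.com:443")
    )
    job = dataproc_v1.Job(
        placement=dataproc_v1.JobPlacement(cluster_name="my-cluster"),
        hadoop_job=dataproc_v1.HadoopJob(
            main_jar_file_uri="file:///usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar",
            args=["wordcount", "gs://my-bucket/input/", "gs://my-bucket/output/"],
        ),
    )
    # The flattened arguments stay optional; timeout may be omitted
    # (falling back to gapic_v1.method.DEFAULT) or passed as a plain float as before.
    submitted = await client.submit_job(
        project_id="my-project",
        region="us-central1",
        job=job,
        timeout=300.0,
    )
    print(submitted.reference.job_id)


asyncio.run(submit_sample_job())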
@@ -204,13 +215,13 @@ def __init__( async def submit_job( self, - request: Union[jobs.SubmitJobRequest, dict] = None, + request: Optional[Union[jobs.SubmitJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job: Optional[jobs.Job] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Submits a job to a cluster. @@ -248,7 +259,7 @@ async def sample_submit_job(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.SubmitJobRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.SubmitJobRequest, dict]]): The request object. A request to submit a job. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -342,13 +353,13 @@ async def sample_submit_job(): async def submit_job_as_operation( self, - request: Union[jobs.SubmitJobRequest, dict] = None, + request: Optional[Union[jobs.SubmitJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job: Optional[jobs.Job] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Submits job to a cluster. @@ -384,13 +395,13 @@ async def sample_submit_job_as_operation(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.SubmitJobRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.SubmitJobRequest, dict]]): The request object. A request to submit a job. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -497,13 +508,13 @@ async def sample_submit_job_as_operation(): async def get_job( self, - request: Union[jobs.GetJobRequest, dict] = None, + request: Optional[Union[jobs.GetJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job_id: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Gets the resource representation for a job in a @@ -538,7 +549,7 @@ async def sample_get_job(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.GetJobRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.GetJobRequest, dict]]): The request object. A request to get the resource representation for a job in a project. 
project_id (:class:`str`): @@ -636,13 +647,13 @@ async def sample_get_job(): async def list_jobs( self, - request: Union[jobs.ListJobsRequest, dict] = None, + request: Optional[Union[jobs.ListJobsRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - filter: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + filter: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsAsyncPager: r"""Lists regions/{region}/jobs in a project. @@ -676,7 +687,7 @@ async def sample_list_jobs(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.ListJobsRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.ListJobsRequest, dict]]): The request object. A request to list jobs in a project. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -802,10 +813,10 @@ async def sample_list_jobs(): async def update_job( self, - request: Union[jobs.UpdateJobRequest, dict] = None, + request: Optional[Union[jobs.UpdateJobRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Updates a job in a project. @@ -844,7 +855,7 @@ async def sample_update_job(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.UpdateJobRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.UpdateJobRequest, dict]]): The request object. A request to update a job. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -901,13 +912,13 @@ async def sample_update_job(): async def cancel_job( self, - request: Union[jobs.CancelJobRequest, dict] = None, + request: Optional[Union[jobs.CancelJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job_id: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Starts a job cancellation request. To access the job resource @@ -945,7 +956,7 @@ async def sample_cancel_job(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.CancelJobRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.CancelJobRequest, dict]]): The request object. A request to cancel a job. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -1042,13 +1053,13 @@ async def sample_cancel_job(): async def delete_job( self, - request: Union[jobs.DeleteJobRequest, dict] = None, + request: Optional[Union[jobs.DeleteJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job_id: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the job from the project. 
If the job is active, the @@ -1080,7 +1091,7 @@ async def sample_delete_job(): await client.delete_job(request=request) Args: - request (Union[google.cloud.dataproc_v1.types.DeleteJobRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.DeleteJobRequest, dict]]): The request object. A request to delete a job. project_id (:class:`str`): Required. The ID of the Google Cloud @@ -1173,14 +1184,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("JobControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/job_controller/client.py b/google/cloud/dataproc_v1/services/job_controller/client.py index b36531b8..560d8075 100644 --- a/google/cloud/dataproc_v1/services/job_controller/client.py +++ b/google/cloud/dataproc_v1/services/job_controller/client.py @@ -16,8 +16,20 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -57,7 +69,7 @@ class JobControllerClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[JobControllerTransport]: """Returns an appropriate transport class. @@ -310,8 +322,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, JobControllerTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, JobControllerTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the job controller client. @@ -325,7 +337,7 @@ def __init__( transport (Union[str, JobControllerTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT @@ -355,6 +367,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -407,13 +420,13 @@ def __init__( def submit_job( self, - request: Union[jobs.SubmitJobRequest, dict] = None, + request: Optional[Union[jobs.SubmitJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job: Optional[jobs.Job] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Submits a job to a cluster. @@ -536,13 +549,13 @@ def sample_submit_job(): def submit_job_as_operation( self, - request: Union[jobs.SubmitJobRequest, dict] = None, + request: Optional[Union[jobs.SubmitJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job: jobs.Job = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job: Optional[jobs.Job] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Submits job to a cluster. @@ -682,13 +695,13 @@ def sample_submit_job_as_operation(): def get_job( self, - request: Union[jobs.GetJobRequest, dict] = None, + request: Optional[Union[jobs.GetJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job_id: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Gets the resource representation for a job in a @@ -810,13 +823,13 @@ def sample_get_job(): def list_jobs( self, - request: Union[jobs.ListJobsRequest, dict] = None, + request: Optional[Union[jobs.ListJobsRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - filter: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + filter: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListJobsPager: r"""Lists regions/{region}/jobs in a project. @@ -965,10 +978,10 @@ def sample_list_jobs(): def update_job( self, - request: Union[jobs.UpdateJobRequest, dict] = None, + request: Optional[Union[jobs.UpdateJobRequest, dict]] = None, *, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Updates a job in a project. 
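(Another illustrative sketch with placeholder values, not part of this diff: the synchronous JobControllerClient gets the same widened signatures, so client_options may be passed as a plain dict and the flattened list_jobs arguments remain optional.)

from google.cloud import dataproc_v1

# Placeholder project and region; a plain dict is accepted for client_options.
client = dataproc_v1.JobControllerClient(
    client_options={"api_endpoint": "us-central1-dataproc.googleapis.com:443"}
)

# Iterate the pager returned by list_jobs; the filter string follows the
# ListJobsRequest documentation.
for job in client.list_jobs(
    project_id="my-project",
    region="us-central1",
    filter="status.state = ACTIVE",
):
    print(job.reference.job_id, job.status.state)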
@@ -1056,13 +1069,13 @@ def sample_update_job(): def cancel_job( self, - request: Union[jobs.CancelJobRequest, dict] = None, + request: Optional[Union[jobs.CancelJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job_id: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> jobs.Job: r"""Starts a job cancellation request. To access the job resource @@ -1186,13 +1199,13 @@ def sample_cancel_job(): def delete_job( self, - request: Union[jobs.DeleteJobRequest, dict] = None, + request: Optional[Union[jobs.DeleteJobRequest, dict]] = None, *, - project_id: str = None, - region: str = None, - job_id: str = None, + project_id: Optional[str] = None, + region: Optional[str] = None, + job_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes the job from the project. If the job is active, the @@ -1315,14 +1328,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("JobControllerClient",) diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/base.py b/google/cloud/dataproc_v1/services/job_controller/transports/base.py index 9e6d02b0..bce3beb2 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/base.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.dataproc_v1 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -30,14 +31,9 @@ from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class JobControllerTransport(abc.ABC): @@ -51,7 +47,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py index e2bff3e0..2fd0b373 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc.py @@ -50,14 +50,14 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - 
credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -185,8 +185,8 @@ def __init__( def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py index c9454d3a..b50ffe02 100644 --- a/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/job_controller/transports/grpc_asyncio.py @@ -52,7 +52,7 @@ class JobControllerGrpcAsyncIOTransport(JobControllerTransport): def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -95,15 +95,15 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/node_group_controller/__init__.py b/google/cloud/dataproc_v1/services/node_group_controller/__init__.py new file mode 100644 index 00000000..b2a4fcee --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, 
Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import NodeGroupControllerClient +from .async_client import NodeGroupControllerAsyncClient + +__all__ = ( + "NodeGroupControllerClient", + "NodeGroupControllerAsyncClient", +) diff --git a/google/cloud/dataproc_v1/services/node_group_controller/async_client.py b/google/cloud/dataproc_v1/services/node_group_controller/async_client.py new file mode 100644 index 00000000..bc9b1f7d --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/async_client.py @@ -0,0 +1,624 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import node_groups +from google.cloud.dataproc_v1.types import operations +from .transports.base import NodeGroupControllerTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import NodeGroupControllerGrpcAsyncIOTransport +from .client import NodeGroupControllerClient + + +class NodeGroupControllerAsyncClient: + """The ``NodeGroupControllerService`` provides methods to manage node + groups of Compute Engine managed instances. 
+ """ + + _client: NodeGroupControllerClient + + DEFAULT_ENDPOINT = NodeGroupControllerClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = NodeGroupControllerClient.DEFAULT_MTLS_ENDPOINT + + node_group_path = staticmethod(NodeGroupControllerClient.node_group_path) + parse_node_group_path = staticmethod( + NodeGroupControllerClient.parse_node_group_path + ) + common_billing_account_path = staticmethod( + NodeGroupControllerClient.common_billing_account_path + ) + parse_common_billing_account_path = staticmethod( + NodeGroupControllerClient.parse_common_billing_account_path + ) + common_folder_path = staticmethod(NodeGroupControllerClient.common_folder_path) + parse_common_folder_path = staticmethod( + NodeGroupControllerClient.parse_common_folder_path + ) + common_organization_path = staticmethod( + NodeGroupControllerClient.common_organization_path + ) + parse_common_organization_path = staticmethod( + NodeGroupControllerClient.parse_common_organization_path + ) + common_project_path = staticmethod(NodeGroupControllerClient.common_project_path) + parse_common_project_path = staticmethod( + NodeGroupControllerClient.parse_common_project_path + ) + common_location_path = staticmethod(NodeGroupControllerClient.common_location_path) + parse_common_location_path = staticmethod( + NodeGroupControllerClient.parse_common_location_path + ) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NodeGroupControllerAsyncClient: The constructed client. + """ + return NodeGroupControllerClient.from_service_account_info.__func__(NodeGroupControllerAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + NodeGroupControllerAsyncClient: The constructed client. + """ + return NodeGroupControllerClient.from_service_account_file.__func__(NodeGroupControllerAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source( + cls, client_options: Optional[ClientOptions] = None + ): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variabel is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. 
+ + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return NodeGroupControllerClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> NodeGroupControllerTransport: + """Returns the transport used by the client instance. + + Returns: + NodeGroupControllerTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial( + type(NodeGroupControllerClient).get_transport_class, + type(NodeGroupControllerClient), + ) + + def __init__( + self, + *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, NodeGroupControllerTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the node group controller client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.NodeGroupControllerTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = NodeGroupControllerClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_node_group( + self, + request: Optional[Union[node_groups.CreateNodeGroupRequest, dict]] = None, + *, + parent: Optional[str] = None, + node_group: Optional[clusters.NodeGroup] = None, + node_group_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a node group in a cluster. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import dataproc_v1 + + async def sample_create_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerAsyncClient() + + # Initialize request argument(s) + node_group = dataproc_v1.NodeGroup() + node_group.roles = ['DRIVER'] + + request = dataproc_v1.CreateNodeGroupRequest( + parent="parent_value", + node_group=node_group, + ) + + # Make the request + operation = client.create_node_group(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.dataproc_v1.types.CreateNodeGroupRequest, dict]]): + The request object. A request to create a node group. + parent (:class:`str`): + Required. The parent resource where this node group will + be created. Format: + ``projects/{project}/regions/{region}/clusters/{cluster}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (:class:`google.cloud.dataproc_v1.types.NodeGroup`): + Required. The node group to create. + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group_id (:class:`str`): + Optional. An optional node group ID. Generated if not + specified. + + The ID must contain only letters (a-z, A-Z), numbers + (0-9), underscores (_), and hyphens (-). Cannot begin or + end with underscore or hyphen. Must consist of from 3 to + 33 characters. + + This corresponds to the ``node_group_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.NodeGroup` Dataproc Node Group. + **The Dataproc NodeGroup resource is not related to + the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, node_group, node_group_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = node_groups.CreateNodeGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None: + request.parent = parent + if node_group is not None: + request.node_group = node_group + if node_group_id is not None: + request.node_group_id = node_group_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_node_group, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.NodeGroup, + metadata_type=operations.NodeGroupOperationMetadata, + ) + + # Done; return the response. + return response + + async def resize_node_group( + self, + request: Optional[Union[node_groups.ResizeNodeGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + size: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Resizes a node group in a cluster. The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import dataproc_v1 + + async def sample_resize_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerAsyncClient() + + # Initialize request argument(s) + request = dataproc_v1.ResizeNodeGroupRequest( + name="name_value", + size=443, + ) + + # Make the request + operation = client.resize_node_group(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.dataproc_v1.types.ResizeNodeGroupRequest, dict]]): + The request object. A request to resize a node group. + name (:class:`str`): + Required. The name of the node group to resize. Format: + ``projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + size (:class:`int`): + Required. The number of running + instances for the node group to + maintain. The group adds or removes + instances to maintain the number of + instances specified by this parameter. + + This corresponds to the ``size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.NodeGroup` Dataproc Node Group. + **The Dataproc NodeGroup resource is not related to + the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, size]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = node_groups.ResizeNodeGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if size is not None: + request.size = size + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.resize_node_group, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + clusters.NodeGroup, + metadata_type=operations.NodeGroupOperationMetadata, + ) + + # Done; return the response. + return response + + async def get_node_group( + self, + request: Optional[Union[node_groups.GetNodeGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.NodeGroup: + r"""Gets the resource representation for a node group in + a cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import dataproc_v1 + + async def sample_get_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerAsyncClient() + + # Initialize request argument(s) + request = dataproc_v1.GetNodeGroupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_node_group(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.dataproc_v1.types.GetNodeGroupRequest, dict]]): + The request object. A request to get a node group . + name (:class:`str`): + Required. The name of the node group to retrieve. + Format: + ``projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.NodeGroup: + Dataproc Node Group. + **The Dataproc NodeGroup resource is not related to + the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = node_groups.GetNodeGroupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_node_group, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("NodeGroupControllerAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/node_group_controller/client.py b/google/cloud/dataproc_v1/services/node_group_controller/client.py new file mode 100644 index 00000000..cd79d212 --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/client.py @@ -0,0 +1,857 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+from collections import OrderedDict
+import os
+import re
+from typing import (
+    Dict,
+    Mapping,
+    MutableMapping,
+    MutableSequence,
+    Optional,
+    Sequence,
+    Tuple,
+    Type,
+    Union,
+    cast,
+)
+
+from google.cloud.dataproc_v1 import gapic_version as package_version
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport import mtls  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+from google.auth.exceptions import MutualTLSChannelError  # type: ignore
+from google.oauth2 import service_account  # type: ignore
+
+try:
+    OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError:  # pragma: NO COVER
+    OptionalRetry = Union[retries.Retry, object]  # type: ignore
+
+from google.api_core import operation  # type: ignore
+from google.api_core import operation_async  # type: ignore
+from google.cloud.dataproc_v1.types import clusters
+from google.cloud.dataproc_v1.types import node_groups
+from google.cloud.dataproc_v1.types import operations
+from .transports.base import NodeGroupControllerTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import NodeGroupControllerGrpcTransport
+from .transports.grpc_asyncio import NodeGroupControllerGrpcAsyncIOTransport
+
+
+class NodeGroupControllerClientMeta(type):
+    """Metaclass for the NodeGroupController client.
+
+    This provides class-level methods for building and retrieving
+    support objects (e.g. transport) without polluting the client instance
+    objects.
+    """
+
+    _transport_registry = (
+        OrderedDict()
+    )  # type: Dict[str, Type[NodeGroupControllerTransport]]
+    _transport_registry["grpc"] = NodeGroupControllerGrpcTransport
+    _transport_registry["grpc_asyncio"] = NodeGroupControllerGrpcAsyncIOTransport
+
+    def get_transport_class(
+        cls,
+        label: Optional[str] = None,
+    ) -> Type[NodeGroupControllerTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class NodeGroupControllerClient(metaclass=NodeGroupControllerClientMeta):
+    """The ``NodeGroupControllerService`` provides methods to manage node
+    groups of Compute Engine managed instances.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts api endpoint to mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "dataproc.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NodeGroupControllerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+            file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            NodeGroupControllerClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> NodeGroupControllerTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            NodeGroupControllerTransport: The transport used by the client
+                instance.
+        """
+        return self._transport
+
+    @staticmethod
+    def node_group_path(
+        project: str,
+        region: str,
+        cluster: str,
+        node_group: str,
+    ) -> str:
+        """Returns a fully-qualified node_group string."""
+        return "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}".format(
+            project=project,
+            region=region,
+            cluster=cluster,
+            node_group=node_group,
+        )
+
+    @staticmethod
+    def parse_node_group_path(path: str) -> Dict[str, str]:
+        """Parses a node_group path into its component segments."""
+        m = re.match(
+            r"^projects/(?P<project>.+?)/regions/(?P<region>.+?)/clusters/(?P<cluster>.+?)/nodeGroups/(?P<node_group>.+?)$",
+            path,
+        )
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_billing_account_path(
+        billing_account: str,
+    ) -> str:
+        """Returns a fully-qualified billing_account string."""
+        return "billingAccounts/{billing_account}".format(
+            billing_account=billing_account,
+        )
+
+    @staticmethod
+    def parse_common_billing_account_path(path: str) -> Dict[str, str]:
+        """Parse a billing_account path into its component segments."""
+        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_folder_path(
+        folder: str,
+    ) -> str:
+        """Returns a fully-qualified folder string."""
+        return "folders/{folder}".format(
+            folder=folder,
+        )
+
+    @staticmethod
+    def parse_common_folder_path(path: str) -> Dict[str, str]:
+        """Parse a folder path into its component segments."""
+        m = re.match(r"^folders/(?P<folder>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_organization_path(
+        organization: str,
+    ) -> str:
+        """Returns a fully-qualified organization string."""
+        return "organizations/{organization}".format(
+            organization=organization,
+        )
+
+    @staticmethod
+    def parse_common_organization_path(path: str) -> Dict[str, str]:
+        """Parse an organization path into its component segments."""
+        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_project_path(
+        project: str,
+    ) -> str:
+        """Returns a fully-qualified project string."""
+        return "projects/{project}".format(
+            project=project,
+        )
+
+    @staticmethod
+    def parse_common_project_path(path: str) -> Dict[str, str]:
+        """Parse a project path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @staticmethod
+    def common_location_path(
+        project: str,
+        location: str,
+    ) -> str:
+        """Returns a fully-qualified location string."""
+        return "projects/{project}/locations/{location}".format(
+            project=project,
+            location=location,
+        )
+
+    @staticmethod
+    def parse_common_location_path(path: str) -> Dict[str, str]:
+        """Parse a location path into its component segments."""
+        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+        return m.groupdict() if m else {}
+
+    @classmethod
+    def get_mtls_endpoint_and_cert_source(
+        cls, client_options: Optional[client_options_lib.ClientOptions] = None
+    ):
+        """Return the API endpoint and client cert source for mutual TLS.
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        if client_options is None:
+            client_options = client_options_lib.ClientOptions()
+        use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
+        use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
+        if use_client_cert not in ("true", "false"):
+            raise ValueError(
+                "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
+            )
+        if use_mtls_endpoint not in ("auto", "never", "always"):
+            raise MutualTLSChannelError(
+                "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
+            )
+
+        # Figure out the client cert source to use.
+        client_cert_source = None
+        if use_client_cert == "true":
+            if client_options.client_cert_source:
+                client_cert_source = client_options.client_cert_source
+            elif mtls.has_default_client_cert_source():
+                client_cert_source = mtls.default_client_cert_source()
+
+        # Figure out which api endpoint to use.
+        if client_options.api_endpoint is not None:
+            api_endpoint = client_options.api_endpoint
+        elif use_mtls_endpoint == "always" or (
+            use_mtls_endpoint == "auto" and client_cert_source
+        ):
+            api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
+        else:
+            api_endpoint = cls.DEFAULT_ENDPOINT
+
+        return api_endpoint, client_cert_source
+
+    def __init__(
+        self,
+        *,
+        credentials: Optional[ga_credentials.Credentials] = None,
+        transport: Optional[Union[str, NodeGroupControllerTransport]] = None,
+        client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None,
+        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+    ) -> None:
+        """Instantiates the node group controller client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, NodeGroupControllerTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the
+                client. It won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client.
GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( + client_options + ) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError( + "client_options.api_key and credentials are mutually exclusive" + ) + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, NodeGroupControllerTransport): + # transport is a NodeGroupControllerTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError( + "When providing a transport instance, " + "provide its credentials directly." + ) + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr( + google.auth._default, "get_api_key_credentials" + ): + credentials = google.auth._default.get_api_key_credentials( + api_key_value + ) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_node_group( + self, + request: Optional[Union[node_groups.CreateNodeGroupRequest, dict]] = None, + *, + parent: Optional[str] = None, + node_group: Optional[clusters.NodeGroup] = None, + node_group_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a node group in a cluster. 
The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import dataproc_v1 + + def sample_create_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerClient() + + # Initialize request argument(s) + node_group = dataproc_v1.NodeGroup() + node_group.roles = ['DRIVER'] + + request = dataproc_v1.CreateNodeGroupRequest( + parent="parent_value", + node_group=node_group, + ) + + # Make the request + operation = client.create_node_group(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.dataproc_v1.types.CreateNodeGroupRequest, dict]): + The request object. A request to create a node group. + parent (str): + Required. The parent resource where this node group will + be created. Format: + ``projects/{project}/regions/{region}/clusters/{cluster}`` + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group (google.cloud.dataproc_v1.types.NodeGroup): + Required. The node group to create. + This corresponds to the ``node_group`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + node_group_id (str): + Optional. An optional node group ID. Generated if not + specified. + + The ID must contain only letters (a-z, A-Z), numbers + (0-9), underscores (_), and hyphens (-). Cannot begin or + end with underscore or hyphen. Must consist of from 3 to + 33 characters. + + This corresponds to the ``node_group_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.NodeGroup` Dataproc Node Group. + **The Dataproc NodeGroup resource is not related to + the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, node_group, node_group_id]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a node_groups.CreateNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
+ if not isinstance(request, node_groups.CreateNodeGroupRequest): + request = node_groups.CreateNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if node_group is not None: + request.node_group = node_group + if node_group_id is not None: + request.node_group_id = node_group_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_node_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.NodeGroup, + metadata_type=operations.NodeGroupOperationMetadata, + ) + + # Done; return the response. + return response + + def resize_node_group( + self, + request: Optional[Union[node_groups.ResizeNodeGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + size: Optional[int] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Resizes a node group in a cluster. The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import dataproc_v1 + + def sample_resize_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerClient() + + # Initialize request argument(s) + request = dataproc_v1.ResizeNodeGroupRequest( + name="name_value", + size=443, + ) + + # Make the request + operation = client.resize_node_group(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.dataproc_v1.types.ResizeNodeGroupRequest, dict]): + The request object. A request to resize a node group. + name (str): + Required. The name of the node group to resize. Format: + ``projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + size (int): + Required. The number of running + instances for the node group to + maintain. The group adds or removes + instances to maintain the number of + instances specified by this parameter. + + This corresponds to the ``size`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.dataproc_v1.types.NodeGroup` Dataproc Node Group. + **The Dataproc NodeGroup resource is not related to + the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, size]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a node_groups.ResizeNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, node_groups.ResizeNodeGroupRequest): + request = node_groups.ResizeNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if size is not None: + request.size = size + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.resize_node_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + clusters.NodeGroup, + metadata_type=operations.NodeGroupOperationMetadata, + ) + + # Done; return the response. + return response + + def get_node_group( + self, + request: Optional[Union[node_groups.GetNodeGroupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> clusters.NodeGroup: + r"""Gets the resource representation for a node group in + a cluster. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import dataproc_v1 + + def sample_get_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerClient() + + # Initialize request argument(s) + request = dataproc_v1.GetNodeGroupRequest( + name="name_value", + ) + + # Make the request + response = client.get_node_group(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.dataproc_v1.types.GetNodeGroupRequest, dict]): + The request object. A request to get a node group . + name (str): + Required. The name of the node group to retrieve. 
+ Format: + ``projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`` + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.dataproc_v1.types.NodeGroup: + Dataproc Node Group. + **The Dataproc NodeGroup resource is not related to + the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a node_groups.GetNodeGroupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, node_groups.GetNodeGroupRequest): + request = node_groups.GetNodeGroupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_node_group] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +__all__ = ("NodeGroupControllerClient",) diff --git a/google/cloud/dataproc_v1/services/node_group_controller/transports/__init__.py b/google/cloud/dataproc_v1/services/node_group_controller/transports/__init__.py new file mode 100644 index 00000000..0803a9a0 --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/transports/__init__.py @@ -0,0 +1,35 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
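A minimal usage sketch of the synchronous client defined above, exercising the create_node_group long-running operation. This is not part of the patch: the parent, endpoint and node_group_id values are placeholders and Application Default Credentials are assumed.

from google.api_core.client_options import ClientOptions
from google.cloud import dataproc_v1

# Hypothetical parent and regional endpoint; replace with a real project,
# region and cluster.
parent = "projects/my-project/regions/us-central1/clusters/my-cluster"
endpoint = "us-central1-dataproc.googleapis.com:443"

# Using the client as a context manager closes its transport on exit; only
# do this when the transport is not shared with other clients.
with dataproc_v1.NodeGroupControllerClient(
    client_options=ClientOptions(api_endpoint=endpoint)
) as client:
    node_group = dataproc_v1.NodeGroup()
    node_group.roles = ["DRIVER"]

    # create_node_group returns a google.api_core.operation.Operation;
    # result() blocks until the server-side operation completes.
    operation = client.create_node_group(
        parent=parent,
        node_group=node_group,
        node_group_id="driver-pool-0",
    )
    created = operation.result()
    print(created.name)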
+# +from collections import OrderedDict +from typing import Dict, Type + +from .base import NodeGroupControllerTransport +from .grpc import NodeGroupControllerGrpcTransport +from .grpc_asyncio import NodeGroupControllerGrpcAsyncIOTransport + + +# Compile a registry of transports. +_transport_registry = ( + OrderedDict() +) # type: Dict[str, Type[NodeGroupControllerTransport]] +_transport_registry["grpc"] = NodeGroupControllerGrpcTransport +_transport_registry["grpc_asyncio"] = NodeGroupControllerGrpcAsyncIOTransport + +__all__ = ( + "NodeGroupControllerTransport", + "NodeGroupControllerGrpcTransport", + "NodeGroupControllerGrpcAsyncIOTransport", +) diff --git a/google/cloud/dataproc_v1/services/node_group_controller/transports/base.py b/google/cloud/dataproc_v1/services/node_group_controller/transports/base.py new file mode 100644 index 00000000..ae2cf6df --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/transports/base.py @@ -0,0 +1,192 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.dataproc_v1 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import node_groups +from google.longrunning import operations_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) + + +class NodeGroupControllerTransport(abc.ABC): + """Abstract transport class for NodeGroupController.""" + + AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",) + + DEFAULT_HOST: str = "dataproc.googleapis.com" + + def __init__( + self, + *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. 
+ credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs( + "'credentials_file' and 'credentials' are mutually exclusive" + ) + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, **scopes_kwargs, quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default( + **scopes_kwargs, quota_project_id=quota_project_id + ) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience( + api_audience if api_audience else host + ) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if ( + always_use_jwt_access + and isinstance(credentials, service_account.Credentials) + and hasattr(service_account.Credentials, "with_always_use_jwt_access") + ): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ":" not in host: + host += ":443" + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.create_node_group: gapic_v1.method.wrap_method( + self.create_node_group, + default_timeout=None, + client_info=client_info, + ), + self.resize_node_group: gapic_v1.method.wrap_method( + self.resize_node_group, + default_timeout=None, + client_info=client_info, + ), + self.get_node_group: gapic_v1.method.wrap_method( + self.get_node_group, + default_timeout=None, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_node_group( + self, + ) -> Callable[ + [node_groups.CreateNodeGroupRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def resize_node_group( + self, + ) -> Callable[ + [node_groups.ResizeNodeGroupRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + + @property + def get_node_group( + self, + ) -> Callable[ + [node_groups.GetNodeGroupRequest], + Union[clusters.NodeGroup, Awaitable[clusters.NodeGroup]], + ]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ("NodeGroupControllerTransport",) diff --git a/google/cloud/dataproc_v1/services/node_group_controller/transports/grpc.py b/google/cloud/dataproc_v1/services/node_group_controller/transports/grpc.py new file mode 100644 index 00000000..3db942c3 --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/transports/grpc.py @@ -0,0 +1,342 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import node_groups +from google.longrunning import operations_pb2 # type: ignore +from .base import NodeGroupControllerTransport, DEFAULT_CLIENT_INFO + + +class NodeGroupControllerGrpcTransport(NodeGroupControllerTransport): + """gRPC backend transport for NodeGroupController. + + The ``NodeGroupControllerService`` provides methods to manage node + groups of Compute Engine managed instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
+ """ + + _stubs: Dict[str, Callable] + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. 
+ + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service.""" + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + + # Return the client from cache. + return self._operations_client + + @property + def create_node_group( + self, + ) -> Callable[[node_groups.CreateNodeGroupRequest], operations_pb2.Operation]: + r"""Return a callable for the create node group method over gRPC. + + Creates a node group in a cluster. The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + Returns: + Callable[[~.CreateNodeGroupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_node_group" not in self._stubs: + self._stubs["create_node_group"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.NodeGroupController/CreateNodeGroup", + request_serializer=node_groups.CreateNodeGroupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_node_group"] + + @property + def resize_node_group( + self, + ) -> Callable[[node_groups.ResizeNodeGroupRequest], operations_pb2.Operation]: + r"""Return a callable for the resize node group method over gRPC. + + Resizes a node group in a cluster. The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + Returns: + Callable[[~.ResizeNodeGroupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "resize_node_group" not in self._stubs: + self._stubs["resize_node_group"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.NodeGroupController/ResizeNodeGroup", + request_serializer=node_groups.ResizeNodeGroupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["resize_node_group"] + + @property + def get_node_group( + self, + ) -> Callable[[node_groups.GetNodeGroupRequest], clusters.NodeGroup]: + r"""Return a callable for the get node group method over gRPC. + + Gets the resource representation for a node group in + a cluster. + + Returns: + Callable[[~.GetNodeGroupRequest], + ~.NodeGroup]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "get_node_group" not in self._stubs: + self._stubs["get_node_group"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.NodeGroupController/GetNodeGroup", + request_serializer=node_groups.GetNodeGroupRequest.serialize, + response_deserializer=clusters.NodeGroup.deserialize, + ) + return self._stubs["get_node_group"] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ("NodeGroupControllerGrpcTransport",) diff --git a/google/cloud/dataproc_v1/services/node_group_controller/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/node_group_controller/transports/grpc_asyncio.py new file mode 100644 index 00000000..da94f442 --- /dev/null +++ b/google/cloud/dataproc_v1/services/node_group_controller/transports/grpc_asyncio.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import node_groups +from google.longrunning import operations_pb2 # type: ignore +from .base import NodeGroupControllerTransport, DEFAULT_CLIENT_INFO +from .grpc import NodeGroupControllerGrpcTransport + + +class NodeGroupControllerGrpcAsyncIOTransport(NodeGroupControllerTransport): + """gRPC AsyncIO backend transport for NodeGroupController. + + The ``NodeGroupControllerService`` provides methods to manage node + groups of Compute Engine managed instances. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel( + cls, + host: str = "dataproc.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs, + ) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. 
These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + aio.Channel: A gRPC AsyncIO channel object. + """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs, + ) + + def __init__( + self, + *, + host: str = "dataproc.googleapis.com", + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. 
It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. 
+ """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_node_group( + self, + ) -> Callable[ + [node_groups.CreateNodeGroupRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the create node group method over gRPC. + + Creates a node group in a cluster. The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + Returns: + Callable[[~.CreateNodeGroupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "create_node_group" not in self._stubs: + self._stubs["create_node_group"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.NodeGroupController/CreateNodeGroup", + request_serializer=node_groups.CreateNodeGroupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["create_node_group"] + + @property + def resize_node_group( + self, + ) -> Callable[ + [node_groups.ResizeNodeGroupRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the resize node group method over gRPC. + + Resizes a node group in a cluster. The returned + [Operation.metadata][google.longrunning.Operation.metadata] is + `NodeGroupOperationMetadata `__. + + Returns: + Callable[[~.ResizeNodeGroupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "resize_node_group" not in self._stubs: + self._stubs["resize_node_group"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.NodeGroupController/ResizeNodeGroup", + request_serializer=node_groups.ResizeNodeGroupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["resize_node_group"] + + @property + def get_node_group( + self, + ) -> Callable[[node_groups.GetNodeGroupRequest], Awaitable[clusters.NodeGroup]]: + r"""Return a callable for the get node group method over gRPC. + + Gets the resource representation for a node group in + a cluster. + + Returns: + Callable[[~.GetNodeGroupRequest], + Awaitable[~.NodeGroup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "get_node_group" not in self._stubs: + self._stubs["get_node_group"] = self.grpc_channel.unary_unary( + "/google.cloud.dataproc.v1.NodeGroupController/GetNodeGroup", + request_serializer=node_groups.GetNodeGroupRequest.serialize, + response_deserializer=clusters.NodeGroup.deserialize, + ) + return self._stubs["get_node_group"] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ("NodeGroupControllerGrpcAsyncIOTransport",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py index 4b264eba..bc45b2e1 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/async_client.py @@ -16,8 +16,19 @@ from collections import OrderedDict import functools import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions @@ -53,6 +64,10 @@ class WorkflowTemplateServiceAsyncClient: DEFAULT_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = WorkflowTemplateServiceClient.DEFAULT_MTLS_ENDPOINT + node_group_path = staticmethod(WorkflowTemplateServiceClient.node_group_path) + parse_node_group_path = staticmethod( + WorkflowTemplateServiceClient.parse_node_group_path + ) service_path = staticmethod(WorkflowTemplateServiceClient.service_path) parse_service_path = staticmethod(WorkflowTemplateServiceClient.parse_service_path) workflow_template_path = staticmethod( @@ -176,9 +191,9 @@ def transport(self) -> WorkflowTemplateServiceTransport: def __init__( self, *, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, transport: Union[str, WorkflowTemplateServiceTransport] = "grpc_asyncio", - client_options: ClientOptions = None, + client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the workflow template service client. @@ -222,12 +237,14 @@ def __init__( async def create_workflow_template( self, - request: Union[workflow_templates.CreateWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.CreateWorkflowTemplateRequest, dict] + ] = None, *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, + parent: Optional[str] = None, + template: Optional[workflow_templates.WorkflowTemplate] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> workflow_templates.WorkflowTemplate: r"""Creates new workflow template. @@ -266,7 +283,7 @@ async def sample_create_workflow_template(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.CreateWorkflowTemplateRequest, dict]]): The request object. A request to create a workflow template. 
parent (:class:`str`): @@ -360,11 +377,13 @@ async def sample_create_workflow_template(): async def get_workflow_template( self, - request: Union[workflow_templates.GetWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.GetWorkflowTemplateRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> workflow_templates.WorkflowTemplate: r"""Retrieves the latest workflow template. @@ -398,7 +417,7 @@ async def sample_get_workflow_template(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.GetWorkflowTemplateRequest, dict]]): The request object. A request to fetch a workflow template. name (:class:`str`): @@ -486,14 +505,14 @@ async def sample_get_workflow_template(): async def instantiate_workflow_template( self, - request: Union[ - workflow_templates.InstantiateWorkflowTemplateRequest, dict + request: Optional[ + Union[workflow_templates.InstantiateWorkflowTemplateRequest, dict] ] = None, *, - name: str = None, - parameters: Mapping[str, str] = None, + name: Optional[str] = None, + parameters: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Instantiates a template and begins execution. @@ -543,13 +562,13 @@ async def sample_instantiate_workflow_template(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.InstantiateWorkflowTemplateRequest, dict]]): The request object. A request to instantiate a workflow template. name (:class:`str`): @@ -572,7 +591,7 @@ async def sample_instantiate_workflow_template(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (:class:`Mapping[str, str]`): + parameters (:class:`MutableMapping[str, str]`): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 @@ -667,14 +686,14 @@ async def sample_instantiate_workflow_template(): async def instantiate_inline_workflow_template( self, - request: Union[ - workflow_templates.InstantiateInlineWorkflowTemplateRequest, dict + request: Optional[ + Union[workflow_templates.InstantiateInlineWorkflowTemplateRequest, dict] ] = None, *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, + parent: Optional[str] = None, + template: Optional[workflow_templates.WorkflowTemplate] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: r"""Instantiates a template and begins execution. 
@@ -736,13 +755,13 @@ async def sample_instantiate_inline_workflow_template(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) Args: - request (Union[google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.InstantiateInlineWorkflowTemplateRequest, dict]]): The request object. A request to instantiate an inline workflow template. parent (:class:`str`): @@ -856,11 +875,13 @@ async def sample_instantiate_inline_workflow_template(): async def update_workflow_template( self, - request: Union[workflow_templates.UpdateWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.UpdateWorkflowTemplateRequest, dict] + ] = None, *, - template: workflow_templates.WorkflowTemplate = None, + template: Optional[workflow_templates.WorkflowTemplate] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> workflow_templates.WorkflowTemplate: r"""Updates (replaces) workflow template. The updated @@ -900,7 +921,7 @@ async def sample_update_workflow_template(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.UpdateWorkflowTemplateRequest, dict]]): The request object. A request to update a workflow template. template (:class:`google.cloud.dataproc_v1.types.WorkflowTemplate`): @@ -979,11 +1000,13 @@ async def sample_update_workflow_template(): async def list_workflow_templates( self, - request: Union[workflow_templates.ListWorkflowTemplatesRequest, dict] = None, + request: Optional[ + Union[workflow_templates.ListWorkflowTemplatesRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListWorkflowTemplatesAsyncPager: r"""Lists workflows that match the specified filter in @@ -1017,7 +1040,7 @@ async def sample_list_workflow_templates(): print(response) Args: - request (Union[google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.ListWorkflowTemplatesRequest, dict]]): The request object. A request to list workflow templates in a project. parent (:class:`str`): @@ -1116,11 +1139,13 @@ async def sample_list_workflow_templates(): async def delete_workflow_template( self, - request: Union[workflow_templates.DeleteWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.DeleteWorkflowTemplateRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a workflow template. It does not cancel @@ -1150,7 +1175,7 @@ async def sample_delete_workflow_template(): await client.delete_workflow_template(request=request) Args: - request (Union[google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest, dict]): + request (Optional[Union[google.cloud.dataproc_v1.types.DeleteWorkflowTemplateRequest, dict]]): The request object. 
A request to delete a workflow template. Currently started workflows will remain running. @@ -1234,14 +1259,9 @@ async def __aexit__(self, exc_type, exc, tb): await self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("WorkflowTemplateServiceAsyncClient",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/client.py b/google/cloud/dataproc_v1/services/workflow_template_service/client.py index 6add50bb..5d34ed42 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/client.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/client.py @@ -16,8 +16,20 @@ from collections import OrderedDict import os import re -from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union -import pkg_resources +from typing import ( + Dict, + Mapping, + MutableMapping, + MutableSequence, + Optional, + Sequence, + Tuple, + Type, + Union, + cast, +) + +from google.cloud.dataproc_v1 import gapic_version as package_version from google.api_core import client_options as client_options_lib from google.api_core import exceptions as core_exceptions @@ -62,7 +74,7 @@ class WorkflowTemplateServiceClientMeta(type): def get_transport_class( cls, - label: str = None, + label: Optional[str] = None, ) -> Type[WorkflowTemplateServiceTransport]: """Returns an appropriate transport class. @@ -169,6 +181,30 @@ def transport(self) -> WorkflowTemplateServiceTransport: """ return self._transport + @staticmethod + def node_group_path( + project: str, + region: str, + cluster: str, + node_group: str, + ) -> str: + """Returns a fully-qualified node_group string.""" + return "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}".format( + project=project, + region=region, + cluster=cluster, + node_group=node_group, + ) + + @staticmethod + def parse_node_group_path(path: str) -> Dict[str, str]: + """Parses a node_group path into its component segments.""" + m = re.match( + r"^projects/(?P.+?)/regions/(?P.+?)/clusters/(?P.+?)/nodeGroups/(?P.+?)$", + path, + ) + return m.groupdict() if m else {} + @staticmethod def service_path( project: str, @@ -361,8 +397,8 @@ def __init__( self, *, credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, WorkflowTemplateServiceTransport, None] = None, - client_options: Optional[client_options_lib.ClientOptions] = None, + transport: Optional[Union[str, WorkflowTemplateServiceTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: """Instantiates the workflow template service client. @@ -376,7 +412,7 @@ def __init__( transport (Union[str, WorkflowTemplateServiceTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (google.api_core.client_options.ClientOptions): Custom options for the + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the client. It won't take effect if a ``transport`` instance is provided. (1) The ``api_endpoint`` property can be used to override the default endpoint provided by the client. 
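The ``node_group_path`` and ``parse_node_group_path`` helpers added above compose and decompose the fully qualified node group resource name used by these requests. A quick sketch with hypothetical IDs:

    from google.cloud import dataproc_v1

    path = dataproc_v1.WorkflowTemplateServiceClient.node_group_path(
        project="my-project",
        region="us-central1",
        cluster="my-cluster",
        node_group="my-node-group",
    )
    # "projects/my-project/regions/us-central1/clusters/my-cluster/nodeGroups/my-node-group"

    segments = dataproc_v1.WorkflowTemplateServiceClient.parse_node_group_path(path)
    # {"project": "my-project", "region": "us-central1",
    #  "cluster": "my-cluster", "node_group": "my-node-group"}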
GOOGLE_API_USE_MTLS_ENDPOINT @@ -406,6 +442,7 @@ def __init__( client_options = client_options_lib.from_dict(client_options) if client_options is None: client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( client_options @@ -458,12 +495,14 @@ def __init__( def create_workflow_template( self, - request: Union[workflow_templates.CreateWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.CreateWorkflowTemplateRequest, dict] + ] = None, *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, + parent: Optional[str] = None, + template: Optional[workflow_templates.WorkflowTemplate] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> workflow_templates.WorkflowTemplate: r"""Creates new workflow template. @@ -587,11 +626,13 @@ def sample_create_workflow_template(): def get_workflow_template( self, - request: Union[workflow_templates.GetWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.GetWorkflowTemplateRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> workflow_templates.WorkflowTemplate: r"""Retrieves the latest workflow template. @@ -702,14 +743,14 @@ def sample_get_workflow_template(): def instantiate_workflow_template( self, - request: Union[ - workflow_templates.InstantiateWorkflowTemplateRequest, dict + request: Optional[ + Union[workflow_templates.InstantiateWorkflowTemplateRequest, dict] ] = None, *, - name: str = None, - parameters: Mapping[str, str] = None, + name: Optional[str] = None, + parameters: Optional[MutableMapping[str, str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Instantiates a template and begins execution. @@ -788,7 +829,7 @@ def sample_instantiate_workflow_template(): This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - parameters (Mapping[str, str]): + parameters (MutableMapping[str, str]): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 @@ -877,14 +918,14 @@ def sample_instantiate_workflow_template(): def instantiate_inline_workflow_template( self, - request: Union[ - workflow_templates.InstantiateInlineWorkflowTemplateRequest, dict + request: Optional[ + Union[workflow_templates.InstantiateInlineWorkflowTemplateRequest, dict] ] = None, *, - parent: str = None, - template: workflow_templates.WorkflowTemplate = None, + parent: Optional[str] = None, + template: Optional[workflow_templates.WorkflowTemplate] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: r"""Instantiates a template and begins execution. 
@@ -1063,11 +1104,13 @@ def sample_instantiate_inline_workflow_template(): def update_workflow_template( self, - request: Union[workflow_templates.UpdateWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.UpdateWorkflowTemplateRequest, dict] + ] = None, *, - template: workflow_templates.WorkflowTemplate = None, + template: Optional[workflow_templates.WorkflowTemplate] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> workflow_templates.WorkflowTemplate: r"""Updates (replaces) workflow template. The updated @@ -1177,11 +1220,13 @@ def sample_update_workflow_template(): def list_workflow_templates( self, - request: Union[workflow_templates.ListWorkflowTemplatesRequest, dict] = None, + request: Optional[ + Union[workflow_templates.ListWorkflowTemplatesRequest, dict] + ] = None, *, - parent: str = None, + parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> pagers.ListWorkflowTemplatesPager: r"""Lists workflows that match the specified filter in @@ -1303,11 +1348,13 @@ def sample_list_workflow_templates(): def delete_workflow_template( self, - request: Union[workflow_templates.DeleteWorkflowTemplateRequest, dict] = None, + request: Optional[ + Union[workflow_templates.DeleteWorkflowTemplateRequest, dict] + ] = None, *, - name: str = None, + name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: float = None, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Deletes a workflow template. 
It does not cancel @@ -1419,14 +1466,9 @@ def __exit__(self, type, value, traceback): self.transport.close() -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) __all__ = ("WorkflowTemplateServiceClient",) diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py index 779f15b6..f59a1ada 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/base.py @@ -15,7 +15,8 @@ # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union -import pkg_resources + +from google.cloud.dataproc_v1 import gapic_version as package_version import google.auth # type: ignore import google.api_core @@ -30,14 +31,9 @@ from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore -try: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=pkg_resources.get_distribution( - "google-cloud-dataproc", - ).version, - ) -except pkg_resources.DistributionNotFound: - DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=package_version.__version__ +) class WorkflowTemplateServiceTransport(abc.ABC): @@ -51,7 +47,7 @@ def __init__( self, *, host: str = DEFAULT_HOST, - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py index 758128b8..eba1616f 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc.py @@ -51,14 +51,14 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, - credentials_file: str = None, - scopes: Sequence[str] = None, - channel: grpc.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, @@ -186,8 +186,8 @@ def __init__( def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: 
ga_credentials.Credentials = None, - credentials_file: str = None, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs, diff --git a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py index 686b55a8..8ec0fd4d 100644 --- a/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py +++ b/google/cloud/dataproc_v1/services/workflow_template_service/transports/grpc_asyncio.py @@ -53,7 +53,7 @@ class WorkflowTemplateServiceGrpcAsyncIOTransport(WorkflowTemplateServiceTranspo def create_channel( cls, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, @@ -96,15 +96,15 @@ def __init__( self, *, host: str = "dataproc.googleapis.com", - credentials: ga_credentials.Credentials = None, + credentials: Optional[ga_credentials.Credentials] = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, - channel: aio.Channel = None, - api_mtls_endpoint: str = None, - client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, - ssl_channel_credentials: grpc.ChannelCredentials = None, - client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, - quota_project_id=None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, api_audience: Optional[str] = None, diff --git a/google/cloud/dataproc_v1/types/__init__.py b/google/cloud/dataproc_v1/types/__init__.py index 2b3c58ff..48be150d 100644 --- a/google/cloud/dataproc_v1/types/__init__.py +++ b/google/cloud/dataproc_v1/types/__init__.py @@ -40,6 +40,7 @@ from .clusters import ( AcceleratorConfig, AutoscalingConfig, + AuxiliaryNodeGroup, AuxiliaryServicesConfig, Cluster, ClusterConfig, @@ -64,6 +65,7 @@ ListClustersResponse, ManagedGroupConfig, MetastoreConfig, + NodeGroup, NodeGroupAffinity, NodeInitializationAction, ReservationAffinity, @@ -78,6 +80,7 @@ from .jobs import ( CancelJobRequest, DeleteJobRequest, + DriverSchedulingConfig, GetJobRequest, HadoopJob, HiveJob, @@ -101,10 +104,16 @@ UpdateJobRequest, YarnApplication, ) +from .node_groups import ( + CreateNodeGroupRequest, + GetNodeGroupRequest, + ResizeNodeGroupRequest, +) from .operations import ( BatchOperationMetadata, ClusterOperationMetadata, ClusterOperationStatus, + NodeGroupOperationMetadata, ) from .shared import ( EnvironmentConfig, @@ -168,6 +177,7 @@ "SparkSqlBatch", "AcceleratorConfig", "AutoscalingConfig", + "AuxiliaryNodeGroup", "AuxiliaryServicesConfig", "Cluster", "ClusterConfig", @@ -192,6 +202,7 @@ "ListClustersResponse", "ManagedGroupConfig", "MetastoreConfig", + "NodeGroup", "NodeGroupAffinity", "NodeInitializationAction", "ReservationAffinity", @@ -204,6 +215,7 @@ "VirtualClusterConfig", "CancelJobRequest", "DeleteJobRequest", + 
"DriverSchedulingConfig", "GetJobRequest", "HadoopJob", "HiveJob", @@ -226,9 +238,13 @@ "SubmitJobRequest", "UpdateJobRequest", "YarnApplication", + "CreateNodeGroupRequest", + "GetNodeGroupRequest", + "ResizeNodeGroupRequest", "BatchOperationMetadata", "ClusterOperationMetadata", "ClusterOperationStatus", + "NodeGroupOperationMetadata", "EnvironmentConfig", "ExecutionConfig", "GkeClusterConfig", diff --git a/google/cloud/dataproc_v1/types/autoscaling_policies.py b/google/cloud/dataproc_v1/types/autoscaling_policies.py index 56e4c942..7eae0b8f 100644 --- a/google/cloud/dataproc_v1/types/autoscaling_policies.py +++ b/google/cloud/dataproc_v1/types/autoscaling_policies.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import duration_pb2 # type: ignore @@ -71,7 +73,7 @@ class AutoscalingPolicy(proto.Message): secondary_worker_config (google.cloud.dataproc_v1.types.InstanceGroupAutoscalingPolicyConfig): Optional. Describes how the autoscaler will operate for secondary workers. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Optional. The labels to associate with this autoscaling policy. Label **keys** must contain 1 to 63 characters, and must conform to `RFC @@ -82,31 +84,31 @@ class AutoscalingPolicy(proto.Message): 32 labels can be associated with an autoscaling policy. """ - id = proto.Field( + id: str = proto.Field( proto.STRING, number=1, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=2, ) - basic_algorithm = proto.Field( + basic_algorithm: "BasicAutoscalingAlgorithm" = proto.Field( proto.MESSAGE, number=3, oneof="algorithm", message="BasicAutoscalingAlgorithm", ) - worker_config = proto.Field( + worker_config: "InstanceGroupAutoscalingPolicyConfig" = proto.Field( proto.MESSAGE, number=4, message="InstanceGroupAutoscalingPolicyConfig", ) - secondary_worker_config = proto.Field( + secondary_worker_config: "InstanceGroupAutoscalingPolicyConfig" = proto.Field( proto.MESSAGE, number=5, message="InstanceGroupAutoscalingPolicyConfig", ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=6, @@ -131,13 +133,13 @@ class BasicAutoscalingAlgorithm(proto.Message): Bounds: [2m, 1d]. Default: 2m. """ - yarn_config = proto.Field( + yarn_config: "BasicYarnAutoscalingConfig" = proto.Field( proto.MESSAGE, number=1, oneof="config", message="BasicYarnAutoscalingConfig", ) - cooldown_period = proto.Field( + cooldown_period: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=2, message=duration_pb2.Duration, @@ -200,24 +202,24 @@ class BasicYarnAutoscalingConfig(proto.Message): Bounds: [0.0, 1.0]. Default: 0.0. 
""" - graceful_decommission_timeout = proto.Field( + graceful_decommission_timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=5, message=duration_pb2.Duration, ) - scale_up_factor = proto.Field( + scale_up_factor: float = proto.Field( proto.DOUBLE, number=1, ) - scale_down_factor = proto.Field( + scale_down_factor: float = proto.Field( proto.DOUBLE, number=2, ) - scale_up_min_worker_fraction = proto.Field( + scale_up_min_worker_fraction: float = proto.Field( proto.DOUBLE, number=3, ) - scale_down_min_worker_fraction = proto.Field( + scale_down_min_worker_fraction: float = proto.Field( proto.DOUBLE, number=4, ) @@ -265,15 +267,15 @@ class InstanceGroupAutoscalingPolicyConfig(proto.Message): only and no secondary workers. """ - min_instances = proto.Field( + min_instances: int = proto.Field( proto.INT32, number=1, ) - max_instances = proto.Field( + max_instances: int = proto.Field( proto.INT32, number=2, ) - weight = proto.Field( + weight: int = proto.Field( proto.INT32, number=3, ) @@ -299,11 +301,11 @@ class CreateAutoscalingPolicyRequest(proto.Message): Required. The autoscaling policy to create. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - policy = proto.Field( + policy: "AutoscalingPolicy" = proto.Field( proto.MESSAGE, number=2, message="AutoscalingPolicy", @@ -328,7 +330,7 @@ class GetAutoscalingPolicyRequest(proto.Message): ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -342,7 +344,7 @@ class UpdateAutoscalingPolicyRequest(proto.Message): Required. The updated autoscaling policy. """ - policy = proto.Field( + policy: "AutoscalingPolicy" = proto.Field( proto.MESSAGE, number=1, message="AutoscalingPolicy", @@ -369,7 +371,7 @@ class DeleteAutoscalingPolicyRequest(proto.Message): ``projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`` """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -401,15 +403,15 @@ class ListAutoscalingPoliciesRequest(proto.Message): results. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=2, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) @@ -420,7 +422,7 @@ class ListAutoscalingPoliciesResponse(proto.Message): project. Attributes: - policies (Sequence[google.cloud.dataproc_v1.types.AutoscalingPolicy]): + policies (MutableSequence[google.cloud.dataproc_v1.types.AutoscalingPolicy]): Output only. Autoscaling policies list. next_page_token (str): Output only. This token is included in the @@ -431,12 +433,12 @@ class ListAutoscalingPoliciesResponse(proto.Message): def raw_page(self): return self - policies = proto.RepeatedField( + policies: MutableSequence["AutoscalingPolicy"] = proto.RepeatedField( proto.MESSAGE, number=1, message="AutoscalingPolicy", ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) diff --git a/google/cloud/dataproc_v1/types/batches.py b/google/cloud/dataproc_v1/types/batches.py index e014bff1..1aa48877 100644 --- a/google/cloud/dataproc_v1/types/batches.py +++ b/google/cloud/dataproc_v1/types/batches.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.dataproc_v1.types import shared @@ -67,20 +69,20 @@ class CreateBatchRequest(proto.Message): is 40 characters. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - batch = proto.Field( + batch: "Batch" = proto.Field( proto.MESSAGE, number=2, message="Batch", ) - batch_id = proto.Field( + batch_id: str = proto.Field( proto.STRING, number=3, ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=4, ) @@ -95,7 +97,7 @@ class GetBatchRequest(proto.Message): Required. The name of the batch to retrieve. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -119,15 +121,15 @@ class ListBatchesRequest(proto.Message): subsequent page. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=2, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) @@ -137,7 +139,7 @@ class ListBatchesResponse(proto.Message): r"""A list of batch workloads. Attributes: - batches (Sequence[google.cloud.dataproc_v1.types.Batch]): + batches (MutableSequence[google.cloud.dataproc_v1.types.Batch]): The batches from the specified collection. next_page_token (str): A token, which can be sent as ``page_token`` to retrieve the @@ -149,12 +151,12 @@ class ListBatchesResponse(proto.Message): def raw_page(self): return self - batches = proto.RepeatedField( + batches: MutableSequence["Batch"] = proto.RepeatedField( proto.MESSAGE, number=1, message="Batch", ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) @@ -169,7 +171,7 @@ class DeleteBatchRequest(proto.Message): delete. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) @@ -225,7 +227,7 @@ class Batch(proto.Message): creator (str): Output only. The email address of the user who created the batch. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Optional. The labels to associate with this batch. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -242,7 +244,7 @@ class Batch(proto.Message): operation (str): Output only. The resource name of the operation associated with this batch. - state_history (Sequence[google.cloud.dataproc_v1.types.Batch.StateHistory]): + state_history (MutableSequence[google.cloud.dataproc_v1.types.Batch.StateHistory]): Output only. Historical state information for the batch. """ @@ -272,101 +274,101 @@ class StateHistory(proto.Message): the historical state. 
""" - state = proto.Field( + state: "Batch.State" = proto.Field( proto.ENUM, number=1, enum="Batch.State", ) - state_message = proto.Field( + state_message: str = proto.Field( proto.STRING, number=2, ) - state_start_time = proto.Field( + state_start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - uuid = proto.Field( + uuid: str = proto.Field( proto.STRING, number=2, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - pyspark_batch = proto.Field( + pyspark_batch: "PySparkBatch" = proto.Field( proto.MESSAGE, number=4, oneof="batch_config", message="PySparkBatch", ) - spark_batch = proto.Field( + spark_batch: "SparkBatch" = proto.Field( proto.MESSAGE, number=5, oneof="batch_config", message="SparkBatch", ) - spark_r_batch = proto.Field( + spark_r_batch: "SparkRBatch" = proto.Field( proto.MESSAGE, number=6, oneof="batch_config", message="SparkRBatch", ) - spark_sql_batch = proto.Field( + spark_sql_batch: "SparkSqlBatch" = proto.Field( proto.MESSAGE, number=7, oneof="batch_config", message="SparkSqlBatch", ) - runtime_info = proto.Field( + runtime_info: shared.RuntimeInfo = proto.Field( proto.MESSAGE, number=8, message=shared.RuntimeInfo, ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=9, enum=State, ) - state_message = proto.Field( + state_message: str = proto.Field( proto.STRING, number=10, ) - state_time = proto.Field( + state_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=11, message=timestamp_pb2.Timestamp, ) - creator = proto.Field( + creator: str = proto.Field( proto.STRING, number=12, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=13, ) - runtime_config = proto.Field( + runtime_config: shared.RuntimeConfig = proto.Field( proto.MESSAGE, number=14, message=shared.RuntimeConfig, ) - environment_config = proto.Field( + environment_config: shared.EnvironmentConfig = proto.Field( proto.MESSAGE, number=15, message=shared.EnvironmentConfig, ) - operation = proto.Field( + operation: str = proto.Field( proto.STRING, number=16, ) - state_history = proto.RepeatedField( + state_history: MutableSequence[StateHistory] = proto.RepeatedField( proto.MESSAGE, number=17, message=StateHistory, @@ -383,48 +385,48 @@ class PySparkBatch(proto.Message): Required. The HCFS URI of the main Python file to use as the Spark driver. Must be a .py file. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as ``--conf``, since a collision can occur that causes an incorrect batch submission. - python_file_uris (Sequence[str]): + python_file_uris (MutableSequence[str]): Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: ``.py``, ``.egg``, and ``.zip``. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. HCFS URIs of files to be placed in the working directory of each executor. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted into the working directory of each executor. 
Supported file types: ``.jar``, ``.tar``, ``.tar.gz``, ``.tgz``, and ``.zip``. """ - main_python_file_uri = proto.Field( + main_python_file_uri: str = proto.Field( proto.STRING, number=1, ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - python_file_uris = proto.RepeatedField( + python_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) @@ -453,46 +455,46 @@ class SparkBatch(proto.Message): specified in ``jar_file_uris``. This field is a member of `oneof`_ ``driver``. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the driver. Do not include arguments that can be set as batch properties, such as ``--conf``, since a collision can occur that causes an incorrect batch submission. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to add to the classpath of the Spark driver and tasks. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. HCFS URIs of files to be placed in the working directory of each executor. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: ``.jar``, ``.tar``, ``.tar.gz``, ``.tgz``, and ``.zip``. """ - main_jar_file_uri = proto.Field( + main_jar_file_uri: str = proto.Field( proto.STRING, number=1, oneof="driver", ) - main_class = proto.Field( + main_class: str = proto.Field( proto.STRING, number=2, oneof="driver", ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) @@ -507,33 +509,33 @@ class SparkRBatch(proto.Message): main_r_file_uri (str): Required. The HCFS URI of the main R file to use as the driver. Must be a ``.R`` or ``.r`` file. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the Spark driver. Do not include arguments that can be set as batch properties, such as ``--conf``, since a collision can occur that causes an incorrect batch submission. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. HCFS URIs of files to be placed in the working directory of each executor. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: ``.jar``, ``.tar``, ``.tar.gz``, ``.tgz``, and ``.zip``. 
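The batch messages above are typically assembled into a ``CreateBatchRequest``. A minimal sketch, assuming hypothetical project, location, and Cloud Storage URIs:

    from google.cloud import dataproc_v1

    request = dataproc_v1.CreateBatchRequest(
        parent="projects/my-project/locations/us-central1",
        batch=dataproc_v1.Batch(
            pyspark_batch=dataproc_v1.PySparkBatch(
                main_python_file_uri="gs://my-bucket/jobs/wordcount.py",
                args=["gs://my-bucket/input", "gs://my-bucket/output"],
            ),
        ),
        batch_id="wordcount-batch",  # hypothetical batch ID
    )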
""" - main_r_file_uri = proto.Field( + main_r_file_uri: str = proto.Field( proto.STRING, number=1, ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) @@ -547,25 +549,25 @@ class SparkSqlBatch(proto.Message): query_file_uri (str): Required. The HCFS URI of the script that contains Spark SQL queries to execute. - query_variables (Mapping[str, str]): + query_variables (MutableMapping[str, str]): Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: ``SET name="value";``). - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. """ - query_file_uri = proto.Field( + query_file_uri: str = proto.Field( proto.STRING, number=1, ) - query_variables = proto.MapField( + query_variables: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=2, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) diff --git a/google/cloud/dataproc_v1/types/clusters.py b/google/cloud/dataproc_v1/types/clusters.py index b1b0debf..070a255e 100644 --- a/google/cloud/dataproc_v1/types/clusters.py +++ b/google/cloud/dataproc_v1/types/clusters.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.dataproc_v1.types import shared @@ -39,6 +41,8 @@ "ManagedGroupConfig", "AcceleratorConfig", "DiskConfig", + "AuxiliaryNodeGroup", + "NodeGroup", "NodeInitializationAction", "ClusterStatus", "SecurityConfig", @@ -73,24 +77,29 @@ class Cluster(proto.Message): Required. The Google Cloud Platform project ID that the cluster belongs to. cluster_name (str): - Required. The cluster name. Cluster names - within a project must be unique. Names of - deleted clusters can be reused. + Required. The cluster name, which must be + unique within a project. The name must start + with a lowercase letter, and can contain up to + 51 lowercase letters, numbers, and hyphens. It + cannot end with a hyphen. The name of a deleted + cluster can be reused. config (google.cloud.dataproc_v1.types.ClusterConfig): Optional. The cluster config for a cluster of Compute Engine Instances. Note that Dataproc may set default values, and values may change when clusters are updated. virtual_cluster_config (google.cloud.dataproc_v1.types.VirtualClusterConfig): - Optional. The virtual cluster config, used when creating a + Optional. The virtual cluster config is used when creating a Dataproc cluster that does not directly control the underlying compute resources, for example, when creating a `Dataproc-on-GKE - cluster `__. - Note that Dataproc may set default values, and values may - change when clusters are updated. Exactly one of config or - virtualClusterConfig must be specified. - labels (Mapping[str, str]): + cluster `__. + Dataproc may set default values, and values may change when + clusters are updated. Exactly one of + [config][google.cloud.dataproc.v1.Cluster.config] or + [virtual_cluster_config][google.cloud.dataproc.v1.Cluster.virtual_cluster_config] + must be specified. 
+ labels (MutableMapping[str, str]): Optional. The labels to associate with this cluster. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -100,7 +109,7 @@ class Cluster(proto.Message): 32 labels can be associated with a cluster. status (google.cloud.dataproc_v1.types.ClusterStatus): Output only. Cluster status. - status_history (Sequence[google.cloud.dataproc_v1.types.ClusterStatus]): + status_history (MutableSequence[google.cloud.dataproc_v1.types.ClusterStatus]): Output only. The previous cluster status. cluster_uuid (str): Output only. A cluster UUID (Unique Universal @@ -114,44 +123,44 @@ class Cluster(proto.Message): purposes only. It may be changed before final release. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=2, ) - config = proto.Field( + config: "ClusterConfig" = proto.Field( proto.MESSAGE, number=3, message="ClusterConfig", ) - virtual_cluster_config = proto.Field( + virtual_cluster_config: "VirtualClusterConfig" = proto.Field( proto.MESSAGE, number=10, message="VirtualClusterConfig", ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=8, ) - status = proto.Field( + status: "ClusterStatus" = proto.Field( proto.MESSAGE, number=4, message="ClusterStatus", ) - status_history = proto.RepeatedField( + status_history: MutableSequence["ClusterStatus"] = proto.RepeatedField( proto.MESSAGE, number=7, message="ClusterStatus", ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=6, ) - metrics = proto.Field( + metrics: "ClusterMetrics" = proto.Field( proto.MESSAGE, number=9, message="ClusterMetrics", @@ -203,7 +212,7 @@ class ClusterConfig(proto.Message): software_config (google.cloud.dataproc_v1.types.SoftwareConfig): Optional. The config settings for cluster software. - initialization_actions (Sequence[google.cloud.dataproc_v1.types.NodeInitializationAction]): + initialization_actions (MutableSequence[google.cloud.dataproc_v1.types.NodeInitializationAction]): Optional. Commands to execute on each node after config is completed. By default, executables are run on master and all worker nodes. You can test a node's ``role`` metadata to run @@ -237,101 +246,110 @@ class ClusterConfig(proto.Message): Optional. Metastore configuration. dataproc_metric_config (google.cloud.dataproc_v1.types.DataprocMetricConfig): Optional. The config for Dataproc metrics. + auxiliary_node_groups (MutableSequence[google.cloud.dataproc_v1.types.AuxiliaryNodeGroup]): + Optional. The node group settings. 
""" - config_bucket = proto.Field( + config_bucket: str = proto.Field( proto.STRING, number=1, ) - temp_bucket = proto.Field( + temp_bucket: str = proto.Field( proto.STRING, number=2, ) - gce_cluster_config = proto.Field( + gce_cluster_config: "GceClusterConfig" = proto.Field( proto.MESSAGE, number=8, message="GceClusterConfig", ) - master_config = proto.Field( + master_config: "InstanceGroupConfig" = proto.Field( proto.MESSAGE, number=9, message="InstanceGroupConfig", ) - worker_config = proto.Field( + worker_config: "InstanceGroupConfig" = proto.Field( proto.MESSAGE, number=10, message="InstanceGroupConfig", ) - secondary_worker_config = proto.Field( + secondary_worker_config: "InstanceGroupConfig" = proto.Field( proto.MESSAGE, number=12, message="InstanceGroupConfig", ) - software_config = proto.Field( + software_config: "SoftwareConfig" = proto.Field( proto.MESSAGE, number=13, message="SoftwareConfig", ) - initialization_actions = proto.RepeatedField( + initialization_actions: MutableSequence[ + "NodeInitializationAction" + ] = proto.RepeatedField( proto.MESSAGE, number=11, message="NodeInitializationAction", ) - encryption_config = proto.Field( + encryption_config: "EncryptionConfig" = proto.Field( proto.MESSAGE, number=15, message="EncryptionConfig", ) - autoscaling_config = proto.Field( + autoscaling_config: "AutoscalingConfig" = proto.Field( proto.MESSAGE, number=18, message="AutoscalingConfig", ) - security_config = proto.Field( + security_config: "SecurityConfig" = proto.Field( proto.MESSAGE, number=16, message="SecurityConfig", ) - lifecycle_config = proto.Field( + lifecycle_config: "LifecycleConfig" = proto.Field( proto.MESSAGE, number=17, message="LifecycleConfig", ) - endpoint_config = proto.Field( + endpoint_config: "EndpointConfig" = proto.Field( proto.MESSAGE, number=19, message="EndpointConfig", ) - metastore_config = proto.Field( + metastore_config: "MetastoreConfig" = proto.Field( proto.MESSAGE, number=20, message="MetastoreConfig", ) - dataproc_metric_config = proto.Field( + dataproc_metric_config: "DataprocMetricConfig" = proto.Field( proto.MESSAGE, number=23, message="DataprocMetricConfig", ) + auxiliary_node_groups: MutableSequence["AuxiliaryNodeGroup"] = proto.RepeatedField( + proto.MESSAGE, + number=25, + message="AuxiliaryNodeGroup", + ) class VirtualClusterConfig(proto.Message): - r"""Dataproc cluster config for a cluster that does not directly control - the underlying compute resources, such as a `Dataproc-on-GKE - cluster `__. + r"""The Dataproc cluster config for a cluster that does not directly + control the underlying compute resources, such as a `Dataproc-on-GKE + cluster `__. .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields Attributes: staging_bucket (str): - Optional. A Storage bucket used to stage job dependencies, - config files, and job driver console output. If you do not - specify a staging bucket, Cloud Dataproc will determine a - Cloud Storage location (US, ASIA, or EU) for your cluster's - staging bucket according to the Compute Engine zone where - your cluster is deployed, and then create and manage this - project-level, per-location bucket (see `Dataproc staging - and temp + Optional. A Cloud Storage bucket used to stage job + dependencies, config files, and job driver console output. 
+ If you do not specify a staging bucket, Cloud Dataproc will + determine a Cloud Storage location (US, ASIA, or EU) for + your cluster's staging bucket according to the Compute + Engine zone where your cluster is deployed, and then create + and manage this project-level, per-location bucket (see + `Dataproc staging and temp buckets `__). **This field requires a Cloud Storage bucket name, not a ``gs://...`` URI to a Cloud Storage bucket.** @@ -345,17 +363,17 @@ class VirtualClusterConfig(proto.Message): used by this cluster. """ - staging_bucket = proto.Field( + staging_bucket: str = proto.Field( proto.STRING, number=1, ) - kubernetes_cluster_config = proto.Field( + kubernetes_cluster_config: shared.KubernetesClusterConfig = proto.Field( proto.MESSAGE, number=6, oneof="infrastructure_config", message=shared.KubernetesClusterConfig, ) - auxiliary_services_config = proto.Field( + auxiliary_services_config: "AuxiliaryServicesConfig" = proto.Field( proto.MESSAGE, number=7, message="AuxiliaryServicesConfig", @@ -374,12 +392,12 @@ class AuxiliaryServicesConfig(proto.Message): configuration for the workload. """ - metastore_config = proto.Field( + metastore_config: "MetastoreConfig" = proto.Field( proto.MESSAGE, number=1, message="MetastoreConfig", ) - spark_history_server_config = proto.Field( + spark_history_server_config: shared.SparkHistoryServerConfig = proto.Field( proto.MESSAGE, number=2, message=shared.SparkHistoryServerConfig, @@ -390,7 +408,7 @@ class EndpointConfig(proto.Message): r"""Endpoint config for this cluster Attributes: - http_ports (Mapping[str, str]): + http_ports (MutableMapping[str, str]): Output only. The map of port descriptions to URLs. Will only be populated if enable_http_port_access is true. enable_http_port_access (bool): @@ -399,12 +417,12 @@ class EndpointConfig(proto.Message): sources. Defaults to false. """ - http_ports = proto.MapField( + http_ports: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=1, ) - enable_http_port_access = proto.Field( + enable_http_port_access: bool = proto.Field( proto.BOOL, number=2, ) @@ -427,7 +445,7 @@ class AutoscalingConfig(proto.Message): Dataproc region. """ - policy_uri = proto.Field( + policy_uri: str = proto.Field( proto.STRING, number=1, ) @@ -443,7 +461,7 @@ class EncryptionConfig(proto.Message): cluster. """ - gce_pd_kms_key_name = proto.Field( + gce_pd_kms_key_name: str = proto.Field( proto.STRING, number=1, ) @@ -513,7 +531,7 @@ class GceClusterConfig(proto.Message): If not specified, the `Compute Engine default service account `__ is used. - service_account_scopes (Sequence[str]): + service_account_scopes (MutableSequence[str]): Optional. The URIs of service account scopes to be included in Compute Engine instances. The following base set of scopes is always included: @@ -529,11 +547,11 @@ class GceClusterConfig(proto.Message): - https://www.googleapis.com/auth/bigtable.admin.table - https://www.googleapis.com/auth/bigtable.data - https://www.googleapis.com/auth/devstorage.full_control - tags (Sequence[str]): + tags (MutableSequence[str]): The Compute Engine tags to add to all instances (see `Tagging instances `__). - metadata (Mapping[str, str]): + metadata (MutableMapping[str, str]): The Compute Engine metadata entries to add to all instances (see `Project and instance metadata `__). 
@@ -565,60 +583,60 @@ class PrivateIpv6GoogleAccess(proto.Enum): OUTBOUND = 2 BIDIRECTIONAL = 3 - zone_uri = proto.Field( + zone_uri: str = proto.Field( proto.STRING, number=1, ) - network_uri = proto.Field( + network_uri: str = proto.Field( proto.STRING, number=2, ) - subnetwork_uri = proto.Field( + subnetwork_uri: str = proto.Field( proto.STRING, number=6, ) - internal_ip_only = proto.Field( + internal_ip_only: bool = proto.Field( proto.BOOL, number=7, ) - private_ipv6_google_access = proto.Field( + private_ipv6_google_access: PrivateIpv6GoogleAccess = proto.Field( proto.ENUM, number=12, enum=PrivateIpv6GoogleAccess, ) - service_account = proto.Field( + service_account: str = proto.Field( proto.STRING, number=8, ) - service_account_scopes = proto.RepeatedField( + service_account_scopes: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - tags = proto.RepeatedField( + tags: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - metadata = proto.MapField( + metadata: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=5, ) - reservation_affinity = proto.Field( + reservation_affinity: "ReservationAffinity" = proto.Field( proto.MESSAGE, number=11, message="ReservationAffinity", ) - node_group_affinity = proto.Field( + node_group_affinity: "NodeGroupAffinity" = proto.Field( proto.MESSAGE, number=13, message="NodeGroupAffinity", ) - shielded_instance_config = proto.Field( + shielded_instance_config: "ShieldedInstanceConfig" = proto.Field( proto.MESSAGE, number=14, message="ShieldedInstanceConfig", ) - confidential_instance_config = proto.Field( + confidential_instance_config: "ConfidentialInstanceConfig" = proto.Field( proto.MESSAGE, number=15, message="ConfidentialInstanceConfig", @@ -626,8 +644,9 @@ class PrivateIpv6GoogleAccess(proto.Enum): class NodeGroupAffinity(proto.Message): - r"""Node Group Affinity for clusters using sole-tenant node - groups. + r"""Node Group Affinity for clusters using sole-tenant node groups. + **The Dataproc ``NodeGroupAffinity`` resource is not related to the + Dataproc [NodeGroup][google.cloud.dataproc.v1.NodeGroup] resource.** Attributes: node_group_uri (str): @@ -643,7 +662,7 @@ class NodeGroupAffinity(proto.Message): - ``node-group-1`` """ - node_group_uri = proto.Field( + node_group_uri: str = proto.Field( proto.STRING, number=1, ) @@ -665,15 +684,15 @@ class ShieldedInstanceConfig(proto.Message): integrity monitoring enabled. """ - enable_secure_boot = proto.Field( + enable_secure_boot: bool = proto.Field( proto.BOOL, number=1, ) - enable_vtpm = proto.Field( + enable_vtpm: bool = proto.Field( proto.BOOL, number=2, ) - enable_integrity_monitoring = proto.Field( + enable_integrity_monitoring: bool = proto.Field( proto.BOOL, number=3, ) @@ -689,7 +708,7 @@ class ConfidentialInstanceConfig(proto.Message): have confidential compute enabled. """ - enable_confidential_compute = proto.Field( + enable_confidential_compute: bool = proto.Field( proto.BOOL, number=1, ) @@ -708,7 +727,7 @@ class InstanceGroupConfig(proto.Message): set to 3**. For standard cluster `master_config <#FIELDS.master_config>`__ groups, **must be set to 1**. - instance_names (Sequence[str]): + instance_names (MutableSequence[str]): Output only. The list of instance names. Dataproc derives the names from ``cluster_name``, ``num_instances``, and the instance group. @@ -766,7 +785,7 @@ class InstanceGroupConfig(proto.Message): Instance Group Manager that manages this group. This is only used for preemptible instance groups. 
- accelerators (Sequence[google.cloud.dataproc_v1.types.AcceleratorConfig]): + accelerators (MutableSequence[google.cloud.dataproc_v1.types.AcceleratorConfig]): Optional. The Compute Engine accelerator configuration for these instances. min_cpu_platform (str): @@ -776,55 +795,52 @@ class InstanceGroupConfig(proto.Message): """ class Preemptibility(proto.Enum): - r"""Controls the use of [preemptible instances] - (https://cloud.google.com/compute/docs/instances/preemptible) within - the group. - """ + r"""Controls the use of preemptible instances within the group.""" PREEMPTIBILITY_UNSPECIFIED = 0 NON_PREEMPTIBLE = 1 PREEMPTIBLE = 2 - num_instances = proto.Field( + num_instances: int = proto.Field( proto.INT32, number=1, ) - instance_names = proto.RepeatedField( + instance_names: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - image_uri = proto.Field( + image_uri: str = proto.Field( proto.STRING, number=3, ) - machine_type_uri = proto.Field( + machine_type_uri: str = proto.Field( proto.STRING, number=4, ) - disk_config = proto.Field( + disk_config: "DiskConfig" = proto.Field( proto.MESSAGE, number=5, message="DiskConfig", ) - is_preemptible = proto.Field( + is_preemptible: bool = proto.Field( proto.BOOL, number=6, ) - preemptibility = proto.Field( + preemptibility: Preemptibility = proto.Field( proto.ENUM, number=10, enum=Preemptibility, ) - managed_group_config = proto.Field( + managed_group_config: "ManagedGroupConfig" = proto.Field( proto.MESSAGE, number=7, message="ManagedGroupConfig", ) - accelerators = proto.RepeatedField( + accelerators: MutableSequence["AcceleratorConfig"] = proto.RepeatedField( proto.MESSAGE, number=8, message="AcceleratorConfig", ) - min_cpu_platform = proto.Field( + min_cpu_platform: str = proto.Field( proto.STRING, number=9, ) @@ -843,11 +859,11 @@ class ManagedGroupConfig(proto.Message): Manager for this group. """ - instance_template_name = proto.Field( + instance_template_name: str = proto.Field( proto.STRING, number=1, ) - instance_group_manager_name = proto.Field( + instance_group_manager_name: str = proto.Field( proto.STRING, number=2, ) @@ -880,11 +896,11 @@ class AcceleratorConfig(proto.Message): type exposed to this instance. """ - accelerator_type_uri = proto.Field( + accelerator_type_uri: str = proto.Field( proto.STRING, number=1, ) - accelerator_count = proto.Field( + accelerator_count: int = proto.Field( proto.INT32, number=2, ) @@ -906,7 +922,7 @@ class DiskConfig(proto.Message): Optional. Size in GB of the boot disk (default is 500GB). num_local_ssds (int): - Optional. Number of attached SSDs, from 0 to 4 (default is + Optional. Number of attached SSDs, from 0 to 8 (default is 0). If SSDs are not attached, the boot disk is used to store runtime logs and `HDFS `__ @@ -920,19 +936,97 @@ class DiskConfig(proto.Message): performance `__. """ - boot_disk_type = proto.Field( + boot_disk_type: str = proto.Field( proto.STRING, number=3, ) - boot_disk_size_gb = proto.Field( + boot_disk_size_gb: int = proto.Field( proto.INT32, number=1, ) - num_local_ssds = proto.Field( + num_local_ssds: int = proto.Field( proto.INT32, number=2, ) - local_ssd_interface = proto.Field( + local_ssd_interface: str = proto.Field( + proto.STRING, + number=4, + ) + + +class AuxiliaryNodeGroup(proto.Message): + r"""Node group identification and configuration information. + + Attributes: + node_group (google.cloud.dataproc_v1.types.NodeGroup): + Required. Node group configuration. + node_group_id (str): + Optional. A node group ID. Generated if not specified. 
+ + The ID must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of from 3 to 33 + characters. + """ + + node_group: "NodeGroup" = proto.Field( + proto.MESSAGE, + number=1, + message="NodeGroup", + ) + node_group_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class NodeGroup(proto.Message): + r"""Dataproc Node Group. **The Dataproc ``NodeGroup`` resource is not + related to the Dataproc + [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity] + resource.** + + Attributes: + name (str): + The Node group `resource name `__. + roles (MutableSequence[google.cloud.dataproc_v1.types.NodeGroup.Role]): + Required. Node group roles. + node_group_config (google.cloud.dataproc_v1.types.InstanceGroupConfig): + Optional. The node group instance group + configuration. + labels (MutableMapping[str, str]): + Optional. Node group labels. + + - Label **keys** must consist of from 1 to 63 characters + and conform to `RFC + 1035 `__. + - Label **values** can be empty. If specified, they must + consist of from 1 to 63 characters and conform to [RFC + 1035] (https://www.ietf.org/rfc/rfc1035.txt). + - The node group must have no more than 32 labels. + """ + + class Role(proto.Enum): + r"""Node group roles.""" + ROLE_UNSPECIFIED = 0 + DRIVER = 1 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + roles: MutableSequence[Role] = proto.RepeatedField( + proto.ENUM, + number=2, + enum=Role, + ) + node_group_config: "InstanceGroupConfig" = proto.Field( + proto.MESSAGE, + number=3, + message="InstanceGroupConfig", + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, proto.STRING, number=4, ) @@ -957,11 +1051,11 @@ class NodeInitializationAction(proto.Message): at end of the timeout period. """ - executable_file = proto.Field( + executable_file: str = proto.Field( proto.STRING, number=1, ) - execution_timeout = proto.Field( + execution_timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=2, message=duration_pb2.Duration, @@ -1005,21 +1099,21 @@ class Substate(proto.Enum): UNHEALTHY = 1 STALE_STATUS = 2 - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=1, enum=State, ) - detail = proto.Field( + detail: str = proto.Field( proto.STRING, number=2, ) - state_start_time = proto.Field( + state_start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - substate = proto.Field( + substate: Substate = proto.Field( proto.ENUM, number=4, enum=Substate, @@ -1039,12 +1133,12 @@ class SecurityConfig(proto.Message): multi-tenancy user mappings. """ - kerberos_config = proto.Field( + kerberos_config: "KerberosConfig" = proto.Field( proto.MESSAGE, number=1, message="KerberosConfig", ) - identity_config = proto.Field( + identity_config: "IdentityConfig" = proto.Field( proto.MESSAGE, number=2, message="IdentityConfig", @@ -1126,63 +1220,63 @@ class KerberosConfig(proto.Message): of hostnames will be the realm. 
""" - enable_kerberos = proto.Field( + enable_kerberos: bool = proto.Field( proto.BOOL, number=1, ) - root_principal_password_uri = proto.Field( + root_principal_password_uri: str = proto.Field( proto.STRING, number=2, ) - kms_key_uri = proto.Field( + kms_key_uri: str = proto.Field( proto.STRING, number=3, ) - keystore_uri = proto.Field( + keystore_uri: str = proto.Field( proto.STRING, number=4, ) - truststore_uri = proto.Field( + truststore_uri: str = proto.Field( proto.STRING, number=5, ) - keystore_password_uri = proto.Field( + keystore_password_uri: str = proto.Field( proto.STRING, number=6, ) - key_password_uri = proto.Field( + key_password_uri: str = proto.Field( proto.STRING, number=7, ) - truststore_password_uri = proto.Field( + truststore_password_uri: str = proto.Field( proto.STRING, number=8, ) - cross_realm_trust_realm = proto.Field( + cross_realm_trust_realm: str = proto.Field( proto.STRING, number=9, ) - cross_realm_trust_kdc = proto.Field( + cross_realm_trust_kdc: str = proto.Field( proto.STRING, number=10, ) - cross_realm_trust_admin_server = proto.Field( + cross_realm_trust_admin_server: str = proto.Field( proto.STRING, number=11, ) - cross_realm_trust_shared_password_uri = proto.Field( + cross_realm_trust_shared_password_uri: str = proto.Field( proto.STRING, number=12, ) - kdc_db_key_uri = proto.Field( + kdc_db_key_uri: str = proto.Field( proto.STRING, number=13, ) - tgt_lifetime_hours = proto.Field( + tgt_lifetime_hours: int = proto.Field( proto.INT32, number=14, ) - realm = proto.Field( + realm: str = proto.Field( proto.STRING, number=15, ) @@ -1193,11 +1287,11 @@ class IdentityConfig(proto.Message): based secure multi-tenancy user mappings. Attributes: - user_service_account_mapping (Mapping[str, str]): + user_service_account_mapping (MutableMapping[str, str]): Required. Map of user to service account. """ - user_service_account_mapping = proto.MapField( + user_service_account_mapping: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=1, @@ -1217,7 +1311,7 @@ class SoftwareConfig(proto.Message): "1.2.29"), or the `"preview" version `__. If unspecified, it defaults to the latest Debian version. - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. The properties to set on daemon config files. Property keys are specified in ``prefix:property`` format, @@ -1236,21 +1330,21 @@ class SoftwareConfig(proto.Message): For more information, see `Cluster properties `__. - optional_components (Sequence[google.cloud.dataproc_v1.types.Component]): + optional_components (MutableSequence[google.cloud.dataproc_v1.types.Component]): Optional. The set of components to activate on the cluster. """ - image_version = proto.Field( + image_version: str = proto.Field( proto.STRING, number=1, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=2, ) - optional_components = proto.RepeatedField( + optional_components: MutableSequence[shared.Component] = proto.RepeatedField( proto.ENUM, number=3, enum=shared.Component, @@ -1296,24 +1390,24 @@ class LifecycleConfig(proto.Message): `Timestamp `__). 
""" - idle_delete_ttl = proto.Field( + idle_delete_ttl: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=1, message=duration_pb2.Duration, ) - auto_delete_time = proto.Field( + auto_delete_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=2, oneof="ttl", message=timestamp_pb2.Timestamp, ) - auto_delete_ttl = proto.Field( + auto_delete_ttl: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=3, oneof="ttl", message=duration_pb2.Duration, ) - idle_start_time = proto.Field( + idle_start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, @@ -1333,7 +1427,7 @@ class MetastoreConfig(proto.Message): - ``projects/[project_id]/locations/[dataproc_region]/services/[service-name]`` """ - dataproc_metastore_service = proto.Field( + dataproc_metastore_service: str = proto.Field( proto.STRING, number=1, ) @@ -1343,7 +1437,7 @@ class DataprocMetricConfig(proto.Message): r"""Dataproc metric config. Attributes: - metrics (Sequence[google.cloud.dataproc_v1.types.DataprocMetricConfig.Metric]): + metrics (MutableSequence[google.cloud.dataproc_v1.types.DataprocMetricConfig.Metric]): Required. Metrics sources to enable. """ @@ -1370,7 +1464,7 @@ class Metric(proto.Message): [Available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) for more information). - metric_overrides (Sequence[str]): + metric_overrides (MutableSequence[str]): Optional. Specify one or more [available OSS metrics] (https://cloud.google.com/dataproc/docs/guides/monitoring#available_oss_metrics) to collect for the metric course (for the ``SPARK`` metric @@ -1404,17 +1498,17 @@ class Metric(proto.Message): default YARN metrics will be collected. """ - metric_source = proto.Field( + metric_source: "DataprocMetricConfig.MetricSource" = proto.Field( proto.ENUM, number=1, enum="DataprocMetricConfig.MetricSource", ) - metric_overrides = proto.RepeatedField( + metric_overrides: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - metrics = proto.RepeatedField( + metrics: MutableSequence[Metric] = proto.RepeatedField( proto.MESSAGE, number=1, message=Metric, @@ -1428,18 +1522,18 @@ class ClusterMetrics(proto.Message): only. It may be changed before final release. Attributes: - hdfs_metrics (Mapping[str, int]): + hdfs_metrics (MutableMapping[str, int]): The HDFS metrics. - yarn_metrics (Mapping[str, int]): + yarn_metrics (MutableMapping[str, int]): The YARN metrics. """ - hdfs_metrics = proto.MapField( + hdfs_metrics: MutableMapping[str, int] = proto.MapField( proto.STRING, proto.INT64, number=1, ) - yarn_metrics = proto.MapField( + yarn_metrics: MutableMapping[str, int] = proto.MapField( proto.STRING, proto.INT64, number=2, @@ -1478,24 +1572,24 @@ class CreateClusterRequest(proto.Message): creation fails. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - cluster = proto.Field( + cluster: "Cluster" = proto.Field( proto.MESSAGE, number=2, message="Cluster", ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=4, ) - action_on_failed_primary_workers = proto.Field( + action_on_failed_primary_workers: shared.FailureAction = proto.Field( proto.ENUM, number=5, enum=shared.FailureAction, @@ -1606,34 +1700,34 @@ class UpdateClusterRequest(proto.Message): characters. 
""" - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=5, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=2, ) - cluster = proto.Field( + cluster: "Cluster" = proto.Field( proto.MESSAGE, number=3, message="Cluster", ) - graceful_decommission_timeout = proto.Field( + graceful_decommission_timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=6, message=duration_pb2.Duration, ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=4, message=field_mask_pb2.FieldMask, ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=7, ) @@ -1672,23 +1766,23 @@ class StopClusterRequest(proto.Message): characters. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=2, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=3, ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=4, ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=5, ) @@ -1727,23 +1821,23 @@ class StartClusterRequest(proto.Message): characters. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=2, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=3, ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=4, ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=5, ) @@ -1782,23 +1876,23 @@ class DeleteClusterRequest(proto.Message): characters. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=2, ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=4, ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=5, ) @@ -1819,15 +1913,15 @@ class GetClusterRequest(proto.Message): Required. The cluster name. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=2, ) @@ -1871,23 +1965,23 @@ class ListClustersRequest(proto.Message): Optional. The standard List page token. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=4, ) - filter = proto.Field( + filter: str = proto.Field( proto.STRING, number=5, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=2, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) @@ -1897,7 +1991,7 @@ class ListClustersResponse(proto.Message): r"""The list of all clusters in a project. Attributes: - clusters (Sequence[google.cloud.dataproc_v1.types.Cluster]): + clusters (MutableSequence[google.cloud.dataproc_v1.types.Cluster]): Output only. The clusters in the project. next_page_token (str): Output only. 
This token is included in the response if there @@ -1910,12 +2004,12 @@ class ListClustersResponse(proto.Message): def raw_page(self): return self - clusters = proto.RepeatedField( + clusters: MutableSequence["Cluster"] = proto.RepeatedField( proto.MESSAGE, number=1, message="Cluster", ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) @@ -1935,15 +2029,15 @@ class DiagnoseClusterRequest(proto.Message): Required. The cluster name. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=2, ) @@ -1960,7 +2054,7 @@ class DiagnoseClusterResults(proto.Message): diagnostics. """ - output_uri = proto.Field( + output_uri: str = proto.Field( proto.STRING, number=1, ) @@ -1975,7 +2069,7 @@ class ReservationAffinity(proto.Message): key (str): Optional. Corresponds to the label key of reservation resource. - values (Sequence[str]): + values (MutableSequence[str]): Optional. Corresponds to the label values of reservation resource. """ @@ -1989,16 +2083,16 @@ class Type(proto.Enum): ANY_RESERVATION = 2 SPECIFIC_RESERVATION = 3 - consume_reservation_type = proto.Field( + consume_reservation_type: Type = proto.Field( proto.ENUM, number=1, enum=Type, ) - key = proto.Field( + key: str = proto.Field( proto.STRING, number=2, ) - values = proto.RepeatedField( + values: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) diff --git a/google/cloud/dataproc_v1/types/jobs.py b/google/cloud/dataproc_v1/types/jobs.py index 2e785931..501b9446 100644 --- a/google/cloud/dataproc_v1/types/jobs.py +++ b/google/cloud/dataproc_v1/types/jobs.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import field_mask_pb2 # type: ignore @@ -37,6 +39,7 @@ "JobReference", "YarnApplication", "Job", + "DriverSchedulingConfig", "JobScheduling", "SubmitJobRequest", "JobMetadata", @@ -54,7 +57,7 @@ class LoggingConfig(proto.Message): r"""The runtime logging config of the job. Attributes: - driver_log_levels (Mapping[str, google.cloud.dataproc_v1.types.LoggingConfig.Level]): + driver_log_levels (MutableMapping[str, google.cloud.dataproc_v1.types.LoggingConfig.Level]): The per-package log levels for the driver. This may include "root" package name to configure rootLogger. Examples: @@ -77,7 +80,7 @@ class Level(proto.Enum): FATAL = 7 OFF = 8 - driver_log_levels = proto.MapField( + driver_log_levels: MutableMapping[str, Level] = proto.MapField( proto.STRING, proto.ENUM, number=2, @@ -113,25 +116,25 @@ class HadoopJob(proto.Message): ``jar_file_uris``. This field is a member of `oneof`_ ``driver``. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the driver. Do not include arguments, such as ``-libjars`` or ``-Dfoo=bar``, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. Jar file URIs to add to the CLASSPATHs of the Hadoop driver and tasks. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. 
HCFS (Hadoop Compatible Filesystem) URIs of files to be copied to the working directory of Hadoop drivers and distributed tasks. Useful for naively parallel tasks. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted in the working directory of Hadoop drivers and tasks. Supported file types: .jar, .tar, .tar.gz, .tgz, or .zip. - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, used to configure Hadoop. Properties that conflict with values set by the Dataproc API may be overwritten. Can include @@ -142,38 +145,38 @@ class HadoopJob(proto.Message): execution. """ - main_jar_file_uri = proto.Field( + main_jar_file_uri: str = proto.Field( proto.STRING, number=1, oneof="driver", ) - main_class = proto.Field( + main_class: str = proto.Field( proto.STRING, number=2, oneof="driver", ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=7, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=8, message="LoggingConfig", @@ -182,7 +185,7 @@ class HadoopJob(proto.Message): class SparkJob(proto.Message): r"""A Dataproc job for running `Apache - Spark `__ applications on YARN. + Spark `__ applications on YARN. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -203,24 +206,24 @@ class SparkJob(proto.Message): specified in ``jar_file_uris``. This field is a member of `oneof`_ ``driver``. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the driver. Do not include arguments, such as ``--conf``, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Spark driver and tasks. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, used to configure Spark. Properties that conflict with values set by the Dataproc API may @@ -232,38 +235,38 @@ class SparkJob(proto.Message): execution. 
""" - main_jar_file_uri = proto.Field( + main_jar_file_uri: str = proto.Field( proto.STRING, number=1, oneof="driver", ) - main_class = proto.Field( + main_class: str = proto.Field( proto.STRING, number=2, oneof="driver", ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=7, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=8, message="LoggingConfig", @@ -279,28 +282,28 @@ class PySparkJob(proto.Message): main_python_file_uri (str): Required. The HCFS URI of the main Python file to use as the driver. Must be a .py file. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the driver. Do not include arguments, such as ``--conf``, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - python_file_uris (Sequence[str]): + python_file_uris (MutableSequence[str]): Optional. HCFS file URIs of Python files to pass to the PySpark framework. Supported file types: .py, .egg, and .zip. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to add to the CLASSPATHs of the Python driver and tasks. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, used to configure PySpark. Properties that conflict with values set by the Dataproc @@ -313,36 +316,36 @@ class PySparkJob(proto.Message): execution. """ - main_python_file_uri = proto.Field( + main_python_file_uri: str = proto.Field( proto.STRING, number=1, ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - python_file_uris = proto.RepeatedField( + python_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=7, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=8, message="LoggingConfig", @@ -353,7 +356,7 @@ class QueryList(proto.Message): r"""A list of queries to run on a cluster. 
Attributes: - queries (Sequence[str]): + queries (MutableSequence[str]): Required. The queries to execute. You do not need to end a query expression with a semicolon. Multiple queries can be specified in one string by separating each with a semicolon. @@ -373,7 +376,7 @@ class QueryList(proto.Message): } """ - queries = proto.RepeatedField( + queries: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) @@ -404,48 +407,48 @@ class HiveJob(proto.Message): Optional. Whether to continue executing queries if a query fails. The default value is ``false``. Setting to ``true`` can be useful when executing independent parallel queries. - script_variables (Mapping[str, str]): + script_variables (MutableMapping[str, str]): Optional. Mapping of query variable names to values (equivalent to the Hive command: ``SET name="value";``). - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names and values, used to configure Hive. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/hive/conf/hive-site.xml, and classes in user code. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to add to the CLASSPATH of the Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes and UDFs. """ - query_file_uri = proto.Field( + query_file_uri: str = proto.Field( proto.STRING, number=1, oneof="queries", ) - query_list = proto.Field( + query_list: "QueryList" = proto.Field( proto.MESSAGE, number=2, oneof="queries", message="QueryList", ) - continue_on_failure = proto.Field( + continue_on_failure: bool = proto.Field( proto.BOOL, number=3, ) - script_variables = proto.MapField( + script_variables: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=4, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=5, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) @@ -453,7 +456,7 @@ class HiveJob(proto.Message): class SparkSqlJob(proto.Message): r"""A Dataproc job for running `Apache Spark - SQL `__ queries. + SQL `__ queries. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -472,16 +475,16 @@ class SparkSqlJob(proto.Message): A list of queries. This field is a member of `oneof`_ ``queries``. - script_variables (Mapping[str, str]): + script_variables (MutableMapping[str, str]): Optional. Mapping of query variable names to values (equivalent to the Spark SQL command: SET ``name="value";``). - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, used to configure Spark SQL's SparkConf. Properties that conflict with values set by the Dataproc API may be overwritten. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. logging_config (google.cloud.dataproc_v1.types.LoggingConfig): @@ -489,32 +492,32 @@ class SparkSqlJob(proto.Message): execution. 
""" - query_file_uri = proto.Field( + query_file_uri: str = proto.Field( proto.STRING, number=1, oneof="queries", ) - query_list = proto.Field( + query_list: "QueryList" = proto.Field( proto.MESSAGE, number=2, oneof="queries", message="QueryList", ) - script_variables = proto.MapField( + script_variables: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=3, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=4, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=56, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=6, message="LoggingConfig", @@ -546,16 +549,16 @@ class PigJob(proto.Message): Optional. Whether to continue executing queries if a query fails. The default value is ``false``. Setting to ``true`` can be useful when executing independent parallel queries. - script_variables (Mapping[str, str]): + script_variables (MutableMapping[str, str]): Optional. Mapping of query variable names to values (equivalent to the Pig command: ``name=[value]``). - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, used to configure Pig. Properties that conflict with values set by the Dataproc API may be overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, /etc/pig/conf/pig.properties, and classes in user code. - jar_file_uris (Sequence[str]): + jar_file_uris (MutableSequence[str]): Optional. HCFS URIs of jar files to add to the CLASSPATH of the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. @@ -564,36 +567,36 @@ class PigJob(proto.Message): execution. """ - query_file_uri = proto.Field( + query_file_uri: str = proto.Field( proto.STRING, number=1, oneof="queries", ) - query_list = proto.Field( + query_list: "QueryList" = proto.Field( proto.MESSAGE, number=2, oneof="queries", message="QueryList", ) - continue_on_failure = proto.Field( + continue_on_failure: bool = proto.Field( proto.BOOL, number=3, ) - script_variables = proto.MapField( + script_variables: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=4, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=5, ) - jar_file_uris = proto.RepeatedField( + jar_file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=7, message="LoggingConfig", @@ -609,21 +612,21 @@ class SparkRJob(proto.Message): main_r_file_uri (str): Required. The HCFS URI of the main R file to use as the driver. Must be a .R file. - args (Sequence[str]): + args (MutableSequence[str]): Optional. The arguments to pass to the driver. Do not include arguments, such as ``--conf``, that can be set as job properties, since a collision may occur that causes an incorrect job submission. - file_uris (Sequence[str]): + file_uris (MutableSequence[str]): Optional. HCFS URIs of files to be placed in the working directory of each executor. Useful for naively parallel tasks. - archive_uris (Sequence[str]): + archive_uris (MutableSequence[str]): Optional. HCFS URIs of archives to be extracted into the working directory of each executor. Supported file types: .jar, .tar, .tar.gz, .tgz, and .zip. 
- properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, used to configure SparkR. Properties that conflict with values set by the Dataproc @@ -636,28 +639,28 @@ class SparkRJob(proto.Message): execution. """ - main_r_file_uri = proto.Field( + main_r_file_uri: str = proto.Field( proto.STRING, number=1, ) - args = proto.RepeatedField( + args: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - file_uris = proto.RepeatedField( + file_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=3, ) - archive_uris = proto.RepeatedField( + archive_uris: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=4, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=5, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=6, message="LoggingConfig", @@ -696,10 +699,10 @@ class PrestoJob(proto.Message): Optional. The format in which query output will be displayed. See the Presto documentation for supported output formats - client_tags (Sequence[str]): + client_tags (MutableSequence[str]): Optional. Presto client tags to attach to this query - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values. Used to set Presto `session properties `__ @@ -709,35 +712,35 @@ class PrestoJob(proto.Message): execution. """ - query_file_uri = proto.Field( + query_file_uri: str = proto.Field( proto.STRING, number=1, oneof="queries", ) - query_list = proto.Field( + query_list: "QueryList" = proto.Field( proto.MESSAGE, number=2, oneof="queries", message="QueryList", ) - continue_on_failure = proto.Field( + continue_on_failure: bool = proto.Field( proto.BOOL, number=3, ) - output_format = proto.Field( + output_format: str = proto.Field( proto.STRING, number=4, ) - client_tags = proto.RepeatedField( + client_tags: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=5, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=6, ) - logging_config = proto.Field( + logging_config: "LoggingConfig" = proto.Field( proto.MESSAGE, number=7, message="LoggingConfig", @@ -754,20 +757,20 @@ class JobPlacement(proto.Message): cluster_uuid (str): Output only. A cluster UUID generated by the Dataproc service when the job is submitted. - cluster_labels (Mapping[str, str]): + cluster_labels (MutableMapping[str, str]): Optional. Cluster labels to identify a cluster where the job will be submitted. 
""" - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=1, ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=2, ) - cluster_labels = proto.MapField( + cluster_labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=3, @@ -813,21 +816,21 @@ class Substate(proto.Enum): QUEUED = 2 STALE_STATUS = 3 - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=1, enum=State, ) - details = proto.Field( + details: str = proto.Field( proto.STRING, number=2, ) - state_start_time = proto.Field( + state_start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=6, message=timestamp_pb2.Timestamp, ) - substate = proto.Field( + substate: Substate = proto.Field( proto.ENUM, number=7, enum=Substate, @@ -854,11 +857,11 @@ class JobReference(proto.Message): by the server. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=2, ) @@ -903,20 +906,20 @@ class State(proto.Enum): FAILED = 7 KILLED = 8 - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=2, enum=State, ) - progress = proto.Field( + progress: float = proto.Field( proto.FLOAT, number=3, ) - tracking_url = proto.Field( + tracking_url: str = proto.Field( proto.STRING, number=4, ) @@ -977,9 +980,9 @@ class Job(proto.Message): Output only. The job status. Additional application-specific status information may be contained in the type_job and yarn_applications fields. - status_history (Sequence[google.cloud.dataproc_v1.types.JobStatus]): + status_history (MutableSequence[google.cloud.dataproc_v1.types.JobStatus]): Output only. The previous job status. - yarn_applications (Sequence[google.cloud.dataproc_v1.types.YarnApplication]): + yarn_applications (MutableSequence[google.cloud.dataproc_v1.types.YarnApplication]): Output only. The collection of YARN applications spun up by this job. @@ -993,7 +996,7 @@ class Job(proto.Message): control files which may be used as part of job setup and handling. If not present, control files may be placed in the same location as ``driver_output_uri``. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Optional. The labels to associate with this job. Label **keys** must contain 1 to 63 characters, and must conform to `RFC 1035 `__. @@ -1012,107 +1015,136 @@ class Job(proto.Message): value is ``false``, the job is still in progress. If ``true``, the job is completed, and ``status.state`` field will indicate if it was successful, failed, or cancelled. + driver_scheduling_config (google.cloud.dataproc_v1.types.DriverSchedulingConfig): + Optional. Driver scheduling configuration. 
""" - reference = proto.Field( + reference: "JobReference" = proto.Field( proto.MESSAGE, number=1, message="JobReference", ) - placement = proto.Field( + placement: "JobPlacement" = proto.Field( proto.MESSAGE, number=2, message="JobPlacement", ) - hadoop_job = proto.Field( + hadoop_job: "HadoopJob" = proto.Field( proto.MESSAGE, number=3, oneof="type_job", message="HadoopJob", ) - spark_job = proto.Field( + spark_job: "SparkJob" = proto.Field( proto.MESSAGE, number=4, oneof="type_job", message="SparkJob", ) - pyspark_job = proto.Field( + pyspark_job: "PySparkJob" = proto.Field( proto.MESSAGE, number=5, oneof="type_job", message="PySparkJob", ) - hive_job = proto.Field( + hive_job: "HiveJob" = proto.Field( proto.MESSAGE, number=6, oneof="type_job", message="HiveJob", ) - pig_job = proto.Field( + pig_job: "PigJob" = proto.Field( proto.MESSAGE, number=7, oneof="type_job", message="PigJob", ) - spark_r_job = proto.Field( + spark_r_job: "SparkRJob" = proto.Field( proto.MESSAGE, number=21, oneof="type_job", message="SparkRJob", ) - spark_sql_job = proto.Field( + spark_sql_job: "SparkSqlJob" = proto.Field( proto.MESSAGE, number=12, oneof="type_job", message="SparkSqlJob", ) - presto_job = proto.Field( + presto_job: "PrestoJob" = proto.Field( proto.MESSAGE, number=23, oneof="type_job", message="PrestoJob", ) - status = proto.Field( + status: "JobStatus" = proto.Field( proto.MESSAGE, number=8, message="JobStatus", ) - status_history = proto.RepeatedField( + status_history: MutableSequence["JobStatus"] = proto.RepeatedField( proto.MESSAGE, number=13, message="JobStatus", ) - yarn_applications = proto.RepeatedField( + yarn_applications: MutableSequence["YarnApplication"] = proto.RepeatedField( proto.MESSAGE, number=9, message="YarnApplication", ) - driver_output_resource_uri = proto.Field( + driver_output_resource_uri: str = proto.Field( proto.STRING, number=17, ) - driver_control_files_uri = proto.Field( + driver_control_files_uri: str = proto.Field( proto.STRING, number=15, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=18, ) - scheduling = proto.Field( + scheduling: "JobScheduling" = proto.Field( proto.MESSAGE, number=20, message="JobScheduling", ) - job_uuid = proto.Field( + job_uuid: str = proto.Field( proto.STRING, number=22, ) - done = proto.Field( + done: bool = proto.Field( proto.BOOL, number=24, ) + driver_scheduling_config: "DriverSchedulingConfig" = proto.Field( + proto.MESSAGE, + number=27, + message="DriverSchedulingConfig", + ) + + +class DriverSchedulingConfig(proto.Message): + r"""Driver scheduling configuration. + + Attributes: + memory_mb (int): + Required. The amount of memory in MB the + driver is requesting. + vcores (int): + Required. The number of vCPUs the driver is + requesting. + """ + + memory_mb: int = proto.Field( + proto.INT32, + number=1, + ) + vcores: int = proto.Field( + proto.INT32, + number=2, + ) class JobScheduling(proto.Message): @@ -1124,31 +1156,32 @@ class JobScheduling(proto.Message): restarted as a result of driver exiting with non-zero code before job is reported failed. - A job may be reported as thrashing if driver exits with - non-zero code 4 times within 10 minute window. + A job may be reported as thrashing if the driver exits with + a non-zero code four times within a 10-minute window. Maximum value is 10. - **Note:** Currently, this restartable job option is not - supported in Dataproc `workflow - template `__ - jobs. 
+ **Note:** This restartable job option is not supported in + Dataproc [workflow templates] + (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template). max_failures_total (int): - Optional. Maximum number of times in total a driver may be - restarted as a result of driver exiting with non-zero code - before job is reported failed. Maximum value is 240. + Optional. Maximum total number of times a driver may be + restarted as a result of the driver exiting with a non-zero + code. After the maximum number is reached, the job will be + reported as failed. + + Maximum value is 240. **Note:** Currently, this restartable job option is not supported in Dataproc `workflow - template `__ - jobs. + templates `__. """ - max_failures_per_hour = proto.Field( + max_failures_per_hour: int = proto.Field( proto.INT32, number=1, ) - max_failures_total = proto.Field( + max_failures_total: int = proto.Field( proto.INT32, number=2, ) @@ -1182,20 +1215,20 @@ class SubmitJobRequest(proto.Message): characters. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - job = proto.Field( + job: "Job" = proto.Field( proto.MESSAGE, number=2, message="Job", ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=4, ) @@ -1215,20 +1248,20 @@ class JobMetadata(proto.Message): Output only. Job submission time. """ - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=1, ) - status = proto.Field( + status: "JobStatus" = proto.Field( proto.MESSAGE, number=2, message="JobStatus", ) - operation_type = proto.Field( + operation_type: str = proto.Field( proto.STRING, number=3, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, @@ -1250,15 +1283,15 @@ class GetJobRequest(proto.Message): Required. The job ID. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=2, ) @@ -1316,32 +1349,32 @@ class JobStateMatcher(proto.Enum): ACTIVE = 1 NON_ACTIVE = 2 - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=6, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=2, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=4, ) - job_state_matcher = proto.Field( + job_state_matcher: JobStateMatcher = proto.Field( proto.ENUM, number=5, enum=JobStateMatcher, ) - filter = proto.Field( + filter: str = proto.Field( proto.STRING, number=7, ) @@ -1369,24 +1402,24 @@ class UpdateJobRequest(proto.Message): Currently, labels is the only field that can be updated. 
""" - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=2, ) - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=3, ) - job = proto.Field( + job: "Job" = proto.Field( proto.MESSAGE, number=4, message="Job", ) - update_mask = proto.Field( + update_mask: field_mask_pb2.FieldMask = proto.Field( proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask, @@ -1397,7 +1430,7 @@ class ListJobsResponse(proto.Message): r"""A list of jobs in a project. Attributes: - jobs (Sequence[google.cloud.dataproc_v1.types.Job]): + jobs (MutableSequence[google.cloud.dataproc_v1.types.Job]): Output only. Jobs list. next_page_token (str): Optional. This token is included in the response if there @@ -1410,12 +1443,12 @@ class ListJobsResponse(proto.Message): def raw_page(self): return self - jobs = proto.RepeatedField( + jobs: MutableSequence["Job"] = proto.RepeatedField( proto.MESSAGE, number=1, message="Job", ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) @@ -1435,15 +1468,15 @@ class CancelJobRequest(proto.Message): Required. The job ID. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=2, ) @@ -1463,15 +1496,15 @@ class DeleteJobRequest(proto.Message): Required. The job ID. """ - project_id = proto.Field( + project_id: str = proto.Field( proto.STRING, number=1, ) - region = proto.Field( + region: str = proto.Field( proto.STRING, number=3, ) - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=2, ) diff --git a/google/cloud/dataproc_v1/types/node_groups.py b/google/cloud/dataproc_v1/types/node_groups.py new file mode 100644 index 00000000..49c4f572 --- /dev/null +++ b/google/cloud/dataproc_v1/types/node_groups.py @@ -0,0 +1,165 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.dataproc_v1.types import clusters +from google.protobuf import duration_pb2 # type: ignore + + +__protobuf__ = proto.module( + package="google.cloud.dataproc.v1", + manifest={ + "CreateNodeGroupRequest", + "ResizeNodeGroupRequest", + "GetNodeGroupRequest", + }, +) + + +class CreateNodeGroupRequest(proto.Message): + r"""A request to create a node group. + + Attributes: + parent (str): + Required. The parent resource where this node group will be + created. Format: + ``projects/{project}/regions/{region}/clusters/{cluster}`` + node_group (google.cloud.dataproc_v1.types.NodeGroup): + Required. The node group to create. + node_group_id (str): + Optional. An optional node group ID. Generated if not + specified. 
+ + The ID must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). Cannot begin or end with + underscore or hyphen. Must consist of from 3 to 33 + characters. + request_id (str): + Optional. A unique ID used to identify the request. If the + server receives two + `CreateNodeGroupRequest `__ + with the same ID, the second request is ignored and the + first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + Recommendation: Set this value to a + `UUID `__. + + The ID must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + node_group: clusters.NodeGroup = proto.Field( + proto.MESSAGE, + number=2, + message=clusters.NodeGroup, + ) + node_group_id: str = proto.Field( + proto.STRING, + number=4, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ResizeNodeGroupRequest(proto.Message): + r"""A request to resize a node group. + + Attributes: + name (str): + Required. The name of the node group to resize. Format: + ``projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`` + size (int): + Required. The number of running instances for + the node group to maintain. The group adds or + removes instances to maintain the number of + instances specified by this parameter. + request_id (str): + Optional. A unique ID used to identify the request. If the + server receives two + `ResizeNodeGroupRequest `__ + with the same ID, the second request is ignored and the + first + [google.longrunning.Operation][google.longrunning.Operation] + created and stored in the backend is returned. + + Recommendation: Set this value to a + `UUID `__. + + The ID must contain only letters (a-z, A-Z), numbers (0-9), + underscores (_), and hyphens (-). The maximum length is 40 + characters. + graceful_decommission_timeout (google.protobuf.duration_pb2.Duration): + Optional. Timeout for graceful YARN decommissioning. + [Graceful decommissioning] + (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning) + allows the removal of nodes from the Compute Engine node + group without interrupting jobs in progress. This timeout + specifies how long to wait for jobs in progress to finish + before forcefully removing nodes (and potentially + interrupting jobs). Default timeout is 0 (for forceful + decommission), and the maximum allowed timeout is 1 day. + (see JSON representation of + `Duration `__). + + Only supported on Dataproc image versions 1.2 and higher. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + size: int = proto.Field( + proto.INT32, + number=2, + ) + request_id: str = proto.Field( + proto.STRING, + number=3, + ) + graceful_decommission_timeout: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + + +class GetNodeGroupRequest(proto.Message): + r"""A request to get a node group . + + Attributes: + name (str): + Required. The name of the node group to retrieve. 
Format: + ``projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`` + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/operations.py b/google/cloud/dataproc_v1/types/operations.py index 572f35f9..3b7e353d 100644 --- a/google/cloud/dataproc_v1/types/operations.py +++ b/google/cloud/dataproc_v1/types/operations.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.protobuf import timestamp_pb2 # type: ignore @@ -24,6 +26,7 @@ "BatchOperationMetadata", "ClusterOperationStatus", "ClusterOperationMetadata", + "NodeGroupOperationMetadata", }, ) @@ -44,9 +47,9 @@ class BatchOperationMetadata(proto.Message): The operation type. description (str): Short description of the operation. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Labels associated with the operation. - warnings (Sequence[str]): + warnings (MutableSequence[str]): Warnings encountered during operation execution. """ @@ -56,39 +59,39 @@ class BatchOperationType(proto.Enum): BATCH_OPERATION_TYPE_UNSPECIFIED = 0 BATCH = 1 - batch = proto.Field( + batch: str = proto.Field( proto.STRING, number=1, ) - batch_uuid = proto.Field( + batch_uuid: str = proto.Field( proto.STRING, number=2, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp, ) - done_time = proto.Field( + done_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - operation_type = proto.Field( + operation_type: BatchOperationType = proto.Field( proto.ENUM, number=6, enum=BatchOperationType, ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=7, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=8, ) - warnings = proto.RepeatedField( + warnings: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=9, ) @@ -118,20 +121,20 @@ class State(proto.Enum): RUNNING = 2 DONE = 3 - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=1, enum=State, ) - inner_state = proto.Field( + inner_state: str = proto.Field( proto.STRING, number=2, ) - details = proto.Field( + details: str = proto.Field( proto.STRING, number=3, ) - state_start_time = proto.Field( + state_start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, @@ -149,55 +152,126 @@ class ClusterOperationMetadata(proto.Message): Output only. Cluster UUID for the operation. status (google.cloud.dataproc_v1.types.ClusterOperationStatus): Output only. Current operation status. - status_history (Sequence[google.cloud.dataproc_v1.types.ClusterOperationStatus]): + status_history (MutableSequence[google.cloud.dataproc_v1.types.ClusterOperationStatus]): Output only. The previous operation status. operation_type (str): Output only. The operation type. description (str): Output only. Short description of operation. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Output only. Labels associated with the operation - warnings (Sequence[str]): + warnings (MutableSequence[str]): Output only. Errors encountered during operation execution. 
""" - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=7, ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=8, ) - status = proto.Field( + status: "ClusterOperationStatus" = proto.Field( proto.MESSAGE, number=9, message="ClusterOperationStatus", ) - status_history = proto.RepeatedField( + status_history: MutableSequence["ClusterOperationStatus"] = proto.RepeatedField( proto.MESSAGE, number=10, message="ClusterOperationStatus", ) - operation_type = proto.Field( + operation_type: str = proto.Field( proto.STRING, number=11, ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=12, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=13, ) - warnings = proto.RepeatedField( + warnings: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=14, ) +class NodeGroupOperationMetadata(proto.Message): + r"""Metadata describing the node group operation. + + Attributes: + node_group_id (str): + Output only. Node group ID for the operation. + cluster_uuid (str): + Output only. Cluster UUID associated with the + node group operation. + status (google.cloud.dataproc_v1.types.ClusterOperationStatus): + Output only. Current operation status. + status_history (MutableSequence[google.cloud.dataproc_v1.types.ClusterOperationStatus]): + Output only. The previous operation status. + operation_type (google.cloud.dataproc_v1.types.NodeGroupOperationMetadata.NodeGroupOperationType): + The operation type. + description (str): + Output only. Short description of operation. + labels (MutableMapping[str, str]): + Output only. Labels associated with the + operation. + warnings (MutableSequence[str]): + Output only. Errors encountered during + operation execution. + """ + + class NodeGroupOperationType(proto.Enum): + r"""Operation type for node group resources.""" + NODE_GROUP_OPERATION_TYPE_UNSPECIFIED = 0 + CREATE = 1 + UPDATE = 2 + DELETE = 3 + RESIZE = 4 + + node_group_id: str = proto.Field( + proto.STRING, + number=1, + ) + cluster_uuid: str = proto.Field( + proto.STRING, + number=2, + ) + status: "ClusterOperationStatus" = proto.Field( + proto.MESSAGE, + number=3, + message="ClusterOperationStatus", + ) + status_history: MutableSequence["ClusterOperationStatus"] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message="ClusterOperationStatus", + ) + operation_type: NodeGroupOperationType = proto.Field( + proto.ENUM, + number=5, + enum=NodeGroupOperationType, + ) + description: str = proto.Field( + proto.STRING, + number=6, + ) + labels: MutableMapping[str, str] = proto.MapField( + proto.STRING, + proto.STRING, + number=7, + ) + warnings: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=8, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/dataproc_v1/types/shared.py b/google/cloud/dataproc_v1/types/shared.py index e0fd8024..95603a13 100644 --- a/google/cloud/dataproc_v1/types/shared.py +++ b/google/cloud/dataproc_v1/types/shared.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore @@ -72,21 +74,21 @@ class RuntimeConfig(proto.Message): Optional. Optional custom container image for the job runtime environment. If not specified, a default container image will be used. 
- properties (Mapping[str, str]): + properties (MutableMapping[str, str]): Optional. A mapping of property names to values, which are used to configure workload execution. """ - version = proto.Field( + version: str = proto.Field( proto.STRING, number=1, ) - container_image = proto.Field( + container_image: str = proto.Field( proto.STRING, number=2, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=3, @@ -105,12 +107,12 @@ class EnvironmentConfig(proto.Message): workload has access to. """ - execution_config = proto.Field( + execution_config: "ExecutionConfig" = proto.Field( proto.MESSAGE, number=1, message="ExecutionConfig", ) - peripherals_config = proto.Field( + peripherals_config: "PeripheralsConfig" = proto.Field( proto.MESSAGE, number=2, message="PeripheralsConfig", @@ -140,7 +142,7 @@ class ExecutionConfig(proto.Message): to. This field is a member of `oneof`_ ``network``. - network_tags (Sequence[str]): + network_tags (MutableSequence[str]): Optional. Tags used for network traffic control. kms_key (str): @@ -148,25 +150,25 @@ class ExecutionConfig(proto.Message): encryption. """ - service_account = proto.Field( + service_account: str = proto.Field( proto.STRING, number=2, ) - network_uri = proto.Field( + network_uri: str = proto.Field( proto.STRING, number=4, oneof="network", ) - subnetwork_uri = proto.Field( + subnetwork_uri: str = proto.Field( proto.STRING, number=5, oneof="network", ) - network_tags = proto.RepeatedField( + network_tags: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=6, ) - kms_key = proto.Field( + kms_key: str = proto.Field( proto.STRING, number=7, ) @@ -185,7 +187,7 @@ class SparkHistoryServerConfig(proto.Message): - ``projects/[project_id]/regions/[region]/clusters/[cluster_name]`` """ - dataproc_cluster = proto.Field( + dataproc_cluster: str = proto.Field( proto.STRING, number=1, ) @@ -207,11 +209,11 @@ class PeripheralsConfig(proto.Message): configuration for the workload. """ - metastore_service = proto.Field( + metastore_service: str = proto.Field( proto.STRING, number=1, ) - spark_history_server_config = proto.Field( + spark_history_server_config: "SparkHistoryServerConfig" = proto.Field( proto.MESSAGE, number=2, message="SparkHistoryServerConfig", @@ -222,7 +224,7 @@ class RuntimeInfo(proto.Message): r"""Runtime information about workload execution. Attributes: - endpoints (Mapping[str, str]): + endpoints (MutableMapping[str, str]): Output only. Map of remote access endpoints (such as web interfaces and APIs) to their URIs. output_uri (str): @@ -233,16 +235,16 @@ class RuntimeInfo(proto.Message): of the diagnostics tarball. """ - endpoints = proto.MapField( + endpoints: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=1, ) - output_uri = proto.Field( + output_uri: str = proto.Field( proto.STRING, number=2, ) - diagnostic_output_uri = proto.Field( + diagnostic_output_uri: str = proto.Field( proto.STRING, number=3, ) @@ -257,7 +259,7 @@ class GkeClusterConfig(proto.Message): the same project and region as the Dataproc cluster (the GKE cluster can be zonal or regional). Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}' - node_pool_target (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget]): + node_pool_target (MutableSequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget]): Optional. GKE NodePools where workloads will be scheduled. At least one node pool must be assigned the 'default' role. 
Each role can be @@ -267,11 +269,11 @@ class GkeClusterConfig(proto.Message): constructs a default nodePoolTarget. """ - gke_cluster_target = proto.Field( + gke_cluster_target: str = proto.Field( proto.STRING, number=2, ) - node_pool_target = proto.RepeatedField( + node_pool_target: MutableSequence["GkeNodePoolTarget"] = proto.RepeatedField( proto.MESSAGE, number=3, message="GkeNodePoolTarget", @@ -303,17 +305,17 @@ class KubernetesClusterConfig(proto.Message): Dataproc cluster running on Kubernetes. """ - kubernetes_namespace = proto.Field( + kubernetes_namespace: str = proto.Field( proto.STRING, number=1, ) - gke_cluster_config = proto.Field( + gke_cluster_config: "GkeClusterConfig" = proto.Field( proto.MESSAGE, number=2, oneof="config", message="GkeClusterConfig", ) - kubernetes_software_config = proto.Field( + kubernetes_software_config: "KubernetesSoftwareConfig" = proto.Field( proto.MESSAGE, number=3, message="KubernetesSoftwareConfig", @@ -325,14 +327,14 @@ class KubernetesSoftwareConfig(proto.Message): on Kubernetes. Attributes: - component_version (Mapping[str, str]): + component_version (MutableMapping[str, str]): The components that should be installed in this Dataproc cluster. The key must be a string from the KubernetesComponent enumeration. The value is the version of the software to be installed. At least one entry must be specified. - properties (Mapping[str, str]): + properties (MutableMapping[str, str]): The properties to set on daemon config files. Property keys are specified in ``prefix:property`` format, @@ -345,12 +347,12 @@ class KubernetesSoftwareConfig(proto.Message): properties `__. """ - component_version = proto.MapField( + component_version: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=1, ) - properties = proto.MapField( + properties: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=2, @@ -364,7 +366,7 @@ class GkeNodePoolTarget(proto.Message): node_pool (str): Required. The target GKE NodePool. Format: 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}' - roles (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget.Role]): + roles (MutableSequence[google.cloud.dataproc_v1.types.GkeNodePoolTarget.Role]): Required. The types of role for a GKE NodePool node_pool_config (google.cloud.dataproc_v1.types.GkeNodePoolConfig): @@ -394,16 +396,16 @@ class Role(proto.Enum): SPARK_DRIVER = 3 SPARK_EXECUTOR = 4 - node_pool = proto.Field( + node_pool: str = proto.Field( proto.STRING, number=1, ) - roles = proto.RepeatedField( + roles: MutableSequence[Role] = proto.RepeatedField( proto.ENUM, number=2, enum=Role, ) - node_pool_config = proto.Field( + node_pool_config: "GkeNodePoolConfig" = proto.Field( proto.MESSAGE, number=3, message="GkeNodePoolConfig", @@ -417,7 +419,7 @@ class GkeNodePoolConfig(proto.Message): Attributes: config (google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodeConfig): Optional. The node pool configuration. - locations (Sequence[str]): + locations (MutableSequence[str]): Optional. The list of Compute Engine `zones `__ where NodePool's nodes will be located. @@ -447,7 +449,7 @@ class GkeNodeConfig(proto.Message): node, which is limited by the maximum number of disks allowable per zone (see `Adding Local SSDs `__). - accelerators (Sequence[google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig]): + accelerators (MutableSequence[google.cloud.dataproc_v1.types.GkeNodePoolConfig.GkeNodePoolAcceleratorConfig]): Optional. 
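Editor's note on the GKE node pool messages annotated above: GkeNodePoolTarget ties a GKE node pool to a Dataproc role, and GkeNodePoolConfig/GkeNodeConfig carry the pool's machine settings. A minimal sketch follows, using only fields defined in this module; the node_pool resource name, machine type, and zone are placeholders, not values from this change.

    from google.cloud import dataproc_v1

    pool_target = dataproc_v1.GkeNodePoolTarget(
        # Placeholder GKE node pool resource name.
        node_pool="projects/my-project/locations/us-central1/clusters/my-gke-cluster/nodePools/default-pool",
        # At least one node pool must carry the DEFAULT role.
        roles=["DEFAULT"],
        node_pool_config=dataproc_v1.GkeNodePoolConfig(
            config=dataproc_v1.GkeNodePoolConfig.GkeNodeConfig(
                machine_type="n1-standard-4",
                preemptible=False,
            ),
            locations=["us-central1-a"],
        ),
    )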
A list of `hardware accelerators `__ to attach to each node. @@ -460,24 +462,26 @@ class GkeNodeConfig(proto.Message): or Intel Sandy Bridge". """ - machine_type = proto.Field( + machine_type: str = proto.Field( proto.STRING, number=1, ) - preemptible = proto.Field( + preemptible: bool = proto.Field( proto.BOOL, number=10, ) - local_ssd_count = proto.Field( + local_ssd_count: int = proto.Field( proto.INT32, number=7, ) - accelerators = proto.RepeatedField( + accelerators: MutableSequence[ + "GkeNodePoolConfig.GkeNodePoolAcceleratorConfig" + ] = proto.RepeatedField( proto.MESSAGE, number=11, message="GkeNodePoolConfig.GkeNodePoolAcceleratorConfig", ) - min_cpu_platform = proto.Field( + min_cpu_platform: str = proto.Field( proto.STRING, number=13, ) @@ -495,11 +499,11 @@ class GkeNodePoolAcceleratorConfig(proto.Message): GPUs on Compute Engine). """ - accelerator_count = proto.Field( + accelerator_count: int = proto.Field( proto.INT64, number=1, ) - accelerator_type = proto.Field( + accelerator_type: str = proto.Field( proto.STRING, number=2, ) @@ -519,25 +523,25 @@ class GkeNodePoolAutoscalingConfig(proto.Message): up the cluster. """ - min_node_count = proto.Field( + min_node_count: int = proto.Field( proto.INT32, number=2, ) - max_node_count = proto.Field( + max_node_count: int = proto.Field( proto.INT32, number=3, ) - config = proto.Field( + config: GkeNodeConfig = proto.Field( proto.MESSAGE, number=2, message=GkeNodeConfig, ) - locations = proto.RepeatedField( + locations: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=13, ) - autoscaling = proto.Field( + autoscaling: GkeNodePoolAutoscalingConfig = proto.Field( proto.MESSAGE, number=4, message=GkeNodePoolAutoscalingConfig, diff --git a/google/cloud/dataproc_v1/types/workflow_templates.py b/google/cloud/dataproc_v1/types/workflow_templates.py index 17bff61c..08ec9eda 100644 --- a/google/cloud/dataproc_v1/types/workflow_templates.py +++ b/google/cloud/dataproc_v1/types/workflow_templates.py @@ -13,6 +13,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # +from typing import MutableMapping, MutableSequence + import proto # type: ignore from google.cloud.dataproc_v1.types import clusters @@ -84,7 +86,7 @@ class WorkflowTemplate(proto.Message): update_time (google.protobuf.timestamp_pb2.Timestamp): Output only. The time template was last updated. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Optional. The labels to associate with this template. These labels will be propagated to all jobs and clusters created by the workflow instance. @@ -101,10 +103,10 @@ class WorkflowTemplate(proto.Message): placement (google.cloud.dataproc_v1.types.WorkflowTemplatePlacement): Required. WorkflowTemplate scheduling information. - jobs (Sequence[google.cloud.dataproc_v1.types.OrderedJob]): + jobs (MutableSequence[google.cloud.dataproc_v1.types.OrderedJob]): Required. The Directed Acyclic Graph of Jobs to submit. - parameters (Sequence[google.cloud.dataproc_v1.types.TemplateParameter]): + parameters (MutableSequence[google.cloud.dataproc_v1.types.TemplateParameter]): Optional. Template parameters whose values are substituted into the template. Values for parameters must be provided when the template is @@ -123,49 +125,49 @@ class WorkflowTemplate(proto.Message): the cluster is deleted. 
""" - id = proto.Field( + id: str = proto.Field( proto.STRING, number=2, ) - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - version = proto.Field( + version: int = proto.Field( proto.INT32, number=3, ) - create_time = proto.Field( + create_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=4, message=timestamp_pb2.Timestamp, ) - update_time = proto.Field( + update_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=5, message=timestamp_pb2.Timestamp, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=6, ) - placement = proto.Field( + placement: "WorkflowTemplatePlacement" = proto.Field( proto.MESSAGE, number=7, message="WorkflowTemplatePlacement", ) - jobs = proto.RepeatedField( + jobs: MutableSequence["OrderedJob"] = proto.RepeatedField( proto.MESSAGE, number=8, message="OrderedJob", ) - parameters = proto.RepeatedField( + parameters: MutableSequence["TemplateParameter"] = proto.RepeatedField( proto.MESSAGE, number=9, message="TemplateParameter", ) - dag_timeout = proto.Field( + dag_timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=10, message=duration_pb2.Duration, @@ -199,13 +201,13 @@ class WorkflowTemplatePlacement(proto.Message): This field is a member of `oneof`_ ``placement``. """ - managed_cluster = proto.Field( + managed_cluster: "ManagedCluster" = proto.Field( proto.MESSAGE, number=1, oneof="placement", message="ManagedCluster", ) - cluster_selector = proto.Field( + cluster_selector: "ClusterSelector" = proto.Field( proto.MESSAGE, number=2, oneof="placement", @@ -228,7 +230,7 @@ class ManagedCluster(proto.Message): characters. config (google.cloud.dataproc_v1.types.ClusterConfig): Required. The cluster configuration. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Optional. The labels to associate with this cluster. Label keys must be between 1 and 63 characters long, and @@ -243,16 +245,16 @@ class ManagedCluster(proto.Message): cluster. """ - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=2, ) - config = proto.Field( + config: clusters.ClusterConfig = proto.Field( proto.MESSAGE, number=3, message=clusters.ClusterConfig, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=4, @@ -270,16 +272,16 @@ class ClusterSelector(proto.Message): selection of the cluster. If unspecified, the zone of the first cluster matching the selector is used. - cluster_labels (Mapping[str, str]): + cluster_labels (MutableMapping[str, str]): Required. The cluster labels. Cluster must have all labels to match. """ - zone = proto.Field( + zone: str = proto.Field( proto.STRING, number=1, ) - cluster_labels = proto.MapField( + cluster_labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=2, @@ -342,7 +344,7 @@ class OrderedJob(proto.Message): Optional. Job is a Presto job. This field is a member of `oneof`_ ``job_type``. - labels (Mapping[str, str]): + labels (MutableMapping[str, str]): Optional. The labels to associate with this job. Label keys must be between 1 and 63 characters long, and @@ -356,75 +358,75 @@ class OrderedJob(proto.Message): No more than 32 labels can be associated with a given job. scheduling (google.cloud.dataproc_v1.types.JobScheduling): Optional. Job scheduling configuration. - prerequisite_step_ids (Sequence[str]): + prerequisite_step_ids (MutableSequence[str]): Optional. 
The optional list of prerequisite job step_ids. If not specified, the job will start at the beginning of workflow. """ - step_id = proto.Field( + step_id: str = proto.Field( proto.STRING, number=1, ) - hadoop_job = proto.Field( + hadoop_job: gcd_jobs.HadoopJob = proto.Field( proto.MESSAGE, number=2, oneof="job_type", message=gcd_jobs.HadoopJob, ) - spark_job = proto.Field( + spark_job: gcd_jobs.SparkJob = proto.Field( proto.MESSAGE, number=3, oneof="job_type", message=gcd_jobs.SparkJob, ) - pyspark_job = proto.Field( + pyspark_job: gcd_jobs.PySparkJob = proto.Field( proto.MESSAGE, number=4, oneof="job_type", message=gcd_jobs.PySparkJob, ) - hive_job = proto.Field( + hive_job: gcd_jobs.HiveJob = proto.Field( proto.MESSAGE, number=5, oneof="job_type", message=gcd_jobs.HiveJob, ) - pig_job = proto.Field( + pig_job: gcd_jobs.PigJob = proto.Field( proto.MESSAGE, number=6, oneof="job_type", message=gcd_jobs.PigJob, ) - spark_r_job = proto.Field( + spark_r_job: gcd_jobs.SparkRJob = proto.Field( proto.MESSAGE, number=11, oneof="job_type", message=gcd_jobs.SparkRJob, ) - spark_sql_job = proto.Field( + spark_sql_job: gcd_jobs.SparkSqlJob = proto.Field( proto.MESSAGE, number=7, oneof="job_type", message=gcd_jobs.SparkSqlJob, ) - presto_job = proto.Field( + presto_job: gcd_jobs.PrestoJob = proto.Field( proto.MESSAGE, number=12, oneof="job_type", message=gcd_jobs.PrestoJob, ) - labels = proto.MapField( + labels: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=8, ) - scheduling = proto.Field( + scheduling: gcd_jobs.JobScheduling = proto.Field( proto.MESSAGE, number=9, message=gcd_jobs.JobScheduling, ) - prerequisite_step_ids = proto.RepeatedField( + prerequisite_step_ids: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=10, ) @@ -449,7 +451,7 @@ class TemplateParameter(proto.Message): must contain only capital letters (A-Z), numbers (0-9), and underscores (_), and must not start with a number. The maximum length is 40 characters. - fields (Sequence[str]): + fields (MutableSequence[str]): Required. Paths to all fields that the parameter replaces. A field is allowed to appear in at most one parameter's list of field paths. @@ -509,19 +511,19 @@ class TemplateParameter(proto.Message): this parameter's value. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - fields = proto.RepeatedField( + fields: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - description = proto.Field( + description: str = proto.Field( proto.STRING, number=3, ) - validation = proto.Field( + validation: "ParameterValidation" = proto.Field( proto.MESSAGE, number=4, message="ParameterValidation", @@ -549,13 +551,13 @@ class ParameterValidation(proto.Message): This field is a member of `oneof`_ ``validation_type``. """ - regex = proto.Field( + regex: "RegexValidation" = proto.Field( proto.MESSAGE, number=1, oneof="validation_type", message="RegexValidation", ) - values = proto.Field( + values: "ValueValidation" = proto.Field( proto.MESSAGE, number=2, oneof="validation_type", @@ -567,14 +569,14 @@ class RegexValidation(proto.Message): r"""Validation based on regular expressions. Attributes: - regexes (Sequence[str]): + regexes (MutableSequence[str]): Required. RE2 regular expressions used to validate the parameter's value. The value must match the regex in its entirety (substring matches are not sufficient). 
""" - regexes = proto.RepeatedField( + regexes: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) @@ -584,12 +586,12 @@ class ValueValidation(proto.Message): r"""Validation based on a list of allowed values. Attributes: - values (Sequence[str]): + values (MutableSequence[str]): Required. List of allowed values for the parameter. """ - values = proto.RepeatedField( + values: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=1, ) @@ -626,7 +628,7 @@ class WorkflowMetadata(proto.Message): Output only. The workflow state. cluster_name (str): Output only. The name of the target cluster. - parameters (Mapping[str, str]): + parameters (MutableMapping[str, str]): Map from parameter names to values that were used for those parameters. start_time (google.protobuf.timestamp_pb2.Timestamp): @@ -656,68 +658,68 @@ class State(proto.Enum): RUNNING = 2 DONE = 3 - template = proto.Field( + template: str = proto.Field( proto.STRING, number=1, ) - version = proto.Field( + version: int = proto.Field( proto.INT32, number=2, ) - create_cluster = proto.Field( + create_cluster: "ClusterOperation" = proto.Field( proto.MESSAGE, number=3, message="ClusterOperation", ) - graph = proto.Field( + graph: "WorkflowGraph" = proto.Field( proto.MESSAGE, number=4, message="WorkflowGraph", ) - delete_cluster = proto.Field( + delete_cluster: "ClusterOperation" = proto.Field( proto.MESSAGE, number=5, message="ClusterOperation", ) - state = proto.Field( + state: State = proto.Field( proto.ENUM, number=6, enum=State, ) - cluster_name = proto.Field( + cluster_name: str = proto.Field( proto.STRING, number=7, ) - parameters = proto.MapField( + parameters: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=8, ) - start_time = proto.Field( + start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=9, message=timestamp_pb2.Timestamp, ) - end_time = proto.Field( + end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=10, message=timestamp_pb2.Timestamp, ) - cluster_uuid = proto.Field( + cluster_uuid: str = proto.Field( proto.STRING, number=11, ) - dag_timeout = proto.Field( + dag_timeout: duration_pb2.Duration = proto.Field( proto.MESSAGE, number=12, message=duration_pb2.Duration, ) - dag_start_time = proto.Field( + dag_start_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=13, message=timestamp_pb2.Timestamp, ) - dag_end_time = proto.Field( + dag_end_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=14, message=timestamp_pb2.Timestamp, @@ -736,15 +738,15 @@ class ClusterOperation(proto.Message): Output only. Indicates the operation is done. """ - operation_id = proto.Field( + operation_id: str = proto.Field( proto.STRING, number=1, ) - error = proto.Field( + error: str = proto.Field( proto.STRING, number=2, ) - done = proto.Field( + done: bool = proto.Field( proto.BOOL, number=3, ) @@ -754,11 +756,11 @@ class WorkflowGraph(proto.Message): r"""The workflow graph. Attributes: - nodes (Sequence[google.cloud.dataproc_v1.types.WorkflowNode]): + nodes (MutableSequence[google.cloud.dataproc_v1.types.WorkflowNode]): Output only. The workflow nodes. """ - nodes = proto.RepeatedField( + nodes: MutableSequence["WorkflowNode"] = proto.RepeatedField( proto.MESSAGE, number=1, message="WorkflowNode", @@ -771,7 +773,7 @@ class WorkflowNode(proto.Message): Attributes: step_id (str): Output only. The name of the node. - prerequisite_step_ids (Sequence[str]): + prerequisite_step_ids (MutableSequence[str]): Output only. 
Node's prerequisite nodes. job_id (str): Output only. The job id; populated after the @@ -791,24 +793,24 @@ class NodeState(proto.Enum): COMPLETED = 4 FAILED = 5 - step_id = proto.Field( + step_id: str = proto.Field( proto.STRING, number=1, ) - prerequisite_step_ids = proto.RepeatedField( + prerequisite_step_ids: MutableSequence[str] = proto.RepeatedField( proto.STRING, number=2, ) - job_id = proto.Field( + job_id: str = proto.Field( proto.STRING, number=3, ) - state = proto.Field( + state: NodeState = proto.Field( proto.ENUM, number=5, enum=NodeState, ) - error = proto.Field( + error: str = proto.Field( proto.STRING, number=6, ) @@ -835,11 +837,11 @@ class CreateWorkflowTemplateRequest(proto.Message): create. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - template = proto.Field( + template: "WorkflowTemplate" = proto.Field( proto.MESSAGE, number=2, message="WorkflowTemplate", @@ -869,11 +871,11 @@ class GetWorkflowTemplateRequest(proto.Message): If unspecified, retrieves the current version. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - version = proto.Field( + version: int = proto.Field( proto.INT32, number=2, ) @@ -915,25 +917,25 @@ class InstantiateWorkflowTemplateRequest(proto.Message): The tag must contain only letters (a-z, A-Z), numbers (0-9), underscores (_), and hyphens (-). The maximum length is 40 characters. - parameters (Mapping[str, str]): + parameters (MutableMapping[str, str]): Optional. Map from parameter names to values that should be used for those parameters. Values may not exceed 1000 characters. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - version = proto.Field( + version: int = proto.Field( proto.INT32, number=2, ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=5, ) - parameters = proto.MapField( + parameters: MutableMapping[str, str] = proto.MapField( proto.STRING, proto.STRING, number=6, @@ -974,16 +976,16 @@ class InstantiateInlineWorkflowTemplateRequest(proto.Message): characters. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - template = proto.Field( + template: "WorkflowTemplate" = proto.Field( proto.MESSAGE, number=2, message="WorkflowTemplate", ) - request_id = proto.Field( + request_id: str = proto.Field( proto.STRING, number=3, ) @@ -1000,7 +1002,7 @@ class UpdateWorkflowTemplateRequest(proto.Message): version. """ - template = proto.Field( + template: "WorkflowTemplate" = proto.Field( proto.MESSAGE, number=1, message="WorkflowTemplate", @@ -1032,15 +1034,15 @@ class ListWorkflowTemplatesRequest(proto.Message): results. """ - parent = proto.Field( + parent: str = proto.Field( proto.STRING, number=1, ) - page_size = proto.Field( + page_size: int = proto.Field( proto.INT32, number=2, ) - page_token = proto.Field( + page_token: str = proto.Field( proto.STRING, number=3, ) @@ -1051,7 +1053,7 @@ class ListWorkflowTemplatesResponse(proto.Message): project. Attributes: - templates (Sequence[google.cloud.dataproc_v1.types.WorkflowTemplate]): + templates (MutableSequence[google.cloud.dataproc_v1.types.WorkflowTemplate]): Output only. WorkflowTemplates list. next_page_token (str): Output only. 
This token is included in the response if there @@ -1064,12 +1066,12 @@ class ListWorkflowTemplatesResponse(proto.Message): def raw_page(self): return self - templates = proto.RepeatedField( + templates: MutableSequence["WorkflowTemplate"] = proto.RepeatedField( proto.MESSAGE, number=1, message="WorkflowTemplate", ) - next_page_token = proto.Field( + next_page_token: str = proto.Field( proto.STRING, number=2, ) @@ -1100,11 +1102,11 @@ class DeleteWorkflowTemplateRequest(proto.Message): specified version. """ - name = proto.Field( + name: str = proto.Field( proto.STRING, number=1, ) - version = proto.Field( + version: int = proto.Field( proto.INT32, number=2, ) diff --git a/owlbot.py b/owlbot.py index a68f29df..29f936a1 100644 --- a/owlbot.py +++ b/owlbot.py @@ -1,4 +1,4 @@ -# Copyright 2018 Google LLC +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,47 +12,50 @@ # See the License for the specific language governing permissions and # limitations under the License. -"""This script is used to synthesize generated parts of this library.""" - -import re +import json +from pathlib import Path +import shutil import synthtool as s -from synthtool import gcp +import synthtool.gcp as gcp from synthtool.languages import python -common = gcp.CommonTemplates() +# ---------------------------------------------------------------------------- +# Copy the generated client from the owl-bot staging directory +# ---------------------------------------------------------------------------- + +clean_up_generated_samples = True -default_version = "v1" +# Load the default version defined in .repo-metadata.json. +default_version = json.load(open(".repo-metadata.json", "rt")).get( + "default_version" +) for library in s.get_staging_dirs(default_version): - # Rename `policy_` to `policy` to avoid breaking change in a GA library - # Only replace if a non-alphanumeric (\W) character follows `policy_` - s.replace(library / "**/*.py", "policy_(\W)", "policy\g<1>") - - s.move(library, excludes=["docs/index.rst", "nox.py", "README.rst", "setup.py"]) + if clean_up_generated_samples: + shutil.rmtree("samples/generated_samples", ignore_errors=True) + clean_up_generated_samples = False + s.move(library, excludes=["**/gapic_version.py"]) s.remove_staging_dirs() # ---------------------------------------------------------------------------- # Add templated files # ---------------------------------------------------------------------------- -templated_files = common.py_library( - samples=True, # set to True only if there are samples + +templated_files = gcp.CommonTemplates().py_library( + cov_level=100, microgenerator=True, - cov_level=100 + versions=gcp.common.detect_versions(path="./google", default_first=True), ) -s.move(templated_files, excludes=[".coveragerc"]) # microgenerator has a good .coveragerc file +s.move(templated_files, excludes=[".coveragerc", ".github/release-please.yml"]) -# ---------------------------------------------------------------------------- -# Samples templates -# ---------------------------------------------------------------------------- python.py_samples(skip_readmes=True) -python.configure_previous_major_version_branches() - # Temporarily disable warnings due to # https://github.com/googleapis/gapic-generator-python/issues/525 s.replace("noxfile.py", '[\"\']-W[\"\']', '# "-W"') -s.shell.run(["nox", "-s", "blacken"], hide_output=False) - +# run format session for all directories 
which have a noxfile +for noxfile in Path(".").glob("**/noxfile.py"): + s.shell.run(["nox", "-s", "blacken"], cwd=noxfile.parent, hide_output=False) diff --git a/release-please-config.json b/release-please-config.json new file mode 100644 index 00000000..4088aad7 --- /dev/null +++ b/release-please-config.json @@ -0,0 +1,24 @@ +{ + "$schema": "https://raw.githubusercontent.com/googleapis/release-please/main/schemas/config.json", + "packages": { + ".": { + "release-type": "python", + "extra-files": [ + "google/cloud/dataproc_v1/gapic_version.py", + "google/cloud/dataproc/gapic_version.py", + { + "type": "json", + "path": "samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json", + "jsonpath": "$.clientLibrary.version" + } + ] + } + }, + "release-type": "python", + "plugins": [ + { + "type": "sentence-case" + } + ], + "initial-version": "0.1.0" +} diff --git a/samples/generated_samples/dataproc_v1_generated_batch_controller_create_batch_async.py b/samples/generated_samples/dataproc_v1_generated_batch_controller_create_batch_async.py index 6ddc2433..3fccbdb6 100644 --- a/samples/generated_samples/dataproc_v1_generated_batch_controller_create_batch_async.py +++ b/samples/generated_samples/dataproc_v1_generated_batch_controller_create_batch_async.py @@ -52,7 +52,7 @@ async def sample_create_batch(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_cluster_controller_create_cluster_async.py b/samples/generated_samples/dataproc_v1_generated_cluster_controller_create_cluster_async.py index 02a47541..959706a7 100644 --- a/samples/generated_samples/dataproc_v1_generated_cluster_controller_create_cluster_async.py +++ b/samples/generated_samples/dataproc_v1_generated_cluster_controller_create_cluster_async.py @@ -54,7 +54,7 @@ async def sample_create_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_cluster_controller_delete_cluster_async.py b/samples/generated_samples/dataproc_v1_generated_cluster_controller_delete_cluster_async.py index 45efb13d..2bb27e4b 100644 --- a/samples/generated_samples/dataproc_v1_generated_cluster_controller_delete_cluster_async.py +++ b/samples/generated_samples/dataproc_v1_generated_cluster_controller_delete_cluster_async.py @@ -50,7 +50,7 @@ async def sample_delete_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_cluster_controller_diagnose_cluster_async.py b/samples/generated_samples/dataproc_v1_generated_cluster_controller_diagnose_cluster_async.py index 4ad32f03..262d0214 100644 --- a/samples/generated_samples/dataproc_v1_generated_cluster_controller_diagnose_cluster_async.py +++ b/samples/generated_samples/dataproc_v1_generated_cluster_controller_diagnose_cluster_async.py @@ -50,7 +50,7 @@ async def sample_diagnose_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_cluster_controller_start_cluster_async.py 
b/samples/generated_samples/dataproc_v1_generated_cluster_controller_start_cluster_async.py index 4d765d83..c422a025 100644 --- a/samples/generated_samples/dataproc_v1_generated_cluster_controller_start_cluster_async.py +++ b/samples/generated_samples/dataproc_v1_generated_cluster_controller_start_cluster_async.py @@ -50,7 +50,7 @@ async def sample_start_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_cluster_controller_stop_cluster_async.py b/samples/generated_samples/dataproc_v1_generated_cluster_controller_stop_cluster_async.py index 9e998bcc..d1c16454 100644 --- a/samples/generated_samples/dataproc_v1_generated_cluster_controller_stop_cluster_async.py +++ b/samples/generated_samples/dataproc_v1_generated_cluster_controller_stop_cluster_async.py @@ -50,7 +50,7 @@ async def sample_stop_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_cluster_controller_update_cluster_async.py b/samples/generated_samples/dataproc_v1_generated_cluster_controller_update_cluster_async.py index 2452f5ea..1408ebe0 100644 --- a/samples/generated_samples/dataproc_v1_generated_cluster_controller_update_cluster_async.py +++ b/samples/generated_samples/dataproc_v1_generated_cluster_controller_update_cluster_async.py @@ -55,7 +55,7 @@ async def sample_update_cluster(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_job_controller_submit_job_as_operation_async.py b/samples/generated_samples/dataproc_v1_generated_job_controller_submit_job_as_operation_async.py index e9924188..215bafca 100644 --- a/samples/generated_samples/dataproc_v1_generated_job_controller_submit_job_as_operation_async.py +++ b/samples/generated_samples/dataproc_v1_generated_job_controller_submit_job_as_operation_async.py @@ -54,7 +54,7 @@ async def sample_submit_job_as_operation(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_node_group_controller_create_node_group_async.py b/samples/generated_samples/dataproc_v1_generated_node_group_controller_create_node_group_async.py new file mode 100644 index 00000000..8898bc64 --- /dev/null +++ b/samples/generated_samples/dataproc_v1_generated_node_group_controller_create_node_group_async.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! 
+# +# Snippet for CreateNodeGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-dataproc + + +# [START dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import dataproc_v1 + + +async def sample_create_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerAsyncClient() + + # Initialize request argument(s) + node_group = dataproc_v1.NodeGroup() + node_group.roles = ['DRIVER'] + + request = dataproc_v1.CreateNodeGroupRequest( + parent="parent_value", + node_group=node_group, + ) + + # Make the request + operation = client.create_node_group(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async] diff --git a/samples/generated_samples/dataproc_v1_generated_node_group_controller_create_node_group_sync.py b/samples/generated_samples/dataproc_v1_generated_node_group_controller_create_node_group_sync.py new file mode 100644 index 00000000..584705f4 --- /dev/null +++ b/samples/generated_samples/dataproc_v1_generated_node_group_controller_create_node_group_sync.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for CreateNodeGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-dataproc + + +# [START dataproc_v1_generated_NodeGroupController_CreateNodeGroup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import dataproc_v1 + + +def sample_create_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerClient() + + # Initialize request argument(s) + node_group = dataproc_v1.NodeGroup() + node_group.roles = ['DRIVER'] + + request = dataproc_v1.CreateNodeGroupRequest( + parent="parent_value", + node_group=node_group, + ) + + # Make the request + operation = client.create_node_group(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END dataproc_v1_generated_NodeGroupController_CreateNodeGroup_sync] diff --git a/samples/generated_samples/dataproc_v1_generated_node_group_controller_get_node_group_async.py b/samples/generated_samples/dataproc_v1_generated_node_group_controller_get_node_group_async.py new file mode 100644 index 00000000..27249c74 --- /dev/null +++ b/samples/generated_samples/dataproc_v1_generated_node_group_controller_get_node_group_async.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNodeGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-dataproc + + +# [START dataproc_v1_generated_NodeGroupController_GetNodeGroup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import dataproc_v1 + + +async def sample_get_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerAsyncClient() + + # Initialize request argument(s) + request = dataproc_v1.GetNodeGroupRequest( + name="name_value", + ) + + # Make the request + response = await client.get_node_group(request=request) + + # Handle the response + print(response) + +# [END dataproc_v1_generated_NodeGroupController_GetNodeGroup_async] diff --git a/samples/generated_samples/dataproc_v1_generated_node_group_controller_get_node_group_sync.py b/samples/generated_samples/dataproc_v1_generated_node_group_controller_get_node_group_sync.py new file mode 100644 index 00000000..cb232497 --- /dev/null +++ b/samples/generated_samples/dataproc_v1_generated_node_group_controller_get_node_group_sync.py @@ -0,0 +1,52 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for GetNodeGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-dataproc + + +# [START dataproc_v1_generated_NodeGroupController_GetNodeGroup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import dataproc_v1 + + +def sample_get_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerClient() + + # Initialize request argument(s) + request = dataproc_v1.GetNodeGroupRequest( + name="name_value", + ) + + # Make the request + response = client.get_node_group(request=request) + + # Handle the response + print(response) + +# [END dataproc_v1_generated_NodeGroupController_GetNodeGroup_sync] diff --git a/samples/generated_samples/dataproc_v1_generated_node_group_controller_resize_node_group_async.py b/samples/generated_samples/dataproc_v1_generated_node_group_controller_resize_node_group_async.py new file mode 100644 index 00000000..cbf2188d --- /dev/null +++ b/samples/generated_samples/dataproc_v1_generated_node_group_controller_resize_node_group_async.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
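Editor's note on the GetNodeGroup samples above: they fetch a node group by its fully qualified resource name. The sketch below (not generated code) chains this with CreateNodeGroup using the sync client; it assumes the create operation's result is the created NodeGroup and that the placeholder parent names an existing cluster.

    from google.cloud import dataproc_v1

    client = dataproc_v1.NodeGroupControllerClient()

    operation = client.create_node_group(
        request=dataproc_v1.CreateNodeGroupRequest(
            # Placeholder parent: projects/{project}/regions/{region}/clusters/{cluster}.
            parent="projects/my-project/regions/us-central1/clusters/my-cluster",
            node_group=dataproc_v1.NodeGroup(roles=["DRIVER"]),
        )
    )
    created = operation.result()  # assumed to resolve to the created NodeGroup

    # Read it back using the server-assigned resource name.
    fetched = client.get_node_group(
        request=dataproc_v1.GetNodeGroupRequest(name=created.name)
    )
    print(fetched)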
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResizeNodeGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-dataproc + + +# [START dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import dataproc_v1 + + +async def sample_resize_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerAsyncClient() + + # Initialize request argument(s) + request = dataproc_v1.ResizeNodeGroupRequest( + name="name_value", + size=443, + ) + + # Make the request + operation = client.resize_node_group(request=request) + + print("Waiting for operation to complete...") + + response = (await operation).result() + + # Handle the response + print(response) + +# [END dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async] diff --git a/samples/generated_samples/dataproc_v1_generated_node_group_controller_resize_node_group_sync.py b/samples/generated_samples/dataproc_v1_generated_node_group_controller_resize_node_group_sync.py new file mode 100644 index 00000000..6a7f7616 --- /dev/null +++ b/samples/generated_samples/dataproc_v1_generated_node_group_controller_resize_node_group_sync.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ResizeNodeGroup +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-dataproc + + +# [START dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import dataproc_v1 + + +def sample_resize_node_group(): + # Create a client + client = dataproc_v1.NodeGroupControllerClient() + + # Initialize request argument(s) + request = dataproc_v1.ResizeNodeGroupRequest( + name="name_value", + size=443, + ) + + # Make the request + operation = client.resize_node_group(request=request) + + print("Waiting for operation to complete...") + + response = operation.result() + + # Handle the response + print(response) + +# [END dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_sync] diff --git a/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_inline_workflow_template_async.py b/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_inline_workflow_template_async.py index aa8120f3..74181006 100644 --- a/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_inline_workflow_template_async.py +++ b/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_inline_workflow_template_async.py @@ -55,7 +55,7 @@ async def sample_instantiate_inline_workflow_template(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_workflow_template_async.py b/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_workflow_template_async.py index 50cf5b45..d54d6904 100644 --- a/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_workflow_template_async.py +++ b/samples/generated_samples/dataproc_v1_generated_workflow_template_service_instantiate_workflow_template_async.py @@ -48,7 +48,7 @@ async def sample_instantiate_workflow_template(): print("Waiting for operation to complete...") - response = await operation.result() + response = (await operation).result() # Handle the response print(response) diff --git a/samples/generated_samples/snippet_metadata_dataproc_v1.json b/samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json similarity index 91% rename from samples/generated_samples/snippet_metadata_dataproc_v1.json rename to samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json index 58a3fe7c..f1a48076 100644 --- a/samples/generated_samples/snippet_metadata_dataproc_v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.dataproc.v1.json @@ -7,7 +7,8 @@ } ], "language": "PYTHON", - "name": "google-cloud-dataproc" + "name": "google-cloud-dataproc", + "version": "0.1.0" }, "snippets": [ { @@ -4064,6 +4065,513 @@ ], "title": "dataproc_v1_generated_job_controller_update_job_sync.py" }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerAsyncClient", + "shortName": "NodeGroupControllerAsyncClient" + }, + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerAsyncClient.create_node_group", + "method": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", + "service": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController", + "shortName": "NodeGroupController" + }, + "shortName": "CreateNodeGroup" + }, + "parameters": [ + { 
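Editor's note on the ResizeNodeGroup samples above: they pass only the required name and size, but ResizeNodeGroupRequest also documents an optional graceful YARN decommission window. A hedged variation on the sync sample follows; the resource name is a placeholder and the one-hour timeout is chosen arbitrarily (the documented maximum is one day).

    from google.cloud import dataproc_v1
    from google.protobuf import duration_pb2

    client = dataproc_v1.NodeGroupControllerClient()

    request = dataproc_v1.ResizeNodeGroupRequest(
        # Placeholder fully qualified node group name.
        name="projects/my-project/regions/us-central1/clusters/my-cluster/nodeGroups/my-group",
        size=3,
        # Give in-progress YARN work up to one hour before nodes are removed.
        graceful_decommission_timeout=duration_pb2.Duration(seconds=3600),
    )

    operation = client.resize_node_group(request=request)

    print("Waiting for operation to complete...")

    response = operation.result()
    print(response)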
+ "name": "request", + "type": "google.cloud.dataproc_v1.types.CreateNodeGroupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "node_group", + "type": "google.cloud.dataproc_v1.types.NodeGroup" + }, + { + "name": "node_group_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "create_node_group" + }, + "description": "Sample for CreateNodeGroup", + "file": "dataproc_v1_generated_node_group_controller_create_node_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "dataproc_v1_generated_NodeGroupController_CreateNodeGroup_async", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "dataproc_v1_generated_node_group_controller_create_node_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerClient", + "shortName": "NodeGroupControllerClient" + }, + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerClient.create_node_group", + "method": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController.CreateNodeGroup", + "service": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController", + "shortName": "NodeGroupController" + }, + "shortName": "CreateNodeGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.dataproc_v1.types.CreateNodeGroupRequest" + }, + { + "name": "parent", + "type": "str" + }, + { + "name": "node_group", + "type": "google.cloud.dataproc_v1.types.NodeGroup" + }, + { + "name": "node_group_id", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "create_node_group" + }, + "description": "Sample for CreateNodeGroup", + "file": "dataproc_v1_generated_node_group_controller_create_node_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "dataproc_v1_generated_NodeGroupController_CreateNodeGroup_sync", + "segments": [ + { + "end": 59, + "start": 27, + "type": "FULL" + }, + { + "end": 59, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 49, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 56, + "start": 50, + "type": "REQUEST_EXECUTION" + }, + { + "end": 60, + "start": 57, + "type": "RESPONSE_HANDLING" + } + ], + "title": "dataproc_v1_generated_node_group_controller_create_node_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerAsyncClient", + "shortName": "NodeGroupControllerAsyncClient" + }, + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerAsyncClient.get_node_group", + "method": { + "fullName": 
"google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", + "service": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController", + "shortName": "NodeGroupController" + }, + "shortName": "GetNodeGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.dataproc_v1.types.GetNodeGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.dataproc_v1.types.NodeGroup", + "shortName": "get_node_group" + }, + "description": "Sample for GetNodeGroup", + "file": "dataproc_v1_generated_node_group_controller_get_node_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "dataproc_v1_generated_NodeGroupController_GetNodeGroup_async", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "dataproc_v1_generated_node_group_controller_get_node_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerClient", + "shortName": "NodeGroupControllerClient" + }, + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerClient.get_node_group", + "method": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController.GetNodeGroup", + "service": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController", + "shortName": "NodeGroupController" + }, + "shortName": "GetNodeGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.dataproc_v1.types.GetNodeGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.dataproc_v1.types.NodeGroup", + "shortName": "get_node_group" + }, + "description": "Sample for GetNodeGroup", + "file": "dataproc_v1_generated_node_group_controller_get_node_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "dataproc_v1_generated_NodeGroupController_GetNodeGroup_sync", + "segments": [ + { + "end": 51, + "start": 27, + "type": "FULL" + }, + { + "end": 51, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 45, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 48, + "start": 46, + "type": "REQUEST_EXECUTION" + }, + { + "end": 52, + "start": 49, + "type": "RESPONSE_HANDLING" + } + ], + "title": "dataproc_v1_generated_node_group_controller_get_node_group_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerAsyncClient", + "shortName": "NodeGroupControllerAsyncClient" + }, + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerAsyncClient.resize_node_group", + "method": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", + "service": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController", + 
"shortName": "NodeGroupController" + }, + "shortName": "ResizeNodeGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.dataproc_v1.types.ResizeNodeGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "size", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation_async.AsyncOperation", + "shortName": "resize_node_group" + }, + "description": "Sample for ResizeNodeGroup", + "file": "dataproc_v1_generated_node_group_controller_resize_node_group_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_async", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "dataproc_v1_generated_node_group_controller_resize_node_group_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerClient", + "shortName": "NodeGroupControllerClient" + }, + "fullName": "google.cloud.dataproc_v1.NodeGroupControllerClient.resize_node_group", + "method": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController.ResizeNodeGroup", + "service": { + "fullName": "google.cloud.dataproc.v1.NodeGroupController", + "shortName": "NodeGroupController" + }, + "shortName": "ResizeNodeGroup" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.dataproc_v1.types.ResizeNodeGroupRequest" + }, + { + "name": "name", + "type": "str" + }, + { + "name": "size", + "type": "int" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.api_core.operation.Operation", + "shortName": "resize_node_group" + }, + "description": "Sample for ResizeNodeGroup", + "file": "dataproc_v1_generated_node_group_controller_resize_node_group_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "dataproc_v1_generated_NodeGroupController_ResizeNodeGroup_sync", + "segments": [ + { + "end": 56, + "start": 27, + "type": "FULL" + }, + { + "end": 56, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 46, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 53, + "start": 47, + "type": "REQUEST_EXECUTION" + }, + { + "end": 57, + "start": 54, + "type": "RESPONSE_HANDLING" + } + ], + "title": "dataproc_v1_generated_node_group_controller_resize_node_group_sync.py" + }, { "canonical": true, "clientMethod": { @@ -4746,7 +5254,7 @@ }, { "name": "parameters", - "type": "Mapping[str, str]" + "type": "MutableMapping[str, str]" }, { "name": "retry", @@ -4830,7 +5338,7 @@ }, { "name": "parameters", - "type": "Mapping[str, str]" + "type": "MutableMapping[str, str]" }, { "name": "retry", diff --git a/scripts/fixup_dataproc_v1_keywords.py b/scripts/fixup_dataproc_v1_keywords.py index 15acbb36..64ed85f6 100644 --- 
a/scripts/fixup_dataproc_v1_keywords.py +++ b/scripts/fixup_dataproc_v1_keywords.py @@ -43,6 +43,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'create_autoscaling_policy': ('parent', 'policy', ), 'create_batch': ('parent', 'batch', 'batch_id', 'request_id', ), 'create_cluster': ('project_id', 'region', 'cluster', 'request_id', 'action_on_failed_primary_workers', ), + 'create_node_group': ('parent', 'node_group', 'node_group_id', 'request_id', ), 'create_workflow_template': ('parent', 'template', ), 'delete_autoscaling_policy': ('name', ), 'delete_batch': ('name', ), @@ -54,6 +55,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'get_batch': ('name', ), 'get_cluster': ('project_id', 'region', 'cluster_name', ), 'get_job': ('project_id', 'region', 'job_id', ), + 'get_node_group': ('name', ), 'get_workflow_template': ('name', 'version', ), 'instantiate_inline_workflow_template': ('parent', 'template', 'request_id', ), 'instantiate_workflow_template': ('name', 'version', 'request_id', 'parameters', ), @@ -62,6 +64,7 @@ class dataprocCallTransformer(cst.CSTTransformer): 'list_clusters': ('project_id', 'region', 'filter', 'page_size', 'page_token', ), 'list_jobs': ('project_id', 'region', 'page_size', 'page_token', 'cluster_name', 'job_state_matcher', 'filter', ), 'list_workflow_templates': ('parent', 'page_size', 'page_token', ), + 'resize_node_group': ('name', 'size', 'request_id', 'graceful_decommission_timeout', ), 'start_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), 'stop_cluster': ('project_id', 'region', 'cluster_name', 'cluster_uuid', 'request_id', ), 'submit_job': ('project_id', 'region', 'job', 'request_id', ), diff --git a/setup.py b/setup.py index 38af9291..0aa8585d 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,5 @@ -# Copyright 2018 Google LLC +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -11,32 +12,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - +# import io import os -import setuptools +import setuptools # type: ignore - -# Package metadata. +package_root = os.path.abspath(os.path.dirname(__file__)) name = "google-cloud-dataproc" + + description = "Google Cloud Dataproc API client library" -version = "5.0.3" -# Should be one of: -# 'Development Status :: 3 - Alpha' -# 'Development Status :: 4 - Beta' -# 'Development Status :: 5 - Production/Stable' -release_status = "Development Status :: 5 - Production/Stable" + +version = {} +with open(os.path.join(package_root, "google/cloud/dataproc/gapic_version.py")) as fp: + exec(fp.read(), version) +version = version["__version__"] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + dependencies = [ - "google-api-core[grpc] >= 1.32.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*", + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", "proto-plus >= 1.22.0, <2.0.0dev", "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", ] -extras = {"libcst": "libcst >= 0.2.5"} - - -# Setup boilerplate below this line. 
+url = "https://github.com/googleapis/python-dataproc" package_root = os.path.abspath(os.path.dirname(__file__)) @@ -44,20 +48,16 @@ with io.open(readme_filename, encoding="utf-8") as readme_file: readme = readme_file.read() -# Only include packages under the 'google' namespace. Do not include tests, -# benchmarks, etc. packages = [ package for package in setuptools.PEP420PackageFinder.find() if package.startswith("google") ] -# Determine which namespaces are needed. namespaces = ["google"] if "google.cloud" in packages: namespaces.append("google.cloud") - setuptools.setup( name=name, version=version, @@ -66,7 +66,7 @@ author="Google LLC", author_email="googleapis-packages@google.com", license="Apache 2.0", - url="https://github.com/googleapis/python-dataproc", + url=url, classifiers=[ release_status, "Intended Audience :: Developers", @@ -82,13 +82,9 @@ ], platforms="Posix; MacOS X; Windows", packages=packages, + python_requires=">=3.7", namespace_packages=namespaces, install_requires=dependencies, - extras_require=extras, - python_requires=">=3.7", - scripts=[ - "scripts/fixup_dataproc_v1_keywords.py", - ], include_package_data=True, zip_safe=False, ) diff --git a/testing/constraints-3.10.txt b/testing/constraints-3.10.txt index e69de29b..ed7f9aed 100644 --- a/testing/constraints-3.10.txt +++ b/testing/constraints-3.10.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/testing/constraints-3.11.txt b/testing/constraints-3.11.txt index e69de29b..ed7f9aed 100644 --- a/testing/constraints-3.11.txt +++ b/testing/constraints-3.11.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt index 12b4d142..6c44adfe 100644 --- a/testing/constraints-3.7.txt +++ b/testing/constraints-3.7.txt @@ -1,11 +1,9 @@ # This constraints file is used to check that lower bounds # are correct in setup.py -# List *all* library dependencies and extras in this file. +# List all library dependencies and extras in this file. # Pin the version to the lower bound. -# -# e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", -# Then this file should have foo==1.14.0 -google-api-core==1.32.0 -libcst==0.2.5 +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.0 proto-plus==1.22.0 protobuf==3.19.5 diff --git a/testing/constraints-3.8.txt b/testing/constraints-3.8.txt index e69de29b..ed7f9aed 100644 --- a/testing/constraints-3.8.txt +++ b/testing/constraints-3.8.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/testing/constraints-3.9.txt b/testing/constraints-3.9.txt index e69de29b..ed7f9aed 100644 --- a/testing/constraints-3.9.txt +++ b/testing/constraints-3.9.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. 
+google-api-core +proto-plus +protobuf diff --git a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py index 2444b55e..dbb07acb 100644 --- a/tests/unit/gapic/dataproc_v1/test_cluster_controller.py +++ b/tests/unit/gapic/dataproc_v1/test_cluster_controller.py @@ -53,6 +53,7 @@ from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth @@ -3257,10 +3258,41 @@ def test_cluster_controller_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_service_path(): +def test_node_group_path(): project = "squid" - location = "clam" - service = "whelk" + region = "clam" + cluster = "whelk" + node_group = "octopus" + expected = "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}".format( + project=project, + region=region, + cluster=cluster, + node_group=node_group, + ) + actual = ClusterControllerClient.node_group_path( + project, region, cluster, node_group + ) + assert expected == actual + + +def test_parse_node_group_path(): + expected = { + "project": "oyster", + "region": "nudibranch", + "cluster": "cuttlefish", + "node_group": "mussel", + } + path = ClusterControllerClient.node_group_path(**expected) + + # Check that the path construction is reversible. + actual = ClusterControllerClient.parse_node_group_path(path) + assert expected == actual + + +def test_service_path(): + project = "winkle" + location = "nautilus" + service = "scallop" expected = "projects/{project}/locations/{location}/services/{service}".format( project=project, location=location, @@ -3272,9 +3304,9 @@ def test_service_path(): def test_parse_service_path(): expected = { - "project": "octopus", - "location": "oyster", - "service": "nudibranch", + "project": "abalone", + "location": "squid", + "service": "clam", } path = ClusterControllerClient.service_path(**expected) @@ -3284,7 +3316,7 @@ def test_parse_service_path(): def test_common_billing_account_path(): - billing_account = "cuttlefish" + billing_account = "whelk" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -3294,7 +3326,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "mussel", + "billing_account": "octopus", } path = ClusterControllerClient.common_billing_account_path(**expected) @@ -3304,7 +3336,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "winkle" + folder = "oyster" expected = "folders/{folder}".format( folder=folder, ) @@ -3314,7 +3346,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "nautilus", + "folder": "nudibranch", } path = ClusterControllerClient.common_folder_path(**expected) @@ -3324,7 +3356,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "scallop" + organization = "cuttlefish" expected = "organizations/{organization}".format( organization=organization, ) @@ -3334,7 +3366,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "abalone", + "organization": "mussel", } path = 
ClusterControllerClient.common_organization_path(**expected) @@ -3344,7 +3376,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "squid" + project = "winkle" expected = "projects/{project}".format( project=project, ) @@ -3354,7 +3386,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "clam", + "project": "nautilus", } path = ClusterControllerClient.common_project_path(**expected) @@ -3364,8 +3396,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "whelk" - location = "octopus" + project = "scallop" + location = "abalone" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -3376,8 +3408,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": "oyster", - "location": "nudibranch", + "project": "squid", + "location": "clam", } path = ClusterControllerClient.common_location_path(**expected) diff --git a/tests/unit/gapic/dataproc_v1/test_node_group_controller.py b/tests/unit/gapic/dataproc_v1/test_node_group_controller.py new file mode 100644 index 00000000..3046da76 --- /dev/null +++ b/tests/unit/gapic/dataproc_v1/test_node_group_controller.py @@ -0,0 +1,2220 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os + +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.dataproc_v1.services.node_group_controller import ( + NodeGroupControllerAsyncClient, +) +from google.cloud.dataproc_v1.services.node_group_controller import ( + NodeGroupControllerClient, +) +from google.cloud.dataproc_v1.services.node_group_controller import transports +from google.cloud.dataproc_v1.types import clusters +from google.cloud.dataproc_v1.types import node_groups +from google.cloud.dataproc_v1.types import operations +from google.longrunning import operations_pb2 +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return ( + "foo.googleapis.com" + if ("localhost" in client.DEFAULT_ENDPOINT) + else client.DEFAULT_ENDPOINT + ) + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert NodeGroupControllerClient._get_default_mtls_endpoint(None) is None + assert ( + NodeGroupControllerClient._get_default_mtls_endpoint(api_endpoint) + == api_mtls_endpoint + ) + assert ( + NodeGroupControllerClient._get_default_mtls_endpoint(api_mtls_endpoint) + == api_mtls_endpoint + ) + assert ( + NodeGroupControllerClient._get_default_mtls_endpoint(sandbox_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NodeGroupControllerClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) + == sandbox_mtls_endpoint + ) + assert ( + NodeGroupControllerClient._get_default_mtls_endpoint(non_googleapi) + == non_googleapi + ) + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (NodeGroupControllerClient, "grpc"), + (NodeGroupControllerAsyncClient, "grpc_asyncio"), + ], +) +def test_node_group_controller_client_from_service_account_info( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_info" + ) as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ("dataproc.googleapis.com:443") + + +@pytest.mark.parametrize( + "transport_class,transport_name", + [ + (transports.NodeGroupControllerGrpcTransport, "grpc"), + (transports.NodeGroupControllerGrpcAsyncIOTransport, "grpc_asyncio"), + ], +) +def test_node_group_controller_client_service_account_always_use_jwt( + transport_class, transport_name +): + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object( + service_account.Credentials, "with_always_use_jwt_access", create=True + ) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize( + "client_class,transport_name", + [ + (NodeGroupControllerClient, "grpc"), + (NodeGroupControllerAsyncClient, "grpc_asyncio"), + ], +) +def test_node_group_controller_client_from_service_account_file( + client_class, transport_name +): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object( + service_account.Credentials, "from_service_account_file" + ) as factory: + factory.return_value = creds + client = client_class.from_service_account_file( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json( + "dummy/file/path.json", transport=transport_name + ) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert 
client.transport._host == ("dataproc.googleapis.com:443") + + +def test_node_group_controller_client_get_transport_class(): + transport = NodeGroupControllerClient.get_transport_class() + available_transports = [ + transports.NodeGroupControllerGrpcTransport, + ] + assert transport in available_transports + + transport = NodeGroupControllerClient.get_transport_class("grpc") + assert transport == transports.NodeGroupControllerGrpcTransport + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + NodeGroupControllerClient, + transports.NodeGroupControllerGrpcTransport, + "grpc", + ), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +@mock.patch.object( + NodeGroupControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NodeGroupControllerClient), +) +@mock.patch.object( + NodeGroupControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NodeGroupControllerAsyncClient), +) +def test_node_group_controller_client_client_options( + client_class, transport_class, transport_name +): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(NodeGroupControllerClient, "get_transport_class") as gtc: + transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(NodeGroupControllerClient, "get_transport_class") as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions( + api_audience="https://language.googleapis.com" + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com", + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,use_client_cert_env", + [ + ( + NodeGroupControllerClient, + transports.NodeGroupControllerGrpcTransport, + "grpc", + "true", + ), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + "grpc_asyncio", + "true", + ), + ( + NodeGroupControllerClient, + transports.NodeGroupControllerGrpcTransport, + "grpc", + "false", + ), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + "grpc_asyncio", + "false", + ), + ], +) +@mock.patch.object( + NodeGroupControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NodeGroupControllerClient), +) +@mock.patch.object( + NodeGroupControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NodeGroupControllerAsyncClient), +) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_node_group_controller_client_mtls_env_auto( + client_class, transport_class, transport_name, use_client_cert_env +): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. 
+ + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + options = client_options.ClientOptions( + client_cert_source=client_cert_source_callback + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=client_cert_source_callback, + ): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env} + ): + with mock.patch.object(transport_class, "__init__") as patched: + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class", [NodeGroupControllerClient, NodeGroupControllerAsyncClient] +) +@mock.patch.object( + NodeGroupControllerClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NodeGroupControllerClient), +) +@mock.patch.object( + NodeGroupControllerAsyncClient, + "DEFAULT_ENDPOINT", + modify_default_endpoint(NodeGroupControllerAsyncClient), +) +def test_node_group_controller_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint + ) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source( + options + ) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=False, + ): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", + return_value=True, + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_client_cert_source, + ): + ( + api_endpoint, + cert_source, + ) = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + NodeGroupControllerClient, + transports.NodeGroupControllerGrpcTransport, + "grpc", + ), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + "grpc_asyncio", + ), + ], +) +def test_node_group_controller_client_client_options_scopes( + client_class, transport_class, transport_name +): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + NodeGroupControllerClient, + transports.NodeGroupControllerGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_node_group_controller_client_client_options_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +def test_node_group_controller_client_client_options_from_dict(): + with mock.patch( + "google.cloud.dataproc_v1.services.node_group_controller.transports.NodeGroupControllerGrpcTransport.__init__" + ) as grpc_transport: + grpc_transport.return_value = None + client = NodeGroupControllerClient( + client_options={"api_endpoint": "squid.clam.whelk"} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name,grpc_helpers", + [ + ( + NodeGroupControllerClient, + transports.NodeGroupControllerGrpcTransport, + "grpc", + grpc_helpers, + ), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + "grpc_asyncio", + grpc_helpers_async, + ), + ], +) +def test_node_group_controller_client_create_channel_credentials_file( + client_class, transport_class, transport_name, grpc_helpers +): + # Check the case credentials file is provided. + options = client_options.ClientOptions(credentials_file="credentials.json") + + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=None, + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "request_type", + [ + node_groups.CreateNodeGroupRequest, + dict, + ], +) +def test_create_node_group(request_type, transport: str = "grpc"): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.create_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.CreateNodeGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_node_group_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + client.create_node_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.CreateNodeGroupRequest() + + +@pytest.mark.asyncio +async def test_create_node_group_async( + transport: str = "grpc_asyncio", request_type=node_groups.CreateNodeGroupRequest +): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.create_node_group(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.CreateNodeGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_node_group_async_from_dict(): + await test_create_node_group_async(request_type=dict) + + +def test_create_node_group_field_headers(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = node_groups.CreateNodeGroupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.create_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_create_node_group_field_headers_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = node_groups.CreateNodeGroupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.create_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_create_node_group_flattened(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_node_group( + parent="parent_value", + node_group=clusters.NodeGroup(name="name_value"), + node_group_id="node_group_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].node_group + mock_val = clusters.NodeGroup(name="name_value") + assert arg == mock_val + arg = args[0].node_group_id + mock_val = "node_group_id_value" + assert arg == mock_val + + +def test_create_node_group_flattened_error(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_node_group( + node_groups.CreateNodeGroupRequest(), + parent="parent_value", + node_group=clusters.NodeGroup(name="name_value"), + node_group_id="node_group_id_value", + ) + + +@pytest.mark.asyncio +async def test_create_node_group_flattened_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_node_group( + parent="parent_value", + node_group=clusters.NodeGroup(name="name_value"), + node_group_id="node_group_id_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].node_group + mock_val = clusters.NodeGroup(name="name_value") + assert arg == mock_val + arg = args[0].node_group_id + mock_val = "node_group_id_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_create_node_group_flattened_error_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_node_group( + node_groups.CreateNodeGroupRequest(), + parent="parent_value", + node_group=clusters.NodeGroup(name="name_value"), + node_group_id="node_group_id_value", + ) + + +@pytest.mark.parametrize( + "request_type", + [ + node_groups.ResizeNodeGroupRequest, + dict, + ], +) +def test_resize_node_group(request_type, transport: str = "grpc"): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.resize_node_group(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.ResizeNodeGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_resize_node_group_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + client.resize_node_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.ResizeNodeGroupRequest() + + +@pytest.mark.asyncio +async def test_resize_node_group_async( + transport: str = "grpc_asyncio", request_type=node_groups.ResizeNodeGroupRequest +): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.resize_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.ResizeNodeGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_resize_node_group_async_from_dict(): + await test_resize_node_group_async(request_type=dict) + + +def test_resize_node_group_field_headers(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = node_groups.ResizeNodeGroupRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.resize_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_resize_node_group_field_headers_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = node_groups.ResizeNodeGroupRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.resize_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_resize_node_group_flattened(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.resize_node_group( + name="name_value", + size=443, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].size + mock_val = 443 + assert arg == mock_val + + +def test_resize_node_group_flattened_error(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.resize_node_group( + node_groups.ResizeNodeGroupRequest(), + name="name_value", + size=443, + ) + + +@pytest.mark.asyncio +async def test_resize_node_group_flattened_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.resize_node_group), "__call__" + ) as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.resize_node_group( + name="name_value", + size=443, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + arg = args[0].size + mock_val = 443 + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_resize_node_group_flattened_error_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.resize_node_group( + node_groups.ResizeNodeGroupRequest(), + name="name_value", + size=443, + ) + + +@pytest.mark.parametrize( + "request_type", + [ + node_groups.GetNodeGroupRequest, + dict, + ], +) +def test_get_node_group(request_type, transport: str = "grpc"): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.NodeGroup( + name="name_value", + roles=[clusters.NodeGroup.Role.DRIVER], + ) + response = client.get_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.GetNodeGroupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, clusters.NodeGroup) + assert response.name == "name_value" + assert response.roles == [clusters.NodeGroup.Role.DRIVER] + + +def test_get_node_group_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + client.get_node_group() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.GetNodeGroupRequest() + + +@pytest.mark.asyncio +async def test_get_node_group_async( + transport: str = "grpc_asyncio", request_type=node_groups.GetNodeGroupRequest +): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + clusters.NodeGroup( + name="name_value", + roles=[clusters.NodeGroup.Role.DRIVER], + ) + ) + response = await client.get_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == node_groups.GetNodeGroupRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, clusters.NodeGroup) + assert response.name == "name_value" + assert response.roles == [clusters.NodeGroup.Role.DRIVER] + + +@pytest.mark.asyncio +async def test_get_node_group_async_from_dict(): + await test_get_node_group_async(request_type=dict) + + +def test_get_node_group_field_headers(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = node_groups.GetNodeGroupRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + call.return_value = clusters.NodeGroup() + client.get_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_get_node_group_field_headers_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = node_groups.GetNodeGroupRequest() + + request.name = "name_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.NodeGroup()) + await client.get_node_group(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "name=name_value", + ) in kw["metadata"] + + +def test_get_node_group_flattened(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.NodeGroup() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_node_group( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +def test_get_node_group_flattened_error(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_node_group( + node_groups.GetNodeGroupRequest(), + name="name_value", + ) + + +@pytest.mark.asyncio +async def test_get_node_group_flattened_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.get_node_group), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = clusters.NodeGroup() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(clusters.NodeGroup()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.get_node_group( + name="name_value", + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = "name_value" + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_get_node_group_flattened_error_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.get_node_group( + node_groups.GetNodeGroupRequest(), + name="name_value", + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.NodeGroupControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.NodeGroupControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeGroupControllerClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.NodeGroupControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = NodeGroupControllerClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = NodeGroupControllerClient( + client_options=options, credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.NodeGroupControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = NodeGroupControllerClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.NodeGroupControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = NodeGroupControllerClient(transport=transport) + assert client.transport is transport + + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. 
+ transport = transports.NodeGroupControllerGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.NodeGroupControllerGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NodeGroupControllerGrpcTransport, + transports.NodeGroupControllerGrpcAsyncIOTransport, + ], +) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, "default") as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + + +@pytest.mark.parametrize( + "transport_name", + [ + "grpc", + ], +) +def test_transport_kind(transport_name): + transport = NodeGroupControllerClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.NodeGroupControllerGrpcTransport, + ) + + +def test_node_group_controller_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.NodeGroupControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json", + ) + + +def test_node_group_controller_base_transport(): + # Instantiate the base transport. + with mock.patch( + "google.cloud.dataproc_v1.services.node_group_controller.transports.NodeGroupControllerTransport.__init__" + ) as Transport: + Transport.return_value = None + transport = transports.NodeGroupControllerTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
+ methods = ( + "create_node_group", + "resize_node_group", + "get_node_group", + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + "kind", + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_node_group_controller_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch( + "google.cloud.dataproc_v1.services.node_group_controller.transports.NodeGroupControllerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeGroupControllerTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with( + "credentials.json", + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +def test_node_group_controller_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch( + "google.cloud.dataproc_v1.services.node_group_controller.transports.NodeGroupControllerTransport._prep_wrapped_messages" + ) as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.NodeGroupControllerTransport() + adc.assert_called_once() + + +def test_node_group_controller_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + NodeGroupControllerClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NodeGroupControllerGrpcTransport, + transports.NodeGroupControllerGrpcAsyncIOTransport, + ], +) +def test_node_group_controller_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, "default", autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NodeGroupControllerGrpcTransport, + transports.NodeGroupControllerGrpcAsyncIOTransport, + ], +) +def test_node_group_controller_transport_auth_gdch_credentials(transport_class): + host = "https://language.com" + api_audience_tests = [None, "https://language2.com"] + api_audience_expect = [host, "https://language2.com"] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, "default", autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock( + return_value=gdch_mock + ) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with(e) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.NodeGroupControllerGrpcTransport, grpc_helpers), + (transports.NodeGroupControllerGrpcAsyncIOTransport, grpc_helpers_async), + ], +) +def test_node_group_controller_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + + create_channel.assert_called_with( + "dataproc.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=("https://www.googleapis.com/auth/cloud-platform",), + scopes=["1", "2"], + default_host="dataproc.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.NodeGroupControllerGrpcTransport, + transports.NodeGroupControllerGrpcAsyncIOTransport, + ], +) +def test_node_group_controller_grpc_transport_client_cert_source_for_mtls( + transport_class, +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds, + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. 
+    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+            transport_class(
+                credentials=cred,
+                client_cert_source_for_mtls=client_cert_source_callback,
+            )
+            expected_cert, expected_key = client_cert_source_callback()
+            mock_ssl_cred.assert_called_once_with(
+                certificate_chain=expected_cert, private_key=expected_key
+            )
+
+
+@pytest.mark.parametrize(
+    "transport_name",
+    [
+        "grpc",
+        "grpc_asyncio",
+    ],
+)
+def test_node_group_controller_host_no_port(transport_name):
+    client = NodeGroupControllerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="dataproc.googleapis.com"
+        ),
+        transport=transport_name,
+    )
+    assert client.transport._host == ("dataproc.googleapis.com:443")
+
+
+@pytest.mark.parametrize(
+    "transport_name",
+    [
+        "grpc",
+        "grpc_asyncio",
+    ],
+)
+def test_node_group_controller_host_with_port(transport_name):
+    client = NodeGroupControllerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(
+            api_endpoint="dataproc.googleapis.com:8000"
+        ),
+        transport=transport_name,
+    )
+    assert client.transport._host == ("dataproc.googleapis.com:8000")
+
+
+def test_node_group_controller_grpc_transport_channel():
+    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.NodeGroupControllerGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_node_group_controller_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.NodeGroupControllerGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.NodeGroupControllerGrpcTransport,
+        transports.NodeGroupControllerGrpcAsyncIOTransport,
+    ],
+)
+def test_node_group_controller_transport_channel_mtls_with_client_cert_source(
+    transport_class,
+):
+    with mock.patch(
+        "grpc.ssl_channel_credentials", autospec=True
+    ) as grpc_ssl_channel_cred:
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_ssl_cred = mock.Mock()
+            grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+
+            cred = ga_credentials.AnonymousCredentials()
+            with pytest.warns(DeprecationWarning):
+                with mock.patch.object(google.auth, "default") as adc:
+                    adc.return_value = (cred, None)
+                    transport = transport_class(
+                        host="squid.clam.whelk",
+                        api_mtls_endpoint="mtls.squid.clam.whelk",
+                        client_cert_source=client_cert_source_callback,
+                    )
+                    adc.assert_called_once()
+
+            grpc_ssl_channel_cred.assert_called_once_with(
+                certificate_chain=b"cert bytes", private_key=b"key bytes"
+            )
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+            assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize(
+    "transport_class",
+    [
+        transports.NodeGroupControllerGrpcTransport,
+        transports.NodeGroupControllerGrpcAsyncIOTransport,
+    ],
+)
+def test_node_group_controller_transport_channel_mtls_with_adc(transport_class):
+    mock_ssl_cred = mock.Mock()
+    with mock.patch.multiple(
+        "google.auth.transport.grpc.SslCredentials",
+        __init__=mock.Mock(return_value=None),
+        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
+    ):
+        with mock.patch.object(
+            transport_class, "create_channel"
+        ) as grpc_create_channel:
+            mock_grpc_channel = mock.Mock()
+            grpc_create_channel.return_value = mock_grpc_channel
+            mock_cred = mock.Mock()
+
+            with pytest.warns(DeprecationWarning):
+                transport = transport_class(
+                    host="squid.clam.whelk",
+                    credentials=mock_cred,
+                    api_mtls_endpoint="mtls.squid.clam.whelk",
+                    client_cert_source=None,
+                )
+
+            grpc_create_channel.assert_called_once_with(
+                "mtls.squid.clam.whelk:443",
+                credentials=mock_cred,
+                credentials_file=None,
+                scopes=None,
+                ssl_credentials=mock_ssl_cred,
+                quota_project_id=None,
+                options=[
+                    ("grpc.max_send_message_length", -1),
+                    ("grpc.max_receive_message_length", -1),
+                ],
+            )
+            assert transport.grpc_channel == mock_grpc_channel
+
+
+def test_node_group_controller_grpc_lro_client():
+    client = NodeGroupControllerClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_node_group_controller_grpc_lro_async_client():
+    client = NodeGroupControllerAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc_asyncio",
+    )
+    transport = client.transport
+
+    # Ensure that we have an api-core operations client.
+    assert isinstance(
+        transport.operations_client,
+        operations_v1.OperationsAsyncClient,
+    )
+
+    # Ensure that subsequent calls to the property return the exact same object.
+    assert transport.operations_client is transport.operations_client
+
+
+def test_node_group_path():
+    project = "squid"
+    region = "clam"
+    cluster = "whelk"
+    node_group = "octopus"
+    expected = "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}".format(
+        project=project,
+        region=region,
+        cluster=cluster,
+        node_group=node_group,
+    )
+    actual = NodeGroupControllerClient.node_group_path(
+        project, region, cluster, node_group
+    )
+    assert expected == actual
+
+
+def test_parse_node_group_path():
+    expected = {
+        "project": "oyster",
+        "region": "nudibranch",
+        "cluster": "cuttlefish",
+        "node_group": "mussel",
+    }
+    path = NodeGroupControllerClient.node_group_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = NodeGroupControllerClient.parse_node_group_path(path)
+    assert expected == actual
+
+
+def test_common_billing_account_path():
+    billing_account = "winkle"
+    expected = "billingAccounts/{billing_account}".format(
+        billing_account=billing_account,
+    )
+    actual = NodeGroupControllerClient.common_billing_account_path(billing_account)
+    assert expected == actual
+
+
+def test_parse_common_billing_account_path():
+    expected = {
+        "billing_account": "nautilus",
+    }
+    path = NodeGroupControllerClient.common_billing_account_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = NodeGroupControllerClient.parse_common_billing_account_path(path)
+    assert expected == actual
+
+
+def test_common_folder_path():
+    folder = "scallop"
+    expected = "folders/{folder}".format(
+        folder=folder,
+    )
+    actual = NodeGroupControllerClient.common_folder_path(folder)
+    assert expected == actual
+
+
+def test_parse_common_folder_path():
+    expected = {
+        "folder": "abalone",
+    }
+    path = NodeGroupControllerClient.common_folder_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = NodeGroupControllerClient.parse_common_folder_path(path)
+    assert expected == actual
+
+
+def test_common_organization_path():
+    organization = "squid"
+    expected = "organizations/{organization}".format(
+        organization=organization,
+    )
+    actual = NodeGroupControllerClient.common_organization_path(organization)
+    assert expected == actual
+
+
+def test_parse_common_organization_path():
+    expected = {
+        "organization": "clam",
+    }
+    path = NodeGroupControllerClient.common_organization_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = NodeGroupControllerClient.parse_common_organization_path(path)
+    assert expected == actual
+
+
+def test_common_project_path():
+    project = "whelk"
+    expected = "projects/{project}".format(
+        project=project,
+    )
+    actual = NodeGroupControllerClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "octopus",
+    }
+    path = NodeGroupControllerClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+ actual = NodeGroupControllerClient.parse_common_project_path(path) + assert expected == actual + + +def test_common_location_path(): + project = "oyster" + location = "nudibranch" + expected = "projects/{project}/locations/{location}".format( + project=project, + location=location, + ) + actual = NodeGroupControllerClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "cuttlefish", + "location": "mussel", + } + path = NodeGroupControllerClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = NodeGroupControllerClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object( + transports.NodeGroupControllerTransport, "_prep_wrapped_messages" + ) as prep: + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object( + transports.NodeGroupControllerTransport, "_prep_wrapped_messages" + ) as prep: + transport_class = NodeGroupControllerClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = NodeGroupControllerAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object( + type(getattr(client.transport, "grpc_channel")), "close" + ) as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + with mock.patch.object( + type(getattr(client.transport, close_name)), "close" + ) as close: + with client: + close.assert_not_called() + close.assert_called_once() + + +def test_client_ctx(): + transports = [ + "grpc", + ] + for transport in transports: + client = NodeGroupControllerClient( + credentials=ga_credentials.AnonymousCredentials(), transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + + +@pytest.mark.parametrize( + "client_class,transport_class", + [ + (NodeGroupControllerClient, transports.NodeGroupControllerGrpcTransport), + ( + NodeGroupControllerAsyncClient, + transports.NodeGroupControllerGrpcAsyncIOTransport, + ), + ], +) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py index 1fe49bec..a746971d 100644 --- a/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py +++ b/tests/unit/gapic/dataproc_v1/test_workflow_template_service.py @@ -56,6 +56,7 @@ from google.longrunning import operations_pb2 from google.oauth2 import service_account from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore import google.auth @@ -3208,10 +3209,41 @@ def test_workflow_template_service_grpc_lro_async_client(): assert transport.operations_client is transport.operations_client -def test_service_path(): +def test_node_group_path(): project = "squid" - location = "clam" - service = "whelk" + region = "clam" + cluster = "whelk" + node_group = "octopus" + expected = "projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{node_group}".format( + project=project, + region=region, + cluster=cluster, + node_group=node_group, + ) + actual = WorkflowTemplateServiceClient.node_group_path( + project, region, cluster, node_group + ) + assert expected == actual + + +def test_parse_node_group_path(): + expected = { + "project": "oyster", + "region": "nudibranch", + "cluster": "cuttlefish", + "node_group": "mussel", + } + path = WorkflowTemplateServiceClient.node_group_path(**expected) + + # Check that the path construction is reversible. 
+ actual = WorkflowTemplateServiceClient.parse_node_group_path(path) + assert expected == actual + + +def test_service_path(): + project = "winkle" + location = "nautilus" + service = "scallop" expected = "projects/{project}/locations/{location}/services/{service}".format( project=project, location=location, @@ -3223,9 +3255,9 @@ def test_service_path(): def test_parse_service_path(): expected = { - "project": "octopus", - "location": "oyster", - "service": "nudibranch", + "project": "abalone", + "location": "squid", + "service": "clam", } path = WorkflowTemplateServiceClient.service_path(**expected) @@ -3235,9 +3267,9 @@ def test_parse_service_path(): def test_workflow_template_path(): - project = "cuttlefish" - region = "mussel" - workflow_template = "winkle" + project = "whelk" + region = "octopus" + workflow_template = "oyster" expected = "projects/{project}/regions/{region}/workflowTemplates/{workflow_template}".format( project=project, region=region, @@ -3251,9 +3283,9 @@ def test_workflow_template_path(): def test_parse_workflow_template_path(): expected = { - "project": "nautilus", - "region": "scallop", - "workflow_template": "abalone", + "project": "nudibranch", + "region": "cuttlefish", + "workflow_template": "mussel", } path = WorkflowTemplateServiceClient.workflow_template_path(**expected) @@ -3263,7 +3295,7 @@ def test_parse_workflow_template_path(): def test_common_billing_account_path(): - billing_account = "squid" + billing_account = "winkle" expected = "billingAccounts/{billing_account}".format( billing_account=billing_account, ) @@ -3273,7 +3305,7 @@ def test_common_billing_account_path(): def test_parse_common_billing_account_path(): expected = { - "billing_account": "clam", + "billing_account": "nautilus", } path = WorkflowTemplateServiceClient.common_billing_account_path(**expected) @@ -3283,7 +3315,7 @@ def test_parse_common_billing_account_path(): def test_common_folder_path(): - folder = "whelk" + folder = "scallop" expected = "folders/{folder}".format( folder=folder, ) @@ -3293,7 +3325,7 @@ def test_common_folder_path(): def test_parse_common_folder_path(): expected = { - "folder": "octopus", + "folder": "abalone", } path = WorkflowTemplateServiceClient.common_folder_path(**expected) @@ -3303,7 +3335,7 @@ def test_parse_common_folder_path(): def test_common_organization_path(): - organization = "oyster" + organization = "squid" expected = "organizations/{organization}".format( organization=organization, ) @@ -3313,7 +3345,7 @@ def test_common_organization_path(): def test_parse_common_organization_path(): expected = { - "organization": "nudibranch", + "organization": "clam", } path = WorkflowTemplateServiceClient.common_organization_path(**expected) @@ -3323,7 +3355,7 @@ def test_parse_common_organization_path(): def test_common_project_path(): - project = "cuttlefish" + project = "whelk" expected = "projects/{project}".format( project=project, ) @@ -3333,7 +3365,7 @@ def test_common_project_path(): def test_parse_common_project_path(): expected = { - "project": "mussel", + "project": "octopus", } path = WorkflowTemplateServiceClient.common_project_path(**expected) @@ -3343,8 +3375,8 @@ def test_parse_common_project_path(): def test_common_location_path(): - project = "winkle" - location = "nautilus" + project = "oyster" + location = "nudibranch" expected = "projects/{project}/locations/{location}".format( project=project, location=location, @@ -3355,8 +3387,8 @@ def test_common_location_path(): def test_parse_common_location_path(): expected = { - "project": 
"scallop", - "location": "abalone", + "project": "cuttlefish", + "location": "mussel", } path = WorkflowTemplateServiceClient.common_location_path(**expected)