diff --git a/google/cloud/language/__init__.py b/google/cloud/language/__init__.py index 3e7674b2..a6faa72d 100644 --- a/google/cloud/language/__init__.py +++ b/google/cloud/language/__init__.py @@ -44,6 +44,8 @@ EncodingType, Entity, EntityMention, + ModerateTextRequest, + ModerateTextResponse, PartOfSpeech, Sentence, Sentiment, @@ -72,6 +74,8 @@ "Document", "Entity", "EntityMention", + "ModerateTextRequest", + "ModerateTextResponse", "PartOfSpeech", "Sentence", "Sentiment", diff --git a/google/cloud/language_v1/__init__.py b/google/cloud/language_v1/__init__.py index 6df3e48a..166ac220 100644 --- a/google/cloud/language_v1/__init__.py +++ b/google/cloud/language_v1/__init__.py @@ -39,6 +39,8 @@ EncodingType, Entity, EntityMention, + ModerateTextRequest, + ModerateTextResponse, PartOfSpeech, Sentence, Sentiment, @@ -68,6 +70,8 @@ "Entity", "EntityMention", "LanguageServiceClient", + "ModerateTextRequest", + "ModerateTextResponse", "PartOfSpeech", "Sentence", "Sentiment", diff --git a/google/cloud/language_v1/gapic_metadata.json b/google/cloud/language_v1/gapic_metadata.json index e475aad9..fa2c065a 100644 --- a/google/cloud/language_v1/gapic_metadata.json +++ b/google/cloud/language_v1/gapic_metadata.json @@ -39,6 +39,11 @@ "methods": [ "classify_text" ] + }, + "ModerateText": { + "methods": [ + "moderate_text" + ] } } }, @@ -74,6 +79,11 @@ "methods": [ "classify_text" ] + }, + "ModerateText": { + "methods": [ + "moderate_text" + ] } } }, @@ -109,6 +119,11 @@ "methods": [ "classify_text" ] + }, + "ModerateText": { + "methods": [ + "moderate_text" + ] } } } diff --git a/google/cloud/language_v1/services/language_service/async_client.py b/google/cloud/language_v1/services/language_service/async_client.py index 8389f19c..54fda3ae 100644 --- a/google/cloud/language_v1/services/language_service/async_client.py +++ b/google/cloud/language_v1/services/language_service/async_client.py @@ -792,6 +792,104 @@ async def sample_classify_text(): # Done; return the response. return response + async def moderate_text( + self, + request: Optional[Union[language_service.ModerateTextRequest, dict]] = None, + *, + document: Optional[language_service.Document] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ModerateTextResponse: + r"""Moderates a document for harmful and sensitive + categories. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import language_v1 + + async def sample_moderate_text(): + # Create a client + client = language_v1.LanguageServiceAsyncClient() + + # Initialize request argument(s) + document = language_v1.Document() + document.content = "content_value" + + request = language_v1.ModerateTextRequest( + document=document, + ) + + # Make the request + response = await client.moderate_text(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.language_v1.types.ModerateTextRequest, dict]]): + The request object. The document moderation request + message. 
+ document (:class:`google.cloud.language_v1.types.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.language_v1.types.ModerateTextResponse: + The document moderation response + message. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.ModerateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.moderate_text, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def annotate_text( self, request: Optional[Union[language_service.AnnotateTextRequest, dict]] = None, diff --git a/google/cloud/language_v1/services/language_service/client.py b/google/cloud/language_v1/services/language_service/client.py index d79cffc5..e9b4ccec 100644 --- a/google/cloud/language_v1/services/language_service/client.py +++ b/google/cloud/language_v1/services/language_service/client.py @@ -951,6 +951,104 @@ def sample_classify_text(): # Done; return the response. return response + def moderate_text( + self, + request: Optional[Union[language_service.ModerateTextRequest, dict]] = None, + *, + document: Optional[language_service.Document] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ModerateTextResponse: + r"""Moderates a document for harmful and sensitive + categories. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import language_v1 + + def sample_moderate_text(): + # Create a client + client = language_v1.LanguageServiceClient() + + # Initialize request argument(s) + document = language_v1.Document() + document.content = "content_value" + + request = language_v1.ModerateTextRequest( + document=document, + ) + + # Make the request + response = client.moderate_text(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.language_v1.types.ModerateTextRequest, dict]): + The request object. 
The document moderation request + message. + document (google.cloud.language_v1.types.Document): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.language_v1.types.ModerateTextResponse: + The document moderation response + message. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.ModerateTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.ModerateTextRequest): + request = language_service.ModerateTextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.moderate_text] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def annotate_text( self, request: Optional[Union[language_service.AnnotateTextRequest, dict]] = None, diff --git a/google/cloud/language_v1/services/language_service/transports/base.py b/google/cloud/language_v1/services/language_service/transports/base.py index d8bfc5d9..c900f0af 100644 --- a/google/cloud/language_v1/services/language_service/transports/base.py +++ b/google/cloud/language_v1/services/language_service/transports/base.py @@ -200,6 +200,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=600.0, client_info=client_info, ), + self.moderate_text: gapic_v1.method.wrap_method( + self.moderate_text, + default_timeout=None, + client_info=client_info, + ), self.annotate_text: gapic_v1.method.wrap_method( self.annotate_text, default_retry=retries.Retry( @@ -286,6 +291,18 @@ def classify_text( ]: raise NotImplementedError() + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], + Union[ + language_service.ModerateTextResponse, + Awaitable[language_service.ModerateTextResponse], + ], + ]: + raise NotImplementedError() + @property def annotate_text( self, diff --git a/google/cloud/language_v1/services/language_service/transports/grpc.py b/google/cloud/language_v1/services/language_service/transports/grpc.py index f46b19fd..dd9abdb0 100644 --- a/google/cloud/language_v1/services/language_service/transports/grpc.py +++ b/google/cloud/language_v1/services/language_service/transports/grpc.py @@ -380,6 +380,35 @@ def classify_text( ) return self._stubs["classify_text"] + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], language_service.ModerateTextResponse + ]: + r"""Return a callable for the moderate text method over gRPC. + + Moderates a document for harmful and sensitive + categories. + + Returns: + Callable[[~.ModerateTextRequest], + ~.ModerateTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "moderate_text" not in self._stubs: + self._stubs["moderate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/ModerateText", + request_serializer=language_service.ModerateTextRequest.serialize, + response_deserializer=language_service.ModerateTextResponse.deserialize, + ) + return self._stubs["moderate_text"] + @property def annotate_text( self, diff --git a/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py b/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py index dd19f8f1..00e1ea21 100644 --- a/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py +++ b/google/cloud/language_v1/services/language_service/transports/grpc_asyncio.py @@ -385,6 +385,36 @@ def classify_text( ) return self._stubs["classify_text"] + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], + Awaitable[language_service.ModerateTextResponse], + ]: + r"""Return a callable for the moderate text method over gRPC. + + Moderates a document for harmful and sensitive + categories. + + Returns: + Callable[[~.ModerateTextRequest], + Awaitable[~.ModerateTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "moderate_text" not in self._stubs: + self._stubs["moderate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1.LanguageService/ModerateText", + request_serializer=language_service.ModerateTextRequest.serialize, + response_deserializer=language_service.ModerateTextResponse.deserialize, + ) + return self._stubs["moderate_text"] + @property def annotate_text( self, diff --git a/google/cloud/language_v1/services/language_service/transports/rest.py b/google/cloud/language_v1/services/language_service/transports/rest.py index d92e6672..990da259 100644 --- a/google/cloud/language_v1/services/language_service/transports/rest.py +++ b/google/cloud/language_v1/services/language_service/transports/rest.py @@ -111,6 +111,14 @@ def post_classify_text(self, response): logging.log(f"Received response: {response}") return response + def pre_moderate_text(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_moderate_text(self, response): + logging.log(f"Received response: {response}") + return response + transport = LanguageServiceRestTransport(interceptor=MyCustomLanguageServiceInterceptor()) client = LanguageServiceClient(transport=transport) @@ -257,6 +265,29 @@ def post_classify_text( """ return response + def pre_moderate_text( + self, + request: language_service.ModerateTextRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[language_service.ModerateTextRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for moderate_text + + Override in a subclass to manipulate the request or metadata + before they are sent to the LanguageService server. + """ + return request, metadata + + def post_moderate_text( + self, response: language_service.ModerateTextResponse + ) -> language_service.ModerateTextResponse: + """Post-rpc interceptor for moderate_text + + Override in a subclass to manipulate the response + after it is returned by the LanguageService server but before + it is returned to user code. + """ + return response + @dataclasses.dataclass class LanguageServiceRestStub: @@ -945,6 +976,104 @@ def __call__( resp = self._interceptor.post_classify_text(resp) return resp + class _ModerateText(LanguageServiceRestStub): + def __hash__(self): + return hash("ModerateText") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: language_service.ModerateTextRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ModerateTextResponse: + r"""Call the moderate text method over HTTP. + + Args: + request (~.language_service.ModerateTextRequest): + The request object. The document moderation request + message. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.ModerateTextResponse: + The document moderation response + message. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1/documents:moderateText", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_moderate_text(request, metadata) + pb_request = language_service.ModerateTextRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = language_service.ModerateTextResponse() + pb_resp = language_service.ModerateTextResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_moderate_text(resp) + return resp + @property def analyze_entities( self, @@ -1008,6 +1137,16 @@ def classify_text( # In C++ this would require a dynamic_cast return self._ClassifyText(self._session, self._host, self._interceptor) # type: ignore + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], language_service.ModerateTextResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ModerateText(self._session, self._host, self._interceptor) # type: ignore + @property def kind(self) -> str: return "rest" diff --git a/google/cloud/language_v1/types/__init__.py b/google/cloud/language_v1/types/__init__.py index b4b20c9c..4f98d4d3 100644 --- a/google/cloud/language_v1/types/__init__.py +++ b/google/cloud/language_v1/types/__init__.py @@ -33,6 +33,8 @@ EncodingType, Entity, EntityMention, + ModerateTextRequest, + ModerateTextResponse, PartOfSpeech, Sentence, Sentiment, @@ -59,6 +61,8 @@ "Document", "Entity", "EntityMention", + "ModerateTextRequest", + "ModerateTextResponse", "PartOfSpeech", "Sentence", "Sentiment", diff --git a/google/cloud/language_v1/types/language_service.py b/google/cloud/language_v1/types/language_service.py index bd330ffe..da423978 100644 --- a/google/cloud/language_v1/types/language_service.py +++ b/google/cloud/language_v1/types/language_service.py @@ -44,6 +44,8 @@ "AnalyzeSyntaxResponse", "ClassifyTextRequest", "ClassifyTextResponse", + "ModerateTextRequest", + "ModerateTextResponse", "AnnotateTextRequest", "AnnotateTextResponse", }, @@ -1194,9 +1196,8 @@ class ClassificationCategory(proto.Message): Attributes: name (str): - The name of the category representing the document, from the - `predefined - taxonomy `__. 
+ The name of the category representing the + document. confidence (float): The classifier's confidence of the category. Number represents how certain the classifier is @@ -1536,6 +1537,39 @@ class ClassifyTextResponse(proto.Message): ) +class ModerateTextRequest(proto.Message): + r"""The document moderation request message. + + Attributes: + document (google.cloud.language_v1.types.Document): + Required. Input document. + """ + + document: "Document" = proto.Field( + proto.MESSAGE, + number=1, + message="Document", + ) + + +class ModerateTextResponse(proto.Message): + r"""The document moderation response message. + + Attributes: + moderation_categories (MutableSequence[google.cloud.language_v1.types.ClassificationCategory]): + Harmful and sensitive categories representing + the input document. + """ + + moderation_categories: MutableSequence[ + "ClassificationCategory" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ClassificationCategory", + ) + + class AnnotateTextRequest(proto.Message): r"""The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and @@ -1568,6 +1602,9 @@ class Features(proto.Message): sentiment. classify_text (bool): Classify the full document into categories. + moderate_text (bool): + Moderate the document for harmful and + sensitive categories. classification_model_options (google.cloud.language_v1.types.ClassificationModelOptions): The model options to use for classification. Defaults to v1 options if not specified. Only used if ``classify_text`` is @@ -1594,6 +1631,10 @@ class Features(proto.Message): proto.BOOL, number=6, ) + moderate_text: bool = proto.Field( + proto.BOOL, + number=11, + ) classification_model_options: "ClassificationModelOptions" = proto.Field( proto.MESSAGE, number=10, @@ -1645,6 +1686,9 @@ class AnnotateTextResponse(proto.Message): field for more details. categories (MutableSequence[google.cloud.language_v1.types.ClassificationCategory]): Categories identified in the input document. + moderation_categories (MutableSequence[google.cloud.language_v1.types.ClassificationCategory]): + Harmful and sensitive categories identified + in the input document. 
""" sentences: MutableSequence["Sentence"] = proto.RepeatedField( @@ -1676,6 +1720,13 @@ class AnnotateTextResponse(proto.Message): number=6, message="ClassificationCategory", ) + moderation_categories: MutableSequence[ + "ClassificationCategory" + ] = proto.RepeatedField( + proto.MESSAGE, + number=7, + message="ClassificationCategory", + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/language_v1beta2/__init__.py b/google/cloud/language_v1beta2/__init__.py index 459b226d..d8e5de1b 100644 --- a/google/cloud/language_v1beta2/__init__.py +++ b/google/cloud/language_v1beta2/__init__.py @@ -39,6 +39,8 @@ EncodingType, Entity, EntityMention, + ModerateTextRequest, + ModerateTextResponse, PartOfSpeech, Sentence, Sentiment, @@ -68,6 +70,8 @@ "Entity", "EntityMention", "LanguageServiceClient", + "ModerateTextRequest", + "ModerateTextResponse", "PartOfSpeech", "Sentence", "Sentiment", diff --git a/google/cloud/language_v1beta2/gapic_metadata.json b/google/cloud/language_v1beta2/gapic_metadata.json index fca8f442..85a901f9 100644 --- a/google/cloud/language_v1beta2/gapic_metadata.json +++ b/google/cloud/language_v1beta2/gapic_metadata.json @@ -39,6 +39,11 @@ "methods": [ "classify_text" ] + }, + "ModerateText": { + "methods": [ + "moderate_text" + ] } } }, @@ -74,6 +79,11 @@ "methods": [ "classify_text" ] + }, + "ModerateText": { + "methods": [ + "moderate_text" + ] } } }, @@ -109,6 +119,11 @@ "methods": [ "classify_text" ] + }, + "ModerateText": { + "methods": [ + "moderate_text" + ] } } } diff --git a/google/cloud/language_v1beta2/services/language_service/async_client.py b/google/cloud/language_v1beta2/services/language_service/async_client.py index 2676da9f..0ce7f723 100644 --- a/google/cloud/language_v1beta2/services/language_service/async_client.py +++ b/google/cloud/language_v1beta2/services/language_service/async_client.py @@ -793,6 +793,104 @@ async def sample_classify_text(): # Done; return the response. return response + async def moderate_text( + self, + request: Optional[Union[language_service.ModerateTextRequest, dict]] = None, + *, + document: Optional[language_service.Document] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ModerateTextResponse: + r"""Moderates a document for harmful and sensitive + categories. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. + # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import language_v1beta2 + + async def sample_moderate_text(): + # Create a client + client = language_v1beta2.LanguageServiceAsyncClient() + + # Initialize request argument(s) + document = language_v1beta2.Document() + document.content = "content_value" + + request = language_v1beta2.ModerateTextRequest( + document=document, + ) + + # Make the request + response = await client.moderate_text(request=request) + + # Handle the response + print(response) + + Args: + request (Optional[Union[google.cloud.language_v1beta2.types.ModerateTextRequest, dict]]): + The request object. The document moderation request + message. 
+ document (:class:`google.cloud.language_v1beta2.types.Document`): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.language_v1beta2.types.ModerateTextResponse: + The document moderation response + message. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = language_service.ModerateTextRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.moderate_text, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + async def annotate_text( self, request: Optional[Union[language_service.AnnotateTextRequest, dict]] = None, diff --git a/google/cloud/language_v1beta2/services/language_service/client.py b/google/cloud/language_v1beta2/services/language_service/client.py index 4c45046d..4888fcfc 100644 --- a/google/cloud/language_v1beta2/services/language_service/client.py +++ b/google/cloud/language_v1beta2/services/language_service/client.py @@ -952,6 +952,104 @@ def sample_classify_text(): # Done; return the response. return response + def moderate_text( + self, + request: Optional[Union[language_service.ModerateTextRequest, dict]] = None, + *, + document: Optional[language_service.Document] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ModerateTextResponse: + r"""Moderates a document for harmful and sensitive + categories. + + .. code-block:: python + + # This snippet has been automatically generated and should be regarded as a + # code template only. + # It will require modifications to work: + # - It may require correct/in-range values for request initialization. 
+ # - It may require specifying regional endpoints when creating the service + # client as shown in: + # https://googleapis.dev/python/google-api-core/latest/client_options.html + from google.cloud import language_v1beta2 + + def sample_moderate_text(): + # Create a client + client = language_v1beta2.LanguageServiceClient() + + # Initialize request argument(s) + document = language_v1beta2.Document() + document.content = "content_value" + + request = language_v1beta2.ModerateTextRequest( + document=document, + ) + + # Make the request + response = client.moderate_text(request=request) + + # Handle the response + print(response) + + Args: + request (Union[google.cloud.language_v1beta2.types.ModerateTextRequest, dict]): + The request object. The document moderation request + message. + document (google.cloud.language_v1beta2.types.Document): + Required. Input document. + This corresponds to the ``document`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.language_v1beta2.types.ModerateTextResponse: + The document moderation response + message. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([document]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a language_service.ModerateTextRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, language_service.ModerateTextRequest): + request = language_service.ModerateTextRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if document is not None: + request.document = document + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.moderate_text] + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
+ return response + def annotate_text( self, request: Optional[Union[language_service.AnnotateTextRequest, dict]] = None, diff --git a/google/cloud/language_v1beta2/services/language_service/transports/base.py b/google/cloud/language_v1beta2/services/language_service/transports/base.py index 5204e4c9..06cd03ef 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/base.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/base.py @@ -200,6 +200,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=600.0, client_info=client_info, ), + self.moderate_text: gapic_v1.method.wrap_method( + self.moderate_text, + default_timeout=None, + client_info=client_info, + ), self.annotate_text: gapic_v1.method.wrap_method( self.annotate_text, default_retry=retries.Retry( @@ -286,6 +291,18 @@ def classify_text( ]: raise NotImplementedError() + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], + Union[ + language_service.ModerateTextResponse, + Awaitable[language_service.ModerateTextResponse], + ], + ]: + raise NotImplementedError() + @property def annotate_text( self, diff --git a/google/cloud/language_v1beta2/services/language_service/transports/grpc.py b/google/cloud/language_v1beta2/services/language_service/transports/grpc.py index f89362eb..ec629428 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/grpc.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/grpc.py @@ -380,6 +380,35 @@ def classify_text( ) return self._stubs["classify_text"] + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], language_service.ModerateTextResponse + ]: + r"""Return a callable for the moderate text method over gRPC. + + Moderates a document for harmful and sensitive + categories. + + Returns: + Callable[[~.ModerateTextRequest], + ~.ModerateTextResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "moderate_text" not in self._stubs: + self._stubs["moderate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/ModerateText", + request_serializer=language_service.ModerateTextRequest.serialize, + response_deserializer=language_service.ModerateTextResponse.deserialize, + ) + return self._stubs["moderate_text"] + @property def annotate_text( self, diff --git a/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py b/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py index dc0f8f26..4d1a8065 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/grpc_asyncio.py @@ -385,6 +385,36 @@ def classify_text( ) return self._stubs["classify_text"] + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], + Awaitable[language_service.ModerateTextResponse], + ]: + r"""Return a callable for the moderate text method over gRPC. + + Moderates a document for harmful and sensitive + categories. + + Returns: + Callable[[~.ModerateTextRequest], + Awaitable[~.ModerateTextResponse]]: + A function that, when called, will call the underlying RPC + on the server. 
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if "moderate_text" not in self._stubs: + self._stubs["moderate_text"] = self.grpc_channel.unary_unary( + "/google.cloud.language.v1beta2.LanguageService/ModerateText", + request_serializer=language_service.ModerateTextRequest.serialize, + response_deserializer=language_service.ModerateTextResponse.deserialize, + ) + return self._stubs["moderate_text"] + @property def annotate_text( self, diff --git a/google/cloud/language_v1beta2/services/language_service/transports/rest.py b/google/cloud/language_v1beta2/services/language_service/transports/rest.py index e0821a77..90a221e3 100644 --- a/google/cloud/language_v1beta2/services/language_service/transports/rest.py +++ b/google/cloud/language_v1beta2/services/language_service/transports/rest.py @@ -111,6 +111,14 @@ def post_classify_text(self, response): logging.log(f"Received response: {response}") return response + def pre_moderate_text(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_moderate_text(self, response): + logging.log(f"Received response: {response}") + return response + transport = LanguageServiceRestTransport(interceptor=MyCustomLanguageServiceInterceptor()) client = LanguageServiceClient(transport=transport) @@ -257,6 +265,29 @@ def post_classify_text( """ return response + def pre_moderate_text( + self, + request: language_service.ModerateTextRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[language_service.ModerateTextRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for moderate_text + + Override in a subclass to manipulate the request or metadata + before they are sent to the LanguageService server. + """ + return request, metadata + + def post_moderate_text( + self, response: language_service.ModerateTextResponse + ) -> language_service.ModerateTextResponse: + """Post-rpc interceptor for moderate_text + + Override in a subclass to manipulate the response + after it is returned by the LanguageService server but before + it is returned to user code. + """ + return response + @dataclasses.dataclass class LanguageServiceRestStub: @@ -945,6 +976,104 @@ def __call__( resp = self._interceptor.post_classify_text(resp) return resp + class _ModerateText(LanguageServiceRestStub): + def __hash__(self): + return hash("ModerateText") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: language_service.ModerateTextRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> language_service.ModerateTextResponse: + r"""Call the moderate text method over HTTP. + + Args: + request (~.language_service.ModerateTextRequest): + The request object. The document moderation request + message. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.language_service.ModerateTextResponse: + The document moderation response + message. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v1beta2/documents:moderateText", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_moderate_text(request, metadata) + pb_request = language_service.ModerateTextRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = language_service.ModerateTextResponse() + pb_resp = language_service.ModerateTextResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_moderate_text(resp) + return resp + @property def analyze_entities( self, @@ -1008,6 +1137,16 @@ def classify_text( # In C++ this would require a dynamic_cast return self._ClassifyText(self._session, self._host, self._interceptor) # type: ignore + @property + def moderate_text( + self, + ) -> Callable[ + [language_service.ModerateTextRequest], language_service.ModerateTextResponse + ]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ModerateText(self._session, self._host, self._interceptor) # type: ignore + @property def kind(self) -> str: return "rest" diff --git a/google/cloud/language_v1beta2/types/__init__.py b/google/cloud/language_v1beta2/types/__init__.py index b4b20c9c..4f98d4d3 100644 --- a/google/cloud/language_v1beta2/types/__init__.py +++ b/google/cloud/language_v1beta2/types/__init__.py @@ -33,6 +33,8 @@ EncodingType, Entity, EntityMention, + ModerateTextRequest, + ModerateTextResponse, PartOfSpeech, Sentence, Sentiment, @@ -59,6 +61,8 @@ "Document", "Entity", "EntityMention", + "ModerateTextRequest", + "ModerateTextResponse", "PartOfSpeech", "Sentence", "Sentiment", diff --git a/google/cloud/language_v1beta2/types/language_service.py b/google/cloud/language_v1beta2/types/language_service.py index 884a1512..d88273fd 100644 --- a/google/cloud/language_v1beta2/types/language_service.py +++ b/google/cloud/language_v1beta2/types/language_service.py @@ -44,6 +44,8 @@ "AnalyzeSyntaxResponse", "ClassifyTextRequest", "ClassifyTextResponse", + "ModerateTextRequest", + "ModerateTextResponse", "AnnotateTextRequest", "AnnotateTextResponse", }, @@ -1227,9 +1229,8 @@ class ClassificationCategory(proto.Message): Attributes: name (str): - The name of the category representing the document, from the - `predefined - taxonomy `__. 
+ The name of the category representing the + document. confidence (float): The classifier's confidence of the category. Number represents how certain the classifier is @@ -1570,6 +1571,39 @@ class ClassifyTextResponse(proto.Message): ) +class ModerateTextRequest(proto.Message): + r"""The document moderation request message. + + Attributes: + document (google.cloud.language_v1beta2.types.Document): + Required. Input document. + """ + + document: "Document" = proto.Field( + proto.MESSAGE, + number=1, + message="Document", + ) + + +class ModerateTextResponse(proto.Message): + r"""The document moderation response message. + + Attributes: + moderation_categories (MutableSequence[google.cloud.language_v1beta2.types.ClassificationCategory]): + Harmful and sensitive categories representing + the input document. + """ + + moderation_categories: MutableSequence[ + "ClassificationCategory" + ] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message="ClassificationCategory", + ) + + class AnnotateTextRequest(proto.Message): r"""The request message for the text annotation API, which can perform multiple analysis types (sentiment, entities, and @@ -1588,7 +1622,7 @@ class AnnotateTextRequest(proto.Message): class Features(proto.Message): r"""All available features for sentiment, syntax, and semantic analysis. Setting each one to true will enable that specific - analysis for the input. Next ID: 11 + analysis for the input. Next ID: 12 Attributes: extract_syntax (bool): @@ -1605,6 +1639,9 @@ class Features(proto.Message): the API will use the default model which classifies into a `predefined taxonomy `__. + moderate_text (bool): + Moderate the document for harmful and + sensitive categories. classification_model_options (google.cloud.language_v1beta2.types.ClassificationModelOptions): The model options to use for classification. Defaults to v1 options if not specified. Only used if ``classify_text`` is @@ -1631,6 +1668,10 @@ class Features(proto.Message): proto.BOOL, number=6, ) + moderate_text: bool = proto.Field( + proto.BOOL, + number=11, + ) classification_model_options: "ClassificationModelOptions" = proto.Field( proto.MESSAGE, number=10, @@ -1682,6 +1723,9 @@ class AnnotateTextResponse(proto.Message): field for more details. categories (MutableSequence[google.cloud.language_v1beta2.types.ClassificationCategory]): Categories identified in the input document. + moderation_categories (MutableSequence[google.cloud.language_v1beta2.types.ClassificationCategory]): + Harmful and sensitive categories identified + in the input document. """ sentences: MutableSequence["Sentence"] = proto.RepeatedField( @@ -1713,6 +1757,13 @@ class AnnotateTextResponse(proto.Message): number=6, message="ClassificationCategory", ) + moderation_categories: MutableSequence[ + "ClassificationCategory" + ] = proto.RepeatedField( + proto.MESSAGE, + number=8, + message="ClassificationCategory", + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/samples/generated_samples/language_v1_generated_language_service_moderate_text_async.py b/samples/generated_samples/language_v1_generated_language_service_moderate_text_async.py new file mode 100644 index 00000000..6f4d033d --- /dev/null +++ b/samples/generated_samples/language_v1_generated_language_service_moderate_text_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ModerateText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-language + + +# [START language_v1_generated_LanguageService_ModerateText_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. +# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import language_v1 + + +async def sample_moderate_text(): + # Create a client + client = language_v1.LanguageServiceAsyncClient() + + # Initialize request argument(s) + document = language_v1.Document() + document.content = "content_value" + + request = language_v1.ModerateTextRequest( + document=document, + ) + + # Make the request + response = await client.moderate_text(request=request) + + # Handle the response + print(response) + +# [END language_v1_generated_LanguageService_ModerateText_async] diff --git a/samples/generated_samples/language_v1_generated_language_service_moderate_text_sync.py b/samples/generated_samples/language_v1_generated_language_service_moderate_text_sync.py new file mode 100644 index 00000000..788b4a08 --- /dev/null +++ b/samples/generated_samples/language_v1_generated_language_service_moderate_text_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ModerateText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-language + + +# [START language_v1_generated_LanguageService_ModerateText_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import language_v1 + + +def sample_moderate_text(): + # Create a client + client = language_v1.LanguageServiceClient() + + # Initialize request argument(s) + document = language_v1.Document() + document.content = "content_value" + + request = language_v1.ModerateTextRequest( + document=document, + ) + + # Make the request + response = client.moderate_text(request=request) + + # Handle the response + print(response) + +# [END language_v1_generated_LanguageService_ModerateText_sync] diff --git a/samples/generated_samples/language_v1beta2_generated_language_service_moderate_text_async.py b/samples/generated_samples/language_v1beta2_generated_language_service_moderate_text_async.py new file mode 100644 index 00000000..d8385285 --- /dev/null +++ b/samples/generated_samples/language_v1beta2_generated_language_service_moderate_text_async.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ModerateText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-language + + +# [START language_v1beta2_generated_LanguageService_ModerateText_async] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import language_v1beta2 + + +async def sample_moderate_text(): + # Create a client + client = language_v1beta2.LanguageServiceAsyncClient() + + # Initialize request argument(s) + document = language_v1beta2.Document() + document.content = "content_value" + + request = language_v1beta2.ModerateTextRequest( + document=document, + ) + + # Make the request + response = await client.moderate_text(request=request) + + # Handle the response + print(response) + +# [END language_v1beta2_generated_LanguageService_ModerateText_async] diff --git a/samples/generated_samples/language_v1beta2_generated_language_service_moderate_text_sync.py b/samples/generated_samples/language_v1beta2_generated_language_service_moderate_text_sync.py new file mode 100644 index 00000000..78d11521 --- /dev/null +++ b/samples/generated_samples/language_v1beta2_generated_language_service_moderate_text_sync.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Generated code. DO NOT EDIT! +# +# Snippet for ModerateText +# NOTE: This snippet has been automatically generated for illustrative purposes only. +# It may require modifications to work in your environment. + +# To install the latest published package dependency, execute the following: +# python3 -m pip install google-cloud-language + + +# [START language_v1beta2_generated_LanguageService_ModerateText_sync] +# This snippet has been automatically generated and should be regarded as a +# code template only. +# It will require modifications to work: +# - It may require correct/in-range values for request initialization. 
+# - It may require specifying regional endpoints when creating the service +# client as shown in: +# https://googleapis.dev/python/google-api-core/latest/client_options.html +from google.cloud import language_v1beta2 + + +def sample_moderate_text(): + # Create a client + client = language_v1beta2.LanguageServiceClient() + + # Initialize request argument(s) + document = language_v1beta2.Document() + document.content = "content_value" + + request = language_v1beta2.ModerateTextRequest( + document=document, + ) + + # Make the request + response = client.moderate_text(request=request) + + # Handle the response + print(response) + +# [END language_v1beta2_generated_LanguageService_ModerateText_sync] diff --git a/samples/generated_samples/snippet_metadata_google.cloud.language.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.language.v1.json index 9651e696..4e481f59 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.language.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.language.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-language", - "version": "2.9.1" + "version": "0.1.0" }, "snippets": [ { @@ -1024,6 +1024,167 @@ } ], "title": "language_v1_generated_language_service_classify_text_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.language_v1.LanguageServiceAsyncClient", + "shortName": "LanguageServiceAsyncClient" + }, + "fullName": "google.cloud.language_v1.LanguageServiceAsyncClient.moderate_text", + "method": { + "fullName": "google.cloud.language.v1.LanguageService.ModerateText", + "service": { + "fullName": "google.cloud.language.v1.LanguageService", + "shortName": "LanguageService" + }, + "shortName": "ModerateText" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.language_v1.types.ModerateTextRequest" + }, + { + "name": "document", + "type": "google.cloud.language_v1.types.Document" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.language_v1.types.ModerateTextResponse", + "shortName": "moderate_text" + }, + "description": "Sample for ModerateText", + "file": "language_v1_generated_language_service_moderate_text_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "language_v1_generated_LanguageService_ModerateText_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "language_v1_generated_language_service_moderate_text_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.language_v1.LanguageServiceClient", + "shortName": "LanguageServiceClient" + }, + "fullName": "google.cloud.language_v1.LanguageServiceClient.moderate_text", + "method": { + "fullName": "google.cloud.language.v1.LanguageService.ModerateText", + "service": { + "fullName": "google.cloud.language.v1.LanguageService", + "shortName": "LanguageService" + }, + "shortName": "ModerateText" + }, + "parameters": [ + { + "name": "request", + 
"type": "google.cloud.language_v1.types.ModerateTextRequest" + }, + { + "name": "document", + "type": "google.cloud.language_v1.types.Document" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.language_v1.types.ModerateTextResponse", + "shortName": "moderate_text" + }, + "description": "Sample for ModerateText", + "file": "language_v1_generated_language_service_moderate_text_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "language_v1_generated_LanguageService_ModerateText_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "language_v1_generated_language_service_moderate_text_sync.py" } ] } diff --git a/samples/generated_samples/snippet_metadata_google.cloud.language.v1beta2.json b/samples/generated_samples/snippet_metadata_google.cloud.language.v1beta2.json index 6ef547cc..fb6633f2 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.language.v1beta2.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.language.v1beta2.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-language", - "version": "2.9.1" + "version": "0.1.0" }, "snippets": [ { @@ -1024,6 +1024,167 @@ } ], "title": "language_v1beta2_generated_language_service_classify_text_sync.py" + }, + { + "canonical": true, + "clientMethod": { + "async": true, + "client": { + "fullName": "google.cloud.language_v1beta2.LanguageServiceAsyncClient", + "shortName": "LanguageServiceAsyncClient" + }, + "fullName": "google.cloud.language_v1beta2.LanguageServiceAsyncClient.moderate_text", + "method": { + "fullName": "google.cloud.language.v1beta2.LanguageService.ModerateText", + "service": { + "fullName": "google.cloud.language.v1beta2.LanguageService", + "shortName": "LanguageService" + }, + "shortName": "ModerateText" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.language_v1beta2.types.ModerateTextRequest" + }, + { + "name": "document", + "type": "google.cloud.language_v1beta2.types.Document" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.language_v1beta2.types.ModerateTextResponse", + "shortName": "moderate_text" + }, + "description": "Sample for ModerateText", + "file": "language_v1beta2_generated_language_service_moderate_text_async.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "language_v1beta2_generated_LanguageService_ModerateText_async", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": 
"language_v1beta2_generated_language_service_moderate_text_async.py" + }, + { + "canonical": true, + "clientMethod": { + "client": { + "fullName": "google.cloud.language_v1beta2.LanguageServiceClient", + "shortName": "LanguageServiceClient" + }, + "fullName": "google.cloud.language_v1beta2.LanguageServiceClient.moderate_text", + "method": { + "fullName": "google.cloud.language.v1beta2.LanguageService.ModerateText", + "service": { + "fullName": "google.cloud.language.v1beta2.LanguageService", + "shortName": "LanguageService" + }, + "shortName": "ModerateText" + }, + "parameters": [ + { + "name": "request", + "type": "google.cloud.language_v1beta2.types.ModerateTextRequest" + }, + { + "name": "document", + "type": "google.cloud.language_v1beta2.types.Document" + }, + { + "name": "retry", + "type": "google.api_core.retry.Retry" + }, + { + "name": "timeout", + "type": "float" + }, + { + "name": "metadata", + "type": "Sequence[Tuple[str, str]" + } + ], + "resultType": "google.cloud.language_v1beta2.types.ModerateTextResponse", + "shortName": "moderate_text" + }, + "description": "Sample for ModerateText", + "file": "language_v1beta2_generated_language_service_moderate_text_sync.py", + "language": "PYTHON", + "origin": "API_DEFINITION", + "regionTag": "language_v1beta2_generated_LanguageService_ModerateText_sync", + "segments": [ + { + "end": 54, + "start": 27, + "type": "FULL" + }, + { + "end": 54, + "start": 27, + "type": "SHORT" + }, + { + "end": 40, + "start": 38, + "type": "CLIENT_INITIALIZATION" + }, + { + "end": 48, + "start": 41, + "type": "REQUEST_INITIALIZATION" + }, + { + "end": 51, + "start": 49, + "type": "REQUEST_EXECUTION" + }, + { + "end": 55, + "start": 52, + "type": "RESPONSE_HANDLING" + } + ], + "title": "language_v1beta2_generated_language_service_moderate_text_sync.py" } ] } diff --git a/scripts/fixup_language_v1_keywords.py b/scripts/fixup_language_v1_keywords.py index fc15df57..10fa218c 100644 --- a/scripts/fixup_language_v1_keywords.py +++ b/scripts/fixup_language_v1_keywords.py @@ -45,6 +45,7 @@ class languageCallTransformer(cst.CSTTransformer): 'analyze_syntax': ('document', 'encoding_type', ), 'annotate_text': ('document', 'features', 'encoding_type', ), 'classify_text': ('document', 'classification_model_options', ), + 'moderate_text': ('document', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/scripts/fixup_language_v1beta2_keywords.py b/scripts/fixup_language_v1beta2_keywords.py index fc15df57..10fa218c 100644 --- a/scripts/fixup_language_v1beta2_keywords.py +++ b/scripts/fixup_language_v1beta2_keywords.py @@ -45,6 +45,7 @@ class languageCallTransformer(cst.CSTTransformer): 'analyze_syntax': ('document', 'encoding_type', ), 'annotate_text': ('document', 'features', 'encoding_type', ), 'classify_text': ('document', 'classification_model_options', ), + 'moderate_text': ('document', ), } def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: diff --git a/tests/unit/gapic/language_v1/test_language_service.py b/tests/unit/gapic/language_v1/test_language_service.py index 4904c456..77957902 100644 --- a/tests/unit/gapic/language_v1/test_language_service.py +++ b/tests/unit/gapic/language_v1/test_language_service.py @@ -1700,6 +1700,183 @@ async def test_classify_text_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + language_service.ModerateTextRequest, + dict, + ], +) +def test_moderate_text(request_type, transport: str = "grpc"): + client = LanguageServiceClient( + 
credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ModerateTextResponse() + response = client.moderate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == language_service.ModerateTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ModerateTextResponse) + + +def test_moderate_text_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + client.moderate_text() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == language_service.ModerateTextRequest() + + +@pytest.mark.asyncio +async def test_moderate_text_async( + transport: str = "grpc_asyncio", request_type=language_service.ModerateTextRequest +): + client = LanguageServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ModerateTextResponse() + ) + response = await client.moderate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == language_service.ModerateTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ModerateTextResponse) + + +@pytest.mark.asyncio +async def test_moderate_text_async_from_dict(): + await test_moderate_text_async(request_type=dict) + + +def test_moderate_text_flattened(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ModerateTextResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.moderate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].document + mock_val = language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + assert arg == mock_val + + +def test_moderate_text_flattened_error(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.moderate_text( + language_service.ModerateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +@pytest.mark.asyncio +async def test_moderate_text_flattened_async(): + client = LanguageServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ModerateTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ModerateTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.moderate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].document + mock_val = language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_moderate_text_flattened_error_async(): + client = LanguageServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.moderate_text( + language_service.ModerateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -3246,6 +3423,269 @@ def test_classify_text_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + language_service.ModerateTextRequest, + dict, + ], +) +def test_moderate_text_rest(request_type): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = language_service.ModerateTextResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = language_service.ModerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.moderate_text(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, language_service.ModerateTextResponse) + + +def test_moderate_text_rest_required_fields( + request_type=language_service.ModerateTextRequest, +): + transport_class = transports.LanguageServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).moderate_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).moderate_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = language_service.ModerateTextResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = language_service.ModerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.moderate_text(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_moderate_text_rest_unset_required_fields(): + transport = transports.LanguageServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.moderate_text._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("document",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_moderate_text_rest_interceptors(null_interceptor): + transport = transports.LanguageServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.LanguageServiceRestInterceptor(), + ) + client = LanguageServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.LanguageServiceRestInterceptor, "post_moderate_text" + ) as post, mock.patch.object( + transports.LanguageServiceRestInterceptor, "pre_moderate_text" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = language_service.ModerateTextRequest.pb( + language_service.ModerateTextRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = language_service.ModerateTextResponse.to_json( + language_service.ModerateTextResponse() + ) + + request = language_service.ModerateTextRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = language_service.ModerateTextResponse() + + client.moderate_text( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_moderate_text_rest_bad_request( + transport: str = "rest", request_type=language_service.ModerateTextRequest +): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.moderate_text(request) + + +def test_moderate_text_rest_flattened(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = language_service.ModerateTextResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = language_service.ModerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.moderate_text(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1/documents:moderateText" % client.transport._host, args[1] + ) + + +def test_moderate_text_rest_flattened_error(transport: str = "rest"): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.moderate_text( + language_service.ModerateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +def test_moderate_text_rest_error(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -3668,6 +4108,7 @@ def test_language_service_base_transport(): "analyze_entity_sentiment", "analyze_syntax", "classify_text", + "moderate_text", "annotate_text", ) for method in methods: @@ -3954,6 +4395,9 @@ def test_language_service_client_transport_session_collision(transport_name): session1 = client1.transport.classify_text._session session2 = client2.transport.classify_text._session assert session1 != session2 + session1 = client1.transport.moderate_text._session + session2 = client2.transport.moderate_text._session + assert session1 != session2 session1 = client1.transport.annotate_text._session session2 = client2.transport.annotate_text._session assert session1 != session2 diff --git a/tests/unit/gapic/language_v1beta2/test_language_service.py b/tests/unit/gapic/language_v1beta2/test_language_service.py index 809ce0e0..adfc2301 100644 --- a/tests/unit/gapic/language_v1beta2/test_language_service.py +++ b/tests/unit/gapic/language_v1beta2/test_language_service.py @@ -1700,6 +1700,183 @@ async def test_classify_text_flattened_error_async(): ) +@pytest.mark.parametrize( + "request_type", + [ + language_service.ModerateTextRequest, + dict, + ], +) +def test_moderate_text(request_type, transport: str = "grpc"): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ModerateTextResponse() + response = client.moderate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == language_service.ModerateTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ModerateTextResponse) + + +def test_moderate_text_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + client.moderate_text() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == language_service.ModerateTextRequest() + + +@pytest.mark.asyncio +async def test_moderate_text_async( + transport: str = "grpc_asyncio", request_type=language_service.ModerateTextRequest +): + client = LanguageServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ModerateTextResponse() + ) + response = await client.moderate_text(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == language_service.ModerateTextRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ModerateTextResponse) + + +@pytest.mark.asyncio +async def test_moderate_text_async_from_dict(): + await test_moderate_text_async(request_type=dict) + + +def test_moderate_text_flattened(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ModerateTextResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.moderate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].document + mock_val = language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + assert arg == mock_val + + +def test_moderate_text_flattened_error(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.moderate_text( + language_service.ModerateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +@pytest.mark.asyncio +async def test_moderate_text_flattened_async(): + client = LanguageServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.moderate_text), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = language_service.ModerateTextResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + language_service.ModerateTextResponse() + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.moderate_text( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].document + mock_val = language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ) + assert arg == mock_val + + +@pytest.mark.asyncio +async def test_moderate_text_flattened_error_async(): + client = LanguageServiceAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.moderate_text( + language_service.ModerateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -3247,6 +3424,269 @@ def test_classify_text_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + language_service.ModerateTextRequest, + dict, + ], +) +def test_moderate_text_rest(request_type): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = language_service.ModerateTextResponse() + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = language_service.ModerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.moderate_text(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, language_service.ModerateTextResponse) + + +def test_moderate_text_rest_required_fields( + request_type=language_service.ModerateTextRequest, +): + transport_class = transports.LanguageServiceRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).moderate_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).moderate_text._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = language_service.ModerateTextResponse() + # Mock the http request call within the method and fake a response. 
+ with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. + pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + pb_return_value = language_service.ModerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.moderate_text(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_moderate_text_rest_unset_required_fields(): + transport = transports.LanguageServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.moderate_text._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("document",))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_moderate_text_rest_interceptors(null_interceptor): + transport = transports.LanguageServiceRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.LanguageServiceRestInterceptor(), + ) + client = LanguageServiceClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + transports.LanguageServiceRestInterceptor, "post_moderate_text" + ) as post, mock.patch.object( + transports.LanguageServiceRestInterceptor, "pre_moderate_text" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = language_service.ModerateTextRequest.pb( + language_service.ModerateTextRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = language_service.ModerateTextResponse.to_json( + language_service.ModerateTextResponse() + ) + + request = language_service.ModerateTextRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = language_service.ModerateTextResponse() + + client.moderate_text( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_moderate_text_rest_bad_request( + transport: str = "rest", request_type=language_service.ModerateTextRequest +): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.moderate_text(request) + + +def test_moderate_text_rest_flattened(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = language_service.ModerateTextResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {} + + # get truthy value for each flattened field + mock_args = dict( + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + pb_return_value = language_service.ModerateTextResponse.pb(return_value) + json_return_value = json_format.MessageToJson(pb_return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.moderate_text(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v1beta2/documents:moderateText" % client.transport._host, args[1] + ) + + +def test_moderate_text_rest_flattened_error(transport: str = "rest"): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.moderate_text( + language_service.ModerateTextRequest(), + document=language_service.Document( + type_=language_service.Document.Type.PLAIN_TEXT + ), + ) + + +def test_moderate_text_rest_error(): + client = LanguageServiceClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -3669,6 +4109,7 @@ def test_language_service_base_transport(): "analyze_entity_sentiment", "analyze_syntax", "classify_text", + "moderate_text", "annotate_text", ) for method in methods: @@ -3955,6 +4396,9 @@ def test_language_service_client_transport_session_collision(transport_name): session1 = client1.transport.classify_text._session session2 = client2.transport.classify_text._session assert session1 != session2 + session1 = client1.transport.moderate_text._session + session2 = client2.transport.moderate_text._session + assert session1 != session2 session1 = client1.transport.annotate_text._session session2 = client2.transport.annotate_text._session assert session1 != session2
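
Usage note (not part of the generated patch): the sketch below shows how the moderate_text method added by this change could be called, assembled from the generated v1beta2 sample and the flattened-argument form exercised in the unit tests above. It assumes Application Default Credentials are configured; the fields of ModerateTextResponse are not enumerated here because the message definition is outside this diff, so the response is simply printed.

    # Minimal usage sketch for the ModerateText RPC introduced in this change.
    # Assumes Application Default Credentials are available in the environment.
    from google.cloud import language_v1beta2


    def moderate(text: str) -> language_v1beta2.ModerateTextResponse:
        client = language_v1beta2.LanguageServiceClient()

        # Build a plain-text document to moderate, mirroring the generated sample.
        document = language_v1beta2.Document(
            content=text,
            type_=language_v1beta2.Document.Type.PLAIN_TEXT,
        )

        # Call with an explicit request object, as in the generated sample ...
        request = language_v1beta2.ModerateTextRequest(document=document)
        response = client.moderate_text(request=request)

        # ... or with the flattened `document` keyword, as the unit tests do:
        # response = client.moderate_text(document=document)

        return response


    if __name__ == "__main__":
        print(moderate("content_value"))

The request-object and flattened forms are mutually exclusive; as the flattened-error tests above assert, passing both a request object and a document keyword raises ValueError.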