From e0860549da6a4a9ba7b57cdf2ea626491618e3f6 Mon Sep 17 00:00:00 2001
From: Sheepsta300 <128811766+Sheepsta300@users.noreply.github.com>
Date: Wed, 11 Sep 2024 18:08:50 +1200
Subject: [PATCH] adhere to linting recommendations

---
 .../content_safety.py | 97 +++++++++----------
 1 file changed, 47 insertions(+), 50 deletions(-)

diff --git a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py
index 9be95c1a8acc0..d6c7780858dbd 100644
--- a/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py
+++ b/libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py
@@ -9,33 +9,32 @@
 logger = logging.getLogger(__name__)
 
 
-
 class AzureContentSafetyTextTool(BaseTool):
     """
     A tool that interacts with the Azure AI Content Safety API.
 
-    This tool queries the Azure AI Content Safety API to analyze text for harmful 
+    This tool queries the Azure AI Content Safety API to analyze text for harmful
     content and identify sentiment. It requires an API key and endpoint, which can
     be set up as described in the following guide:
-    
+
     https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python
 
     Attributes:
-        content_safety_key (str): 
+        content_safety_key (str):
            The API key used to authenticate requests with Azure Content Safety API.
-        content_safety_endpoint (str): 
+        content_safety_endpoint (str):
            The endpoint URL for the Azure Content Safety API.
-        content_safety_client (Any): 
-            An instance of the Azure Content Safety Client used for making API requests.
+        content_safety_client (Any):
+            An instance of the Azure Content Safety Client used for making API
+            requests.
 
     Methods:
         _sentiment_analysis(text: str) -> Dict:
-            Analyzes the provided text to assess its sentiment and safety, 
+            Analyzes the provided text to assess its sentiment and safety,
             returning the analysis results.
-        _run(query: str, 
-             run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
-            Uses the tool to analyze the given query and returns the result. 
+        _run(query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
+            Uses the tool to analyze the given query and returns the result.
             Raises a RuntimeError if an exception occurs.
     """
 
@@ -46,44 +45,41 @@ class AzureContentSafetyTextTool(BaseTool):
     name: str = "azure_content_safety_tool"
     description: str = (
         "A wrapper around Azure AI Content Safety. "
-        '''Useful for when you need to identify the sentiment of text
-        and whether or not a text is harmful. '''
-        "Input should be text."
+        "Useful for when you need to identify the sentiment of text and whether"
+        " or not a text is harmful. Input should be text."
     )
 
     def __init__(
-            self,
-            *,
-            content_safety_key: Optional[str] = None,
-            content_safety_endpoint: Optional[str] = None,
-    ) -> None:
+        self,
+        *,
+        content_safety_key: Optional[str] = None,
+        content_safety_endpoint: Optional[str] = None,
+    ) -> None:
         """
         Initialize the AzureContentSafetyTextTool with the given API key and endpoint.
 
-        This constructor sets up the API key and endpoint, and initializes
-        the Azure Content Safety Client. If API key or endpoint is not provided,
-        they are fetched from environment variables.
+        If not provided, the API key and endpoint are fetched from environment
+        variables.
 
         Args:
-            content_safety_key (Optional[str]):
-                The API key for Azure Content Safety API. If not provided,
-                it will be fetched from the environment
-                variable 'CONTENT_SAFETY_API_KEY'.
-            content_safety_endpoint (Optional[str]):
-                The endpoint URL for Azure Content Safety API. If not provided,
-                it will be fetched from the environment
-                variable 'CONTENT_SAFETY_ENDPOINT'.
+            content_safety_key (Optional[str]):
+                The API key for Azure Content Safety API. If not provided, it will
+                be fetched from the environment variable 'CONTENT_SAFETY_API_KEY'.
+            content_safety_endpoint (Optional[str]):
+                The endpoint URL for Azure Content Safety API. If not provided, it
+                will be fetched from the environment variable 'CONTENT_SAFETY_ENDPOINT'.
 
         Raises:
             ImportError: If the 'azure-ai-contentsafety' package is not installed.
-            ValueError:
-                If API key or endpoint is not provided
-                and environment variables are missing.
+            ValueError: If API key or endpoint is not provided and environment
+                variables are missing.
         """
-        content_safety_key = (content_safety_key or
-                              os.environ['CONTENT_SAFETY_API_KEY'])
-        content_safety_endpoint = (content_safety_endpoint or
-                                   os.environ['CONTENT_SAFETY_ENDPOINT'])
+        content_safety_key = content_safety_key or os.environ[
+            "CONTENT_SAFETY_API_KEY"
+        ]
+        content_safety_endpoint = content_safety_endpoint or os.environ[
+            "CONTENT_SAFETY_ENDPOINT"
+        ]
         try:
             import azure.ai.contentsafety as sdk
             from azure.core.credentials import AzureKeyCredential
@@ -98,16 +94,18 @@ def __init__(
                 "azure-ai-contentsafety is not installed. "
                 "Run `pip install azure-ai-contentsafety` to install."
             )
-        super().__init__(content_safety_key=content_safety_key,
-                         content_safety_endpoint=content_safety_endpoint,
-                         content_safety_client=content_safety_client)
+        super().__init__(
+            content_safety_key=content_safety_key,
+            content_safety_endpoint=content_safety_endpoint,
+            content_safety_client=content_safety_client,
+        )
 
     def _sentiment_analysis(self, text: str) -> Dict:
         """
         Perform sentiment analysis on the provided text.
 
-        This method uses the Azure Content Safety Client to analyze
-        the text and determine its sentiment and safety categories.
+        This method uses the Azure Content Safety Client to analyze the text and
+        determine its sentiment and safety categories.
 
         Args:
             text (str): The text to be analyzed.
@@ -115,7 +113,6 @@ def _sentiment_analysis(self, text: str) -> Dict:
         Returns:
             Dict: The analysis results containing sentiment and safety categories.
         """
-
        from azure.ai.contentsafety.models import AnalyzeTextOptions
 
        request = AnalyzeTextOptions(text=text)
@@ -131,14 +128,13 @@ def _run(
         """
         Analyze the given query using the tool.
 
-        This method calls `_sentiment_analysis` to process the
-        query and returns the result. It raises a RuntimeError if an
-        exception occurs during analysis.
+        This method calls `_sentiment_analysis` to process the query and returns
+        the result. It raises a RuntimeError if an exception occurs during
+        analysis.
 
         Args:
-            query (str):
-                The query text to be analyzed.
-            run_manager (Optional[CallbackManagerForToolRun], optional):
+            query (str): The query text to be analyzed.
+            run_manager (Optional[CallbackManagerForToolRun], optional):
                 A callback manager for tracking the tool run. Defaults to None.
 
         Returns:
@@ -152,4 +148,5 @@ def _run(
         except Exception as e:
             raise RuntimeError(
                 f"Error while running AzureContentSafetyTextTool: {e}"
-            )
\ No newline at end of file
+            )
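
Reviewer note, not part of the patch: a minimal usage sketch of the tool this file defines. It assumes the module is importable under the path shown in the diff (langchain_community.tools.azure_cognitive_services.content_safety), that `azure-ai-contentsafety` is installed, and that the CONTENT_SAFETY_API_KEY and CONTENT_SAFETY_ENDPOINT environment variables are set; the sample input string is made up.

import os

from langchain_community.tools.azure_cognitive_services.content_safety import (
    AzureContentSafetyTextTool,
)

# Fail fast if the credentials the constructor falls back to are missing.
for var in ("CONTENT_SAFETY_API_KEY", "CONTENT_SAFETY_ENDPOINT"):
    if var not in os.environ:
        raise SystemExit(f"Set {var} before running this sketch.")

# No arguments: the key and endpoint are read from the environment variables above.
tool = AzureContentSafetyTextTool()

# BaseTool implements the Runnable interface, so invoke() routes the string
# through _run(), which delegates to _sentiment_analysis() and returns its
# harm-category analysis for the input text.
result = tool.invoke("This is an example sentence to screen for harmful content.")
print(result)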