Skip to content

Commit

Permalink
adhere to linting recommendations
Browse files Browse the repository at this point in the history
  • Loading branch information
Sheepsta300 committed Sep 11, 2024
1 parent c18d000 commit e086054
Showing 1 changed file with 47 additions and 50 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -9,33 +9,32 @@

logger = logging.getLogger(__name__)


class AzureContentSafetyTextTool(BaseTool):
"""
A tool that interacts with the Azure AI Content Safety API.
This tool queries the Azure AI Content Safety API to analyze text for harmful
content and identify sentiment. It requires an API key and endpoint,
which can be set up as described in the following guide:
https://learn.microsoft.com/python/api/overview/azure/ai-contentsafety-readme?view=azure-python
Attributes:
content_safety_key (str):
The API key used to authenticate requests with Azure Content Safety API.
content_safety_endpoint (str):
The endpoint URL for the Azure Content Safety API.
content_safety_client (Any):
An instance of the Azure Content Safety Client used for making API
requests.
Methods:
_sentiment_analysis(text: str) -> Dict:
Analyzes the provided text to assess its sentiment and safety,
returning the analysis results.
_run(query: str,
run_manager: Optional[CallbackManagerForToolRun] = None) -> str:
Uses the tool to analyze the given query and returns the result.
_run(query: str, run_manager: Optional[CallbackManagerForToolRun] = None) -> str:

Check failure on line 36 in libs/community/langchain_community/tools/azure_cognitive_services/content_safety.py

View workflow job for this annotation

GitHub Actions / cd libs/community / make lint #3.12

Ruff (E501)

langchain_community/tools/azure_cognitive_services/content_safety.py:36:89: E501 Line too long (89 > 88)
Uses the tool to analyze the given query and returns the result.
Raises a RuntimeError if an exception occurs.
"""

Expand All @@ -46,44 +45,41 @@ class AzureContentSafetyTextTool(BaseTool):
name: str = "azure_content_safety_tool"
description: str = (
"A wrapper around Azure AI Content Safety. "
'''Useful for when you need to identify the sentiment of text
and whether or not a text is harmful. '''
"Input should be text."
"Useful for when you need to identify the sentiment of text and whether"
" or not a text is harmful. Input should be text."
)

def __init__(
self,
*,
content_safety_key: Optional[str] = None,
content_safety_endpoint: Optional[str] = None,
) -> None:
self,
*,
content_safety_key: Optional[str] = None,
content_safety_endpoint: Optional[str] = None,
) -> None:
"""
Initialize the AzureContentSafetyTextTool with the given API key and endpoint.
This constructor sets up the API key and endpoint, and initializes
the Azure Content Safety Client. If API key or endpoint is not provided,
they are fetched from environment variables.
If not provided, the API key and endpoint are fetched from environment
variables.
Args:
content_safety_key (Optional[str]):
The API key for Azure Content Safety API. If not provided, it will
be fetched from the environment variable 'CONTENT_SAFETY_API_KEY'.
content_safety_endpoint (Optional[str]):
The endpoint URL for Azure Content Safety API. If not provided, it
will be fetched from the environment variable 'CONTENT_SAFETY_ENDPOINT'.
Raises:
ImportError: If the 'azure-ai-contentsafety' package is not installed.
ValueError: If API key or endpoint is not provided and environment
variables are missing.
"""
content_safety_key = (content_safety_key or
os.environ['CONTENT_SAFETY_API_KEY'])
content_safety_endpoint = (content_safety_endpoint or
os.environ['CONTENT_SAFETY_ENDPOINT'])
content_safety_key = content_safety_key or os.environ[
"CONTENT_SAFETY_API_KEY"
]
content_safety_endpoint = content_safety_endpoint or os.environ[
"CONTENT_SAFETY_ENDPOINT"
]
try:
import azure.ai.contentsafety as sdk
from azure.core.credentials import AzureKeyCredential
Expand All @@ -98,24 +94,25 @@ def __init__(
"azure-ai-contentsafety is not installed. "
"Run `pip install azure-ai-contentsafety` to install."
)
super().__init__(content_safety_key=content_safety_key,
content_safety_endpoint=content_safety_endpoint,
content_safety_client=content_safety_client)
super().__init__(
content_safety_key=content_safety_key,
content_safety_endpoint=content_safety_endpoint,
content_safety_client=content_safety_client,
)

def _sentiment_analysis(self, text: str) -> Dict:
"""
Perform sentiment analysis on the provided text.
This method uses the Azure Content Safety Client to analyze the text and
determine its sentiment and safety categories.
Args:
text (str): The text to be analyzed.
Returns:
Dict: The analysis results containing sentiment and safety categories.
"""

from azure.ai.contentsafety.models import AnalyzeTextOptions

request = AnalyzeTextOptions(text=text)
Expand All @@ -131,14 +128,13 @@ def _run(
"""
Analyze the given query using the tool.
This method calls `_sentiment_analysis` to process the query and returns
the result. It raises a RuntimeError if an exception occurs during
analysis.
Args:
query (str): The query text to be analyzed.
run_manager (Optional[CallbackManagerForToolRun], optional):
A callback manager for tracking the tool run. Defaults to None.
Returns:
Expand All @@ -152,4 +148,5 @@ def _run(
except Exception as e:
raise RuntimeError(
f"Error while running AzureContentSafetyTextTool: {e}"
)
)

0 comments on commit e086054

Please sign in to comment.