From 4b79413667b7539750a6c7dde15737013a3d4bd5 Mon Sep 17 00:00:00 2001 From: Mohamed Shaban Date: Sat, 30 Oct 2021 04:08:01 +0200 Subject: [PATCH] re-gen clu LLC client (#21417) * regenrate client * update model names * tmp commit * update model names * fixing tests * fix samples * update readme * fix remaining samples * fix env key error * add recorded tests * update samples * add additional samples * async samples * disable some samples * update samples readme * revert setup.py * fix broken link --- .../azure-ai-language-conversations/README.md | 44 +- .../language/conversations/_configuration.py | 2 +- .../_conversation_analysis_client.py | 2 +- .../conversations/aio/_configuration.py | 2 +- .../aio/_conversation_analysis_client.py | 2 +- .../aio/operations/_operations.py | 10 +- .../language/conversations/models/__init__.py | 86 +- .../_conversation_analysis_client_enums.py | 16 +- .../language/conversations/models/_models.py | 781 ++++++++++------ .../conversations/models/_models_py3.py | 855 +++++++++++------- .../conversations/operations/_operations.py | 12 +- .../samples/README.md | 30 +- .../sample_analyze_conversation_app_async.py | 10 +- ...ze_conversation_app_language_parm_async.py | 78 ++ ...sample_analyze_orchestration_app_async.py} | 30 +- ...tration_app_conversation_response_async.py | 86 ++ ...e_orchestration_app_luis_response_async.py | 77 ++ ...ze_orchestration_app_qna_response_async.py | 79 ++ ...ze_orchestration_app_with_params_async.py} | 32 +- .../sample_analyze_conversation_app.py | 10 +- ..._analyze_conversation_app_language_parm.py | 73 ++ ...py => sample_analyze_orchestration_app.py} | 31 +- ...orchestration_app_conversation_response.py | 80 ++ ...analyze_orchestration_app_luis_response.py | 71 ++ ..._analyze_orchestration_app_qna_response.py | 72 ++ ..._analyze_orchestration_app_with_params.py} | 34 +- ...ation_app_async.test_conversation_app.yaml | 20 +- ...test_conversation_app_with_dictparams.yaml | 20 +- 
...tion_app_async.test_orchestration_app.yaml | 76 ++ ...ync.test_orchestration_app_with_model.yaml | 44 + ...est_orchestration_app_with_parameters.yaml | 44 + .../test_conversation_app_async.py | 10 +- .../test_orchestration_app_async.py} | 62 +- .../test_orchestration_direct_async.py} | 68 +- ...onversation_app.test_conversation_app.yaml | 23 +- ...test_conversation_app_with_dictparams.yaml | 23 +- ...hestration_app.test_orchestration_app.yaml | 102 +++ ...app.test_orchestration_app_with_model.yaml | 57 ++ ...est_orchestration_app_with_parameters.yaml | 57 ++ .../test_workflow_app.test_workflow_app.yaml | 215 ----- ...flow_app.test_workflow_app_with_model.yaml | 142 --- ...app.test_workflow_app_with_parameters.yaml | 142 --- ..._workflow_app_async.test_workflow_app.yaml | 186 ---- ...pp_async.test_workflow_app_with_model.yaml | 127 --- ...ync.test_workflow_app_with_parameters.yaml | 127 --- .../tests/test_conversation_app.py | 11 +- ...kflow_app.py => test_orchestration_app.py} | 60 +- ...direct.py => test_orchestration_direct.py} | 54 +- .../tests/testcase.py | 4 +- 49 files changed, 2361 insertions(+), 1918 deletions(-) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_language_parm_async.py rename sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/{sample_analyze_workflow_app_async.py => sample_analyze_orchestration_app_async.py} (69%) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_conversation_response_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_qna_response_async.py rename 
sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/{sample_analyze_workflow_app_with_params_async.py => sample_analyze_orchestration_app_with_params_async.py} (74%) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app_language_parm.py rename sdk/cognitivelanguage/azure-ai-language-conversations/samples/{sample_analyze_workflow_app.py => sample_analyze_orchestration_app.py} (68%) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_conversation_response.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_luis_response.py create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_qna_response.py rename sdk/cognitivelanguage/azure-ai-language-conversations/samples/{sample_analyze_workflow_app_with_params.py => sample_analyze_orchestration_app_with_params.py} (72%) rename sdk/cognitivelanguage/azure-ai-language-conversations/tests/{ => async}/recordings/test_conversation_app_async.test_conversation_app.yaml (54%) rename sdk/cognitivelanguage/azure-ai-language-conversations/tests/{ => async}/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml (54%) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_model.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_parameters.yaml rename sdk/cognitivelanguage/azure-ai-language-conversations/tests/{ => async}/test_conversation_app_async.py (92%) rename 
sdk/cognitivelanguage/azure-ai-language-conversations/tests/{test_workflow_app_async.py => async/test_orchestration_app_async.py} (66%) rename sdk/cognitivelanguage/azure-ai-language-conversations/tests/{test_workflow_direct_async.py => async/test_orchestration_direct_async.py} (75%) create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_model.yaml create mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_parameters.yaml delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml delete mode 100644 sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml rename sdk/cognitivelanguage/azure-ai-language-conversations/tests/{test_workflow_app.py => test_orchestration_app.py} (66%) rename sdk/cognitivelanguage/azure-ai-language-conversations/tests/{test_workflow_direct.py => test_orchestration_direct.py} (76%) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/README.md b/sdk/cognitivelanguage/azure-ai-language-conversations/README.md index 
14106ed8a3fd..b78c0622eb3f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/README.md +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/README.md @@ -2,9 +2,9 @@ # Azure Conversational Language Understanding client library for Python Conversational Language Understanding, aka **CLU** for short, is a cloud-based conversational AI service which is mainly used in bots to extract useful information from user utterance (natural language processing). -The CLU **analyze api** encompasses two projects; deepstack, and workflow projects. -You can use the "deepstack" project if you want to extract intents (intention behind a user utterance] and custom entities. -You can also use the "workflow" project which orchestrates multiple language apps to get the best response (language apps like Question Answering, Luis, and Deepstack). +The CLU **analyze api** encompasses two projects; conversation, and orchestration projects. +You can use the "conversation" project if you want to extract intents (intention behind a user utterance) and custom entities. +You can also use the "orchestration" project which orchestrates multiple language apps to get the best response (language apps like Question Answering, Luis, and Conversation). [Source code][conversationallanguage_client_src] | [Package (PyPI)][conversationallanguage_pypi_package] | [API reference documentation][conversationallanguage_refdocs] | [Product documentation][conversationallanguage_docs] | [Samples][conversationallanguage_samples] @@ -67,8 +67,8 @@ The `azure-ai-language-conversation` client library provides both synchronous an The following examples show common scenarios using the `client` [created above](#create-conversationanalysisclient). 
-### Analyze a conversation with a Deepstack App -If you would like to extract custom intents and entities from a user utterance, you can call the `client.analyze_conversations()` method with your deepstack's project name as follows: +### Analyze a conversation with a Conversation App +If you would like to extract custom intents and entities from a user utterance, you can call the `client.analyze_conversations()` method with your conversation's project name as follows: ```python # import libraries @@ -76,7 +76,7 @@ import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations import ConversationAnalysisClient -from azure.ai.language.conversations.models import AnalyzeConversationOptions +from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] @@ -85,7 +85,7 @@ conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"] # prepare data query = "One california maki please." -input = AnalyzeConversationOptions( +input = ConversationAnalysisOptions( query=query ) @@ -103,7 +103,7 @@ print("query: {}".format(result.query)) print("project kind: {}\n".format(result.prediction.project_kind)) print("view top intent:") -print("top intent: {}".format(result.prediction.top_intent)) +print("\ttop intent: {}".format(result.prediction.top_intent)) print("\tcategory: {}".format(result.prediction.intents[0].category)) print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) @@ -114,9 +114,9 @@ for entity in result.prediction.entities: print("\tconfidence score: {}".format(entity.confidence_score)) ``` -### Analyze conversation with a Workflow App +### Analyze conversation with an Orchestration App -If you would like to pass the user utterance to your orchestrator (worflow) app, you can call the `client.analyze_conversations()` method with your workflow's project name.
The orchestrator project simply orchestrates the submitted user utterance between your language apps (Luis, Deepstack, and Question Answering) to get the best response according to the user intent. See the next example: +If you would like to pass the user utterance to your orchestrator (workflow) app, you can call the `client.analyze_conversations()` method with your orchestration's project name. The orchestrator project simply orchestrates the submitted user utterance between your language apps (Luis, Conversation, and Question Answering) to get the best response according to the user intent. See the next example: ```python # import libraries @@ -124,16 +124,16 @@ import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations import ConversationAnalysisClient -from azure.ai.language.conversations.models import AnalyzeConversationOptions +from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] -workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT") +orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] # prepare data query = "How do you make sushi rice?", -input = AnalyzeConversationOptions( +input = ConversationAnalysisOptions( query=query ) @@ -142,7 +142,7 @@ client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) @@ -151,7 +151,7 @@ print("query: {}".format(result.query)) print("project kind: {}\n".format(result.prediction.project_kind)) print("view top intent:") -print("top intent: {}".format(result.prediction.top_intent)) +print("\ttop intent: {}".format(result.prediction.top_intent)) print("\tcategory: {}".format(result.prediction.intents[0].category))
print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) @@ -159,9 +159,9 @@ print("view Question Answering result:") print("\tresult: {}\n".format(result.prediction.intents[0].result)) ``` -### Analyze conversation with a Workflow (Direct) App +### Analyze conversation with a Orchestration (Direct) App -If you would like to use an orchestrator (workflow) app, and you want to call a specific one of your language apps directly, you can call the `client.analyze_conversations()` method with your workflow's project name and the diirect target name which corresponds to your one of you language apps as follows: +If you would like to use an orchestrator (orchestration) app, and you want to call a specific one of your language apps directly, you can call the `client.analyze_conversations()` method with your orchestration's project name and the diirect target name which corresponds to your one of you language apps as follows: ```python # import libraries @@ -169,17 +169,17 @@ import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations import ConversationAnalysisClient -from azure.ai.language.conversations.models import AnalyzeConversationOptions +from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] -workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT") +orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT") # prepare data query = "How do you make sushi rice?", target_intent = "SushiMaking" -input = AnalyzeConversationOptions( +input = ConversationAnalysisOptions( query=query, direct_target=target_intent, parameters={ @@ -198,7 +198,7 @@ client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) with client: result = client.analyze_conversations( input, - project_name=workflow_project, + 
project_name=orchestration_project, deployment_name='production', ) @@ -207,7 +207,7 @@ print("query: {}".format(result.query)) print("project kind: {}\n".format(result.prediction.project_kind)) print("view top intent:") -print("top intent: {}".format(result.prediction.top_intent)) +print("\ttop intent: {}".format(result.prediction.top_intent)) print("\tcategory: {}".format(result.prediction.intents[0].category)) print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py index 12a99c2f6eed..ff94c8b5d93b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_configuration.py @@ -47,7 +47,7 @@ def __init__( self.endpoint = endpoint self.credential = credential - self.api_version = "2021-07-15-preview" + self.api_version = "2021-11-01-preview" kwargs.setdefault('sdk_moniker', 'ai-language-conversations/{}'.format(VERSION)) self._configure(**kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py index 0626bb6e4fa1..e661eaeabfb0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/_conversation_analysis_client.py @@ -24,7 +24,7 @@ from azure.core.rest import HttpRequest, HttpResponse class ConversationAnalysisClient(ConversationAnalysisClientOperationsMixin): - """This API accepts a request and 
mediates among multiple language projects, such as LUIS Generally Available, Question Answering, LUIS Deepstack, and then calls the best candidate service to handle the request. At last, it returns a response with the candidate service's response as a payload. + """This API accepts a request and mediates among multiple language projects, such as LUIS Generally Available, Question Answering, Conversation, and then calls the best candidate service to handle the request. At last, it returns a response with the candidate service's response as a payload. In some cases, this API needs to forward requests and responses between the caller and an upstream service. diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py index 7dc15b360c92..c478d8965c08 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_configuration.py @@ -41,7 +41,7 @@ def __init__( self.endpoint = endpoint self.credential = credential - self.api_version = "2021-07-15-preview" + self.api_version = "2021-11-01-preview" kwargs.setdefault('sdk_moniker', 'ai-language-conversations/{}'.format(VERSION)) self._configure(**kwargs) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py index aec88d6bbf2b..8d70499a97e7 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/_conversation_analysis_client.py 
@@ -19,7 +19,7 @@ from .operations import ConversationAnalysisClientOperationsMixin class ConversationAnalysisClient(ConversationAnalysisClientOperationsMixin): - """This API accepts a request and mediates among multiple language projects, such as LUIS Generally Available, Question Answering, LUIS Deepstack, and then calls the best candidate service to handle the request. At last, it returns a response with the candidate service's response as a payload. + """This API accepts a request and mediates among multiple language projects, such as LUIS Generally Available, Question Answering, Conversation, and then calls the best candidate service to handle the request. At last, it returns a response with the candidate service's response as a payload. In some cases, this API needs to forward requests and responses between the caller and an upstream service. diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py index d279fae87db2..1531d91022df 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/aio/operations/_operations.py @@ -26,7 +26,7 @@ class ConversationAnalysisClientOperationsMixin: @distributed_trace_async async def analyze_conversations( self, - analyze_conversation_options: "_models.AnalyzeConversationOptions", + conversation_analysis_options: "_models.ConversationAnalysisOptions", *, project_name: str, deployment_name: str, @@ -34,9 +34,9 @@ async def analyze_conversations( ) -> "_models.AnalyzeConversationResult": """Analyzes the input conversation utterance. - :param analyze_conversation_options: Post body of the request. 
- :type analyze_conversation_options: - ~azure.ai.language.conversations.models.AnalyzeConversationOptions + :param conversation_analysis_options: Post body of the request. + :type conversation_analysis_options: + ~azure.ai.language.conversations.models.ConversationAnalysisOptions :keyword project_name: The name of the project to use. :paramtype project_name: str :keyword deployment_name: The name of the specific deployment of the project to use. @@ -53,7 +53,7 @@ async def analyze_conversations( content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] - json = self._serialize.body(analyze_conversation_options, 'AnalyzeConversationOptions') + json = self._serialize.body(conversation_analysis_options, 'ConversationAnalysisOptions') request = build_analyze_conversations_request( content_type=content_type, diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py index 69d031432af2..04df4af52f87 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/__init__.py @@ -7,89 +7,99 @@ # -------------------------------------------------------------------------- try: - from ._models_py3 import AnalyzeConversationOptions + from ._models_py3 import AnalysisParameters from ._models_py3 import AnalyzeConversationResult - from ._models_py3 import AnalyzeParameters + from ._models_py3 import AnswerSpan from ._models_py3 import BasePrediction - from ._models_py3 import DSTargetIntentResult - from ._models_py3 import DeepStackEntityResolution - from ._models_py3 import DeepstackCallingOptions - from ._models_py3 import DeepstackEntity - from ._models_py3 import DeepstackIntent - from ._models_py3 import DeepstackParameters - from 
._models_py3 import DeepstackPrediction - from ._models_py3 import DeepstackResult - from ._models_py3 import DictionaryNormalizedValueResolution + from ._models_py3 import ConversationAnalysisOptions + from ._models_py3 import ConversationCallingOptions + from ._models_py3 import ConversationEntity + from ._models_py3 import ConversationIntent + from ._models_py3 import ConversationParameters + from ._models_py3 import ConversationPrediction + from ._models_py3 import ConversationResult + from ._models_py3 import ConversationTargetIntentResult from ._models_py3 import Error from ._models_py3 import ErrorResponse from ._models_py3 import InnerErrorModel + from ._models_py3 import KnowledgeBaseAnswer + from ._models_py3 import KnowledgeBaseAnswerDialog + from ._models_py3 import KnowledgeBaseAnswerPrompt + from ._models_py3 import KnowledgeBaseAnswers from ._models_py3 import LUISCallingOptions from ._models_py3 import LUISParameters from ._models_py3 import LUISTargetIntentResult + from ._models_py3 import NoneLinkedTargetIntentResult + from ._models_py3 import OrchestratorPrediction from ._models_py3 import QuestionAnsweringParameters from ._models_py3 import QuestionAnsweringTargetIntentResult from ._models_py3 import TargetIntentResult - from ._models_py3 import WorkflowPrediction except (SyntaxError, ImportError): - from ._models import AnalyzeConversationOptions # type: ignore + from ._models import AnalysisParameters # type: ignore from ._models import AnalyzeConversationResult # type: ignore - from ._models import AnalyzeParameters # type: ignore + from ._models import AnswerSpan # type: ignore from ._models import BasePrediction # type: ignore - from ._models import DSTargetIntentResult # type: ignore - from ._models import DeepStackEntityResolution # type: ignore - from ._models import DeepstackCallingOptions # type: ignore - from ._models import DeepstackEntity # type: ignore - from ._models import DeepstackIntent # type: ignore - from ._models import 
DeepstackParameters # type: ignore - from ._models import DeepstackPrediction # type: ignore - from ._models import DeepstackResult # type: ignore - from ._models import DictionaryNormalizedValueResolution # type: ignore + from ._models import ConversationAnalysisOptions # type: ignore + from ._models import ConversationCallingOptions # type: ignore + from ._models import ConversationEntity # type: ignore + from ._models import ConversationIntent # type: ignore + from ._models import ConversationParameters # type: ignore + from ._models import ConversationPrediction # type: ignore + from ._models import ConversationResult # type: ignore + from ._models import ConversationTargetIntentResult # type: ignore from ._models import Error # type: ignore from ._models import ErrorResponse # type: ignore from ._models import InnerErrorModel # type: ignore + from ._models import KnowledgeBaseAnswer # type: ignore + from ._models import KnowledgeBaseAnswerDialog # type: ignore + from ._models import KnowledgeBaseAnswerPrompt # type: ignore + from ._models import KnowledgeBaseAnswers # type: ignore from ._models import LUISCallingOptions # type: ignore from ._models import LUISParameters # type: ignore from ._models import LUISTargetIntentResult # type: ignore + from ._models import NoneLinkedTargetIntentResult # type: ignore + from ._models import OrchestratorPrediction # type: ignore from ._models import QuestionAnsweringParameters # type: ignore from ._models import QuestionAnsweringTargetIntentResult # type: ignore from ._models import TargetIntentResult # type: ignore - from ._models import WorkflowPrediction # type: ignore from ._conversation_analysis_client_enums import ( ErrorCode, InnerErrorCode, ProjectKind, - ResolutionKind, TargetKind, ) __all__ = [ - 'AnalyzeConversationOptions', + 'AnalysisParameters', 'AnalyzeConversationResult', - 'AnalyzeParameters', + 'AnswerSpan', 'BasePrediction', - 'DSTargetIntentResult', - 'DeepStackEntityResolution', - 
'DeepstackCallingOptions', - 'DeepstackEntity', - 'DeepstackIntent', - 'DeepstackParameters', - 'DeepstackPrediction', - 'DeepstackResult', - 'DictionaryNormalizedValueResolution', + 'ConversationAnalysisOptions', + 'ConversationCallingOptions', + 'ConversationEntity', + 'ConversationIntent', + 'ConversationParameters', + 'ConversationPrediction', + 'ConversationResult', + 'ConversationTargetIntentResult', 'Error', 'ErrorResponse', 'InnerErrorModel', + 'KnowledgeBaseAnswer', + 'KnowledgeBaseAnswerDialog', + 'KnowledgeBaseAnswerPrompt', + 'KnowledgeBaseAnswers', 'LUISCallingOptions', 'LUISParameters', 'LUISTargetIntentResult', + 'NoneLinkedTargetIntentResult', + 'OrchestratorPrediction', 'QuestionAnsweringParameters', 'QuestionAnsweringTargetIntentResult', 'TargetIntentResult', - 'WorkflowPrediction', 'ErrorCode', 'InnerErrorCode', 'ProjectKind', - 'ResolutionKind', 'TargetKind', ] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py index cdc67ea5d6e5..7446025efa04 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_conversation_analysis_client_enums.py @@ -20,7 +20,13 @@ class ErrorCode(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): UNAUTHORIZED = "Unauthorized" FORBIDDEN = "Forbidden" NOT_FOUND = "NotFound" + PROJECT_NOT_FOUND = "ProjectNotFound" + OPERATION_NOT_FOUND = "OperationNotFound" + AZURE_COGNITIVE_SEARCH_NOT_FOUND = "AzureCognitiveSearchNotFound" + AZURE_COGNITIVE_SEARCH_INDEX_NOT_FOUND = "AzureCognitiveSearchIndexNotFound" TOO_MANY_REQUESTS = "TooManyRequests" + AZURE_COGNITIVE_SEARCH_THROTTLING = "AzureCognitiveSearchThrottling" + 
AZURE_COGNITIVE_SEARCH_INDEX_LIMIT_REACHED = "AzureCognitiveSearchIndexLimitReached" INTERNAL_SERVER_ERROR = "InternalServerError" SERVICE_UNAVAILABLE = "ServiceUnavailable" @@ -42,17 +48,11 @@ class ProjectKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): CONVERSATION = "conversation" WORKFLOW = "workflow" -class ResolutionKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): - """The type of an entity resolution. - """ - - #: Dictionary normalized entities. - DICTIONARY_NORMALIZED_VALUE = "DictionaryNormalizedValue" - class TargetKind(with_metaclass(CaseInsensitiveEnumMeta, str, Enum)): """The type of a target service. """ LUIS = "luis" - LUIS_DEEPSTACK = "luis_deepstack" + CONVERSATION = "conversation" QUESTION_ANSWERING = "question_answering" + NON_LINKED = "non_linked" diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py index fd2c107aae65..aa651cbf3338 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models.py @@ -10,38 +10,32 @@ import msrest.serialization -class AnalyzeConversationOptions(msrest.serialization.Model): - """The request body. +class AnalysisParameters(msrest.serialization.Model): + """This is the parameter set of either the Orchestration project itself or one of the target services. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ConversationParameters, LUISParameters, QuestionAnsweringParameters. All required parameters must be populated in order to send to Azure. - :ivar query: Required. The conversation utterance to be analyzed. - :vartype query: str - :ivar direct_target: The name of the target project this request is sending to directly. 
- :vartype direct_target: str - :ivar language: The language to use in this request. This will be the language setting when - communicating with all other target projects. - :vartype language: str - :ivar verbose: If true, the service will return more detailed information in the response. - :vartype verbose: bool - :ivar is_logging_enabled: If true, the query will be kept by the service for customers to - further review, to improve the model quality. - :vartype is_logging_enabled: bool - :ivar parameters: A dictionary representing the input for each target project. - :vartype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str """ _validation = { - 'query': {'required': True}, + 'target_kind': {'required': True}, } _attribute_map = { - 'query': {'key': 'query', 'type': 'str'}, - 'direct_target': {'key': 'directTarget', 'type': 'str'}, - 'language': {'key': 'language', 'type': 'str'}, - 'verbose': {'key': 'verbose', 'type': 'bool'}, - 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, - 'parameters': {'key': 'parameters', 'type': '{AnalyzeParameters}'}, + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'target_kind': {'conversation': 'ConversationParameters', 'luis': 'LUISParameters', 'question_answering': 'QuestionAnsweringParameters'} } def __init__( @@ -49,28 +43,12 @@ def __init__( **kwargs ): """ - :keyword query: Required. The conversation utterance to be analyzed. 
- :paramtype query: str - :keyword direct_target: The name of the target project this request is sending to directly. - :paramtype direct_target: str - :keyword language: The language to use in this request. This will be the language setting when - communicating with all other target projects. - :paramtype language: str - :keyword verbose: If true, the service will return more detailed information in the response. - :paramtype verbose: bool - :keyword is_logging_enabled: If true, the query will be kept by the service for customers to - further review, to improve the model quality. - :paramtype is_logging_enabled: bool - :keyword parameters: A dictionary representing the input for each target project. - :paramtype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str """ - super(AnalyzeConversationOptions, self).__init__(**kwargs) - self.query = kwargs['query'] - self.direct_target = kwargs.get('direct_target', None) - self.language = kwargs.get('language', None) - self.verbose = kwargs.get('verbose', None) - self.is_logging_enabled = kwargs.get('is_logging_enabled', None) - self.parameters = kwargs.get('parameters', None) + super(AnalysisParameters, self).__init__(**kwargs) + self.target_kind = None # type: Optional[str] + self.api_version = kwargs.get('api_version', None) class AnalyzeConversationResult(msrest.serialization.Model): @@ -115,32 +93,28 @@ def __init__( self.prediction = kwargs['prediction'] -class AnalyzeParameters(msrest.serialization.Model): - """This is the parameter set of either the conversation application itself or one of the target services. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LUISParameters, DeepstackParameters, QuestionAnsweringParameters. +class AnswerSpan(msrest.serialization.Model): + """Answer span object of QnA. 
- All required parameters must be populated in order to send to Azure. - - :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind - :ivar api_version: The API version to use when call a specific target service. - :vartype api_version: str + :ivar text: Predicted text of answer span. + :vartype text: str + :ivar confidence_score: Predicted score of answer span, value ranges from 0 to 1. + :vartype confidence_score: float + :ivar offset: The answer span offset from the start of answer. + :vartype offset: int + :ivar length: The length of the answer span. + :vartype length: int """ _validation = { - 'target_kind': {'required': True}, + 'confidence_score': {'maximum': 1, 'minimum': 0}, } _attribute_map = { - 'target_kind': {'key': 'targetKind', 'type': 'str'}, - 'api_version': {'key': 'apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'target_kind': {'luis': 'LUISParameters', 'luis_deepstack': 'DeepstackParameters', 'question_answering': 'QuestionAnsweringParameters'} + 'text': {'key': 'text', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, } def __init__( @@ -148,27 +122,35 @@ def __init__( **kwargs ): """ - :keyword api_version: The API version to use when call a specific target service. - :paramtype api_version: str + :keyword text: Predicted text of answer span. + :paramtype text: str + :keyword confidence_score: Predicted score of answer span, value ranges from 0 to 1. + :paramtype confidence_score: float + :keyword offset: The answer span offset from the start of answer. + :paramtype offset: int + :keyword length: The length of the answer span. 
+ :paramtype length: int """ - super(AnalyzeParameters, self).__init__(**kwargs) - self.target_kind = None # type: Optional[str] - self.api_version = kwargs.get('api_version', None) + super(AnswerSpan, self).__init__(**kwargs) + self.text = kwargs.get('text', None) + self.confidence_score = kwargs.get('confidence_score', None) + self.offset = kwargs.get('offset', None) + self.length = kwargs.get('length', None) class BasePrediction(msrest.serialization.Model): """This is the base class of prediction. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeepstackPrediction, WorkflowPrediction. + sub-classes are: ConversationPrediction, OrchestratorPrediction. All required parameters must be populated in order to send to Azure. - :ivar top_intent: The intent with the highest score. - :vartype top_intent: str :ivar project_kind: Required. The type of the project.Constant filled by server. Possible values include: "conversation", "workflow". :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar top_intent: The intent with the highest score. 
+ :vartype top_intent: str """ _validation = { @@ -176,12 +158,12 @@ class BasePrediction(msrest.serialization.Model): } _attribute_map = { + 'project_kind': {'key': 'projectKind', 'type': 'str'}, 'top_intent': {'key': 'topIntent', 'type': 'str'}, - 'project_kind': {'key': 'projectType', 'type': 'str'}, } _subtype_map = { - 'project_kind': {'conversation': 'DeepstackPrediction', 'workflow': 'WorkflowPrediction'} + 'project_kind': {'conversation': 'ConversationPrediction', 'workflow': 'OrchestratorPrediction'} } def __init__( @@ -193,12 +175,75 @@ def __init__( :paramtype top_intent: str """ super(BasePrediction, self).__init__(**kwargs) - self.top_intent = kwargs.get('top_intent', None) self.project_kind = None # type: Optional[str] + self.top_intent = kwargs.get('top_intent', None) + + +class ConversationAnalysisOptions(msrest.serialization.Model): + """The request body. + + All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The conversation utterance to be analyzed. + :vartype query: str + :ivar direct_target: The name of the target project this request is sending to directly. + :vartype direct_target: str + :ivar language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :vartype language: str + :ivar verbose: If true, the service will return more detailed information in the response. + :vartype verbose: bool + :ivar is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :vartype is_logging_enabled: bool + :ivar parameters: A dictionary representing the input for each target project. + :vartype parameters: dict[str, ~azure.ai.language.conversations.models.AnalysisParameters] + """ + _validation = { + 'query': {'required': True}, + } -class DeepstackCallingOptions(msrest.serialization.Model): - """The option to set to call a LUIS Deepstack project. 
+ _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'direct_target': {'key': 'directTarget', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, + 'parameters': {'key': 'parameters', 'type': '{AnalysisParameters}'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword query: Required. The conversation utterance to be analyzed. + :paramtype query: str + :keyword direct_target: The name of the target project this request is sending to directly. + :paramtype direct_target: str + :keyword language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :paramtype language: str + :keyword verbose: If true, the service will return more detailed information in the response. + :paramtype verbose: bool + :keyword is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :paramtype is_logging_enabled: bool + :keyword parameters: A dictionary representing the input for each target project. + :paramtype parameters: dict[str, ~azure.ai.language.conversations.models.AnalysisParameters] + """ + super(ConversationAnalysisOptions, self).__init__(**kwargs) + self.query = kwargs['query'] + self.direct_target = kwargs.get('direct_target', None) + self.language = kwargs.get('language', None) + self.verbose = kwargs.get('verbose', None) + self.is_logging_enabled = kwargs.get('is_logging_enabled', None) + self.parameters = kwargs.get('parameters', None) + + +class ConversationCallingOptions(msrest.serialization.Model): + """The option to set to call a Conversation project. :ivar language: The language of the query. :vartype language: str @@ -228,14 +273,14 @@ def __init__( in authoring, to improve the model quality. 
:paramtype is_logging_enabled: bool """ - super(DeepstackCallingOptions, self).__init__(**kwargs) + super(ConversationCallingOptions, self).__init__(**kwargs) self.language = kwargs.get('language', None) self.verbose = kwargs.get('verbose', None) self.is_logging_enabled = kwargs.get('is_logging_enabled', None) -class DeepstackEntity(msrest.serialization.Model): - """The entity extraction result of a LUIS Deepstack project. +class ConversationEntity(msrest.serialization.Model): + """The entity extraction result of a Conversation project. All required parameters must be populated in order to send to Azure. @@ -249,8 +294,8 @@ class DeepstackEntity(msrest.serialization.Model): :vartype length: int :ivar confidence_score: Required. The entity confidence score. :vartype confidence_score: float - :ivar resolution: A array with extra information about the entity. - :vartype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + :ivar list_keys: List of keys. + :vartype list_keys: list[str] """ _validation = { @@ -267,7 +312,7 @@ class DeepstackEntity(msrest.serialization.Model): 'offset': {'key': 'offset', 'type': 'int'}, 'length': {'key': 'length', 'type': 'int'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'resolution': {'key': 'resolution', 'type': '[DeepStackEntityResolution]'}, + 'list_keys': {'key': 'listKeys', 'type': '[str]'}, } def __init__( @@ -285,59 +330,20 @@ def __init__( :paramtype length: int :keyword confidence_score: Required. The entity confidence score. :paramtype confidence_score: float - :keyword resolution: A array with extra information about the entity. - :paramtype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + :keyword list_keys: List of keys. 
+ :paramtype list_keys: list[str] """ - super(DeepstackEntity, self).__init__(**kwargs) + super(ConversationEntity, self).__init__(**kwargs) self.category = kwargs['category'] self.text = kwargs['text'] self.offset = kwargs['offset'] self.length = kwargs['length'] self.confidence_score = kwargs['confidence_score'] - self.resolution = kwargs.get('resolution', None) - - -class DeepStackEntityResolution(msrest.serialization.Model): - """This is the base class of all kinds of entity resolutions. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - """ + self.list_keys = kwargs.get('list_keys', None) - _validation = { - 'resolution_kind': {'required': True}, - } - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - """ - super(DeepStackEntityResolution, self).__init__(**kwargs) - self.additional_properties = kwargs.get('additional_properties', None) - self.resolution_kind = kwargs['resolution_kind'] - - -class DeepstackIntent(msrest.serialization.Model): - """The intent classification result of a LUIS Deepstack project. 
+class ConversationIntent(msrest.serialization.Model): + """The intent classification result of a Conversation project. All required parameters must be populated in order to send to Azure. @@ -367,23 +373,23 @@ def __init__( :keyword confidence_score: Required. The confidence score of the class from 0.0 to 1.0. :paramtype confidence_score: float """ - super(DeepstackIntent, self).__init__(**kwargs) + super(ConversationIntent, self).__init__(**kwargs) self.category = kwargs['category'] self.confidence_score = kwargs['confidence_score'] -class DeepstackParameters(AnalyzeParameters): - """This is a set of request parameters for LUIS Deepstack projects. +class ConversationParameters(AnalysisParameters): + """This is a set of request parameters for Customized Conversation projects. All required parameters must be populated in order to send to Azure. :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". + values include: "luis", "conversation", "question_answering", "non_linked". :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version to use when call a specific target service. :vartype api_version: str - :ivar calling_options: The option to set to call a LUIS Deepstack project. - :vartype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + :ivar calling_options: The option to set to call a Conversation project. 
+ :vartype calling_options: ~azure.ai.language.conversations.models.ConversationCallingOptions """ _validation = { @@ -393,7 +399,7 @@ class DeepstackParameters(AnalyzeParameters): _attribute_map = { 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, - 'calling_options': {'key': 'callingOptions', 'type': 'DeepstackCallingOptions'}, + 'calling_options': {'key': 'callingOptions', 'type': 'ConversationCallingOptions'}, } def __init__( @@ -403,28 +409,28 @@ def __init__( """ :keyword api_version: The API version to use when call a specific target service. :paramtype api_version: str - :keyword calling_options: The option to set to call a LUIS Deepstack project. - :paramtype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + :keyword calling_options: The option to set to call a Conversation project. + :paramtype calling_options: ~azure.ai.language.conversations.models.ConversationCallingOptions """ - super(DeepstackParameters, self).__init__(**kwargs) - self.target_kind = 'luis_deepstack' # type: str + super(ConversationParameters, self).__init__(**kwargs) + self.target_kind = 'conversation' # type: str self.calling_options = kwargs.get('calling_options', None) -class DeepstackPrediction(BasePrediction): - """Represents the prediction section of a LUIS Deepstack project. +class ConversationPrediction(BasePrediction): + """Represents the prediction section of a Conversation project. All required parameters must be populated in order to send to Azure. - :ivar top_intent: The intent with the highest score. - :vartype top_intent: str :ivar project_kind: Required. The type of the project.Constant filled by server. Possible values include: "conversation", "workflow". :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str :ivar intents: Required. The intent classification results. 
- :vartype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :vartype intents: list[~azure.ai.language.conversations.models.ConversationIntent] :ivar entities: Required. The entity extraction results. - :vartype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + :vartype entities: list[~azure.ai.language.conversations.models.ConversationEntity] """ _validation = { @@ -434,10 +440,10 @@ class DeepstackPrediction(BasePrediction): } _attribute_map = { + 'project_kind': {'key': 'projectKind', 'type': 'str'}, 'top_intent': {'key': 'topIntent', 'type': 'str'}, - 'project_kind': {'key': 'projectType', 'type': 'str'}, - 'intents': {'key': 'intents', 'type': '[DeepstackIntent]'}, - 'entities': {'key': 'entities', 'type': '[DeepstackEntity]'}, + 'intents': {'key': 'intents', 'type': '[ConversationIntent]'}, + 'entities': {'key': 'entities', 'type': '[ConversationEntity]'}, } def __init__( @@ -448,18 +454,18 @@ def __init__( :keyword top_intent: The intent with the highest score. :paramtype top_intent: str :keyword intents: Required. The intent classification results. - :paramtype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :paramtype intents: list[~azure.ai.language.conversations.models.ConversationIntent] :keyword entities: Required. The entity extraction results. - :paramtype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + :paramtype entities: list[~azure.ai.language.conversations.models.ConversationEntity] """ - super(DeepstackPrediction, self).__init__(**kwargs) + super(ConversationPrediction, self).__init__(**kwargs) self.project_kind = 'conversation' # type: str self.intents = kwargs['intents'] self.entities = kwargs['entities'] -class DeepstackResult(msrest.serialization.Model): - """The response returned by a LUIS Deepstack project. +class ConversationResult(msrest.serialization.Model): + """The response returned by a Conversation project. 
All required parameters must be populated in order to send to Azure. @@ -468,7 +474,7 @@ class DeepstackResult(msrest.serialization.Model): :ivar detected_language: The detected language from the query. :vartype detected_language: str :ivar prediction: Required. The predicted result for the query. - :vartype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + :vartype prediction: ~azure.ai.language.conversations.models.ConversationPrediction """ _validation = { @@ -479,7 +485,7 @@ class DeepstackResult(msrest.serialization.Model): _attribute_map = { 'query': {'key': 'query', 'type': 'str'}, 'detected_language': {'key': 'detectedLanguage', 'type': 'str'}, - 'prediction': {'key': 'prediction', 'type': 'DeepstackPrediction'}, + 'prediction': {'key': 'prediction', 'type': 'ConversationPrediction'}, } def __init__( @@ -492,89 +498,45 @@ def __init__( :keyword detected_language: The detected language from the query. :paramtype detected_language: str :keyword prediction: Required. The predicted result for the query. - :paramtype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + :paramtype prediction: ~azure.ai.language.conversations.models.ConversationPrediction """ - super(DeepstackResult, self).__init__(**kwargs) + super(ConversationResult, self).__init__(**kwargs) self.query = kwargs['query'] self.detected_language = kwargs.get('detected_language', None) self.prediction = kwargs['prediction'] -class DictionaryNormalizedValueResolution(DeepStackEntityResolution): - """The DictionaryNormalizedValue resolution indicates entity values are extracted from a predefined dictionary. For example, Coca could be a normalized name for Coca-Cola. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar resolution_kind: Required. 
The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - :ivar values: A list of normalized entities. - :vartype values: list[str] - """ - - _validation = { - 'resolution_kind': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[str]'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - :keyword values: A list of normalized entities. - :paramtype values: list[str] - """ - super(DictionaryNormalizedValueResolution, self).__init__(**kwargs) - self.values = kwargs.get('values', None) - - class TargetIntentResult(msrest.serialization.Model): """This is the base class of an intent prediction. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LUISTargetIntentResult, DSTargetIntentResult, QuestionAnsweringTargetIntentResult. + sub-classes are: ConversationTargetIntentResult, LUISTargetIntentResult, NoneLinkedTargetIntentResult, QuestionAnsweringTargetIntentResult. All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". 
+ :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, } _subtype_map = { - 'target_kind': {'luis': 'LUISTargetIntentResult', 'luis_deepstack': 'DSTargetIntentResult', 'question_answering': 'QuestionAnsweringTargetIntentResult'} + 'target_kind': {'conversation': 'ConversationTargetIntentResult', 'luis': 'LUISTargetIntentResult', 'non_linked': 'NoneLinkedTargetIntentResult', 'question_answering': 'QuestionAnsweringTargetIntentResult'} } def __init__( @@ -584,43 +546,42 @@ def __init__( """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. 
:paramtype confidence_score: float """ super(TargetIntentResult, self).__init__(**kwargs) - self.api_version = kwargs.get('api_version', None) - self.confidence_score = kwargs.get('confidence_score', None) self.target_kind = None # type: Optional[str] + self.api_version = kwargs.get('api_version', None) + self.confidence_score = kwargs['confidence_score'] -class DSTargetIntentResult(TargetIntentResult): - """A wrap up of LUIS Deepstack response. +class ConversationTargetIntentResult(TargetIntentResult): + """A wrap up of Conversation project response. All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind - :ivar result: The actual response from a LUIS Deepstack application. - :vartype result: ~azure.ai.language.conversations.models.DeepstackResult + :ivar result: The actual response from a Conversation project. 
+ :vartype result: ~azure.ai.language.conversations.models.ConversationResult """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, - 'result': {'key': 'result', 'type': 'DeepstackResult'}, + 'result': {'key': 'result', 'type': 'ConversationResult'}, } def __init__( @@ -630,13 +591,13 @@ def __init__( """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float - :keyword result: The actual response from a LUIS Deepstack application. - :paramtype result: ~azure.ai.language.conversations.models.DeepstackResult + :keyword result: The actual response from a Conversation project. + :paramtype result: ~azure.ai.language.conversations.models.ConversationResult """ - super(DSTargetIntentResult, self).__init__(**kwargs) - self.target_kind = 'luis_deepstack' # type: str + super(ConversationTargetIntentResult, self).__init__(**kwargs) + self.target_kind = 'conversation' # type: str self.result = kwargs.get('result', None) @@ -647,7 +608,9 @@ class Error(msrest.serialization.Model): :ivar code: Required. One of a server-defined set of error codes. Possible values include: "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", - "TooManyRequests", "InternalServerError", "ServiceUnavailable". 
+ "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", + "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", + "AzureCognitiveSearchIndexLimitReached", "InternalServerError", "ServiceUnavailable". :vartype code: str or ~azure.ai.language.conversations.models.ErrorCode :ivar message: Required. A human-readable representation of the error. :vartype message: str @@ -680,7 +643,9 @@ def __init__( """ :keyword code: Required. One of a server-defined set of error codes. Possible values include: "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", - "TooManyRequests", "InternalServerError", "ServiceUnavailable". + "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", + "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", + "AzureCognitiveSearchIndexLimitReached", "InternalServerError", "ServiceUnavailable". :paramtype code: str or ~azure.ai.language.conversations.models.ErrorCode :keyword message: Required. A human-readable representation of the error. :paramtype message: str @@ -783,6 +748,176 @@ def __init__( self.innererror = kwargs.get('innererror', None) +class KnowledgeBaseAnswer(msrest.serialization.Model): + """Represents knowledge base answer. + + :ivar questions: List of questions associated with the answer. + :vartype questions: list[str] + :ivar answer: Answer text. + :vartype answer: str + :ivar confidence_score: Answer confidence score, value ranges from 0 to 1. + :vartype confidence_score: float + :ivar id: ID of the QnA result. + :vartype id: int + :ivar source: Source of QnA result. + :vartype source: str + :ivar metadata: Metadata associated with the answer, useful to categorize or filter question + answers. + :vartype metadata: dict[str, str] + :ivar dialog: Dialog associated with Answer. 
+ :vartype dialog: ~azure.ai.language.conversations.models.KnowledgeBaseAnswerDialog + :ivar answer_span: Answer span object of QnA with respect to user's question. + :vartype answer_span: ~azure.ai.language.conversations.models.AnswerSpan + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + } + + _attribute_map = { + 'questions': {'key': 'questions', 'type': '[str]'}, + 'answer': {'key': 'answer', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'id': {'key': 'id', 'type': 'int'}, + 'source': {'key': 'source', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': '{str}'}, + 'dialog': {'key': 'dialog', 'type': 'KnowledgeBaseAnswerDialog'}, + 'answer_span': {'key': 'answerSpan', 'type': 'AnswerSpan'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword questions: List of questions associated with the answer. + :paramtype questions: list[str] + :keyword answer: Answer text. + :paramtype answer: str + :keyword confidence_score: Answer confidence score, value ranges from 0 to 1. + :paramtype confidence_score: float + :keyword id: ID of the QnA result. + :paramtype id: int + :keyword source: Source of QnA result. + :paramtype source: str + :keyword metadata: Metadata associated with the answer, useful to categorize or filter question + answers. + :paramtype metadata: dict[str, str] + :keyword dialog: Dialog associated with Answer. + :paramtype dialog: ~azure.ai.language.conversations.models.KnowledgeBaseAnswerDialog + :keyword answer_span: Answer span object of QnA with respect to user's question. 
+ :paramtype answer_span: ~azure.ai.language.conversations.models.AnswerSpan + """ + super(KnowledgeBaseAnswer, self).__init__(**kwargs) + self.questions = kwargs.get('questions', None) + self.answer = kwargs.get('answer', None) + self.confidence_score = kwargs.get('confidence_score', None) + self.id = kwargs.get('id', None) + self.source = kwargs.get('source', None) + self.metadata = kwargs.get('metadata', None) + self.dialog = kwargs.get('dialog', None) + self.answer_span = kwargs.get('answer_span', None) + + +class KnowledgeBaseAnswerDialog(msrest.serialization.Model): + """Dialog associated with Answer. + + :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If + true, do not include this QnA as search result for queries without context; otherwise, if + false, ignores context and includes this QnA in search result. + :vartype is_context_only: bool + :ivar prompts: List of prompts associated with the answer. + :vartype prompts: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswerPrompt] + """ + + _validation = { + 'prompts': {'max_items': 20, 'min_items': 0}, + } + + _attribute_map = { + 'is_context_only': {'key': 'isContextOnly', 'type': 'bool'}, + 'prompts': {'key': 'prompts', 'type': '[KnowledgeBaseAnswerPrompt]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not. + If true, do not include this QnA as search result for queries without context; otherwise, if + false, ignores context and includes this QnA in search result. + :paramtype is_context_only: bool + :keyword prompts: List of prompts associated with the answer. 
+ :paramtype prompts: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswerPrompt] + """ + super(KnowledgeBaseAnswerDialog, self).__init__(**kwargs) + self.is_context_only = kwargs.get('is_context_only', None) + self.prompts = kwargs.get('prompts', None) + + +class KnowledgeBaseAnswerPrompt(msrest.serialization.Model): + """Prompt for an answer. + + :ivar display_order: Index of the prompt - used in ordering of the prompts. + :vartype display_order: int + :ivar qna_id: QnA ID corresponding to the prompt. + :vartype qna_id: int + :ivar display_text: Text displayed to represent a follow up question prompt. + :vartype display_text: str + """ + + _validation = { + 'display_text': {'max_length': 200, 'min_length': 0}, + } + + _attribute_map = { + 'display_order': {'key': 'displayOrder', 'type': 'int'}, + 'qna_id': {'key': 'qnaId', 'type': 'int'}, + 'display_text': {'key': 'displayText', 'type': 'str'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword display_order: Index of the prompt - used in ordering of the prompts. + :paramtype display_order: int + :keyword qna_id: QnA ID corresponding to the prompt. + :paramtype qna_id: int + :keyword display_text: Text displayed to represent a follow up question prompt. + :paramtype display_text: str + """ + super(KnowledgeBaseAnswerPrompt, self).__init__(**kwargs) + self.display_order = kwargs.get('display_order', None) + self.qna_id = kwargs.get('qna_id', None) + self.display_text = kwargs.get('display_text', None) + + +class KnowledgeBaseAnswers(msrest.serialization.Model): + """Represents List of Question Answers. + + :ivar answers: Represents Answer Result list. + :vartype answers: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswer] + """ + + _attribute_map = { + 'answers': {'key': 'answers', 'type': '[KnowledgeBaseAnswer]'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword answers: Represents Answer Result list. 
+ :paramtype answers: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswer] + """ + super(KnowledgeBaseAnswers, self).__init__(**kwargs) + self.answers = kwargs.get('answers', None) + + class LUISCallingOptions(msrest.serialization.Model): """This customizes how the service calls LUIS Generally Available projects. @@ -838,13 +973,13 @@ def __init__( self.bing_spell_check_subscription_key = kwargs.get('bing_spell_check_subscription_key', None) -class LUISParameters(AnalyzeParameters): +class LUISParameters(AnalysisParameters): """This is a set of request parameters for LUIS Generally Available projects. All required parameters must be populated in order to send to Azure. :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". + values include: "luis", "conversation", "question_answering", "non_linked". :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version to use when call a specific target service. :vartype api_version: str @@ -898,28 +1033,27 @@ class LUISTargetIntentResult(TargetIntentResult): All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 
'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar result: The actual response from a LUIS Generally Available application. :vartype result: any """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, 'result': {'key': 'result', 'type': 'object'}, } @@ -930,7 +1064,7 @@ def __init__( """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float :keyword result: The actual response from a LUIS Generally Available application. :paramtype result: any @@ -940,13 +1074,103 @@ def __init__( self.result = kwargs.get('result', None) -class QuestionAnsweringParameters(AnalyzeParameters): +class NoneLinkedTargetIntentResult(TargetIntentResult): + """A wrap up of non-linked intent response. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". 
+ :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar result: The actual response from a Conversation project. + :vartype result: ~azure.ai.language.conversations.models.ConversationResult + """ + + _validation = { + 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'result': {'key': 'result', 'type': 'ConversationResult'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The actual response from a Conversation project. + :paramtype result: ~azure.ai.language.conversations.models.ConversationResult + """ + super(NoneLinkedTargetIntentResult, self).__init__(**kwargs) + self.target_kind = 'non_linked' # type: str + self.result = kwargs.get('result', None) + + +class OrchestratorPrediction(BasePrediction): + """This represents the prediction result of an Orchestrator project. + + All required parameters must be populated in order to send to Azure. + + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar intents: Required. A dictionary that contains all intents. 
A key is an intent name and a + value is its confidence score and target type. The top intent's value also contains the actual + response from the target project. + :vartype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + + _validation = { + 'project_kind': {'required': True}, + 'intents': {'required': True}, + } + + _attribute_map = { + 'project_kind': {'key': 'projectKind', 'type': 'str'}, + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'intents': {'key': 'intents', 'type': '{TargetIntentResult}'}, + } + + def __init__( + self, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + :keyword intents: Required. A dictionary that contains all intents. A key is an intent name and + a value is its confidence score and target type. The top intent's value also contains the + actual response from the target project. + :paramtype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + super(OrchestratorPrediction, self).__init__(**kwargs) + self.project_kind = 'workflow' # type: str + self.intents = kwargs['intents'] + + +class QuestionAnsweringParameters(AnalysisParameters): """This is a set of request parameters for Question Answering knowledge bases. All required parameters must be populated in order to send to Azure. :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". + values include: "luis", "conversation", "question_answering", "non_linked". :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version to use when call a specific target service. :vartype api_version: str @@ -984,29 +1208,28 @@ class QuestionAnsweringTargetIntentResult(TargetIntentResult): All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. 
This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar result: The generated answer by a Question Answering KB. - :vartype result: any + :vartype result: ~azure.ai.language.conversations.models.KnowledgeBaseAnswers """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, - 'result': {'key': 'result', 'type': 'object'}, + 'result': {'key': 'result', 'type': 'KnowledgeBaseAnswers'}, } def __init__( @@ -1016,55 +1239,11 @@ def __init__( """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. 
+ :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float :keyword result: The generated answer by a Question Answering KB. - :paramtype result: any + :paramtype result: ~azure.ai.language.conversations.models.KnowledgeBaseAnswers """ super(QuestionAnsweringTargetIntentResult, self).__init__(**kwargs) self.target_kind = 'question_answering' # type: str self.result = kwargs.get('result', None) - - -class WorkflowPrediction(BasePrediction): - """This represents the prediction result of an Workflow project. - - All required parameters must be populated in order to send to Azure. - - :ivar top_intent: The intent with the highest score. - :vartype top_intent: str - :ivar project_kind: Required. The type of the project.Constant filled by server. Possible - values include: "conversation", "workflow". - :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind - :ivar intents: Required. A dictionary that contains all intents. A key is an intent name and a - value is its confidence score and target type. The top intent's value also contains the actual - response from the target project. - :vartype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] - """ - - _validation = { - 'project_kind': {'required': True}, - 'intents': {'required': True}, - } - - _attribute_map = { - 'top_intent': {'key': 'topIntent', 'type': 'str'}, - 'project_kind': {'key': 'projectType', 'type': 'str'}, - 'intents': {'key': 'intents', 'type': '{TargetIntentResult}'}, - } - - def __init__( - self, - **kwargs - ): - """ - :keyword top_intent: The intent with the highest score. - :paramtype top_intent: str - :keyword intents: Required. A dictionary that contains all intents. A key is an intent name and - a value is its confidence score and target type. The top intent's value also contains the - actual response from the target project. 
- :paramtype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] - """ - super(WorkflowPrediction, self).__init__(**kwargs) - self.project_kind = 'workflow' # type: str - self.intents = kwargs['intents'] diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py index 7faf499e3998..d0f471721332 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/models/_models_py3.py @@ -14,74 +14,47 @@ from ._conversation_analysis_client_enums import * -class AnalyzeConversationOptions(msrest.serialization.Model): - """The request body. +class AnalysisParameters(msrest.serialization.Model): + """This is the parameter set of either the Orchestration project itself or one of the target services. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ConversationParameters, LUISParameters, QuestionAnsweringParameters. All required parameters must be populated in order to send to Azure. - :ivar query: Required. The conversation utterance to be analyzed. - :vartype query: str - :ivar direct_target: The name of the target project this request is sending to directly. - :vartype direct_target: str - :ivar language: The language to use in this request. This will be the language setting when - communicating with all other target projects. - :vartype language: str - :ivar verbose: If true, the service will return more detailed information in the response. - :vartype verbose: bool - :ivar is_logging_enabled: If true, the query will be kept by the service for customers to - further review, to improve the model quality. 
- :vartype is_logging_enabled: bool - :ivar parameters: A dictionary representing the input for each target project. - :vartype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible + values include: "luis", "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version to use when call a specific target service. + :vartype api_version: str """ _validation = { - 'query': {'required': True}, + 'target_kind': {'required': True}, } _attribute_map = { - 'query': {'key': 'query', 'type': 'str'}, - 'direct_target': {'key': 'directTarget', 'type': 'str'}, - 'language': {'key': 'language', 'type': 'str'}, - 'verbose': {'key': 'verbose', 'type': 'bool'}, - 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, - 'parameters': {'key': 'parameters', 'type': '{AnalyzeParameters}'}, + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + } + + _subtype_map = { + 'target_kind': {'conversation': 'ConversationParameters', 'luis': 'LUISParameters', 'question_answering': 'QuestionAnsweringParameters'} } def __init__( self, *, - query: str, - direct_target: Optional[str] = None, - language: Optional[str] = None, - verbose: Optional[bool] = None, - is_logging_enabled: Optional[bool] = None, - parameters: Optional[Dict[str, "AnalyzeParameters"]] = None, + api_version: Optional[str] = None, **kwargs ): """ - :keyword query: Required. The conversation utterance to be analyzed. - :paramtype query: str - :keyword direct_target: The name of the target project this request is sending to directly. - :paramtype direct_target: str - :keyword language: The language to use in this request. This will be the language setting when - communicating with all other target projects. 
- :paramtype language: str - :keyword verbose: If true, the service will return more detailed information in the response. - :paramtype verbose: bool - :keyword is_logging_enabled: If true, the query will be kept by the service for customers to - further review, to improve the model quality. - :paramtype is_logging_enabled: bool - :keyword parameters: A dictionary representing the input for each target project. - :paramtype parameters: dict[str, ~azure.ai.language.conversations.models.AnalyzeParameters] + :keyword api_version: The API version to use when call a specific target service. + :paramtype api_version: str """ - super(AnalyzeConversationOptions, self).__init__(**kwargs) - self.query = query - self.direct_target = direct_target - self.language = language - self.verbose = verbose - self.is_logging_enabled = is_logging_enabled - self.parameters = parameters + super(AnalysisParameters, self).__init__(**kwargs) + self.target_kind = None # type: Optional[str] + self.api_version = api_version class AnalyzeConversationResult(msrest.serialization.Model): @@ -130,62 +103,69 @@ def __init__( self.prediction = prediction -class AnalyzeParameters(msrest.serialization.Model): - """This is the parameter set of either the conversation application itself or one of the target services. - - You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LUISParameters, DeepstackParameters, QuestionAnsweringParameters. - - All required parameters must be populated in order to send to Azure. +class AnswerSpan(msrest.serialization.Model): + """Answer span object of QnA. - :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind - :ivar api_version: The API version to use when call a specific target service. 
- :vartype api_version: str + :ivar text: Predicted text of answer span. + :vartype text: str + :ivar confidence_score: Predicted score of answer span, value ranges from 0 to 1. + :vartype confidence_score: float + :ivar offset: The answer span offset from the start of answer. + :vartype offset: int + :ivar length: The length of the answer span. + :vartype length: int """ _validation = { - 'target_kind': {'required': True}, + 'confidence_score': {'maximum': 1, 'minimum': 0}, } _attribute_map = { - 'target_kind': {'key': 'targetKind', 'type': 'str'}, - 'api_version': {'key': 'apiVersion', 'type': 'str'}, - } - - _subtype_map = { - 'target_kind': {'luis': 'LUISParameters', 'luis_deepstack': 'DeepstackParameters', 'question_answering': 'QuestionAnsweringParameters'} + 'text': {'key': 'text', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'offset': {'key': 'offset', 'type': 'int'}, + 'length': {'key': 'length', 'type': 'int'}, } def __init__( self, *, - api_version: Optional[str] = None, + text: Optional[str] = None, + confidence_score: Optional[float] = None, + offset: Optional[int] = None, + length: Optional[int] = None, **kwargs ): """ - :keyword api_version: The API version to use when call a specific target service. - :paramtype api_version: str + :keyword text: Predicted text of answer span. + :paramtype text: str + :keyword confidence_score: Predicted score of answer span, value ranges from 0 to 1. + :paramtype confidence_score: float + :keyword offset: The answer span offset from the start of answer. + :paramtype offset: int + :keyword length: The length of the answer span. 
+ :paramtype length: int """ - super(AnalyzeParameters, self).__init__(**kwargs) - self.target_kind = None # type: Optional[str] - self.api_version = api_version + super(AnswerSpan, self).__init__(**kwargs) + self.text = text + self.confidence_score = confidence_score + self.offset = offset + self.length = length class BasePrediction(msrest.serialization.Model): """This is the base class of prediction. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: DeepstackPrediction, WorkflowPrediction. + sub-classes are: ConversationPrediction, OrchestratorPrediction. All required parameters must be populated in order to send to Azure. - :ivar top_intent: The intent with the highest score. - :vartype top_intent: str :ivar project_kind: Required. The type of the project.Constant filled by server. Possible values include: "conversation", "workflow". :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str """ _validation = { @@ -193,12 +173,12 @@ class BasePrediction(msrest.serialization.Model): } _attribute_map = { + 'project_kind': {'key': 'projectKind', 'type': 'str'}, 'top_intent': {'key': 'topIntent', 'type': 'str'}, - 'project_kind': {'key': 'projectType', 'type': 'str'}, } _subtype_map = { - 'project_kind': {'conversation': 'DeepstackPrediction', 'workflow': 'WorkflowPrediction'} + 'project_kind': {'conversation': 'ConversationPrediction', 'workflow': 'OrchestratorPrediction'} } def __init__( @@ -212,12 +192,82 @@ def __init__( :paramtype top_intent: str """ super(BasePrediction, self).__init__(**kwargs) - self.top_intent = top_intent self.project_kind = None # type: Optional[str] + self.top_intent = top_intent + +class ConversationAnalysisOptions(msrest.serialization.Model): + """The request body. -class DeepstackCallingOptions(msrest.serialization.Model): - """The option to set to call a LUIS Deepstack project. 
+ All required parameters must be populated in order to send to Azure. + + :ivar query: Required. The conversation utterance to be analyzed. + :vartype query: str + :ivar direct_target: The name of the target project this request is sending to directly. + :vartype direct_target: str + :ivar language: The language to use in this request. This will be the language setting when + communicating with all other target projects. + :vartype language: str + :ivar verbose: If true, the service will return more detailed information in the response. + :vartype verbose: bool + :ivar is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :vartype is_logging_enabled: bool + :ivar parameters: A dictionary representing the input for each target project. + :vartype parameters: dict[str, ~azure.ai.language.conversations.models.AnalysisParameters] + """ + + _validation = { + 'query': {'required': True}, + } + + _attribute_map = { + 'query': {'key': 'query', 'type': 'str'}, + 'direct_target': {'key': 'directTarget', 'type': 'str'}, + 'language': {'key': 'language', 'type': 'str'}, + 'verbose': {'key': 'verbose', 'type': 'bool'}, + 'is_logging_enabled': {'key': 'isLoggingEnabled', 'type': 'bool'}, + 'parameters': {'key': 'parameters', 'type': '{AnalysisParameters}'}, + } + + def __init__( + self, + *, + query: str, + direct_target: Optional[str] = None, + language: Optional[str] = None, + verbose: Optional[bool] = None, + is_logging_enabled: Optional[bool] = None, + parameters: Optional[Dict[str, "AnalysisParameters"]] = None, + **kwargs + ): + """ + :keyword query: Required. The conversation utterance to be analyzed. + :paramtype query: str + :keyword direct_target: The name of the target project this request is sending to directly. + :paramtype direct_target: str + :keyword language: The language to use in this request. This will be the language setting when + communicating with all other target projects. 
+ :paramtype language: str + :keyword verbose: If true, the service will return more detailed information in the response. + :paramtype verbose: bool + :keyword is_logging_enabled: If true, the query will be kept by the service for customers to + further review, to improve the model quality. + :paramtype is_logging_enabled: bool + :keyword parameters: A dictionary representing the input for each target project. + :paramtype parameters: dict[str, ~azure.ai.language.conversations.models.AnalysisParameters] + """ + super(ConversationAnalysisOptions, self).__init__(**kwargs) + self.query = query + self.direct_target = direct_target + self.language = language + self.verbose = verbose + self.is_logging_enabled = is_logging_enabled + self.parameters = parameters + + +class ConversationCallingOptions(msrest.serialization.Model): + """The option to set to call a Conversation project. :ivar language: The language of the query. :vartype language: str @@ -251,14 +301,14 @@ def __init__( in authoring, to improve the model quality. :paramtype is_logging_enabled: bool """ - super(DeepstackCallingOptions, self).__init__(**kwargs) + super(ConversationCallingOptions, self).__init__(**kwargs) self.language = language self.verbose = verbose self.is_logging_enabled = is_logging_enabled -class DeepstackEntity(msrest.serialization.Model): - """The entity extraction result of a LUIS Deepstack project. +class ConversationEntity(msrest.serialization.Model): + """The entity extraction result of a Conversation project. All required parameters must be populated in order to send to Azure. @@ -272,8 +322,8 @@ class DeepstackEntity(msrest.serialization.Model): :vartype length: int :ivar confidence_score: Required. The entity confidence score. :vartype confidence_score: float - :ivar resolution: A array with extra information about the entity. - :vartype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + :ivar list_keys: List of keys. 
+ :vartype list_keys: list[str] """ _validation = { @@ -290,7 +340,7 @@ class DeepstackEntity(msrest.serialization.Model): 'offset': {'key': 'offset', 'type': 'int'}, 'length': {'key': 'length', 'type': 'int'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'resolution': {'key': 'resolution', 'type': '[DeepStackEntityResolution]'}, + 'list_keys': {'key': 'listKeys', 'type': '[str]'}, } def __init__( @@ -301,7 +351,7 @@ def __init__( offset: int, length: int, confidence_score: float, - resolution: Optional[List["DeepStackEntityResolution"]] = None, + list_keys: Optional[List[str]] = None, **kwargs ): """ @@ -315,62 +365,20 @@ def __init__( :paramtype length: int :keyword confidence_score: Required. The entity confidence score. :paramtype confidence_score: float - :keyword resolution: A array with extra information about the entity. - :paramtype resolution: list[~azure.ai.language.conversations.models.DeepStackEntityResolution] + :keyword list_keys: List of keys. + :paramtype list_keys: list[str] """ - super(DeepstackEntity, self).__init__(**kwargs) + super(ConversationEntity, self).__init__(**kwargs) self.category = category self.text = text self.offset = offset self.length = length self.confidence_score = confidence_score - self.resolution = resolution - - -class DeepStackEntityResolution(msrest.serialization.Model): - """This is the base class of all kinds of entity resolutions. - - All required parameters must be populated in order to send to Azure. - - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". 
- :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - """ + self.list_keys = list_keys - _validation = { - 'resolution_kind': {'required': True}, - } - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, - } - - def __init__( - self, - *, - resolution_kind: Union[str, "ResolutionKind"], - additional_properties: Optional[Dict[str, Any]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - """ - super(DeepStackEntityResolution, self).__init__(**kwargs) - self.additional_properties = additional_properties - self.resolution_kind = resolution_kind - - -class DeepstackIntent(msrest.serialization.Model): - """The intent classification result of a LUIS Deepstack project. +class ConversationIntent(msrest.serialization.Model): + """The intent classification result of a Conversation project. All required parameters must be populated in order to send to Azure. @@ -403,23 +411,23 @@ def __init__( :keyword confidence_score: Required. The confidence score of the class from 0.0 to 1.0. :paramtype confidence_score: float """ - super(DeepstackIntent, self).__init__(**kwargs) + super(ConversationIntent, self).__init__(**kwargs) self.category = category self.confidence_score = confidence_score -class DeepstackParameters(AnalyzeParameters): - """This is a set of request parameters for LUIS Deepstack projects. +class ConversationParameters(AnalysisParameters): + """This is a set of request parameters for Customized Conversation projects. All required parameters must be populated in order to send to Azure. 
:ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". + values include: "luis", "conversation", "question_answering", "non_linked". :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version to use when call a specific target service. :vartype api_version: str - :ivar calling_options: The option to set to call a LUIS Deepstack project. - :vartype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + :ivar calling_options: The option to set to call a Conversation project. + :vartype calling_options: ~azure.ai.language.conversations.models.ConversationCallingOptions """ _validation = { @@ -429,41 +437,41 @@ class DeepstackParameters(AnalyzeParameters): _attribute_map = { 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, - 'calling_options': {'key': 'callingOptions', 'type': 'DeepstackCallingOptions'}, + 'calling_options': {'key': 'callingOptions', 'type': 'ConversationCallingOptions'}, } def __init__( self, *, api_version: Optional[str] = None, - calling_options: Optional["DeepstackCallingOptions"] = None, + calling_options: Optional["ConversationCallingOptions"] = None, **kwargs ): """ :keyword api_version: The API version to use when call a specific target service. :paramtype api_version: str - :keyword calling_options: The option to set to call a LUIS Deepstack project. - :paramtype calling_options: ~azure.ai.language.conversations.models.DeepstackCallingOptions + :keyword calling_options: The option to set to call a Conversation project. 
+ :paramtype calling_options: ~azure.ai.language.conversations.models.ConversationCallingOptions """ - super(DeepstackParameters, self).__init__(api_version=api_version, **kwargs) - self.target_kind = 'luis_deepstack' # type: str + super(ConversationParameters, self).__init__(api_version=api_version, **kwargs) + self.target_kind = 'conversation' # type: str self.calling_options = calling_options -class DeepstackPrediction(BasePrediction): - """Represents the prediction section of a LUIS Deepstack project. +class ConversationPrediction(BasePrediction): + """Represents the prediction section of a Conversation project. All required parameters must be populated in order to send to Azure. - :ivar top_intent: The intent with the highest score. - :vartype top_intent: str :ivar project_kind: Required. The type of the project.Constant filled by server. Possible values include: "conversation", "workflow". :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str :ivar intents: Required. The intent classification results. - :vartype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :vartype intents: list[~azure.ai.language.conversations.models.ConversationIntent] :ivar entities: Required. The entity extraction results. 
- :vartype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + :vartype entities: list[~azure.ai.language.conversations.models.ConversationEntity] """ _validation = { @@ -473,17 +481,17 @@ class DeepstackPrediction(BasePrediction): } _attribute_map = { + 'project_kind': {'key': 'projectKind', 'type': 'str'}, 'top_intent': {'key': 'topIntent', 'type': 'str'}, - 'project_kind': {'key': 'projectType', 'type': 'str'}, - 'intents': {'key': 'intents', 'type': '[DeepstackIntent]'}, - 'entities': {'key': 'entities', 'type': '[DeepstackEntity]'}, + 'intents': {'key': 'intents', 'type': '[ConversationIntent]'}, + 'entities': {'key': 'entities', 'type': '[ConversationEntity]'}, } def __init__( self, *, - intents: List["DeepstackIntent"], - entities: List["DeepstackEntity"], + intents: List["ConversationIntent"], + entities: List["ConversationEntity"], top_intent: Optional[str] = None, **kwargs ): @@ -491,18 +499,18 @@ def __init__( :keyword top_intent: The intent with the highest score. :paramtype top_intent: str :keyword intents: Required. The intent classification results. - :paramtype intents: list[~azure.ai.language.conversations.models.DeepstackIntent] + :paramtype intents: list[~azure.ai.language.conversations.models.ConversationIntent] :keyword entities: Required. The entity extraction results. - :paramtype entities: list[~azure.ai.language.conversations.models.DeepstackEntity] + :paramtype entities: list[~azure.ai.language.conversations.models.ConversationEntity] """ - super(DeepstackPrediction, self).__init__(top_intent=top_intent, **kwargs) + super(ConversationPrediction, self).__init__(top_intent=top_intent, **kwargs) self.project_kind = 'conversation' # type: str self.intents = intents self.entities = entities -class DeepstackResult(msrest.serialization.Model): - """The response returned by a LUIS Deepstack project. +class ConversationResult(msrest.serialization.Model): + """The response returned by a Conversation project. 
All required parameters must be populated in order to send to Azure. @@ -511,7 +519,7 @@ class DeepstackResult(msrest.serialization.Model): :ivar detected_language: The detected language from the query. :vartype detected_language: str :ivar prediction: Required. The predicted result for the query. - :vartype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + :vartype prediction: ~azure.ai.language.conversations.models.ConversationPrediction """ _validation = { @@ -522,14 +530,14 @@ class DeepstackResult(msrest.serialization.Model): _attribute_map = { 'query': {'key': 'query', 'type': 'str'}, 'detected_language': {'key': 'detectedLanguage', 'type': 'str'}, - 'prediction': {'key': 'prediction', 'type': 'DeepstackPrediction'}, + 'prediction': {'key': 'prediction', 'type': 'ConversationPrediction'}, } def __init__( self, *, query: str, - prediction: "DeepstackPrediction", + prediction: "ConversationPrediction", detected_language: Optional[str] = None, **kwargs ): @@ -539,162 +547,113 @@ def __init__( :keyword detected_language: The detected language from the query. :paramtype detected_language: str :keyword prediction: Required. The predicted result for the query. - :paramtype prediction: ~azure.ai.language.conversations.models.DeepstackPrediction + :paramtype prediction: ~azure.ai.language.conversations.models.ConversationPrediction """ - super(DeepstackResult, self).__init__(**kwargs) + super(ConversationResult, self).__init__(**kwargs) self.query = query self.detected_language = detected_language self.prediction = prediction -class DictionaryNormalizedValueResolution(DeepStackEntityResolution): - """The DictionaryNormalizedValue resolution indicates entity values are extracted from a predefined dictionary. For example, Coca could be a normalized name for Coca-Cola. - - All required parameters must be populated in order to send to Azure. 
- - :ivar additional_properties: Unmatched properties from the message are deserialized to this - collection. - :vartype additional_properties: dict[str, any] - :ivar resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :vartype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - :ivar values: A list of normalized entities. - :vartype values: list[str] - """ - - _validation = { - 'resolution_kind': {'required': True}, - } - - _attribute_map = { - 'additional_properties': {'key': '', 'type': '{object}'}, - 'resolution_kind': {'key': 'resolutionKind', 'type': 'str'}, - 'values': {'key': 'values', 'type': '[str]'}, - } - - def __init__( - self, - *, - resolution_kind: Union[str, "ResolutionKind"], - additional_properties: Optional[Dict[str, Any]] = None, - values: Optional[List[str]] = None, - **kwargs - ): - """ - :keyword additional_properties: Unmatched properties from the message are deserialized to this - collection. - :paramtype additional_properties: dict[str, any] - :keyword resolution_kind: Required. The type of an entity resolution. Possible values include: - "DictionaryNormalizedValue". - :paramtype resolution_kind: str or ~azure.ai.language.conversations.models.ResolutionKind - :keyword values: A list of normalized entities. - :paramtype values: list[str] - """ - super(DictionaryNormalizedValueResolution, self).__init__(additional_properties=additional_properties, resolution_kind=resolution_kind, **kwargs) - self.values = values - - class TargetIntentResult(msrest.serialization.Model): """This is the base class of an intent prediction. You probably want to use the sub-classes and not this class directly. Known - sub-classes are: LUISTargetIntentResult, DSTargetIntentResult, QuestionAnsweringTargetIntentResult. + sub-classes are: ConversationTargetIntentResult, LUISTargetIntentResult, NoneLinkedTargetIntentResult, QuestionAnsweringTargetIntentResult. 
All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". 
- :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, } _subtype_map = { - 'target_kind': {'luis': 'LUISTargetIntentResult', 'luis_deepstack': 'DSTargetIntentResult', 'question_answering': 'QuestionAnsweringTargetIntentResult'} + 'target_kind': {'conversation': 'ConversationTargetIntentResult', 'luis': 'LUISTargetIntentResult', 'non_linked': 'NoneLinkedTargetIntentResult', 'question_answering': 'QuestionAnsweringTargetIntentResult'} } def __init__( self, *, + confidence_score: float, api_version: Optional[str] = None, - confidence_score: Optional[float] = None, **kwargs ): """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float """ super(TargetIntentResult, self).__init__(**kwargs) + self.target_kind = None # type: Optional[str] self.api_version = api_version self.confidence_score = confidence_score - self.target_kind = None # type: Optional[str] -class DSTargetIntentResult(TargetIntentResult): - """A wrap up of LUIS Deepstack response. +class ConversationTargetIntentResult(TargetIntentResult): + """A wrap up of Conversation project response. All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. 
Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind - :ivar result: The actual response from a LUIS Deepstack application. - :vartype result: ~azure.ai.language.conversations.models.DeepstackResult + :ivar result: The actual response from a Conversation project. 
+ :vartype result: ~azure.ai.language.conversations.models.ConversationResult """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, - 'result': {'key': 'result', 'type': 'DeepstackResult'}, + 'result': {'key': 'result', 'type': 'ConversationResult'}, } def __init__( self, *, + confidence_score: float, api_version: Optional[str] = None, - confidence_score: Optional[float] = None, - result: Optional["DeepstackResult"] = None, + result: Optional["ConversationResult"] = None, **kwargs ): """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float - :keyword result: The actual response from a LUIS Deepstack application. - :paramtype result: ~azure.ai.language.conversations.models.DeepstackResult + :keyword result: The actual response from a Conversation project. + :paramtype result: ~azure.ai.language.conversations.models.ConversationResult """ - super(DSTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) - self.target_kind = 'luis_deepstack' # type: str + super(ConversationTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) + self.target_kind = 'conversation' # type: str self.result = result @@ -705,7 +664,9 @@ class Error(msrest.serialization.Model): :ivar code: Required. One of a server-defined set of error codes. 
Possible values include: "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", - "TooManyRequests", "InternalServerError", "ServiceUnavailable". + "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", + "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", + "AzureCognitiveSearchIndexLimitReached", "InternalServerError", "ServiceUnavailable". :vartype code: str or ~azure.ai.language.conversations.models.ErrorCode :ivar message: Required. A human-readable representation of the error. :vartype message: str @@ -744,7 +705,9 @@ def __init__( """ :keyword code: Required. One of a server-defined set of error codes. Possible values include: "InvalidRequest", "InvalidArgument", "Unauthorized", "Forbidden", "NotFound", - "TooManyRequests", "InternalServerError", "ServiceUnavailable". + "ProjectNotFound", "OperationNotFound", "AzureCognitiveSearchNotFound", + "AzureCognitiveSearchIndexNotFound", "TooManyRequests", "AzureCognitiveSearchThrottling", + "AzureCognitiveSearchIndexLimitReached", "InternalServerError", "ServiceUnavailable". :paramtype code: str or ~azure.ai.language.conversations.models.ErrorCode :keyword message: Required. A human-readable representation of the error. :paramtype message: str @@ -855,6 +818,194 @@ def __init__( self.innererror = innererror +class KnowledgeBaseAnswer(msrest.serialization.Model): + """Represents knowledge base answer. + + :ivar questions: List of questions associated with the answer. + :vartype questions: list[str] + :ivar answer: Answer text. + :vartype answer: str + :ivar confidence_score: Answer confidence score, value ranges from 0 to 1. + :vartype confidence_score: float + :ivar id: ID of the QnA result. + :vartype id: int + :ivar source: Source of QnA result. + :vartype source: str + :ivar metadata: Metadata associated with the answer, useful to categorize or filter question + answers. 
+ :vartype metadata: dict[str, str] + :ivar dialog: Dialog associated with Answer. + :vartype dialog: ~azure.ai.language.conversations.models.KnowledgeBaseAnswerDialog + :ivar answer_span: Answer span object of QnA with respect to user's question. + :vartype answer_span: ~azure.ai.language.conversations.models.AnswerSpan + """ + + _validation = { + 'confidence_score': {'maximum': 1, 'minimum': 0}, + } + + _attribute_map = { + 'questions': {'key': 'questions', 'type': '[str]'}, + 'answer': {'key': 'answer', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'id': {'key': 'id', 'type': 'int'}, + 'source': {'key': 'source', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': '{str}'}, + 'dialog': {'key': 'dialog', 'type': 'KnowledgeBaseAnswerDialog'}, + 'answer_span': {'key': 'answerSpan', 'type': 'AnswerSpan'}, + } + + def __init__( + self, + *, + questions: Optional[List[str]] = None, + answer: Optional[str] = None, + confidence_score: Optional[float] = None, + id: Optional[int] = None, + source: Optional[str] = None, + metadata: Optional[Dict[str, str]] = None, + dialog: Optional["KnowledgeBaseAnswerDialog"] = None, + answer_span: Optional["AnswerSpan"] = None, + **kwargs + ): + """ + :keyword questions: List of questions associated with the answer. + :paramtype questions: list[str] + :keyword answer: Answer text. + :paramtype answer: str + :keyword confidence_score: Answer confidence score, value ranges from 0 to 1. + :paramtype confidence_score: float + :keyword id: ID of the QnA result. + :paramtype id: int + :keyword source: Source of QnA result. + :paramtype source: str + :keyword metadata: Metadata associated with the answer, useful to categorize or filter question + answers. + :paramtype metadata: dict[str, str] + :keyword dialog: Dialog associated with Answer. 
+ :paramtype dialog: ~azure.ai.language.conversations.models.KnowledgeBaseAnswerDialog + :keyword answer_span: Answer span object of QnA with respect to user's question. + :paramtype answer_span: ~azure.ai.language.conversations.models.AnswerSpan + """ + super(KnowledgeBaseAnswer, self).__init__(**kwargs) + self.questions = questions + self.answer = answer + self.confidence_score = confidence_score + self.id = id + self.source = source + self.metadata = metadata + self.dialog = dialog + self.answer_span = answer_span + + +class KnowledgeBaseAnswerDialog(msrest.serialization.Model): + """Dialog associated with Answer. + + :ivar is_context_only: To mark if a prompt is relevant only with a previous question or not. If + true, do not include this QnA as search result for queries without context; otherwise, if + false, ignores context and includes this QnA in search result. + :vartype is_context_only: bool + :ivar prompts: List of prompts associated with the answer. + :vartype prompts: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswerPrompt] + """ + + _validation = { + 'prompts': {'max_items': 20, 'min_items': 0}, + } + + _attribute_map = { + 'is_context_only': {'key': 'isContextOnly', 'type': 'bool'}, + 'prompts': {'key': 'prompts', 'type': '[KnowledgeBaseAnswerPrompt]'}, + } + + def __init__( + self, + *, + is_context_only: Optional[bool] = None, + prompts: Optional[List["KnowledgeBaseAnswerPrompt"]] = None, + **kwargs + ): + """ + :keyword is_context_only: To mark if a prompt is relevant only with a previous question or not. + If true, do not include this QnA as search result for queries without context; otherwise, if + false, ignores context and includes this QnA in search result. + :paramtype is_context_only: bool + :keyword prompts: List of prompts associated with the answer. 
+ :paramtype prompts: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswerPrompt] + """ + super(KnowledgeBaseAnswerDialog, self).__init__(**kwargs) + self.is_context_only = is_context_only + self.prompts = prompts + + +class KnowledgeBaseAnswerPrompt(msrest.serialization.Model): + """Prompt for an answer. + + :ivar display_order: Index of the prompt - used in ordering of the prompts. + :vartype display_order: int + :ivar qna_id: QnA ID corresponding to the prompt. + :vartype qna_id: int + :ivar display_text: Text displayed to represent a follow up question prompt. + :vartype display_text: str + """ + + _validation = { + 'display_text': {'max_length': 200, 'min_length': 0}, + } + + _attribute_map = { + 'display_order': {'key': 'displayOrder', 'type': 'int'}, + 'qna_id': {'key': 'qnaId', 'type': 'int'}, + 'display_text': {'key': 'displayText', 'type': 'str'}, + } + + def __init__( + self, + *, + display_order: Optional[int] = None, + qna_id: Optional[int] = None, + display_text: Optional[str] = None, + **kwargs + ): + """ + :keyword display_order: Index of the prompt - used in ordering of the prompts. + :paramtype display_order: int + :keyword qna_id: QnA ID corresponding to the prompt. + :paramtype qna_id: int + :keyword display_text: Text displayed to represent a follow up question prompt. + :paramtype display_text: str + """ + super(KnowledgeBaseAnswerPrompt, self).__init__(**kwargs) + self.display_order = display_order + self.qna_id = qna_id + self.display_text = display_text + + +class KnowledgeBaseAnswers(msrest.serialization.Model): + """Represents List of Question Answers. + + :ivar answers: Represents Answer Result list. 
+ :vartype answers: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswer] + """ + + _attribute_map = { + 'answers': {'key': 'answers', 'type': '[KnowledgeBaseAnswer]'}, + } + + def __init__( + self, + *, + answers: Optional[List["KnowledgeBaseAnswer"]] = None, + **kwargs + ): + """ + :keyword answers: Represents Answer Result list. + :paramtype answers: list[~azure.ai.language.conversations.models.KnowledgeBaseAnswer] + """ + super(KnowledgeBaseAnswers, self).__init__(**kwargs) + self.answers = answers + + class LUISCallingOptions(msrest.serialization.Model): """This customizes how the service calls LUIS Generally Available projects. @@ -917,13 +1068,13 @@ def __init__( self.bing_spell_check_subscription_key = bing_spell_check_subscription_key -class LUISParameters(AnalyzeParameters): +class LUISParameters(AnalysisParameters): """This is a set of request parameters for LUIS Generally Available projects. All required parameters must be populated in order to send to Azure. :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". + values include: "luis", "conversation", "question_answering", "non_linked". :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version to use when call a specific target service. :vartype api_version: str @@ -982,43 +1133,42 @@ class LUISTargetIntentResult(TargetIntentResult): All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. 
:vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar result: The actual response from a LUIS Generally Available application. :vartype result: any """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, 'result': {'key': 'result', 'type': 'object'}, } def __init__( self, *, + confidence_score: float, api_version: Optional[str] = None, - confidence_score: Optional[float] = None, result: Optional[Any] = None, **kwargs ): """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float :keyword result: The actual response from a LUIS Generally Available application. 
:paramtype result: any @@ -1028,13 +1178,110 @@ def __init__( self.result = result -class QuestionAnsweringParameters(AnalyzeParameters): +class NoneLinkedTargetIntentResult(TargetIntentResult): + """A wrap up of non-linked intent response. + + All required parameters must be populated in order to send to Azure. + + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind + :ivar api_version: The API version used to call a target service. + :vartype api_version: str + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. + :vartype confidence_score: float + :ivar result: The actual response from a Conversation project. + :vartype result: ~azure.ai.language.conversations.models.ConversationResult + """ + + _validation = { + 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, + } + + _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, + 'api_version': {'key': 'apiVersion', 'type': 'str'}, + 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, + 'result': {'key': 'result', 'type': 'ConversationResult'}, + } + + def __init__( + self, + *, + confidence_score: float, + api_version: Optional[str] = None, + result: Optional["ConversationResult"] = None, + **kwargs + ): + """ + :keyword api_version: The API version used to call a target service. + :paramtype api_version: str + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. + :paramtype confidence_score: float + :keyword result: The actual response from a Conversation project. 
+ :paramtype result: ~azure.ai.language.conversations.models.ConversationResult + """ + super(NoneLinkedTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) + self.target_kind = 'non_linked' # type: str + self.result = result + + +class OrchestratorPrediction(BasePrediction): + """This represents the prediction result of an Orchestrator project. + + All required parameters must be populated in order to send to Azure. + + :ivar project_kind: Required. The type of the project.Constant filled by server. Possible + values include: "conversation", "workflow". + :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind + :ivar top_intent: The intent with the highest score. + :vartype top_intent: str + :ivar intents: Required. A dictionary that contains all intents. A key is an intent name and a + value is its confidence score and target type. The top intent's value also contains the actual + response from the target project. + :vartype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + + _validation = { + 'project_kind': {'required': True}, + 'intents': {'required': True}, + } + + _attribute_map = { + 'project_kind': {'key': 'projectKind', 'type': 'str'}, + 'top_intent': {'key': 'topIntent', 'type': 'str'}, + 'intents': {'key': 'intents', 'type': '{TargetIntentResult}'}, + } + + def __init__( + self, + *, + intents: Dict[str, "TargetIntentResult"], + top_intent: Optional[str] = None, + **kwargs + ): + """ + :keyword top_intent: The intent with the highest score. + :paramtype top_intent: str + :keyword intents: Required. A dictionary that contains all intents. A key is an intent name and + a value is its confidence score and target type. The top intent's value also contains the + actual response from the target project. 
+ :paramtype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] + """ + super(OrchestratorPrediction, self).__init__(top_intent=top_intent, **kwargs) + self.project_kind = 'workflow' # type: str + self.intents = intents + + +class QuestionAnsweringParameters(AnalysisParameters): """This is a set of request parameters for Question Answering knowledge bases. All required parameters must be populated in order to send to Azure. :ivar target_kind: Required. The type of a target service.Constant filled by server. Possible - values include: "luis", "luis_deepstack", "question_answering". + values include: "luis", "conversation", "question_answering", "non_linked". :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version to use when call a specific target service. :vartype api_version: str @@ -1075,94 +1322,46 @@ class QuestionAnsweringTargetIntentResult(TargetIntentResult): All required parameters must be populated in order to send to Azure. + :ivar target_kind: Required. This discriminator property specifies the type of the target + project that returns the response.Constant filled by server. Possible values include: "luis", + "conversation", "question_answering", "non_linked". + :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar api_version: The API version used to call a target service. :vartype api_version: str - :ivar confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :ivar confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :vartype confidence_score: float - :ivar target_kind: Required. This discriminator property specifies the type of the target - project that returns the response. 'luis' means the type is LUIS Generally Available. - 'luis_deepstack' means LUIS vNext. 'question_answering' means Question Answering.Constant - filled by server. 
Possible values include: "luis", "luis_deepstack", "question_answering". - :vartype target_kind: str or ~azure.ai.language.conversations.models.TargetKind :ivar result: The generated answer by a Question Answering KB. - :vartype result: any + :vartype result: ~azure.ai.language.conversations.models.KnowledgeBaseAnswers """ _validation = { - 'confidence_score': {'maximum': 1, 'minimum': 0}, 'target_kind': {'required': True}, + 'confidence_score': {'required': True, 'maximum': 1, 'minimum': 0}, } _attribute_map = { + 'target_kind': {'key': 'targetKind', 'type': 'str'}, 'api_version': {'key': 'apiVersion', 'type': 'str'}, 'confidence_score': {'key': 'confidenceScore', 'type': 'float'}, - 'target_kind': {'key': 'targetType', 'type': 'str'}, - 'result': {'key': 'result', 'type': 'object'}, + 'result': {'key': 'result', 'type': 'KnowledgeBaseAnswers'}, } def __init__( self, *, + confidence_score: float, api_version: Optional[str] = None, - confidence_score: Optional[float] = None, - result: Optional[Any] = None, + result: Optional["KnowledgeBaseAnswers"] = None, **kwargs ): """ :keyword api_version: The API version used to call a target service. :paramtype api_version: str - :keyword confidence_score: The prediction score and it ranges from 0.0 to 1.0. + :keyword confidence_score: Required. The prediction score and it ranges from 0.0 to 1.0. :paramtype confidence_score: float :keyword result: The generated answer by a Question Answering KB. - :paramtype result: any + :paramtype result: ~azure.ai.language.conversations.models.KnowledgeBaseAnswers """ super(QuestionAnsweringTargetIntentResult, self).__init__(api_version=api_version, confidence_score=confidence_score, **kwargs) self.target_kind = 'question_answering' # type: str self.result = result - - -class WorkflowPrediction(BasePrediction): - """This represents the prediction result of an Workflow project. - - All required parameters must be populated in order to send to Azure. 
- - :ivar top_intent: The intent with the highest score. - :vartype top_intent: str - :ivar project_kind: Required. The type of the project.Constant filled by server. Possible - values include: "conversation", "workflow". - :vartype project_kind: str or ~azure.ai.language.conversations.models.ProjectKind - :ivar intents: Required. A dictionary that contains all intents. A key is an intent name and a - value is its confidence score and target type. The top intent's value also contains the actual - response from the target project. - :vartype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] - """ - - _validation = { - 'project_kind': {'required': True}, - 'intents': {'required': True}, - } - - _attribute_map = { - 'top_intent': {'key': 'topIntent', 'type': 'str'}, - 'project_kind': {'key': 'projectType', 'type': 'str'}, - 'intents': {'key': 'intents', 'type': '{TargetIntentResult}'}, - } - - def __init__( - self, - *, - intents: Dict[str, "TargetIntentResult"], - top_intent: Optional[str] = None, - **kwargs - ): - """ - :keyword top_intent: The intent with the highest score. - :paramtype top_intent: str - :keyword intents: Required. A dictionary that contains all intents. A key is an intent name and - a value is its confidence score and target type. The top intent's value also contains the - actual response from the target project. 
- :paramtype intents: dict[str, ~azure.ai.language.conversations.models.TargetIntentResult] - """ - super(WorkflowPrediction, self).__init__(top_intent=top_intent, **kwargs) - self.project_kind = 'workflow' # type: str - self.intents = intents diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py index 769c2b77e1d8..f8450ab9a9d0 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/azure/ai/language/conversations/operations/_operations.py @@ -36,7 +36,7 @@ def build_analyze_conversations_request( project_name = kwargs.pop('project_name') # type: str deployment_name = kwargs.pop('deployment_name') # type: str - api_version = "2021-07-15-preview" + api_version = "2021-11-01-preview" accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/:analyze-conversations') @@ -67,15 +67,15 @@ class ConversationAnalysisClientOperationsMixin(object): @distributed_trace def analyze_conversations( self, - analyze_conversation_options, # type: "_models.AnalyzeConversationOptions" + conversation_analysis_options, # type: "_models.ConversationAnalysisOptions" **kwargs # type: Any ): # type: (...) -> "_models.AnalyzeConversationResult" """Analyzes the input conversation utterance. - :param analyze_conversation_options: Post body of the request. - :type analyze_conversation_options: - ~azure.ai.language.conversations.models.AnalyzeConversationOptions + :param conversation_analysis_options: Post body of the request. + :type conversation_analysis_options: + ~azure.ai.language.conversations.models.ConversationAnalysisOptions :keyword project_name: The name of the project to use. 
:paramtype project_name: str :keyword deployment_name: The name of the specific deployment of the project to use. @@ -94,7 +94,7 @@ def analyze_conversations( project_name = kwargs.pop('project_name') # type: str deployment_name = kwargs.pop('deployment_name') # type: str - json = self._serialize.body(analyze_conversation_options, 'AnalyzeConversationOptions') + json = self._serialize.body(conversation_analysis_options, 'ConversationAnalysisOptions') request = build_analyze_conversations_request( content_type=content_type, diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md index 326aef0c67ea..c3f50953d187 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/README.md @@ -20,11 +20,10 @@ You can authenticate your client with a Conversational Language Understanding AP These sample programs show common scenarios for the Conversational Language Understanding client's offerings. 
-| **File Name** | **Description** | +| **File Name**| **Description**| | ----------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [sample_analyze_conversation_app.py][sample_analyze_conversation_app] and [sample_analyze_conversation_app_async.py][sample_analyze_conversation_app_async] | Analyze intents and entities in your utterance using a deepstack (conversation) project | -| [sample_analyze_workflow_app.py][sample_analyze_workflow_app] and [sample_analyze_workflow_app_async.py][sample_analyze_workflow_app_async] | Analyze user utterance using an orchestrator (workflow) project, which uses the best candidate from one of your different apps to analyze user query (ex: Qna, DeepStack, and Luis) | - +| [sample_analyze_conversation_app.py][sample_analyze_conversation_app] and [sample_analyze_conversation_app_async.py][sample_analyze_conversation_app_async] | Analyze intents and entities in your utterance using a conversation (conversation) project | +| [sample_analyze_orchestration_app.py][sample_analyze_orchestration_app] and [sample_analyze_orchestration_app_async.py][sample_analyze_orchestration_app_async] | Analyze user utterance using an orchestrator (orchestration) project, which uses the best candidate from one of your different apps to analyze user query (ex: Qna, Conversation, and Luis) | ## Prerequisites - Python 2.7, or 3.6 or later is required to use this package (3.6 or later if using asyncio) @@ -57,7 +56,12 @@ what you can do with the Azure Conversational Language Understanding client libr | **Advanced Sample File Name** | **Description** | | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | -| [sample_analyze_workflow_app_with_params.py][sample_analyze_workflow_app_with_params] and [sample_analyze_workflow_app_with_params_async.py][sample_analyze_workflow_app_with_params_async] | Same as workflow sample, but with ability to customize call with parameters | +| [sample_analyze_conversation_app_language_parm.py][sample_analyze_conversation_app_language_parm] and [sample_analyze_conversation_app_language_parm_async.py][sample_analyze_conversation_app_language_parm_async] | Same as conversations sample, but with the ability to specify query language | +| [sample_analyze_orchestration_app_with_params.py][sample_analyze_orchestration_app_with_params] and [sample_analyze_orchestration_app_with_params_async.py][sample_analyze_orchestration_app_with_params_async] | Same as orchestration sample, but with ability to customize call with parameters | +| [sample_analyze_orchestration_app_qna_response.py][sample_analyze_orchestration_app_qna_response] and [sample_analyze_orchestration_app_qna_response_async.py][sample_analyze_orchestration_app_qna_response_async] | Same as orchestration sample, but in this case the best response will be a Qna project | +| [sample_analyze_orchestration_app_conversation_response.py][sample_analyze_orchestration_app_conversation_response] and [sample_analyze_orchestration_app_conversation_response_async.py][sample_analyze_orchestration_app_conversation_response_async] | Same as orchestration sample, but in this case the best response will be a Conversation project | +| [sample_analyze_orchestration_app_luis_response.py][sample_analyze_orchestration_app_luis_response] and
[sample_analyze_orchestration_app_luis_response_async.py][sample_analyze_orchestration_app_luis_response_async] | Same as orchestration sample, but in this case the best response will be a LUIS project | + [azure_subscription]: https://azure.microsoft.com/free/ [azure_clu_account]: https://language.azure.com/clu/projects @@ -66,9 +70,17 @@ what you can do with the Azure Conversational Language Understanding client libr [sample_authentication_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_authentication_async.py [sample_analyze_conversation_app]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py [sample_analyze_conversation_app_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py -[sample_analyze_workflow_app]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py -[sample_analyze_workflow_app_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py -[sample_analyze_workflow_app_with_params]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py -[sample_analyze_workflow_app_with_params_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py +[sample_analyze_conversation_app_language_parm]: 
https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app_language_parm.py +[sample_analyze_conversation_app_language_parm_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_language_parm_async.py +[sample_analyze_orchestration_app]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app.py +[sample_analyze_orchestration_app_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_async.py +[sample_analyze_orchestration_app_qna_response]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_qna_response.py +[sample_analyze_orchestration_app_qna_response_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_qna_response_async.py +[sample_analyze_orchestration_app_conversation_response]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_conversation_response.py +[sample_analyze_orchestration_app_conversation_response_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_conversation_response_async.py +[sample_analyze_orchestration_app_luis_response]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_luis_response.py 
+[sample_analyze_orchestration_app_luis_response_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py +[sample_analyze_orchestration_app_with_params]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_with_params.py +[sample_analyze_orchestration_app_with_params_async]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_with_params_async.py [api_reference_documentation]: https://language.azure.com/clu/projects [versioning_story_readme]: https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/cognitivelanguage/azure-ai-language-conversations#install-the-package diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py index e500223bc143..3382199b2679 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_async.py @@ -8,9 +8,9 @@ FILE: sample_analyze_conversation_app_async.py DESCRIPTION: - This sample demonstrates how to analyze user query for intents and entities using a deepstack project. + This sample demonstrates how to analyze user query for intents and entities using a conversation project. - For more info about how to setup a CLU deepstack project, see the README. + For more info about how to setup a CLU conversation project, see the README. 
USAGE: python sample_analyze_conversation_app_async.py @@ -30,7 +30,7 @@ async def sample_analyze_conversation_app_async(): from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations.aio import ConversationAnalysisClient - from azure.ai.language.conversations.models import AnalyzeConversationOptions + from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] @@ -39,7 +39,7 @@ async def sample_analyze_conversation_app_async(): # prepare data query = "One california maki please." - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query ) @@ -57,7 +57,7 @@ async def sample_analyze_conversation_app_async(): print("project kind: {}\n".format(result.prediction.project_kind)) print("view top intent:") - print("top intent: {}".format(result.prediction.top_intent)) + print("\ttop intent: {}".format(result.prediction.top_intent)) print("\tcategory: {}".format(result.prediction.intents[0].category)) print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_language_parm_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_language_parm_async.py new file mode 100644 index 000000000000..28ce5f347867 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_conversation_app_language_parm_async.py @@ -0,0 +1,78 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+# ------------------------------------ + +""" +FILE: sample_analyze_conversation_app_language_parm_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query for intents and entities using + a conversation project with a language parameter. + + For more info about how to setup a CLU conversation project, see the README. + +USAGE: + python sample_analyze_conversation_app_language_parm_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_PROJECT - the name of your CLU conversations project. +""" + +import asyncio + +async def sample_analyze_conversation_app_language_parm_async(): + # [START analyze_conversation_app_language_parm_async] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations.aio import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"] + + # prepare data + query = "One california maki please."
+ input = ConversationAnalysisOptions( + query=query, + language="en" + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = await client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + print("\ttop intent: {}".format(result.prediction.top_intent)) + print("\tcategory: {}".format(result.prediction.intents[0].category)) + print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + + print("view entities:") + for entity in result.prediction.entities: + print("\tcategory: {}".format(entity.category)) + print("\ttext: {}".format(entity.text)) + print("\tconfidence score: {}".format(entity.confidence_score)) + # [END analyze_conversation_app_language_parm_async] + +async def main(): + await sample_analyze_conversation_app_language_parm_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_async.py similarity index 69% rename from sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_async.py index 87dcbbb6911a..d21a4e39eb0f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_async.py @@ -5,42 +5,42 @@ # ------------------------------------ """ -FILE:
sample_analyze_workflow_app_async.py +FILE: sample_analyze_orchestration_app_async.py DESCRIPTION: - This sample demonstrates how to analyze user query using an orchestration/workflow project. - In this sample, workflow project's top intent will map to a Question Answering project. + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a Question Answering project. - For more info about how to setup a CLU workflow project, see the README. + For more info about how to setup a CLU orchestration project, see the README. USAGE: - python sample_analyze_workflow_app_async.py + python sample_analyze_orchestration_app_async.py Set the environment variables with your own values before running the sample: 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. 2) AZURE_CONVERSATIONS_KEY - your CLU API key. - 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
""" import asyncio -async def sample_analyze_workflow_app_async(): - # [START analyze_workflow_app] +async def sample_analyze_orchestration_app_async(): + # [START analyze_orchestration_app] # import libraries import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations.aio import ConversationAnalysisClient - from azure.ai.language.conversations.models import AnalyzeConversationOptions + from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] - workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] # prepare data query = "How do you make sushi rice?", - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query ) @@ -49,7 +49,7 @@ async def sample_analyze_workflow_app_async(): async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) @@ -59,16 +59,16 @@ async def sample_analyze_workflow_app_async(): print("view top intent:") top_intent = result.prediction.top_intent - print("top intent: {}".format(top_intent)) + print("\ttop intent: {}".format(top_intent)) top_intent_object = result.prediction.intents[top_intent] print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) print("view Question Answering result:") print("\tresult: {}\n".format(top_intent_object.result)) - # [END analyze_workflow_app] + # [END analyze_orchestration_app] async def main(): - await sample_analyze_workflow_app_async() + await sample_analyze_orchestration_app_async() if __name__ == '__main__': loop = asyncio.get_event_loop() diff --git 
a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_conversation_response_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_conversation_response_async.py new file mode 100644 index 000000000000..5b0acd521099 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_conversation_response_async.py @@ -0,0 +1,86 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_orchestration_app_conversation_response_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a conversation project. + + For more info about how to setup a CLU orchestration project, see the README. + +USAGE: + python sample_analyze_orchestration_app_conversation_response_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
+""" + +import asyncio + +async def sample_analyze_orchestration_app_conversation_response_async(): + # [START analyze_orchestration_app_conversation_response_async] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "Sushi", + input = ConversationAnalysisOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = client.analyze_conversations( + input, + project_name=orchestration_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("\ttop intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view conversation result:\n") + + # print("view intents:") + # for intent in top_intent_object.result.prediction.intents: + # print("\tcategory: {}".format(intent.category)) + # print("\tconfidence score: {}".format(intent.confidence_score)) + + # print("view entities:") + # for entity in top_intent_object.result.prediction.entities: + # print("\tcategory: {}".format(entity.category)) + # print("\ttext: {}".format(entity.text)) + # print("\tconfidence score: {}".format(entity.confidence_score)) + # [END analyze_orchestration_app_conversation_response_async] + +async def main(): + await 
sample_analyze_orchestration_app_conversation_response_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py new file mode 100644 index 000000000000..c767930f43b8 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_luis_response_async.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_orchestration_app_luis_response_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a LUIS project. + + For more info about how to setup a CLU orchestration project, see the README. + +USAGE: + python sample_analyze_orchestration_app_luis_response_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
+""" + +import asyncio + +async def sample_analyze_orchestration_app_luis_response_async(): + # [START analyze_orchestration_app_luis_response_async] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "book me a flight ticket to Bali", + input = ConversationAnalysisOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = client.analyze_conversations( + input, + project_name=orchestration_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("\ttop intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view luis response:") + luis_response = result.prediction.intents[top_intent].result + print("\tluis response: {}\n".format(luis_response)) + # [END analyze_orchestration_app_luis_response_async] + +async def main(): + await sample_analyze_orchestration_app_luis_response_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_qna_response_async.py 
b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_qna_response_async.py new file mode 100644 index 000000000000..af197a1df031 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_qna_response_async.py @@ -0,0 +1,79 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_orchestration_app_qna_response_async.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a Qna project. + + For more info about how to setup a CLU orchestration project, see the README. + +USAGE: + python sample_analyze_orchestration_app_qna_response_async.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
+""" + +import asyncio + +async def sample_analyze_orchestration_app_qna_response_async(): + # [START analyze_orchestration_app_qna_response_async] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "How do you make sushi rice?", + input = ConversationAnalysisOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + async with client: + result = client.analyze_conversations( + input, + project_name=orchestration_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("\ttop intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view qna result:") + qna_result = result.prediction.intents[top_intent].result + for answer in qna_result.answers: + print("\tanswer: {}\n".format(answer.answer)) + # [END analyze_orchestration_app_qna_response_async] + + +async def main(): + await sample_analyze_orchestration_app_qna_response_async() + +if __name__ == '__main__': + loop = asyncio.get_event_loop() + loop.run_until_complete(main()) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py 
b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_with_params_async.py similarity index 74% rename from sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_with_params_async.py index ee4e434a6ead..85d85eea14c8 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_workflow_app_with_params_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/async/sample_analyze_orchestration_app_with_params_async.py @@ -5,46 +5,46 @@ # ------------------------------------ """ -FILE: sample_analyze_workflow_app_with_params_async.py +FILE: sample_analyze_orchestration_app_with_params_async.py DESCRIPTION: - This sample demonstrates how to analyze user query using an orchestration/workflow project. + This sample demonstrates how to analyze user query using an orchestration project. In this sample, worflow project's top intent will map to a Question Answering project. - For more info about how to setup a CLU workflow project, see the README. + For more info about how to setup a CLU orchestration project, see the README. USAGE: - python sample_analyze_workflow_app_with_params_async.py + python sample_analyze_orchestration_app_with_params_async.py Set the environment variables with your own values before running the sample: 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. 2) AZURE_CONVERSATIONS_KEY - your CLU API key. - 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
""" import asyncio -async def sample_analyze_workflow_app_with_params_async(): - # [START analyze_workflow_app_with_params] +async def sample_analyze_orchestration_app_with_params_async(): + # [START analyze_orchestration_app_with_params] # import libraries import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations.aio import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, + ConversationAnalysisOptions, QuestionAnsweringParameters, - DeepstackParameters, + ConversationParameters, ) # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] - workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] # prepare data query = "How do you make sushi rice?", - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, parameters={ "SushiMaking": QuestionAnsweringParameters( @@ -54,7 +54,7 @@ async def sample_analyze_workflow_app_with_params_async(): "confidenceScoreThreshold": 0.1 } ), - "SushiOrder": DeepstackParameters( + "SushiOrder": ConversationParameters( calling_options={ "verbose": True } @@ -67,7 +67,7 @@ async def sample_analyze_workflow_app_with_params_async(): async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) @@ -77,17 +77,17 @@ async def sample_analyze_workflow_app_with_params_async(): print("view top intent:") top_intent = result.prediction.top_intent - print("top intent: {}".format(top_intent)) + print("\ttop intent: {}".format(top_intent)) top_intent_object = result.prediction.intents[top_intent] print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) print("view Question Answering result:") print("\tresult: 
{}\n".format(top_intent_object.result)) - # [END analyze_workflow_app_with_params] + # [END analyze_orchestration_app_with_params] async def main(): - await sample_analyze_workflow_app_with_params_async() + await sample_analyze_orchestration_app_with_params_async() if __name__ == '__main__': loop = asyncio.get_event_loop() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py index f7994db10de0..1c8f0c5eed0f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app.py @@ -8,9 +8,9 @@ FILE: sample_analyze_conversation_app.py DESCRIPTION: - This sample demonstrates how to analyze user query for intents and entities using a deepstack project. + This sample demonstrates how to analyze user query for intents and entities using a conversation project. - For more info about how to setup a CLU deepstack project, see the README. + For more info about how to setup a CLU conversation project, see the README. USAGE: python sample_analyze_conversation_app.py @@ -28,7 +28,7 @@ def sample_analyze_conversation_app(): from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations import ConversationAnalysisClient - from azure.ai.language.conversations.models import AnalyzeConversationOptions + from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] @@ -37,7 +37,7 @@ def sample_analyze_conversation_app(): # prepare data query = "One california maki please." 
- input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query ) @@ -55,7 +55,7 @@ def sample_analyze_conversation_app(): print("project kind: {}\n".format(result.prediction.project_kind)) print("view top intent:") - print("top intent: {}".format(result.prediction.top_intent)) + print("\ttop intent: {}".format(result.prediction.top_intent)) print("\tcategory: {}".format(result.prediction.intents[0].category)) print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app_language_parm.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app_language_parm.py new file mode 100644 index 000000000000..69486b088d46 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_conversation_app_language_parm.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_conversation_app_language_parm.py + +DESCRIPTION: + This sample demonstrates how to analyze user query for intents and entities using + a conversation project with a language parameter. + + For more info about how to setup a CLU conversation project, see the README. + +USAGE: + python sample_analyze_conversation_app_language_parm.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_PROJECT - the name of your CLU conversations project. 
+""" + +def sample_analyze_conversation_app_language_parm(): + # [START analyze_conversation_app_language_parm] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + conv_project = os.environ["AZURE_CONVERSATIONS_PROJECT"] + + # prepare data + query = "One california maki please." + input = ConversationAnalysisOptions( + query=query, + language="en" + ) + + # analyze quey + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=conv_project, + deployment_name='production' + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + print("\ttop intent: {}".format(result.prediction.top_intent)) + print("\tcategory: {}".format(result.prediction.intents[0].category)) + print("\tconfidence score: {}\n".format(result.prediction.intents[0].confidence_score)) + + print("view entities:") + for entity in result.prediction.entities: + print("\tcategory: {}".format(entity.category)) + print("\ttext: {}".format(entity.text)) + print("\tconfidence score: {}".format(entity.confidence_score)) + # [END analyze_conversation_app_language_parm] + + +if __name__ == '__main__': + sample_analyze_conversation_app_language_parm() diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app.py similarity index 68% rename from sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py rename to 
sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app.py index 6378346fb3fc..9de90369506b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app.py @@ -5,40 +5,39 @@ # ------------------------------------ """ -FILE: sample_analyze_workflow_app.py +FILE: sample_analyze_orchestration_app.py DESCRIPTION: - This sample demonstrates how to analyze user query using an orchestration/workflow project. - In this sample, workflow project's top intent will map to a Qna project. + This sample demonstrates how to analyze user query using an orchestration project. - For more info about how to setup a CLU workflow project, see the README. + For more info about how to setup a CLU orchestration project, see the README. USAGE: - python sample_analyze_workflow_app.py + python sample_analyze_orchestration_app.py Set the environment variables with your own values before running the sample: 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. 2) AZURE_CONVERSATIONS_KEY - your CLU API key. - 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
""" -def sample_analyze_workflow_app(): - # [START analyze_workflow_app] +def sample_analyze_orchestration_app(): + # [START analyze_orchestration_app] # import libraries import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations import ConversationAnalysisClient - from azure.ai.language.conversations.models import AnalyzeConversationOptions + from azure.ai.language.conversations.models import ConversationAnalysisOptions # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] - workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] # prepare data query = "How do you make sushi rice?", - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query ) @@ -47,7 +46,7 @@ def sample_analyze_workflow_app(): with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) @@ -57,14 +56,14 @@ def sample_analyze_workflow_app(): print("view top intent:") top_intent = result.prediction.top_intent - print("top intent: {}".format(top_intent)) + print("\ttop intent: {}".format(top_intent)) top_intent_object = result.prediction.intents[top_intent] print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) - print("view qna result:") + print("view result:") print("\tresult: {}\n".format(top_intent_object.result)) - # [END analyze_workflow_app] + # [END analyze_orchestration_app] if __name__ == '__main__': - sample_analyze_workflow_app() \ No newline at end of file + sample_analyze_orchestration_app() \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_conversation_response.py 
b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_conversation_response.py new file mode 100644 index 000000000000..bd1ab80b6e31 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_conversation_response.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_orchestration_app_conversation_response.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a conversation project. + + For more info about how to setup a CLU orchestration project, see the README. + +USAGE: + python sample_analyze_orchestration_app_conversation_response.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
+""" + +def sample_analyze_orchestration_app_conversation_response(): + # [START analyze_orchestration_app_conversation_response] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "One california maki please.", + input = ConversationAnalysisOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=orchestration_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("\ttop intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view conversation result:\n") + + # print("view intents:") + # for intent in top_intent_object.result.prediction.intents: + # print("\tcategory: {}".format(intent.category)) + # print("\tconfidence score: {}".format(intent.confidence_score)) + + # print("view entities:") + # for entity in top_intent_object.result.prediction.entities: + # print("\tcategory: {}".format(entity.category)) + # print("\ttext: {}".format(entity.text)) + # print("\tconfidence score: {}".format(entity.confidence_score)) + # [END analyze_orchestration_app_conversation_response] + +if __name__ == '__main__': + sample_analyze_orchestration_app_conversation_response() \ No 
newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_luis_response.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_luis_response.py new file mode 100644 index 000000000000..4e85c382ceab --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_luis_response.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_orchestration_app_luis_response.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a LUIS project. + + For more info about how to setup a CLU orchestration project, see the README. + +USAGE: + python sample_analyze_orchestration_app_luis_response.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. 
+""" + +def sample_analyze_orchestration_app_luis_response(): + # [START analyze_orchestration_app_luis_response] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "book me a flight ticket to Bali", + input = ConversationAnalysisOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=orchestration_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: {}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("\ttop intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view luis response:") + luis_response = result.prediction.intents[top_intent].result + print("\tluis response: {}\n".format(luis_response)) + # [END analyze_orchestration_app_luis_response] + +if __name__ == '__main__': + sample_analyze_orchestration_app_luis_response() \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_qna_response.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_qna_response.py new file mode 100644 index 000000000000..400f99786bca --- /dev/null +++ 
b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_qna_response.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# ------------------------------------ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. +# ------------------------------------ + +""" +FILE: sample_analyze_orchestration_app_qna_response.py + +DESCRIPTION: + This sample demonstrates how to analyze user query using an orchestration project. + In this sample, orchestration project's top intent will map to a Qna project. + + For more info about how to setup a CLU orchestration project, see the README. + +USAGE: + python sample_analyze_orchestration_app_qna_response.py + + Set the environment variables with your own values before running the sample: + 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. + 2) AZURE_CONVERSATIONS_KEY - your CLU API key. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. +""" + +def sample_analyze_orchestration_app_qna_response(): + # [START analyze_orchestration_app_qna_response] + # import libraries + import os + from azure.core.credentials import AzureKeyCredential + + from azure.ai.language.conversations import ConversationAnalysisClient + from azure.ai.language.conversations.models import ConversationAnalysisOptions + + # get secrets + conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] + conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + + # prepare data + query = "How do you make sushi rice?", + input = ConversationAnalysisOptions( + query=query + ) + + # analyze query + client = ConversationAnalysisClient(conv_endpoint, AzureKeyCredential(conv_key)) + with client: + result = client.analyze_conversations( + input, + project_name=orchestration_project, + deployment_name='production', + ) + + # view result + print("query: {}".format(result.query)) + print("project kind: 
{}\n".format(result.prediction.project_kind)) + + print("view top intent:") + top_intent = result.prediction.top_intent + print("\ttop intent: {}".format(top_intent)) + + top_intent_object = result.prediction.intents[top_intent] + print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) + + print("view qna result:") + qna_result = result.prediction.intents[top_intent].result + for answer in qna_result.answers: + print("\tanswer: {}\n".format(answer.answer)) + # [END analyze_orchestration_app_qna_response] + +if __name__ == '__main__': + sample_analyze_orchestration_app_qna_response() \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_with_params.py similarity index 72% rename from sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_with_params.py index 7c300f690e75..ce6e2c2d55fb 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_workflow_app_with_params.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/samples/sample_analyze_orchestration_app_with_params.py @@ -5,44 +5,44 @@ # ------------------------------------ """ -FILE: sample_analyze_workflow_app_with_params.py +FILE: sample_analyze_orchestration_app_with_params.py DESCRIPTION: - This sample demonstrates how to analyze user query using an orchestration/workflow project. + This sample demonstrates how to analyze user query using an orchestration project. In this sample, worflow project's top intent will map to a Qna project. - For more info about how to setup a CLU workflow project, see the README. + For more info about how to setup a CLU orchestration project, see the README. 
USAGE: - python sample_analyze_workflow_app_with_params.py + python sample_analyze_orchestration_app_with_params.py Set the environment variables with your own values before running the sample: 1) AZURE_CONVERSATIONS_ENDPOINT - the endpoint to your CLU resource. 2) AZURE_CONVERSATIONS_KEY - your CLU API key. - 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU workflow project. + 3) AZURE_CONVERSATIONS_WORKFLOW_PROJECT - the name of your CLU orchestration project. """ -def sample_analyze_workflow_app_with_params(): - # [START analyze_workflow_app_with_params] +def sample_analyze_orchestration_app_with_params(): + # [START analyze_orchestration_app_with_params] # import libraries import os from azure.core.credentials import AzureKeyCredential from azure.ai.language.conversations import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, + ConversationAnalysisOptions, QuestionAnsweringParameters, - DeepstackParameters, + ConversationParameters, ) # get secrets conv_endpoint = os.environ["AZURE_CONVERSATIONS_ENDPOINT"] conv_key = os.environ["AZURE_CONVERSATIONS_KEY"] - workflow_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] + orchestration_project = os.environ["AZURE_CONVERSATIONS_WORKFLOW_PROJECT"] # prepare data query = "How do you make sushi rice?", - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, parameters={ "SushiMaking": QuestionAnsweringParameters( @@ -52,7 +52,7 @@ def sample_analyze_workflow_app_with_params(): "confidenceScoreThreshold": 0.1 } ), - "SushiOrder": DeepstackParameters( + "SushiOrder": ConversationParameters( calling_options={ "verbose": True } @@ -65,7 +65,7 @@ def sample_analyze_workflow_app_with_params(): with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) @@ -75,14 +75,14 @@ def 
sample_analyze_workflow_app_with_params(): print("view top intent:") top_intent = result.prediction.top_intent - print("top intent: {}".format(top_intent)) + print("\ttop intent: {}".format(top_intent)) top_intent_object = result.prediction.intents[top_intent] print("\tconfidence score: {}\n".format(top_intent_object.confidence_score)) - print("view Question Answering result:") + print("view result:") print("\tresult: {}\n".format(top_intent_object.result)) - # [END analyze_workflow_app_with_params] + # [END analyze_orchestration_app_with_params] if __name__ == '__main__': - sample_analyze_workflow_app_with_params() \ No newline at end of file + sample_analyze_orchestration_app_with_params() \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_conversation_app_async.test_conversation_app.yaml similarity index 54% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_conversation_app_async.test_conversation_app.yaml index ce0fcdc9e420..b190027baa9b 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app.yaml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_conversation_app_async.test_conversation_app.yaml @@ -11,28 +11,24 @@ interactions: User-Agent: - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-07-15-preview + uri: 
https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-11-01-preview response: body: - string: "{\n \"query\": \"One california maki please.\",\n \"prediction\": - {\n \"intents\": [\n {\n \"category\": \"Order\",\n \"confidenceScore\": - 1\n }\n ],\n \"entities\": [\n {\n \"category\": \"OrderItem\",\n - \ \"text\": \"california maki\",\n \"offset\": 4,\n \"length\": - 15,\n \"confidenceScore\": 1\n }\n ],\n \"topIntent\": \"Order\",\n - \ \"projectType\": \"conversation\"\n }\n}" + string: '{"query":"One california maki please.","prediction":{"topIntent":"Order","projectKind":"conversation","intents":[{"category":"Order","confidenceScore":1}],"entities":[{"category":"OrderItem","text":"california + maki","offset":4,"length":15,"confidenceScore":1}]}}' headers: - apim-request-id: 577adef9-402b-4f6a-ae8b-abc1c82660a4 + apim-request-id: 87b3b6ef-b1e5-4ff8-a378-d34b7d0fe040 cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private content-type: application/json; charset=utf-8 - date: Thu, 30 Sep 2021 16:56:53 GMT + date: Thu, 28 Oct 2021 15:00:10 GMT pragma: no-cache - request-id: 577adef9-402b-4f6a-ae8b-abc1c82660a4 + request-id: 87b3b6ef-b1e5-4ff8-a378-d34b7d0fe040 strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '303' + x-envoy-upstream-service-time: '42' status: code: 200 message: OK - url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischOne&deploymentName=production&api-version=2021-07-15-preview + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischOne&deploymentName=production&api-version=2021-11-01-preview version: 1 diff --git 
a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml similarity index 54% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml index 79a376aa59e2..4de936033832 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_conversation_app_async.test_conversation_app_with_dictparams.yaml @@ -11,28 +11,24 @@ interactions: User-Agent: - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-07-15-preview + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-11-01-preview response: body: - string: "{\n \"query\": \"One california maki please.\",\n \"prediction\": - {\n \"intents\": [\n {\n \"category\": \"Order\",\n \"confidenceScore\": - 1\n }\n ],\n \"entities\": [\n {\n \"category\": \"OrderItem\",\n - \ \"text\": \"california maki\",\n \"offset\": 4,\n \"length\": - 15,\n \"confidenceScore\": 1\n }\n ],\n \"topIntent\": \"Order\",\n - \ \"projectType\": \"conversation\"\n }\n}" + string: '{"query":"One california maki 
please.","prediction":{"topIntent":"Order","projectKind":"conversation","intents":[{"category":"Order","confidenceScore":1}],"entities":[{"category":"OrderItem","text":"california + maki","offset":4,"length":15,"confidenceScore":1}]}}' headers: - apim-request-id: 9ec258d5-b660-4f35-bacb-ef4ad6af3fd9 + apim-request-id: 67a9790b-8430-470e-8284-fae857c7adae cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private content-type: application/json; charset=utf-8 - date: Thu, 30 Sep 2021 16:56:54 GMT + date: Thu, 28 Oct 2021 15:00:11 GMT pragma: no-cache - request-id: 9ec258d5-b660-4f35-bacb-ef4ad6af3fd9 + request-id: 67a9790b-8430-470e-8284-fae857c7adae strict-transport-security: max-age=31536000; includeSubDomains; preload transfer-encoding: chunked x-content-type-options: nosniff - x-envoy-upstream-service-time: '51' + x-envoy-upstream-service-time: '40' status: code: 200 message: OK - url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischOne&deploymentName=production&api-version=2021-07-15-preview + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischOne&deploymentName=production&api-version=2021-11-01-preview version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app.yaml new file mode 100644 index 000000000000..201bb68e8ed6 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app.yaml @@ -0,0 +1,76 @@ +interactions: +- request: + body: '{"query": "How do you make sushi rice?"}' + headers: + Accept: + - application/json + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - 
azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: "{\"query\":\"How do you make sushi rice?\",\"prediction\":{\"topIntent\":\"SushiMaking\",\"projectKind\":\"workflow\",\"intents\":{\"SushiMaking\":{\"confidenceScore\":0.77994794,\"targetKind\":\"question_answering\",\"result\":{\"answers\":[{\"questions\":[],\"answer\":\"#### + Sushi Rice (Shari) adapted from Sushi Made Easy by K. Wong\\n\\nIngredients: + 2 cups sushi rice or short-grain rice 2 \xBD cups cold water (or amount of + water suggested on rice package if different for 2 cups of rice)\\n\\nVinegar + Mixture 4 tbsp. rice vinegar 2 tbsp. sugar 1/2 tsp. salt\\n\\nDirections:\\n\\nWash + the rice until water runs clear (do not skip this step, it is essential in + achieving the correct consistency to work with your rice).\\n\\n2. 
Place the + rice in a pot with a tight fitting lid and add the water.\",\"confidenceScore\":0.6334000000000001,\"id\":0,\"source\":\"sushi(1).pdf\",\"metadata\":{},\"answerSpan\":{\"text\":\"2 + cups\",\"confidenceScore\":0.25120000000000003,\"offset\":77,\"length\":7}}]}},\"SushiOrder\":{\"confidenceScore\":0.7317708,\"targetKind\":\"conversation\"},\"None\":{\"confidenceScore\":0,\"targetKind\":\"non_linked\"}}}}" + headers: + apim-request-id: 6ca10ed1-20de-4560-9bc8-4c5beb0c6652 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + date: Thu, 28 Oct 2021 15:00:12 GMT + pragma: no-cache + request-id: 6ca10ed1-20de-4560-9bc8-4c5beb0c6652 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '615' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-11-01-preview +- request: + body: '{"query": "I will have sashimi"}' + headers: + Accept: + - application/json + Content-Length: + - '32' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: '{"query":"I will have sashimi","prediction":{"topIntent":"SushiMaking","projectKind":"workflow","intents":{"SushiMaking":{"confidenceScore":0.79296875,"targetKind":"question_answering","result":{"answers":[{"questions":[],"answer":"Put + the dark brown sugar, oil, vinegar and garlic in a zip lock plastic bag and + shake to mix well. 
Put the flank steak in the bag, close, and toss to coat + the steak evenly with the marinade. Let sit to rest at least 30 minutes.","confidenceScore":0.19149999999999998,"id":0,"source":"SushiQuikCompleteRecipeBook.pdf","metadata":{},"answerSpan":{"text":"flank + steak","confidenceScore":0.527,"offset":106,"length":12}}]}},"SushiOrder":{"confidenceScore":0.78515625,"targetKind":"conversation"},"None":{"confidenceScore":0,"targetKind":"non_linked"}}}}' + headers: + apim-request-id: 7fe89c7a-5d95-4d4b-a8e9-7f0b26f94099 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + date: Thu, 28 Oct 2021 15:00:13 GMT + pragma: no-cache + request-id: 7fe89c7a-5d95-4d4b-a8e9-7f0b26f94099 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '540' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-11-01-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_model.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_model.yaml new file mode 100644 index 000000000000..65318a865bd8 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_model.yaml @@ -0,0 +1,44 @@ +interactions: +- request: + body: '{"query": "How do you make sushi rice?", "parameters": {"SushiMaking": + {"targetKind": "question_answering", "callingOptions": {"question": "How do + you make sushi rice?", "top": 1, "confidence_score_threshold": 0.1}}, "SushiOrder": + {"targetKind": "conversation", 
"callingOptions": {"verbose": true}}}}' + headers: + Accept: + - application/json + Content-Length: + - '300' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: "{\"query\":\"How do you make sushi rice?\",\"prediction\":{\"topIntent\":\"SushiMaking\",\"projectKind\":\"workflow\",\"intents\":{\"SushiMaking\":{\"confidenceScore\":0.77994794,\"targetKind\":\"question_answering\",\"result\":{\"answers\":[{\"questions\":[],\"answer\":\"#### + Sushi Rice (Shari) adapted from Sushi Made Easy by K. Wong\\n\\nIngredients: + 2 cups sushi rice or short-grain rice 2 \xBD cups cold water (or amount of + water suggested on rice package if different for 2 cups of rice)\\n\\nVinegar + Mixture 4 tbsp. rice vinegar 2 tbsp. sugar 1/2 tsp. salt\\n\\nDirections:\\n\\nWash + the rice until water runs clear (do not skip this step, it is essential in + achieving the correct consistency to work with your rice).\\n\\n2. 
Place the + rice in a pot with a tight fitting lid and add the water.\",\"confidenceScore\":0.6334000000000001,\"id\":0,\"source\":\"sushi(1).pdf\",\"metadata\":{},\"answerSpan\":{\"text\":\"2 + cups\",\"confidenceScore\":0.25120000000000003,\"offset\":77,\"length\":7}}]}},\"SushiOrder\":{\"confidenceScore\":0.7317708,\"targetKind\":\"conversation\"},\"None\":{\"confidenceScore\":0,\"targetKind\":\"non_linked\"}}}}" + headers: + apim-request-id: ce74035e-bdcb-4e8c-968e-77990660fe23 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + date: Thu, 28 Oct 2021 15:00:15 GMT + pragma: no-cache + request-id: ce74035e-bdcb-4e8c-968e-77990660fe23 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '772' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-11-01-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_parameters.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_parameters.yaml new file mode 100644 index 000000000000..d521f16fe0c2 --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/recordings/test_orchestration_app_async.test_orchestration_app_with_parameters.yaml @@ -0,0 +1,44 @@ +interactions: +- request: + body: '{"query": "(''How do you make sushi rice?'',)", "parameters": {"SushiMaking": + {"targetKind": "question_answering", "callingOptions": {"question": "(''How + do you make sushi rice?'',)", "top": 1, "confidenceScoreThreshold": 0.1}}, "SushiOrder": + {"targetKind": "conversation", 
"callingOptions": {"verbose": true}}}}' + headers: + Accept: + - application/json + Content-Length: + - '308' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: "{\"query\":\"('How do you make sushi rice?',)\",\"prediction\":{\"topIntent\":\"SushiMaking\",\"projectKind\":\"workflow\",\"intents\":{\"SushiMaking\":{\"confidenceScore\":0.7864583,\"targetKind\":\"question_answering\",\"result\":{\"answers\":[{\"questions\":[],\"answer\":\"#### + Sushi Rice (Shari) adapted from Sushi Made Easy by K. Wong\\n\\nIngredients: + 2 cups sushi rice or short-grain rice 2 \xBD cups cold water (or amount of + water suggested on rice package if different for 2 cups of rice)\\n\\nVinegar + Mixture 4 tbsp. rice vinegar 2 tbsp. sugar 1/2 tsp. salt\\n\\nDirections:\\n\\nWash + the rice until water runs clear (do not skip this step, it is essential in + achieving the correct consistency to work with your rice).\\n\\n2. 
Place the + rice in a pot with a tight fitting lid and add the water.\",\"confidenceScore\":0.5602,\"id\":0,\"source\":\"sushi(1).pdf\",\"metadata\":{},\"answerSpan\":{\"text\":\"2 + cups sushi rice or short-grain rice 2 \xBD cups cold water\",\"confidenceScore\":0.2865,\"offset\":77,\"length\":58}}]}},\"SushiOrder\":{\"confidenceScore\":0.7239583,\"targetKind\":\"conversation\"},\"None\":{\"confidenceScore\":0,\"targetKind\":\"non_linked\"}}}}" + headers: + apim-request-id: 4fcbb0ee-19bf-49ee-8ccc-71e2d77aa2c1 + cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: application/json; charset=utf-8 + date: Thu, 28 Oct 2021 15:00:17 GMT + pragma: no-cache + request-id: 4fcbb0ee-19bf-49ee-8ccc-71e2d77aa2c1 + strict-transport-security: max-age=31536000; includeSubDomains; preload + transfer-encoding: chunked + x-content-type-options: nosniff + x-envoy-upstream-service-time: '634' + status: + code: 200 + message: OK + url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-11-01-preview +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_conversation_app_async.py similarity index 92% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_conversation_app_async.py index b0ad647aee85..21369aa0166a 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_conversation_app_async.py @@ -14,9 +14,9 @@ from azure.ai.language.conversations.aio import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, + 
ConversationAnalysisOptions, AnalyzeConversationResult, - DeepstackPrediction + ConversationPrediction ) @@ -27,7 +27,7 @@ async def test_conversation_app(self, conv_account, conv_key, conv_project): # prepare data query = "One california maki please." - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, ) @@ -43,7 +43,7 @@ async def test_conversation_app(self, conv_account, conv_key, conv_project): # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, DeepstackPrediction) + assert isinstance(result.prediction, ConversationPrediction) assert result.prediction.project_kind == 'conversation' assert result.prediction.top_intent == 'Order' assert len(result.prediction.entities) > 0 @@ -75,7 +75,7 @@ async def test_conversation_app_with_dictparams(self, conv_account, conv_key, co # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, DeepstackPrediction) + assert isinstance(result.prediction, ConversationPrediction) assert result.prediction.project_kind == 'conversation' assert result.prediction.top_intent == 'Order' assert len(result.prediction.entities) > 0 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_orchestration_app_async.py similarity index 66% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_orchestration_app_async.py index 78052780d63e..7f760a467bcf 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_orchestration_app_async.py @@ -14,22 +14,21 @@ from azure.ai.language.conversations.aio import 
ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, AnalyzeConversationResult, - AnalyzeConversationOptions, + AnalysisParameters, AnalyzeConversationResult, QuestionAnsweringParameters, - DeepstackParameters, - DeepstackCallingOptions, + ConversationParameters, + ConversationCallingOptions, QuestionAnsweringTargetIntentResult, - WorkflowPrediction, - DSTargetIntentResult + OrchestratorPrediction, + ConversationAnalysisOptions ) -class WorkflowAppAsyncTests(AsyncConversationTest): +class OrchestrationAppAsyncTests(AsyncConversationTest): @GlobalConversationAccountPreparer() - async def test_workflow_app(self, conv_account, conv_key, workflow_project): + async def test_orchestration_app(self, conv_account, conv_key, orchestration_project): client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) async with client: @@ -38,41 +37,42 @@ async def test_workflow_app(self, conv_account, conv_key, workflow_project): query = "How do you make sushi rice?" 
result = await client.analyze_conversations( {"query": query}, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert + top_intent = "SushiMaking" assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" - assert result.prediction.top_intent == "SushiMaking" - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert result.prediction.top_intent == top_intent + assert isinstance(result.prediction.intents[top_intent], QuestionAnsweringTargetIntentResult) # analyze query query = "I will have sashimi" result = await client.analyze_conversations( {"query": query}, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" # assert result.prediction.top_intent == "SushiOrder" --> wrong top intent! 
- # assert isinstance(result.prediction.intents, DSTargetIntentResult) + # assert isinstance(result.prediction.intents, ConversationTargetIntentResult) @GlobalConversationAccountPreparer() - async def test_workflow_app_with_parameters(self, conv_account, conv_key, workflow_project): + async def test_orchestration_app_with_parameters(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?", - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, parameters={ "SushiMaking": QuestionAnsweringParameters( @@ -82,7 +82,7 @@ async def test_workflow_app_with_parameters(self, conv_account, conv_key, workfl "confidenceScoreThreshold": 0.1 } ), - "SushiOrder": DeepstackParameters( + "SushiOrder": ConversationParameters( calling_options={ "verbose": True } @@ -95,25 +95,26 @@ async def test_workflow_app_with_parameters(self, conv_account, conv_key, workfl async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert + top_intent = "SushiMaking" assert isinstance(result, AnalyzeConversationResult) # assert result.query == query --> weird behavior here! 
- assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" - assert result.prediction.top_intent == "SushiMaking" - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert result.prediction.top_intent == top_intent + assert isinstance(result.prediction.intents[top_intent], QuestionAnsweringTargetIntentResult) @GlobalConversationAccountPreparer() - async def test_workflow_app_with_model(self, conv_account, conv_key, workflow_project): + async def test_orchestration_app_with_model(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?" - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, parameters={ "SushiMaking": QuestionAnsweringParameters( @@ -123,8 +124,8 @@ async def test_workflow_app_with_model(self, conv_account, conv_key, workflow_pr "confidence_score_threshold":0.1 } ), - "SushiOrder": DeepstackParameters( - calling_options=DeepstackCallingOptions( + "SushiOrder": ConversationParameters( + calling_options=ConversationCallingOptions( verbose=True ) ) @@ -136,14 +137,15 @@ async def test_workflow_app_with_model(self, conv_account, conv_key, workflow_pr async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert + top_intent = "SushiMaking" assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" - assert result.prediction.top_intent == "SushiMaking" - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert result.prediction.top_intent == top_intent + assert 
isinstance(result.prediction.intents[top_intent], QuestionAnsweringTargetIntentResult) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_orchestration_direct_async.py similarity index 75% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_orchestration_direct_async.py index 982763cab607..eee9d536f691 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct_async.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/async/test_orchestration_direct_async.py @@ -14,26 +14,26 @@ from azure.ai.language.conversations.aio import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, + AnalysisParameters, AnalyzeConversationResult, QuestionAnsweringParameters, - DeepstackParameters, - WorkflowPrediction, - QuestionAnsweringTargetIntentResult, - DSTargetIntentResult, - LUISTargetIntentResult + # ConversationParameters, + # OrchestratorPrediction, + # QuestionAnsweringTargetIntentResult, + # ConversationTargetIntentResult, + # LUISTargetIntentResult ) -class WorkflowAppDirectAsyncTests(AsyncConversationTest): +class OrchestrationAppDirectAsyncTests(AsyncConversationTest): @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - async def test_direct_kb_intent(self, conv_account, conv_key, workflow_project): + async def test_direct_kb_intent(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?" 
target_intent = "SushiMaking" - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ @@ -52,26 +52,26 @@ async def test_direct_kb_intent(self, conv_account, conv_key, workflow_project): async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + # assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - async def test_kb_intent_with_model(self, conv_account, conv_key, workflow_project): + async def test_kb_intent_with_model(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?" 
target_intent = "SushiMaking" - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ @@ -90,35 +90,35 @@ async def test_kb_intent_with_model(self, conv_account, conv_key, workflow_proje async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + # assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - async def test_deepstack_intent(self, conv_account, conv_key, workflow_project): + async def test_conversation_intent(self, conv_account, conv_key, orchestration_project): # prepare data query = "I will have the oyako donburi please." 
target_intent = "SushiOrder" client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ - "SushiOrder": DeepstackParameters( - calling_options={ - "verbose": True, - } - ) + # "SushiOrder": ConversationParameters( + # calling_options={ + # "verbose": True, + # } + # ) } ) @@ -126,35 +126,35 @@ async def test_deepstack_intent(self, conv_account, conv_key, workflow_project): async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + # assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent - # assert isinstance(result.prediction.intents, DSTargetIntentResult) + # assert isinstance(result.prediction.intents, ConversationTargetIntentResult) @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - async def test_luis_intent(self, conv_account, conv_key, workflow_project): + async def test_luis_intent(self, conv_account, conv_key, orchestration_project): # prepare data query = "I will have the oyako donburi please." 
target_intent = "SushiOrder" client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ - "SushiOrder": DeepstackParameters( - calling_options={ - "verbose": True, - } - ) + # "SushiOrder": ConversationParameters( + # calling_options={ + # "verbose": True, + # } + # ) } ) @@ -162,14 +162,14 @@ async def test_luis_intent(self, conv_account, conv_key, workflow_project): async with client: result = await client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + # assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent # assert isinstance(result.prediction.intents, LUISTargetIntentResult) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml index 2c7a6cc30bcd..4ec8fbdd55e9 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app.yaml @@ -1,6 +1,6 @@ interactions: - request: - body: !!python/unicode '{"query": "One california maki please."}' + body: '{"query": "One california maki please."}' headers: Accept: - application/json @@ -13,31 +13,26 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 
(Windows-10-10.0.19041) + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-project&deploymentName=production + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-11-01-preview response: body: - string: !!python/unicode "{\n \"query\": \"One california maki please.\",\n - \ \"prediction\": {\n \"intents\": [\n {\n \"category\": \"Order\",\n - \ \"confidenceScore\": 1\n }\n ],\n \"entities\": [\n {\n - \ \"category\": \"OrderItem\",\n \"text\": \"california maki\",\n - \ \"offset\": 4,\n \"length\": 15,\n \"confidenceScore\": - 1\n }\n ],\n \"topIntent\": \"Order\",\n \"projectType\": \"conversation\"\n - \ }\n}" + string: '{"query":"One california maki please.","prediction":{"topIntent":"Order","projectKind":"conversation","intents":[{"category":"Order","confidenceScore":1}],"entities":[{"category":"OrderItem","text":"california + maki","offset":4,"length":15,"confidenceScore":1}]}}' headers: apim-request-id: - - 02b21bc7-d52c-48f4-8ecb-5ec8b95c0822 + - a040b58d-0e08-45bd-aa45-36904e24abf6 cache-control: - no-store, proxy-revalidate, no-cache, max-age=0, private content-type: - application/json; charset=utf-8 date: - - Thu, 30 Sep 2021 17:41:07 GMT + - Thu, 28 Oct 2021 15:00:01 GMT pragma: - no-cache request-id: - - 02b21bc7-d52c-48f4-8ecb-5ec8b95c0822 + - a040b58d-0e08-45bd-aa45-36904e24abf6 strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -45,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '126' + - '42' status: code: 200 message: OK diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml 
b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml index fb25b0bf0925..4ebac5b5090f 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_conversation_app.test_conversation_app_with_dictparams.yaml @@ -1,6 +1,6 @@ interactions: - request: - body: !!python/unicode '{"query": "One california maki please."}' + body: '{"query": "One california maki please."}' headers: Accept: - application/json @@ -13,31 +13,26 @@ interactions: Content-Type: - application/json User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-project&deploymentName=production + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-project&deploymentName=production&api-version=2021-11-01-preview response: body: - string: !!python/unicode "{\n \"query\": \"One california maki please.\",\n - \ \"prediction\": {\n \"intents\": [\n {\n \"category\": \"Order\",\n - \ \"confidenceScore\": 1\n }\n ],\n \"entities\": [\n {\n - \ \"category\": \"OrderItem\",\n \"text\": \"california maki\",\n - \ \"offset\": 4,\n \"length\": 15,\n \"confidenceScore\": - 1\n }\n ],\n \"topIntent\": \"Order\",\n \"projectType\": \"conversation\"\n - \ }\n}" + string: '{"query":"One california maki please.","prediction":{"topIntent":"Order","projectKind":"conversation","intents":[{"category":"Order","confidenceScore":1}],"entities":[{"category":"OrderItem","text":"california + maki","offset":4,"length":15,"confidenceScore":1}]}}' 
headers: apim-request-id: - - 2c325546-f02f-43fd-afb0-e9d5c2f1b418 + - 2ed02857-3030-4bd3-a976-1a7b218138e0 cache-control: - no-store, proxy-revalidate, no-cache, max-age=0, private content-type: - application/json; charset=utf-8 date: - - Thu, 30 Sep 2021 17:41:09 GMT + - Thu, 28 Oct 2021 15:00:03 GMT pragma: - no-cache request-id: - - 2c325546-f02f-43fd-afb0-e9d5c2f1b418 + - 2ed02857-3030-4bd3-a976-1a7b218138e0 strict-transport-security: - max-age=31536000; includeSubDomains; preload transfer-encoding: @@ -45,7 +40,7 @@ interactions: x-content-type-options: - nosniff x-envoy-upstream-service-time: - - '73' + - '42' status: code: 200 message: OK diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app.yaml new file mode 100644 index 000000000000..c964a79e1b5b --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app.yaml @@ -0,0 +1,102 @@ +interactions: +- request: + body: '{"query": "How do you make sushi rice?"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '40' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: "{\"query\":\"How do you make sushi rice?\",\"prediction\":{\"topIntent\":\"SushiMaking\",\"projectKind\":\"workflow\",\"intents\":{\"SushiMaking\":{\"confidenceScore\":0.77994794,\"targetKind\":\"question_answering\",\"result\":{\"answers\":[{\"questions\":[],\"answer\":\"#### + Sushi Rice 
(Shari) adapted from Sushi Made Easy by K. Wong\\n\\nIngredients: + 2 cups sushi rice or short-grain rice 2 \xBD cups cold water (or amount of + water suggested on rice package if different for 2 cups of rice)\\n\\nVinegar + Mixture 4 tbsp. rice vinegar 2 tbsp. sugar 1/2 tsp. salt\\n\\nDirections:\\n\\nWash + the rice until water runs clear (do not skip this step, it is essential in + achieving the correct consistency to work with your rice).\\n\\n2. Place the + rice in a pot with a tight fitting lid and add the water.\",\"confidenceScore\":0.6334000000000001,\"id\":0,\"source\":\"sushi(1).pdf\",\"metadata\":{},\"answerSpan\":{\"text\":\"2 + cups\",\"confidenceScore\":0.25120000000000003,\"offset\":77,\"length\":7}}]}},\"SushiOrder\":{\"confidenceScore\":0.7317708,\"targetKind\":\"conversation\"},\"None\":{\"confidenceScore\":0,\"targetKind\":\"non_linked\"}}}}" + headers: + apim-request-id: + - a0540c2d-549e-45e2-9dbe-a6d86e47d194 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + date: + - Thu, 28 Oct 2021 15:00:04 GMT + pragma: + - no-cache + request-id: + - a0540c2d-549e-45e2-9dbe-a6d86e47d194 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '887' + status: + code: 200 + message: OK +- request: + body: '{"query": "I will have sashimi"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '32' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: '{"query":"I will have 
sashimi","prediction":{"topIntent":"SushiMaking","projectKind":"workflow","intents":{"SushiMaking":{"confidenceScore":0.79296875,"targetKind":"question_answering","result":{"answers":[{"questions":[],"answer":"Put + the dark brown sugar, oil, vinegar and garlic in a zip lock plastic bag and + shake to mix well. Put the flank steak in the bag, close, and toss to coat + the steak evenly with the marinade. Let sit to rest at least 30 minutes.","confidenceScore":0.19149999999999998,"id":0,"source":"SushiQuikCompleteRecipeBook.pdf","metadata":{},"answerSpan":{"text":"flank + steak","confidenceScore":0.527,"offset":106,"length":12}}]}},"SushiOrder":{"confidenceScore":0.78515625,"targetKind":"conversation"},"None":{"confidenceScore":0,"targetKind":"non_linked"}}}}' + headers: + apim-request-id: + - b8937fc2-cd3c-4d4f-a06d-6485643e2974 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + date: + - Thu, 28 Oct 2021 15:00:05 GMT + pragma: + - no-cache + request-id: + - b8937fc2-cd3c-4d4f-a06d-6485643e2974 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '829' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_model.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_model.yaml new file mode 100644 index 000000000000..101aef5fd56e --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_model.yaml @@ -0,0 +1,57 @@ +interactions: +- request: + body: '{"query": "How do you make sushi rice?", "parameters": {"SushiMaking": + {"targetKind": "question_answering", "callingOptions": 
{"question": "How do + you make sushi rice?", "top": 1, "confidence_score_threshold": 0.1}}, "SushiOrder": + {"targetKind": "conversation", "callingOptions": {"verbose": true}}}}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '300' + Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: "{\"query\":\"How do you make sushi rice?\",\"prediction\":{\"topIntent\":\"SushiMaking\",\"projectKind\":\"workflow\",\"intents\":{\"SushiMaking\":{\"confidenceScore\":0.77994794,\"targetKind\":\"question_answering\",\"result\":{\"answers\":[{\"questions\":[],\"answer\":\"#### + Sushi Rice (Shari) adapted from Sushi Made Easy by K. Wong\\n\\nIngredients: + 2 cups sushi rice or short-grain rice 2 \xBD cups cold water (or amount of + water suggested on rice package if different for 2 cups of rice)\\n\\nVinegar + Mixture 4 tbsp. rice vinegar 2 tbsp. sugar 1/2 tsp. salt\\n\\nDirections:\\n\\nWash + the rice until water runs clear (do not skip this step, it is essential in + achieving the correct consistency to work with your rice).\\n\\n2. 
Place the + rice in a pot with a tight fitting lid and add the water.\",\"confidenceScore\":0.6334000000000001,\"id\":0,\"source\":\"sushi(1).pdf\",\"metadata\":{},\"answerSpan\":{\"text\":\"2 + cups\",\"confidenceScore\":0.25120000000000003,\"offset\":77,\"length\":7}}]}},\"SushiOrder\":{\"confidenceScore\":0.7317708,\"targetKind\":\"conversation\"},\"None\":{\"confidenceScore\":0,\"targetKind\":\"non_linked\"}}}}" + headers: + apim-request-id: + - a7ac4b5e-88d8-4654-91df-71366845c8c5 + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + date: + - Thu, 28 Oct 2021 15:00:07 GMT + pragma: + - no-cache + request-id: + - a7ac4b5e-88d8-4654-91df-71366845c8c5 + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '641' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_parameters.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_parameters.yaml new file mode 100644 index 000000000000..32f508a1aead --- /dev/null +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_orchestration_app.test_orchestration_app_with_parameters.yaml @@ -0,0 +1,57 @@ +interactions: +- request: + body: '{"query": "(''How do you make sushi rice?'',)", "parameters": {"SushiMaking": + {"targetKind": "question_answering", "callingOptions": {"question": "(''How + do you make sushi rice?'',)", "top": 1, "confidenceScoreThreshold": 0.1}}, "SushiOrder": + {"targetKind": "conversation", "callingOptions": {"verbose": true}}}}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '308' + 
Content-Type: + - application/json + User-Agent: + - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) + method: POST + uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-11-01-preview + response: + body: + string: "{\"query\":\"('How do you make sushi rice?',)\",\"prediction\":{\"topIntent\":\"SushiMaking\",\"projectKind\":\"workflow\",\"intents\":{\"SushiMaking\":{\"confidenceScore\":0.7864583,\"targetKind\":\"question_answering\",\"result\":{\"answers\":[{\"questions\":[],\"answer\":\"#### + Sushi Rice (Shari) adapted from Sushi Made Easy by K. Wong\\n\\nIngredients: + 2 cups sushi rice or short-grain rice 2 \xBD cups cold water (or amount of + water suggested on rice package if different for 2 cups of rice)\\n\\nVinegar + Mixture 4 tbsp. rice vinegar 2 tbsp. sugar 1/2 tsp. salt\\n\\nDirections:\\n\\nWash + the rice until water runs clear (do not skip this step, it is essential in + achieving the correct consistency to work with your rice).\\n\\n2. 
Place the + rice in a pot with a tight fitting lid and add the water.\",\"confidenceScore\":0.5602,\"id\":0,\"source\":\"sushi(1).pdf\",\"metadata\":{},\"answerSpan\":{\"text\":\"2 + cups sushi rice or short-grain rice 2 \xBD cups cold water\",\"confidenceScore\":0.2865,\"offset\":77,\"length\":58}}]}},\"SushiOrder\":{\"confidenceScore\":0.7239583,\"targetKind\":\"conversation\"},\"None\":{\"confidenceScore\":0,\"targetKind\":\"non_linked\"}}}}" + headers: + apim-request-id: + - 4ca6e417-47e8-45a6-b5d9-8053bdae1ddb + cache-control: + - no-store, proxy-revalidate, no-cache, max-age=0, private + content-type: + - application/json; charset=utf-8 + date: + - Thu, 28 Oct 2021 15:00:09 GMT + pragma: + - no-cache + request-id: + - 4ca6e417-47e8-45a6-b5d9-8053bdae1ddb + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + transfer-encoding: + - chunked + x-content-type-options: + - nosniff + x-envoy-upstream-service-time: + - '978' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml deleted file mode 100644 index 11e5169ed888..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app.yaml +++ /dev/null @@ -1,215 +0,0 @@ -interactions: -- request: - body: !!python/unicode '{"query": "How do you make sushi rice?"}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '40' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) - method: POST - uri: 
https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production - response: - body: - string: !!python/unicode "{\n \"query\": \"How do you make sushi rice?\",\n - \ \"prediction\": {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": - \"question_answering\",\n \"result\": {\n \"answers\": [\n - \ {\n \"questions\": [\n \"do you eat - cake?\",\n \"do you ever eat beef?\",\n \"do - you ever eat pizza?\",\n \"have you ever eaten tofu?\",\n \"you - don't eat?\",\n \"have you ever wanted to eat?\",\n \"Don't - you ever get hungry?\",\n \"how many calories do you need?\",\n - \ \"What kind of food do you like?\",\n \"What - do you eat for dinner?\",\n \"What do you eat?\",\n \"What - kind of food do you eat?\",\n \"What is your favorite snack?\",\n - \ \"What is your favorite meal?\",\n \"what foods - do you eat?\",\n \"What do you want to eat?\",\n \"What - did you eat for lunch?\",\n \"What do you like to dine on?\",\n - \ \"What kind of foods do you like?\",\n \"What - do you eat for lunch?\",\n \"What do you eat for breakfast?\",\n - \ \"What did you have for lunch?\",\n \"What - did you have for dinner?\",\n \"do you eat vegetables\",\n - \ \"What do you like to eat?\",\n \"will you - ever eat?\",\n \"Are you ever hungry?\",\n \"Do - you eat pasta?\",\n \"do you eat pizza?\",\n \"you - don't need to eat?\",\n \"you don't need food?\",\n \"What - kind of food do you like to eat?\",\n \"will you ever need - to eat?\",\n \"when do you eat?\",\n \"What's - your favorite cuisine?\",\n \"what kinds of foods do you like?\",\n - \ \"What kinds of food do you like to eat?\",\n \"What - kinds of food do you eat?\",\n \"What did you eat for dinner?\",\n - \ \"you don't eat food?\",\n \"Do you eat?\",\n - \ \"do you need calories to survive?\",\n \"Do - you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n - \ \"Do you get hungry?\",\n \"do you ever need - 
to eat?\",\n \"What did you have for breakfast?\",\n \"do - you ever eat food?\",\n \"do you need food?\",\n \"do - you eat food?\",\n \"do you consume food?\",\n \"Are - you hungry?\",\n \"Are you going to have lunch?\",\n \"Are - you going to have dinner?\",\n \"Are you going to have breakfast?\",\n - \ \"Do you ever get hungry?\",\n \"have you ever - wanted a snack?\",\n \"What did you eat for breakfast?\",\n - \ \"so you don't eat?\",\n \"how many calories - do you need to eat?\",\n \"how many calories do you need each - day?\",\n \"how many calories do you eat?\",\n \"do - you need calories?\",\n \"have you ever wanted food?\",\n \"do - you need food to survive?\",\n \"have you ever wanted a meal?\",\n - \ \"have you ever been hungry?\",\n \"Don't you - get hungry?\",\n \"do you not need to eat?\",\n \"do - you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so - you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have - you ever eaten toast?\",\n \"do you eat toast?\",\n \"do - you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n - \ \"do you eat bread?\",\n \"so you've really - never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do - you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have - you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do - you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true - or false: you don't get hungry\",\n \"do you eat tofu?\",\n - \ \"do you ever eat pork?\",\n \"have you ever - eaten pork?\",\n \"do you eat pork?\",\n \"so - you never eat?\",\n \"do you eat beef?\",\n \"so - you've really never eaten?\",\n \"true or false: you don't - eat\",\n \"tell me whether or not you eat\",\n \"is - it true that you don't eat?\",\n \"so you've never really eaten - food?\",\n \"so you've never really eaten anything?\",\n \"do - you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do - you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n - \ \"have you ever eaten vegetables?\",\n \"have - you 
ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do - you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do - you ever eat vegetables?\",\n \"do you eat ice cream?\",\n - \ \"have you ever eaten pasta?\",\n \"do you - ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do - you eat pie?\",\n \"do you ever eat cookies?\",\n \"do - you eat steak?\",\n \"do you ever eat fries?\",\n \"have - you ever eaten fries?\",\n \"do you eat fries?\",\n \"do - you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n - \ \"do you eat burgers?\",\n \"have you ever - eaten pie?\",\n \"have you ever eaten steak?\",\n \"have - you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have - you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do - you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n - \ \"do you ever eat tofu?\",\n \"do you ever - eat steak?\"\n ],\n \"answer\": \"I only do food - for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n - \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: - - c674556f-5ac0-43cd-a1ca-4243b8b3c86a - cache-control: - - no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: - - application/json; charset=utf-8 - csp-billing-usage: - - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: - - Thu, 30 Sep 2021 17:41:11 GMT - pragma: - - no-cache - request-id: - - c674556f-5ac0-43cd-a1ca-4243b8b3c86a - 
strict-transport-security: - - max-age=31536000; includeSubDomains; preload - transfer-encoding: - - chunked - x-content-type-options: - - nosniff - x-envoy-upstream-service-time: - - '812' - status: - code: 200 - message: OK -- request: - body: !!python/unicode '{"query": "I will have sashimi"}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '32' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production - response: - body: - string: !!python/unicode "{\n \"query\": \"I will have sashimi\",\n \"prediction\": - {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n - \ \"result\": {\n \"answers\": [\n {\n \"questions\": - [\n \"I could really use a hug\",\n \"Can I - get a little hug?\",\n \"A hug would be nice\",\n \"Can - we hug it out?\",\n \"Let's hug\",\n \"Can I - please get a hug?\",\n \"I want a hug\",\n \"I - could use a hug\",\n \"Can you hug me?\",\n \"Will - you give me a hug?\",\n \"Can I have a big hug?\",\n \"Can - I have a little hug?\",\n \"Can you give me a big hug?\",\n - \ \"Can you give me a hug?\",\n \"Can you give - me a little hug?\",\n \"I need a big hug\",\n \"I - need a hug\",\n \"Will you give me a big hug?\",\n \"Will - you hug me?\",\n \"Would you give me a big hug?\",\n \"Would - you give me a hug?\",\n \"Can I get a big hug?\",\n \"Can - I please have a hug?\",\n \"Can I get a hug?\",\n \"I - really need a hug\",\n \"Can we hug?\",\n \"Would - you give me a little hug?\",\n \"Let's hug it out\",\n \"I'd - love a hug\",\n \"I'd like a hug\",\n \"Do you - want to give me a hug?\"\n ],\n \"answer\": \"Giving - you a virtual hug right now.\",\n \"score\": 2.29,\n 
\"id\": - 67,\n \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.5102507\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.4897493\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: - - 998ec5bb-3bb7-4d2f-ae48-ba24283f6264 - cache-control: - - no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: - - application/json; charset=utf-8 - csp-billing-usage: - - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: - - Thu, 30 Sep 2021 17:41:12 GMT - pragma: - - no-cache - request-id: - - 998ec5bb-3bb7-4d2f-ae48-ba24283f6264 - strict-transport-security: - - max-age=31536000; includeSubDomains; preload - transfer-encoding: - - chunked - x-content-type-options: - - nosniff - x-envoy-upstream-service-time: - - '737' - status: - code: 200 - message: OK -version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml deleted file mode 100644 index b36ae897cc57..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_model.yaml +++ /dev/null @@ -1,142 +0,0 @@ -interactions: -- request: - body: !!python/unicode '{"query": "How do you make sushi rice?", "parameters": - {"SushiMaking": {"callingOptions": {"confidence_score_threshold": 0.1, "top": - 1, "question": "How do you make sushi rice?"}, "targetKind": 
"question_answering"}, - "SushiOrder": {"callingOptions": {"verbose": true}, "targetKind": "luis_deepstack"}}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '302' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production - response: - body: - string: !!python/unicode "{\n \"query\": \"How do you make sushi rice?\",\n - \ \"prediction\": {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": - \"question_answering\",\n \"result\": {\n \"answers\": [\n - \ {\n \"questions\": [\n \"do you eat - cake?\",\n \"do you ever eat beef?\",\n \"do - you ever eat pizza?\",\n \"have you ever eaten tofu?\",\n \"you - don't eat?\",\n \"have you ever wanted to eat?\",\n \"Don't - you ever get hungry?\",\n \"how many calories do you need?\",\n - \ \"What kind of food do you like?\",\n \"What - do you eat for dinner?\",\n \"What do you eat?\",\n \"What - kind of food do you eat?\",\n \"What is your favorite snack?\",\n - \ \"What is your favorite meal?\",\n \"what foods - do you eat?\",\n \"What do you want to eat?\",\n \"What - did you eat for lunch?\",\n \"What do you like to dine on?\",\n - \ \"What kind of foods do you like?\",\n \"What - do you eat for lunch?\",\n \"What do you eat for breakfast?\",\n - \ \"What did you have for lunch?\",\n \"What - did you have for dinner?\",\n \"do you eat vegetables\",\n - \ \"What do you like to eat?\",\n \"will you - ever eat?\",\n \"Are you ever hungry?\",\n \"Do - you eat pasta?\",\n \"do you eat pizza?\",\n \"you - don't need to eat?\",\n \"you don't need food?\",\n \"What - kind of food do you like to eat?\",\n \"will you ever need - to eat?\",\n \"when do you eat?\",\n \"What's - your 
favorite cuisine?\",\n \"what kinds of foods do you like?\",\n - \ \"What kinds of food do you like to eat?\",\n \"What - kinds of food do you eat?\",\n \"What did you eat for dinner?\",\n - \ \"you don't eat food?\",\n \"Do you eat?\",\n - \ \"do you need calories to survive?\",\n \"Do - you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n - \ \"Do you get hungry?\",\n \"do you ever need - to eat?\",\n \"What did you have for breakfast?\",\n \"do - you ever eat food?\",\n \"do you need food?\",\n \"do - you eat food?\",\n \"do you consume food?\",\n \"Are - you hungry?\",\n \"Are you going to have lunch?\",\n \"Are - you going to have dinner?\",\n \"Are you going to have breakfast?\",\n - \ \"Do you ever get hungry?\",\n \"have you ever - wanted a snack?\",\n \"What did you eat for breakfast?\",\n - \ \"so you don't eat?\",\n \"how many calories - do you need to eat?\",\n \"how many calories do you need each - day?\",\n \"how many calories do you eat?\",\n \"do - you need calories?\",\n \"have you ever wanted food?\",\n \"do - you need food to survive?\",\n \"have you ever wanted a meal?\",\n - \ \"have you ever been hungry?\",\n \"Don't you - get hungry?\",\n \"do you not need to eat?\",\n \"do - you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so - you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have - you ever eaten toast?\",\n \"do you eat toast?\",\n \"do - you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n - \ \"do you eat bread?\",\n \"so you've really - never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do - you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have - you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do - you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true - or false: you don't get hungry\",\n \"do you eat tofu?\",\n - \ \"do you ever eat pork?\",\n \"have you ever - eaten pork?\",\n \"do you eat pork?\",\n \"so - you never eat?\",\n \"do you eat beef?\",\n \"so - you've really 
never eaten?\",\n \"true or false: you don't - eat\",\n \"tell me whether or not you eat\",\n \"is - it true that you don't eat?\",\n \"so you've never really eaten - food?\",\n \"so you've never really eaten anything?\",\n \"do - you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do - you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n - \ \"have you ever eaten vegetables?\",\n \"have - you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do - you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do - you ever eat vegetables?\",\n \"do you eat ice cream?\",\n - \ \"have you ever eaten pasta?\",\n \"do you - ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do - you eat pie?\",\n \"do you ever eat cookies?\",\n \"do - you eat steak?\",\n \"do you ever eat fries?\",\n \"have - you ever eaten fries?\",\n \"do you eat fries?\",\n \"do - you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n - \ \"do you eat burgers?\",\n \"have you ever - eaten pie?\",\n \"have you ever eaten steak?\",\n \"have - you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have - you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do - you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n - \ \"do you ever eat tofu?\",\n \"do you ever - eat steak?\"\n ],\n \"answer\": \"I only do food - for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n - \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - 
apim-request-id: - - f270a6a8-c502-447b-ba35-ebf518b0f004 - cache-control: - - no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: - - application/json; charset=utf-8 - csp-billing-usage: - - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: - - Thu, 30 Sep 2021 17:41:13 GMT - pragma: - - no-cache - request-id: - - f270a6a8-c502-447b-ba35-ebf518b0f004 - strict-transport-security: - - max-age=31536000; includeSubDomains; preload - transfer-encoding: - - chunked - x-content-type-options: - - nosniff - x-envoy-upstream-service-time: - - '471' - status: - code: 200 - message: OK -version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml deleted file mode 100644 index 132ea8fff9f6..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app.test_workflow_app_with_parameters.yaml +++ /dev/null @@ -1,142 +0,0 @@ -interactions: -- request: - body: !!python/unicode '{"query": "(''How do you make sushi rice?'',)", "parameters": - {"SushiMaking": {"callingOptions": {"top": 1, "question": "(''How do you make - sushi rice?'',)", "confidenceScoreThreshold": 0.1}, "targetKind": "question_answering"}, - "SushiOrder": {"callingOptions": {"verbose": true}, "targetKind": "luis_deepstack"}}}' - headers: - Accept: - - application/json - Accept-Encoding: - - gzip, deflate - Connection: - - keep-alive - Content-Length: - - '310' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/2.7.18 (Windows-10-10.0.19041) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?api-version=2021-07-15-preview&projectName=test-workflow&deploymentName=production - response: - body: - string: 
!!python/unicode "{\n \"query\": \"('How do you make sushi rice?',)\",\n - \ \"prediction\": {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": - \"question_answering\",\n \"result\": {\n \"answers\": [\n - \ {\n \"questions\": [\n \"do you eat - cake?\",\n \"do you ever eat beef?\",\n \"do - you ever eat pizza?\",\n \"have you ever eaten tofu?\",\n \"you - don't eat?\",\n \"have you ever wanted to eat?\",\n \"Don't - you ever get hungry?\",\n \"how many calories do you need?\",\n - \ \"What kind of food do you like?\",\n \"What - do you eat for dinner?\",\n \"What do you eat?\",\n \"What - kind of food do you eat?\",\n \"What is your favorite snack?\",\n - \ \"What is your favorite meal?\",\n \"what foods - do you eat?\",\n \"What do you want to eat?\",\n \"What - did you eat for lunch?\",\n \"What do you like to dine on?\",\n - \ \"What kind of foods do you like?\",\n \"What - do you eat for lunch?\",\n \"What do you eat for breakfast?\",\n - \ \"What did you have for lunch?\",\n \"What - did you have for dinner?\",\n \"do you eat vegetables\",\n - \ \"What do you like to eat?\",\n \"will you - ever eat?\",\n \"Are you ever hungry?\",\n \"Do - you eat pasta?\",\n \"do you eat pizza?\",\n \"you - don't need to eat?\",\n \"you don't need food?\",\n \"What - kind of food do you like to eat?\",\n \"will you ever need - to eat?\",\n \"when do you eat?\",\n \"What's - your favorite cuisine?\",\n \"what kinds of foods do you like?\",\n - \ \"What kinds of food do you like to eat?\",\n \"What - kinds of food do you eat?\",\n \"What did you eat for dinner?\",\n - \ \"you don't eat food?\",\n \"Do you eat?\",\n - \ \"do you need calories to survive?\",\n \"Do - you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n - \ \"Do you get hungry?\",\n \"do you ever need - to eat?\",\n \"What did you have for breakfast?\",\n \"do - you ever eat food?\",\n \"do you need food?\",\n \"do - you eat food?\",\n \"do you consume food?\",\n \"Are - you hungry?\",\n 
\"Are you going to have lunch?\",\n \"Are - you going to have dinner?\",\n \"Are you going to have breakfast?\",\n - \ \"Do you ever get hungry?\",\n \"have you ever - wanted a snack?\",\n \"What did you eat for breakfast?\",\n - \ \"so you don't eat?\",\n \"how many calories - do you need to eat?\",\n \"how many calories do you need each - day?\",\n \"how many calories do you eat?\",\n \"do - you need calories?\",\n \"have you ever wanted food?\",\n \"do - you need food to survive?\",\n \"have you ever wanted a meal?\",\n - \ \"have you ever been hungry?\",\n \"Don't you - get hungry?\",\n \"do you not need to eat?\",\n \"do - you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so - you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have - you ever eaten toast?\",\n \"do you eat toast?\",\n \"do - you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n - \ \"do you eat bread?\",\n \"so you've really - never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do - you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have - you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do - you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true - or false: you don't get hungry\",\n \"do you eat tofu?\",\n - \ \"do you ever eat pork?\",\n \"have you ever - eaten pork?\",\n \"do you eat pork?\",\n \"so - you never eat?\",\n \"do you eat beef?\",\n \"so - you've really never eaten?\",\n \"true or false: you don't - eat\",\n \"tell me whether or not you eat\",\n \"is - it true that you don't eat?\",\n \"so you've never really eaten - food?\",\n \"so you've never really eaten anything?\",\n \"do - you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do - you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n - \ \"have you ever eaten vegetables?\",\n \"have - you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do - you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do - you ever eat vegetables?\",\n \"do you eat ice cream?\",\n - \ 
\"have you ever eaten pasta?\",\n \"do you - ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do - you eat pie?\",\n \"do you ever eat cookies?\",\n \"do - you eat steak?\",\n \"do you ever eat fries?\",\n \"have - you ever eaten fries?\",\n \"do you eat fries?\",\n \"do - you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n - \ \"do you eat burgers?\",\n \"have you ever - eaten pie?\",\n \"have you ever eaten steak?\",\n \"have - you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have - you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do - you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n - \ \"do you ever eat tofu?\",\n \"do you ever - eat steak?\"\n ],\n \"answer\": \"I only do food - for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n - \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.58619076\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.4138092\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: - - a28b94cb-e298-4a2c-838e-af7b67c1060f - cache-control: - - no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: - - application/json; charset=utf-8 - csp-billing-usage: - - CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: - - Thu, 30 Sep 2021 17:41:15 GMT - pragma: - - no-cache - request-id: - - a28b94cb-e298-4a2c-838e-af7b67c1060f - strict-transport-security: - - max-age=31536000; includeSubDomains; preload - transfer-encoding: - - chunked - x-content-type-options: - - nosniff - x-envoy-upstream-service-time: - - '330' - 
status: - code: 200 - message: OK -version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml deleted file mode 100644 index a5a0766b79f0..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app.yaml +++ /dev/null @@ -1,186 +0,0 @@ -interactions: -- request: - body: '{"query": "How do you make sushi rice?"}' - headers: - Accept: - - application/json - Content-Length: - - '40' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview - response: - body: - string: "{\n \"query\": \"How do you make sushi rice?\",\n \"prediction\": - {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n - \ \"result\": {\n \"answers\": [\n {\n \"questions\": - [\n \"do you eat cake?\",\n \"do you ever eat - beef?\",\n \"do you ever eat pizza?\",\n \"have - you ever eaten tofu?\",\n \"you don't eat?\",\n \"have - you ever wanted to eat?\",\n \"Don't you ever get hungry?\",\n - \ \"how many calories do you need?\",\n \"What - kind of food do you like?\",\n \"What do you eat for dinner?\",\n - \ \"What do you eat?\",\n \"What kind of food - do you eat?\",\n \"What is your favorite snack?\",\n \"What - is your favorite meal?\",\n \"what foods do you eat?\",\n \"What - do you want to eat?\",\n \"What did you eat for lunch?\",\n - \ \"What do you like to dine on?\",\n \"What - kind of foods do you like?\",\n \"What do you eat for lunch?\",\n - \ \"What do you eat for breakfast?\",\n \"What - did you have for lunch?\",\n 
\"What did you have for dinner?\",\n - \ \"do you eat vegetables\",\n \"What do you - like to eat?\",\n \"will you ever eat?\",\n \"Are - you ever hungry?\",\n \"Do you eat pasta?\",\n \"do - you eat pizza?\",\n \"you don't need to eat?\",\n \"you - don't need food?\",\n \"What kind of food do you like to eat?\",\n - \ \"will you ever need to eat?\",\n \"when do - you eat?\",\n \"What's your favorite cuisine?\",\n \"what - kinds of foods do you like?\",\n \"What kinds of food do you - like to eat?\",\n \"What kinds of food do you eat?\",\n \"What - did you eat for dinner?\",\n \"you don't eat food?\",\n \"Do - you eat?\",\n \"do you need calories to survive?\",\n \"Do - you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n - \ \"Do you get hungry?\",\n \"do you ever need - to eat?\",\n \"What did you have for breakfast?\",\n \"do - you ever eat food?\",\n \"do you need food?\",\n \"do - you eat food?\",\n \"do you consume food?\",\n \"Are - you hungry?\",\n \"Are you going to have lunch?\",\n \"Are - you going to have dinner?\",\n \"Are you going to have breakfast?\",\n - \ \"Do you ever get hungry?\",\n \"have you ever - wanted a snack?\",\n \"What did you eat for breakfast?\",\n - \ \"so you don't eat?\",\n \"how many calories - do you need to eat?\",\n \"how many calories do you need each - day?\",\n \"how many calories do you eat?\",\n \"do - you need calories?\",\n \"have you ever wanted food?\",\n \"do - you need food to survive?\",\n \"have you ever wanted a meal?\",\n - \ \"have you ever been hungry?\",\n \"Don't you - get hungry?\",\n \"do you not need to eat?\",\n \"do - you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so - you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have - you ever eaten toast?\",\n \"do you eat toast?\",\n \"do - you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n - \ \"do you eat bread?\",\n \"so you've really - never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do - you eat 
bacon?\",\n \"do you ever eat eggs?\",\n \"have - you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do - you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true - or false: you don't get hungry\",\n \"do you eat tofu?\",\n - \ \"do you ever eat pork?\",\n \"have you ever - eaten pork?\",\n \"do you eat pork?\",\n \"so - you never eat?\",\n \"do you eat beef?\",\n \"so - you've really never eaten?\",\n \"true or false: you don't - eat\",\n \"tell me whether or not you eat\",\n \"is - it true that you don't eat?\",\n \"so you've never really eaten - food?\",\n \"so you've never really eaten anything?\",\n \"do - you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do - you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n - \ \"have you ever eaten vegetables?\",\n \"have - you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do - you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do - you ever eat vegetables?\",\n \"do you eat ice cream?\",\n - \ \"have you ever eaten pasta?\",\n \"do you - ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do - you eat pie?\",\n \"do you ever eat cookies?\",\n \"do - you eat steak?\",\n \"do you ever eat fries?\",\n \"have - you ever eaten fries?\",\n \"do you eat fries?\",\n \"do - you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n - \ \"do you eat burgers?\",\n \"have you ever - eaten pie?\",\n \"have you ever eaten steak?\",\n \"have - you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have - you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do - you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n - \ \"do you ever eat tofu?\",\n \"do you ever - eat steak?\"\n ],\n \"answer\": \"I only do food - for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n - \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - 
false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: 1685ca0c-6a9e-407b-883c-3edabb16a15d - cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: application/json; charset=utf-8 - csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: Thu, 30 Sep 2021 16:57:03 GMT - pragma: no-cache - request-id: 1685ca0c-6a9e-407b-883c-3edabb16a15d - strict-transport-security: max-age=31536000; includeSubDomains; preload - transfer-encoding: chunked - x-content-type-options: nosniff - x-envoy-upstream-service-time: '246' - status: - code: 200 - message: OK - url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview -- request: - body: '{"query": "I will have sashimi"}' - headers: - Accept: - - application/json - Content-Length: - - '32' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview - response: - body: - string: "{\n \"query\": \"I will have sashimi\",\n \"prediction\": {\n \"intents\": - {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n - \ \"result\": {\n \"answers\": [\n {\n \"questions\": - [\n \"I could really use a hug\",\n \"Can I - get a little hug?\",\n \"A hug would be nice\",\n \"Can - we hug it out?\",\n \"Let's hug\",\n \"Can I - please get a 
hug?\",\n \"I want a hug\",\n \"I - could use a hug\",\n \"Can you hug me?\",\n \"Will - you give me a hug?\",\n \"Can I have a big hug?\",\n \"Can - I have a little hug?\",\n \"Can you give me a big hug?\",\n - \ \"Can you give me a hug?\",\n \"Can you give - me a little hug?\",\n \"I need a big hug\",\n \"I - need a hug\",\n \"Will you give me a big hug?\",\n \"Will - you hug me?\",\n \"Would you give me a big hug?\",\n \"Would - you give me a hug?\",\n \"Can I get a big hug?\",\n \"Can - I please have a hug?\",\n \"Can I get a hug?\",\n \"I - really need a hug\",\n \"Can we hug?\",\n \"Would - you give me a little hug?\",\n \"Let's hug it out\",\n \"I'd - love a hug\",\n \"I'd like a hug\",\n \"Do you - want to give me a hug?\"\n ],\n \"answer\": \"Giving - you a virtual hug right now.\",\n \"score\": 2.29,\n \"id\": - 67,\n \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.5102507\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.4897493\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: d71eeb28-556b-4b94-a0fe-b650f982bf05 - cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: application/json; charset=utf-8 - csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: Thu, 30 Sep 2021 16:57:03 GMT - pragma: no-cache - request-id: d71eeb28-556b-4b94-a0fe-b650f982bf05 - strict-transport-security: max-age=31536000; includeSubDomains; preload - transfer-encoding: chunked - x-content-type-options: nosniff - x-envoy-upstream-service-time: '204' - status: - code: 
200 - message: OK - url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview -version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml deleted file mode 100644 index 62caf86d9677..000000000000 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_model.yaml +++ /dev/null @@ -1,127 +0,0 @@ -interactions: -- request: - body: '{"query": "How do you make sushi rice?", "parameters": {"SushiMaking": - {"targetKind": "question_answering", "callingOptions": {"question": "How do - you make sushi rice?", "top": 1, "confidence_score_threshold": 0.1}}, "SushiOrder": - {"targetKind": "luis_deepstack", "callingOptions": {"verbose": true}}}}' - headers: - Accept: - - application/json - Content-Length: - - '302' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview - response: - body: - string: "{\n \"query\": \"How do you make sushi rice?\",\n \"prediction\": - {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n - \ \"result\": {\n \"answers\": [\n {\n \"questions\": - [\n \"do you eat cake?\",\n \"do you ever eat - beef?\",\n \"do you ever eat pizza?\",\n \"have - you ever eaten tofu?\",\n \"you don't eat?\",\n \"have - you ever wanted to eat?\",\n \"Don't you ever get hungry?\",\n - \ \"how many calories do you need?\",\n \"What - kind of food do you like?\",\n \"What do you 
eat for dinner?\",\n - \ \"What do you eat?\",\n \"What kind of food - do you eat?\",\n \"What is your favorite snack?\",\n \"What - is your favorite meal?\",\n \"what foods do you eat?\",\n \"What - do you want to eat?\",\n \"What did you eat for lunch?\",\n - \ \"What do you like to dine on?\",\n \"What - kind of foods do you like?\",\n \"What do you eat for lunch?\",\n - \ \"What do you eat for breakfast?\",\n \"What - did you have for lunch?\",\n \"What did you have for dinner?\",\n - \ \"do you eat vegetables\",\n \"What do you - like to eat?\",\n \"will you ever eat?\",\n \"Are - you ever hungry?\",\n \"Do you eat pasta?\",\n \"do - you eat pizza?\",\n \"you don't need to eat?\",\n \"you - don't need food?\",\n \"What kind of food do you like to eat?\",\n - \ \"will you ever need to eat?\",\n \"when do - you eat?\",\n \"What's your favorite cuisine?\",\n \"what - kinds of foods do you like?\",\n \"What kinds of food do you - like to eat?\",\n \"What kinds of food do you eat?\",\n \"What - did you eat for dinner?\",\n \"you don't eat food?\",\n \"Do - you eat?\",\n \"do you need calories to survive?\",\n \"Do - you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n - \ \"Do you get hungry?\",\n \"do you ever need - to eat?\",\n \"What did you have for breakfast?\",\n \"do - you ever eat food?\",\n \"do you need food?\",\n \"do - you eat food?\",\n \"do you consume food?\",\n \"Are - you hungry?\",\n \"Are you going to have lunch?\",\n \"Are - you going to have dinner?\",\n \"Are you going to have breakfast?\",\n - \ \"Do you ever get hungry?\",\n \"have you ever - wanted a snack?\",\n \"What did you eat for breakfast?\",\n - \ \"so you don't eat?\",\n \"how many calories - do you need to eat?\",\n \"how many calories do you need each - day?\",\n \"how many calories do you eat?\",\n \"do - you need calories?\",\n \"have you ever wanted food?\",\n \"do - you need food to survive?\",\n \"have you ever wanted a meal?\",\n - \ \"have you ever been 
hungry?\",\n \"Don't you - get hungry?\",\n \"do you not need to eat?\",\n \"do - you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so - you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have - you ever eaten toast?\",\n \"do you eat toast?\",\n \"do - you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n - \ \"do you eat bread?\",\n \"so you've really - never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do - you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have - you ever eaten eggs?\",\n \"do you eat eggs?\",\n \"Do - you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true - or false: you don't get hungry\",\n \"do you eat tofu?\",\n - \ \"do you ever eat pork?\",\n \"have you ever - eaten pork?\",\n \"do you eat pork?\",\n \"so - you never eat?\",\n \"do you eat beef?\",\n \"so - you've really never eaten?\",\n \"true or false: you don't - eat\",\n \"tell me whether or not you eat\",\n \"is - it true that you don't eat?\",\n \"so you've never really eaten - food?\",\n \"so you've never really eaten anything?\",\n \"do - you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do - you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n - \ \"have you ever eaten vegetables?\",\n \"have - you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do - you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do - you ever eat vegetables?\",\n \"do you eat ice cream?\",\n - \ \"have you ever eaten pasta?\",\n \"do you - ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do - you eat pie?\",\n \"do you ever eat cookies?\",\n \"do - you eat steak?\",\n \"do you ever eat fries?\",\n \"have - you ever eaten fries?\",\n \"do you eat fries?\",\n \"do - you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n - \ \"do you eat burgers?\",\n \"have you ever - eaten pie?\",\n \"have you ever eaten steak?\",\n \"have - you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have - you ever eaten chicken?\",\n \"do you 
eat chicken?\",\n \"do - you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n - \ \"do you ever eat tofu?\",\n \"do you ever - eat steak?\"\n ],\n \"answer\": \"I only do food - for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n - \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n \"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.564024\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.435976\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: dedc30b9-bec0-48c0-8f54-0e40b3964ebe - cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: application/json; charset=utf-8 - csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: Thu, 30 Sep 2021 16:57:05 GMT - pragma: no-cache - request-id: dedc30b9-bec0-48c0-8f54-0e40b3964ebe - strict-transport-security: max-age=31536000; includeSubDomains; preload - transfer-encoding: chunked - x-content-type-options: nosniff - x-envoy-upstream-service-time: '364' - status: - code: 200 - message: OK - url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview -version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml deleted file mode 100644 index 787d7d3ace40..000000000000 --- 
a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/recordings/test_workflow_app_async.test_workflow_app_with_parameters.yaml +++ /dev/null @@ -1,127 +0,0 @@ -interactions: -- request: - body: '{"query": "(''How do you make sushi rice?'',)", "parameters": {"SushiMaking": - {"targetKind": "question_answering", "callingOptions": {"question": "(''How - do you make sushi rice?'',)", "top": 1, "confidenceScoreThreshold": 0.1}}, "SushiOrder": - {"targetKind": "luis_deepstack", "callingOptions": {"verbose": true}}}}' - headers: - Accept: - - application/json - Content-Length: - - '310' - Content-Type: - - application/json - User-Agent: - - azsdk-python-ai-language-conversations/1.0.0b1 Python/3.9.7 (Windows-10-10.0.19043-SP0) - method: POST - uri: https://test-resource.api.cognitive.microsoft.com/language/:analyze-conversations?projectName=test-workflow&deploymentName=production&api-version=2021-07-15-preview - response: - body: - string: "{\n \"query\": \"('How do you make sushi rice?',)\",\n \"prediction\": - {\n \"intents\": {\n \"SushiMaking\": {\n \"targetType\": \"question_answering\",\n - \ \"result\": {\n \"answers\": [\n {\n \"questions\": - [\n \"do you eat cake?\",\n \"do you ever eat - beef?\",\n \"do you ever eat pizza?\",\n \"have - you ever eaten tofu?\",\n \"you don't eat?\",\n \"have - you ever wanted to eat?\",\n \"Don't you ever get hungry?\",\n - \ \"how many calories do you need?\",\n \"What - kind of food do you like?\",\n \"What do you eat for dinner?\",\n - \ \"What do you eat?\",\n \"What kind of food - do you eat?\",\n \"What is your favorite snack?\",\n \"What - is your favorite meal?\",\n \"what foods do you eat?\",\n \"What - do you want to eat?\",\n \"What did you eat for lunch?\",\n - \ \"What do you like to dine on?\",\n \"What - kind of foods do you like?\",\n \"What do you eat for lunch?\",\n - \ \"What do you eat for breakfast?\",\n \"What - did you have for lunch?\",\n \"What did you have for dinner?\",\n - \ \"do you eat 
vegetables\",\n \"What do you - like to eat?\",\n \"will you ever eat?\",\n \"Are - you ever hungry?\",\n \"Do you eat pasta?\",\n \"do - you eat pizza?\",\n \"you don't need to eat?\",\n \"you - don't need food?\",\n \"What kind of food do you like to eat?\",\n - \ \"will you ever need to eat?\",\n \"when do - you eat?\",\n \"What's your favorite cuisine?\",\n \"what - kinds of foods do you like?\",\n \"What kinds of food do you - like to eat?\",\n \"What kinds of food do you eat?\",\n \"What - did you eat for dinner?\",\n \"you don't eat food?\",\n \"Do - you eat?\",\n \"do you need calories to survive?\",\n \"Do - you have a favorite snack?\",\n \"Do you have a favorite meal?\",\n - \ \"Do you get hungry?\",\n \"do you ever need - to eat?\",\n \"What did you have for breakfast?\",\n \"do - you ever eat food?\",\n \"do you need food?\",\n \"do - you eat food?\",\n \"do you consume food?\",\n \"Are - you hungry?\",\n \"Are you going to have lunch?\",\n \"Are - you going to have dinner?\",\n \"Are you going to have breakfast?\",\n - \ \"Do you ever get hungry?\",\n \"have you ever - wanted a snack?\",\n \"What did you eat for breakfast?\",\n - \ \"so you don't eat?\",\n \"how many calories - do you need to eat?\",\n \"how many calories do you need each - day?\",\n \"how many calories do you eat?\",\n \"do - you need calories?\",\n \"have you ever wanted food?\",\n \"do - you need food to survive?\",\n \"have you ever wanted a meal?\",\n - \ \"have you ever been hungry?\",\n \"Don't you - get hungry?\",\n \"do you not need to eat?\",\n \"do - you eat cookies?\",\n \"do you ever eat bacon?\",\n \"so - you don't need to eat?\",\n \"do you ever eat toast?\",\n \"have - you ever eaten toast?\",\n \"do you eat toast?\",\n \"do - you ever eat bread?\",\n \"have you ever eaten pancakes?\",\n - \ \"do you eat bread?\",\n \"so you've really - never been hungry?\",\n \"have you ever eaten bacon?\",\n \"do - you eat bacon?\",\n \"do you ever eat eggs?\",\n \"have - you ever 
eaten eggs?\",\n \"do you eat eggs?\",\n \"Do - you eat fruit?\",\n \"have you ever eaten bread?\",\n \"true - or false: you don't get hungry\",\n \"do you eat tofu?\",\n - \ \"do you ever eat pork?\",\n \"have you ever - eaten pork?\",\n \"do you eat pork?\",\n \"so - you never eat?\",\n \"do you eat beef?\",\n \"so - you've really never eaten?\",\n \"true or false: you don't - eat\",\n \"tell me whether or not you eat\",\n \"is - it true that you don't eat?\",\n \"so you've never really eaten - food?\",\n \"so you've never really eaten anything?\",\n \"do - you eat pancakes?\",\n \"have you ever eaten beef?\",\n \"do - you ever eat fruit?\",\n \"have you ever eaten cookies?\",\n - \ \"have you ever eaten vegetables?\",\n \"have - you ever eaten fruit?\",\n \"do you ever eat cake?\",\n \"do - you ever eat pie?\",\n \"do you ever eat pancakes?\",\n \"do - you ever eat vegetables?\",\n \"do you eat ice cream?\",\n - \ \"have you ever eaten pasta?\",\n \"do you - ever eat pasta?\",\n \"have you ever eaten pizza?\",\n \"do - you eat pie?\",\n \"do you ever eat cookies?\",\n \"do - you eat steak?\",\n \"do you ever eat fries?\",\n \"have - you ever eaten fries?\",\n \"do you eat fries?\",\n \"do - you ever eat burgers?\",\n \"have you ever eaten burgers?\",\n - \ \"do you eat burgers?\",\n \"have you ever - eaten pie?\",\n \"have you ever eaten steak?\",\n \"have - you ever eaten cake?\",\n \"do you ever eat chicken?\",\n \"have - you ever eaten chicken?\",\n \"do you eat chicken?\",\n \"do - you ever eat ice cream?\",\n \"have you ever eaten ice cream?\",\n - \ \"do you ever eat tofu?\",\n \"do you ever - eat steak?\"\n ],\n \"answer\": \"I only do food - for thought.\",\n \"score\": 10.71,\n \"id\": 12,\n - \ \"source\": \"qna_chitchat_Friendly.tsv\",\n \"isDocumentText\": - false,\n \"metadata\": [\n {\n \"name\": - \"editorial\",\n \"value\": \"chitchat\"\n }\n - \ ],\n \"context\": {\n \"isContextOnly\": - false,\n \"prompts\": []\n }\n }\n - \ ],\n 
\"activeLearningEnabled\": true\n },\n \"confidenceScore\": - 0.58619076\n },\n \"SushiOrder\": {\n \"targetType\": \"luis_deepstack\",\n - \ \"confidenceScore\": 0.4138092\n },\n \"None\": {\n \"targetType\": - \"non_linked\",\n \"confidenceScore\": 0\n }\n },\n \"topIntent\": - \"SushiMaking\",\n \"projectType\": \"workflow\"\n }\n}" - headers: - apim-request-id: d8dde644-cd13-4f84-9466-797cbfda2428 - cache-control: no-store, proxy-revalidate, no-cache, max-age=0, private - content-type: application/json; charset=utf-8 - csp-billing-usage: CognitiveServices.TextAnalytics.OrchestrationLUISTransaction=0 - date: Thu, 30 Sep 2021 16:57:06 GMT - pragma: no-cache - request-id: d8dde644-cd13-4f84-9466-797cbfda2428 - strict-transport-security: max-age=31536000; includeSubDomains; preload - transfer-encoding: chunked - x-content-type-options: nosniff - x-envoy-upstream-service-time: '234' - status: - code: 200 - message: OK - url: https://antischsdktest.cognitiveservices.azure.com/language/:analyze-conversations?projectName=antischTwo&deploymentName=production&api-version=2021-07-15-preview -version: 1 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py index 8dd770ff9b4c..c5ed8c4a0d3e 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_conversation_app.py @@ -16,9 +16,9 @@ from azure.ai.language.conversations import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, + ConversationAnalysisOptions, AnalyzeConversationResult, - DeepstackPrediction + ConversationPrediction ) @@ -29,7 +29,7 @@ def test_conversation_app(self, conv_account, conv_key, conv_project): # prepare data query = "One california maki please." 
- input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, ) @@ -45,7 +45,7 @@ def test_conversation_app(self, conv_account, conv_key, conv_project): # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, DeepstackPrediction) + assert isinstance(result.prediction, ConversationPrediction) assert result.prediction.project_kind == 'conversation' assert result.prediction.top_intent == 'Order' assert len(result.prediction.entities) > 0 @@ -64,6 +64,7 @@ def test_conversation_app_with_dictparams(self, conv_account, conv_key, conv_pro query = "One california maki please." params = { "query": query, + "api_version": "2021-11-01-preview" } # analyze quey @@ -78,7 +79,7 @@ def test_conversation_app_with_dictparams(self, conv_account, conv_key, conv_pro # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, DeepstackPrediction) + assert isinstance(result.prediction, ConversationPrediction) assert result.prediction.project_kind == 'conversation' assert result.prediction.top_intent == 'Order' assert len(result.prediction.entities) > 0 diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_orchestration_app.py similarity index 66% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_orchestration_app.py index 98ea790b3462..da018335055e 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_app.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_orchestration_app.py @@ -16,20 +16,19 @@ from azure.ai.language.conversations import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - 
AnalyzeConversationOptions, AnalyzeConversationResult, QuestionAnsweringParameters, - DeepstackParameters, - DeepstackCallingOptions, + ConversationParameters, + ConversationCallingOptions, QuestionAnsweringTargetIntentResult, - WorkflowPrediction, - DSTargetIntentResult + OrchestratorPrediction, + ConversationAnalysisOptions ) -class WorkflowAppTests(ConversationTest): +class OrchestrationAppTests(ConversationTest): @GlobalConversationAccountPreparer() - def test_workflow_app(self, conv_account, conv_key, workflow_project): + def test_orchestration_app(self, conv_account, conv_key, orchestration_project): client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) with client: @@ -38,41 +37,42 @@ def test_workflow_app(self, conv_account, conv_key, workflow_project): query = "How do you make sushi rice?" result = client.analyze_conversations( {"query": query}, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert + top_intent = "SushiMaking" assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" - assert result.prediction.top_intent == "SushiMaking" - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert result.prediction.top_intent == top_intent + assert isinstance(result.prediction.intents[top_intent], QuestionAnsweringTargetIntentResult) # analyze query query = "I will have sashimi" result = client.analyze_conversations( {"query": query}, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) 
assert result.prediction.project_kind == "workflow" # assert result.prediction.top_intent == "SushiOrder" --> wrong top intent! - # assert isinstance(result.prediction.intents, DSTargetIntentResult) + # assert isinstance(result.prediction.intents, ConversationTargetIntentResult) @GlobalConversationAccountPreparer() - def test_workflow_app_with_parameters(self, conv_account, conv_key, workflow_project): + def test_orchestration_app_with_parameters(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?", - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, parameters={ "SushiMaking": QuestionAnsweringParameters( @@ -82,7 +82,7 @@ def test_workflow_app_with_parameters(self, conv_account, conv_key, workflow_pro "confidenceScoreThreshold": 0.1 } ), - "SushiOrder": DeepstackParameters( + "SushiOrder": ConversationParameters( calling_options={ "verbose": True } @@ -95,25 +95,26 @@ def test_workflow_app_with_parameters(self, conv_account, conv_key, workflow_pro with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert + top_intent = "SushiMaking" assert isinstance(result, AnalyzeConversationResult) # assert result.query == query --> weird behavior here! 
- assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" - assert result.prediction.top_intent == "SushiMaking" - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert result.prediction.top_intent == top_intent + assert isinstance(result.prediction.intents[top_intent], QuestionAnsweringTargetIntentResult) @GlobalConversationAccountPreparer() - def test_workflow_app_with_model(self, conv_account, conv_key, workflow_project): + def test_orchestration_app_with_model(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?" - input = AnalyzeConversationOptions( + input = ConversationAnalysisOptions( query=query, parameters={ "SushiMaking": QuestionAnsweringParameters( @@ -123,8 +124,8 @@ def test_workflow_app_with_model(self, conv_account, conv_key, workflow_project) "confidence_score_threshold":0.1 } ), - "SushiOrder": DeepstackParameters( - calling_options=DeepstackCallingOptions( + "SushiOrder": ConversationParameters( + calling_options=ConversationCallingOptions( verbose=True ) ) @@ -136,14 +137,15 @@ def test_workflow_app_with_model(self, conv_account, conv_key, workflow_project) with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert + top_intent = "SushiMaking" assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" - assert result.prediction.top_intent == "SushiMaking" - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert result.prediction.top_intent == top_intent + assert 
isinstance(result.prediction.intents[top_intent], QuestionAnsweringTargetIntentResult) diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_orchestration_direct.py similarity index 76% rename from sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py rename to sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_orchestration_direct.py index 02f2aac6a7e6..84b31382d2dd 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_workflow_direct.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/test_orchestration_direct.py @@ -16,27 +16,27 @@ from azure.ai.language.conversations import ConversationAnalysisClient from azure.ai.language.conversations.models import ( - AnalyzeConversationOptions, + AnalysisParameters, AnalyzeConversationResult, QuestionAnsweringParameters, - DeepstackParameters, - WorkflowPrediction, + ConversationParameters, + OrchestratorPrediction, QuestionAnsweringTargetIntentResult, - DSTargetIntentResult, + ConversationTargetIntentResult, LUISTargetIntentResult ) -class WorkflowAppDirectTests(ConversationTest): +class OrchestrationAppDirectTests(ConversationTest): @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - def test_direct_kb_intent(self, conv_account, conv_key, workflow_project): + def test_direct_kb_intent(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?" 
target_intent = "SushiMaking" - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ @@ -55,26 +55,26 @@ def test_direct_kb_intent(self, conv_account, conv_key, workflow_project): with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - def test_kb_intent_with_model(self, conv_account, conv_key, workflow_project): + def test_kb_intent_with_model(self, conv_account, conv_key, orchestration_project): # prepare data query = "How do you make sushi rice?" 
target_intent = "SushiMaking" - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ @@ -93,31 +93,31 @@ def test_kb_intent_with_model(self, conv_account, conv_key, workflow_project): with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent - # assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) + assert isinstance(result.prediction.intents, QuestionAnsweringTargetIntentResult) @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - def test_deepstack_intent(self, conv_account, conv_key, workflow_project): + def test_conversation_intent(self, conv_account, conv_key, orchestration_project): # prepare data query = "I will have the oyako donburi please." 
target_intent = "SushiOrder" client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ - "SushiOrder": DeepstackParameters( + "SushiOrder": ConversationParameters( calling_options={ "verbose": True, } @@ -129,32 +129,32 @@ def test_deepstack_intent(self, conv_account, conv_key, workflow_project): with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent - # assert isinstance(result.prediction.intents, DSTargetIntentResult) + assert isinstance(result.prediction.intents, ConversationTargetIntentResult) @pytest.mark.skip(reason="internal server error!") @GlobalConversationAccountPreparer() - def test_luis_intent(self, conv_account, conv_key, workflow_project): + def test_luis_intent(self, conv_account, conv_key, orchestration_project): # prepare data query = "I will have the oyako donburi please." 
target_intent = "SushiOrder" client = ConversationAnalysisClient(conv_account, AzureKeyCredential(conv_key)) - input = AnalyzeConversationOptions( + input = AnalysisParameters( query=query, direct_target=target_intent, parameters={ - "SushiOrder": DeepstackParameters( + "SushiOrder": ConversationParameters( calling_options={ "verbose": True, } @@ -166,14 +166,14 @@ def test_luis_intent(self, conv_account, conv_key, workflow_project): with client: result = client.analyze_conversations( input, - project_name=workflow_project, + project_name=orchestration_project, deployment_name='production', ) # assert assert isinstance(result, AnalyzeConversationResult) assert result.query == query - assert isinstance(result.prediction, WorkflowPrediction) + assert isinstance(result.prediction, OrchestratorPrediction) assert result.prediction.project_kind == "workflow" assert result.prediction.top_intent == target_intent - # assert isinstance(result.prediction.intents, LUISTargetIntentResult) \ No newline at end of file + assert isinstance(result.prediction.intents, LUISTargetIntentResult) \ No newline at end of file diff --git a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py index 7894ef03185c..0e26485968ef 100644 --- a/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py +++ b/sdk/cognitivelanguage/azure-ai-language-conversations/tests/testcase.py @@ -85,7 +85,7 @@ def create_resource(self, name, **kwargs): 'conv_account': os.environ.get("AZURE_CONVERSATIONS_ENDPOINT"), 'conv_key': os.environ.get("AZURE_CONVERSATIONS_KEY"), 'conv_project': os.environ.get("AZURE_CONVERSATIONS_PROJECT"), - 'workflow_project': os.environ.get("AZURE_CONVERSATIONS_WORKFLOW_PROJECT") + 'orchestration_project': os.environ.get("AZURE_CONVERSATIONS_WORKFLOW_PROJECT") } return { 'location': REGION, @@ -93,5 +93,5 @@ def create_resource(self, name, **kwargs): 'conv_account': TEST_ENDPOINT, 
'conv_key': TEST_KEY, 'conv_project': TEST_PROJECT, - 'workflow_project': TEST_WORKFLOW + 'orchestration_project': TEST_WORKFLOW }