From ccc5b461b62f9bb49e823606a45105594f22ef6e Mon Sep 17 00:00:00 2001 From: ZiTao-Li Date: Fri, 19 Apr 2024 11:15:27 +0800 Subject: [PATCH 1/3] add file --- .../groupchat_utils.py | 37 +++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 examples/conversation_with_RAG_agents/groupchat_utils.py diff --git a/examples/conversation_with_RAG_agents/groupchat_utils.py b/examples/conversation_with_RAG_agents/groupchat_utils.py new file mode 100644 index 000000000..24d422c57 --- /dev/null +++ b/examples/conversation_with_RAG_agents/groupchat_utils.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- +""" Group chat utils.""" +import re +from typing import Sequence + + +def select_next_one(agents: Sequence, rnd: int) -> Sequence: + """ + Select next agent. + """ + return agents[rnd % len(agents)] + + +def filter_agents(string: str, agents: Sequence) -> Sequence: + """ + This function filters the input string for occurrences of the given names + prefixed with '@' and returns a list of the found names. 
+ """ + if len(agents) == 0: + return [] + + # Create a pattern that matches @ followed by any of the candidate names + pattern = ( + r"@(" + "|".join(re.escape(agent.name) for agent in agents) + r")\b" + ) + + # Find all occurrences of the pattern in the string + matches = re.findall(pattern, string) + + # Create a dictionary mapping agent names to agent objects for quick lookup + agent_dict = {agent.name: agent for agent in agents} + + # Return the list of matched agent objects preserving the order + ordered_agents = [ + agent_dict[name] for name in matches if name in agent_dict + ] + return ordered_agents From 06d9ebb8a93ad3609861f37fae8a92b83cf7c53f Mon Sep 17 00:00:00 2001 From: ZiTao-Li Date: Fri, 26 Apr 2024 17:54:35 +0800 Subject: [PATCH 2/3] reformat code --- .../configs/agent_config.json | 12 +- .../rag_example.py | 106 ++++++++++++------ src/agentscope/agents/__init__.py | 3 + .../agentscope/agents}/rag_agents.py | 2 +- .../agentscope}/rag/__init__.py | 1 - .../agentscope}/rag/langchain_rag.py | 0 .../agentscope}/rag/llama_index_rag.py | 10 +- .../agentscope}/rag/rag.py | 0 8 files changed, 89 insertions(+), 45 deletions(-) rename {examples/conversation_with_RAG_agents => src/agentscope/agents}/rag_agents.py (99%) rename {examples/conversation_with_RAG_agents => src/agentscope}/rag/__init__.py (99%) rename {examples/conversation_with_RAG_agents => src/agentscope}/rag/langchain_rag.py (100%) rename {examples/conversation_with_RAG_agents => src/agentscope}/rag/llama_index_rag.py (99%) rename {examples/conversation_with_RAG_agents => src/agentscope}/rag/rag.py (100%) diff --git a/examples/conversation_with_RAG_agents/configs/agent_config.json b/examples/conversation_with_RAG_agents/configs/agent_config.json index 34138ae8b..abd4b3125 100644 --- a/examples/conversation_with_RAG_agents/configs/agent_config.json +++ b/examples/conversation_with_RAG_agents/configs/agent_config.json @@ -24,7 +24,7 @@ "similarity_top_k": 10, "log_retrieval": false, 
"recent_n_mem": 1, - "persist_dir": "../../rag_storage/tutorial_assist" + "persist_dir": "./rag_storage/tutorial_assist" } } }, @@ -67,7 +67,7 @@ "similarity_top_k": 10, "log_retrieval": false, "recent_n_mem": 1, - "persist_dir": "../../rag_storage/code_assist" + "persist_dir": "./rag_storage/code_assist" } } }, @@ -108,15 +108,17 @@ "similarity_top_k": 4, "log_retrieval": true, "recent_n_mem": 1, - "persist_dir": "../../rag_storage/docstring_assist" + "persist_dir": "./rag_storage/docstring_assist", + "repo_base": "../../", + "file_dir": "../../docs/docstring_html/" } } }, { "class": "DialogAgent", "args": { - "name": "Summarize-Assistant", - "description": "Summarize-Assistant is an agent that can summarize multiple RAG agents' answers.", + "name": "Guide-Assistant", + "description": "Guide-Assistant is an agent that can summarize multiple RAG agents' answers.", "sys_prompt": "You summarize the answers of the previous two messages and remove the redundant information. The answer need to be simple and itemized. The answer needs to be less than 100 words.ex .", "model_config_name": "qwen_config", "use_memory": true diff --git a/examples/conversation_with_RAG_agents/rag_example.py b/examples/conversation_with_RAG_agents/rag_example.py index d22682941..b3abfde34 100644 --- a/examples/conversation_with_RAG_agents/rag_example.py +++ b/examples/conversation_with_RAG_agents/rag_example.py @@ -1,19 +1,46 @@ # -*- coding: utf-8 -*- """ -A simple example for conversation between user and -an agent with RAG capability. +An example for conversation between user and agents with RAG capability. +One agent is a tutorial assistant, the other is a code explainer. 
""" import json import os -from rag_agents import LlamaIndexAgent from groupchat_utils import filter_agents import agentscope -from agentscope.agents import UserAgent +from agentscope.agents import UserAgent, DialogAgent, LlamaIndexAgent -from agentscope.message import Msg -from agentscope.agents import DialogAgent + +AGENT_CHOICE_PROMPT = """ +There are following available agents. You need to choose the most appropriate +agent(s) to answer the user's question. + +agent descriptions:{} + +First, rephrase the user's question, which must contain the key information. +The you need to think step by step. If you believe some of the agents are +good candidates to answer the question (e.g., AGENT_1 and AGENT_2), then +you need to follow the following format to generate your output: + +' +Because $YOUR_REASONING. +I believe @AGENT_1 and @AGENT_2 are the most appropriate agents to answer +your question. +' +""" + + +def prepare_docstring_html(repo_path: str, html_dir: str) -> None: + """prepare docstring in html for API assistant""" + os.system( + f"sphinx-apidoc -f -o {repo_path}/docs/sphinx_doc/en/source " + f"{repo_path}/src/agentscope -t template", + ) + os.system( + f"sphinx-build -b html {repo_path}/docs/sphinx_doc/en/source " + f"{html_dir} -W --keep-going", + ) def main() -> None: @@ -34,53 +61,66 @@ def main() -> None: with open("configs/agent_config.json", "r", encoding="utf-8") as f: agent_configs = json.load(f) + + # define RAG-based agents for tutorial and code tutorial_agent = LlamaIndexAgent(**agent_configs[0]["args"]) code_explain_agent = LlamaIndexAgent(**agent_configs[1]["args"]) + + # prepare html for api agent + prepare_docstring_html( + agent_configs[2]["args"]["rag_config"]["repo_base"], + agent_configs[2]["args"]["rag_config"]["file_dir"], + ) + # define an API agent api_agent = LlamaIndexAgent(**agent_configs[2]["args"]) - agent_configs[3]["args"].pop("description") - summarize_agent = DialogAgent(**agent_configs[3]["args"]) + rag_agents = [ 
tutorial_agent, code_explain_agent, api_agent, ] rag_agent_names = [agent.name for agent in rag_agents] - summarize_agents = [summarize_agent] - summarize_agent_names = [agent.name for agent in summarize_agents] - helper_agents = rag_agents + summarize_agents + + # define a guide agent + rag_agent_descriptions = [ + "agent名字:" + agent.name + "\n agent描述:" + agent.description + "\n" + for agent in rag_agents + ] + agent_configs[3]["args"].pop("description") + agent_configs[3]["args"]["sys_prompt"] = agent_configs[3]["args"][ + "sys_prompt" + ] + AGENT_CHOICE_PROMPT.format( + "".join(rag_agent_descriptions), + ) + + guide_agent = DialogAgent(**agent_configs[3]["args"]) user_agent = UserAgent() - # start the conversation between user and assistant while True: + # The workflow is the following: + # 1. user inputs a message, + # 2. if it mentions one of the agents, then the agent will be called + # 3. otherwise, the guide agent will decide which agent to call + # 4. the called agent will respond to the user + # 5. 
repeat x = user_agent() x.role = "user" # to enforce dashscope requirement on roles if len(x["content"]) == 0 or str(x["content"]).startswith("exit"): break - speak_list = filter_agents(x.get("content", ""), helper_agents) + speak_list = filter_agents(x.get("content", ""), rag_agents) if len(speak_list) == 0: - # if no agent is @ (mentioned), default invoke all rag agents and - # summarize agents - speak_list = rag_agents + summarize_agents - for agent in speak_list: - if agent.name in summarize_agent_names: - # if summarize agent is mentioned, then also call rag agents - # TODO: let summarize agent choose which agent to call - speak_list = rag_agents + summarize_agents - + x["content"] = "用户问题:" + x["content"] + guide_response = guide_agent(x) + # Only one agent can be called in the current version, + # we may support multi-agent conversation later + speak_list = filter_agents( + guide_response.get("content", ""), + rag_agents, + ) agent_name_list = [agent.name for agent in speak_list] - rag_agent_responses = [] for agent_name, agent in zip(agent_name_list, speak_list): if agent_name in rag_agent_names: - rag_agent_responses.append(agent(x)) - - msg = Msg( - name="user", - role="user", - content="/n".join([msg.content for msg in rag_agent_responses]), - ) - for agent_name, agent in zip(agent_name_list, speak_list): - if agent_name in summarize_agent_names: - agent(msg) + agent(x) if __name__ == "__main__": diff --git a/src/agentscope/agents/__init__.py b/src/agentscope/agents/__init__.py index c61fdbdc3..7d1e7c711 100644 --- a/src/agentscope/agents/__init__.py +++ b/src/agentscope/agents/__init__.py @@ -8,6 +8,7 @@ from .text_to_image_agent import TextToImageAgent from .rpc_agent import RpcAgent, RpcAgentServerLauncher from .react_agent import ReActAgent +from .rag_agents import RAGAgentBase, LlamaIndexAgent __all__ = [ @@ -20,4 +21,6 @@ "ReActAgent", "RpcAgent", "RpcAgentServerLauncher", + "RAGAgentBase", + "LlamaIndexAgent", ] diff --git 
a/examples/conversation_with_RAG_agents/rag_agents.py b/src/agentscope/agents/rag_agents.py similarity index 99% rename from examples/conversation_with_RAG_agents/rag_agents.py rename to src/agentscope/agents/rag_agents.py index b173677c7..c1cc61c57 100644 --- a/examples/conversation_with_RAG_agents/rag_agents.py +++ b/src/agentscope/agents/rag_agents.py @@ -11,7 +11,7 @@ import importlib from loguru import logger -from rag import RAGBase, LlamaIndexRAG +from agentscope.rag import RAGBase, LlamaIndexRAG from agentscope.agents.agent import AgentBase from agentscope.message import Msg diff --git a/examples/conversation_with_RAG_agents/rag/__init__.py b/src/agentscope/rag/__init__.py similarity index 99% rename from examples/conversation_with_RAG_agents/rag/__init__.py rename to src/agentscope/rag/__init__.py index 3c8f48882..ac407feb1 100644 --- a/examples/conversation_with_RAG_agents/rag/__init__.py +++ b/src/agentscope/rag/__init__.py @@ -4,7 +4,6 @@ from .llama_index_rag import LlamaIndexRAG - try: from .langchain_rag import LangChainRAG except Exception: diff --git a/examples/conversation_with_RAG_agents/rag/langchain_rag.py b/src/agentscope/rag/langchain_rag.py similarity index 100% rename from examples/conversation_with_RAG_agents/rag/langchain_rag.py rename to src/agentscope/rag/langchain_rag.py diff --git a/examples/conversation_with_RAG_agents/rag/llama_index_rag.py b/src/agentscope/rag/llama_index_rag.py similarity index 99% rename from examples/conversation_with_RAG_agents/rag/llama_index_rag.py rename to src/agentscope/rag/llama_index_rag.py index 8f778c71c..412ea4310 100644 --- a/examples/conversation_with_RAG_agents/rag/llama_index_rag.py +++ b/src/agentscope/rag/llama_index_rag.py @@ -3,10 +3,10 @@ This module is an integration of the Llama index RAG into AgentScope package """ - +import os.path from typing import Any, Optional, List, Union from loguru import logger -import os.path + try: from llama_index.core.readers.base import BaseReader @@ -33,8 
+33,8 @@ VectorStoreIndex = None PrivateAttr = None -from rag import RAGBase -from rag.rag import ( +from agentscope.rag import RAGBase +from agentscope.rag.rag import ( DEFAULT_CHUNK_SIZE, DEFAULT_CHUNK_OVERLAP, DEFAULT_TOP_K, @@ -282,7 +282,7 @@ def store_and_index( # load the storage_context storage_context = StorageContext.from_defaults( persist_dir=self.persist_dir, - ) + ) # construct index from self.index = load_index_from_storage( storage_context=storage_context, diff --git a/examples/conversation_with_RAG_agents/rag/rag.py b/src/agentscope/rag/rag.py similarity index 100% rename from examples/conversation_with_RAG_agents/rag/rag.py rename to src/agentscope/rag/rag.py From fc9110790cb9b9a1f69d5f705882d7437ea7d9fa Mon Sep 17 00:00:00 2001 From: ZiTao-Li Date: Sun, 28 Apr 2024 11:23:36 +0800 Subject: [PATCH 3/3] fix --- examples/conversation_with_RAG_agents/rag_example.py | 3 +++ src/agentscope/rag/llama_index_rag.py | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/examples/conversation_with_RAG_agents/rag_example.py b/examples/conversation_with_RAG_agents/rag_example.py index c6c3c7f3a..0369d8925 100644 --- a/examples/conversation_with_RAG_agents/rag_example.py +++ b/examples/conversation_with_RAG_agents/rag_example.py @@ -74,10 +74,13 @@ def main() -> None: # define an API agent api_agent = LlamaIndexAgent(**agent_configs[2]["args"]) + searching_agent = LlamaIndexAgent(**agent_configs[4]["args"]) + rag_agents = [ tutorial_agent, code_explain_agent, api_agent, + searching_agent, ] rag_agent_names = [agent.name for agent in rag_agents] diff --git a/src/agentscope/rag/llama_index_rag.py b/src/agentscope/rag/llama_index_rag.py index b4ec64768..59e741ccf 100644 --- a/src/agentscope/rag/llama_index_rag.py +++ b/src/agentscope/rag/llama_index_rag.py @@ -201,7 +201,7 @@ def store_and_index( self, docs_list: Any, retriever: Any = None, - store_and_index_args_list: Any = None, + store_and_index_args_list: list[dict] = None, **kwargs: Any, ) -> 
Any: """ @@ -297,7 +297,7 @@ def load_docs(self, index_config: dict) -> Any: """ if "load_data" in index_config: - load_data_args = self._prepare_args_from_config( + load_data_args = self.prepare_args_from_config( index_config["load_data"], ) else: