Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

hotfix(agents-api): Fix session.situation not being rendered #499

Merged
merged 1 commit into the base branch from the source branch
on Sep 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions agents-api/agents_api/common/protocol/sessions.py
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ def get_chat_environment(self) -> dict[str, dict | list[dict]]:
"session": self.session.model_dump(),
"agents": [agent.model_dump() for agent in self.agents],
"current_agent": current_agent.model_dump(),
"agent": current_agent.model_dump(),
creatorrr marked this conversation as resolved.
Show resolved Hide resolved
"users": [user.model_dump() for user in self.users],
"settings": settings,
"tools": [tool.model_dump() for tool in tools],
Expand Down
40 changes: 30 additions & 10 deletions agents-api/agents_api/routers/sessions/chat.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,21 +46,41 @@ async def chat(
# Merge the settings and prepare environment
chat_context.merge_settings(chat_input)
settings: dict = chat_context.settings.model_dump()
env: dict = chat_context.get_chat_environment()
settings["model"] = f"openai/{settings['model']}" # litellm proxy idiosyncracy

# Render the messages
new_raw_messages = [msg.model_dump() for msg in chat_input.messages]

# Get the past messages and doc references
past_messages, doc_references = await gather_messages(
developer=developer,
session_id=session_id,
chat_context=chat_context,
chat_input=chat_input,
)

# Prepare the environment
env: dict = chat_context.get_chat_environment()
env["docs"] = doc_references
new_messages = await render_template(new_raw_messages, variables=env)

# Render the system message
if situation := chat_context.session.situation:
system_message = dict(
role="system",
content=situation,
)

system_messages: list[dict] = await render_template(
[system_message], variables=env
)
past_messages = system_messages + past_messages

# Render the incoming messages
new_raw_messages = [msg.model_dump() for msg in chat_input.messages]

if chat_context.session.render_templates:
new_messages = await render_template(new_raw_messages, variables=env)
else:
new_messages = new_raw_messages

# Combine the past messages with the new messages
messages = past_messages + new_messages

# Get the tools
Expand All @@ -74,13 +94,11 @@ async def chat(

# FIXME: Hotfix for datetime not serializable. Needs investigation
messages = [
msg.model_dump() if hasattr(msg, "model_dump") else msg
for msg in messages
msg.model_dump() if hasattr(msg, "model_dump") else msg for msg in messages
]

messages = [
dict(role=m["role"], content=m["content"], user=m.get("user"))
for m in messages
dict(role=m["role"], content=m["content"], user=m.get("user")) for m in messages
]

# Get the response from the model
Expand All @@ -104,7 +122,9 @@ async def chat(
# Add the response to the new entries
new_entries.append(
CreateEntryRequest.from_model_input(
model=settings["model"], **model_response.choices[0].model_dump()['message'], source="api_response"
model=settings["model"],
**model_response.choices[0].model_dump()["message"],
source="api_response",
)
)
background_tasks.add_task(
Expand Down
Loading