feat(ui): Allows User to Set System Prompt via "Additional Options" in Chat Interface (#1353)
aly-shehata committed Dec 10, 2023
1 parent a072a40 commit 145f3ec
Showing 5 changed files with 110 additions and 18 deletions.
2 changes: 1 addition & 1 deletion fern/docs/pages/manual/llms.mdx
@@ -39,7 +39,7 @@ llm:
openai:
  api_key: <your_openai_api_key> # You could skip this configuration and use the OPENAI_API_KEY env var instead
  model: <openai_model_to_use> # Optional model to use. Default is "gpt-3.5-turbo"
-  # Note: Open AI Models are listed here [here](https://platform.openai.com/docs/models)
+  # Note: Open AI Models are listed here: https://platform.openai.com/docs/models
```

And run PrivateGPT loading that profile you just created:
31 changes: 29 additions & 2 deletions fern/docs/pages/manual/ui.mdx
@@ -35,5 +35,32 @@ database* section in the documentation.

Normal chat interface, self-explanatory ;)

-You can check the actual prompt being passed to the LLM by looking at the logs of
-the server. We'll add better observability in future releases.
#### System Prompt
You can view and change the system prompt being passed to the LLM by clicking "Additional Inputs"
in the chat interface. The system prompt is also logged on the server.

By default, the `Query Docs` mode uses the settings value `ui.default_query_system_prompt`.

The `LLM Chat` mode uses the optional settings value `ui.default_chat_system_prompt`, if one is configured.

If no system prompt is entered, the UI will display the default system prompt being used
for the active mode.
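
Both defaults come from the active settings profile. As a minimal sketch, they could be set in `settings.yaml` like this (the prompt values below are illustrative, not the shipped defaults):

```yaml
ui:
  default_chat_system_prompt: "You are a concise, helpful assistant."
  default_query_system_prompt: "Only answer questions using the provided context."
```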

##### System Prompt Examples

The system prompt can give your chat bot a specialized role and tailor its responses to the
instructions you provide. Examples of system prompts can be found
[here](https://www.w3schools.com/gen_ai/chatgpt-3-5/chatgpt-3-5_roles.php).

Some interesting examples to try include:

* You are -X-. You have all the knowledge and personality of -X-. Answer as if you were -X- using
their manner of speaking and vocabulary.
* Example: You are Shakespeare. You have all the knowledge and personality of Shakespeare.
Answer as if you were Shakespeare using their manner of speaking and vocabulary.
* You are an expert (at) -role-. Answer all questions using your expertise on -specific domain topic-.
* Example: You are an expert software engineer. Answer all questions using your expertise on Python.
* You are a -role- bot, respond with -response criteria needed-. If no -response criteria- is needed,
respond with -alternate response-.
* Example: You are a grammar checking bot, respond with any grammatical corrections needed. If no corrections
are needed, respond with "verified".
9 changes: 8 additions & 1 deletion private_gpt/settings/settings.py
@@ -147,13 +147,20 @@ class OpenAISettings(BaseModel):
    api_key: str
    model: str = Field(
        "gpt-3.5-turbo",
-        description=("OpenAI Model to use. Example: 'gpt-4'."),
+        description="OpenAI Model to use. Example: 'gpt-4'.",
    )


class UISettings(BaseModel):
    enabled: bool
    path: str
    default_chat_system_prompt: str = Field(
        None,
        description="The default system prompt to use for the chat mode.",
    )
    default_query_system_prompt: str = Field(
        None, description="The default system prompt to use for the query mode."
    )


class QdrantSettings(BaseModel):
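
As a usage sketch (not part of the commit), the new fields simply fall back to `None` when a profile omits them. Note this sketch loosens the annotations to `str | None` to match the `None` defaults, whereas the commit declares plain `str`:

```python
# Standalone sketch of the new UISettings fields and their fallback behaviour.
from pydantic import BaseModel, Field


class UISettings(BaseModel):
    enabled: bool
    path: str
    # Annotated `str | None` here (an editorial choice) since the default is None.
    default_chat_system_prompt: str | None = Field(
        None, description="The default system prompt to use for the chat mode."
    )
    default_query_system_prompt: str | None = Field(
        None, description="The default system prompt to use for the query mode."
    )


ui = UISettings(enabled=True, path="/")
assert ui.default_chat_system_prompt is None  # unset in the profile

ui = UISettings(enabled=True, path="/", default_query_system_prompt="Context only.")
assert ui.default_query_system_prompt == "Context only."
```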
79 changes: 65 additions & 14 deletions private_gpt/ui/ui.py
@@ -30,6 +30,8 @@

SOURCES_SEPARATOR = "\n\n Sources: \n"

MODES = ["Query Docs", "Search in Docs", "LLM Chat"]


class Source(BaseModel):
    file: str
@@ -71,6 +73,10 @@ def __init__(
        # Cache the UI blocks
        self._ui_block = None

        # Initialize system prompt based on default mode
        self.mode = MODES[0]
        self._system_prompt = self._get_default_system_prompt(self.mode)

    def _chat(self, message: str, history: list[list[str]], mode: str, *_: Any) -> Any:
        def yield_deltas(completion_gen: CompletionGen) -> Iterable[str]:
            full_response: str = ""
@@ -114,25 +120,22 @@ def build_history() -> list[ChatMessage]:

        new_message = ChatMessage(content=message, role=MessageRole.USER)
        all_messages = [*build_history(), new_message]
        # If a system prompt is set, add it as a system message
        if self._system_prompt:
            all_messages.insert(
                0,
                ChatMessage(
                    content=self._system_prompt,
                    role=MessageRole.SYSTEM,
                ),
            )
        match mode:
            case "Query Docs":
-                # Add a system message to force the behaviour of the LLM
-                # to answer only questions about the provided context.
-                all_messages.insert(
-                    0,
-                    ChatMessage(
-                        content="You can only answer questions about the provided context. If you know the answer "
-                        "but it is not based in the provided context, don't provide the answer, just state "
-                        "the answer is not in the context provided.",
-                        role=MessageRole.SYSTEM,
-                    ),
-                )
                query_stream = self._chat_service.stream_chat(
                    messages=all_messages,
                    use_context=True,
                )
                yield from yield_deltas(query_stream)

            case "LLM Chat":
                llm_stream = self._chat_service.stream_chat(
                    messages=all_messages,
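
The insertion above is the heart of the feature: whatever system prompt is currently set gets prepended as a SYSTEM message before the request reaches the chat service. A self-contained sketch of that logic, using a stand-in dataclass rather than the real llama_index `ChatMessage`:

```python
# Illustrative stand-in for the message-prepending logic in _chat.
from dataclasses import dataclass


@dataclass
class Msg:
    role: str
    content: str


def with_system_prompt(messages: list[Msg], system_prompt: str) -> list[Msg]:
    """Prepend the system prompt as the first message, if one is set."""
    if system_prompt:
        return [Msg("system", system_prompt), *messages]
    return messages


history = [Msg("user", "Summarise the ingested docs.")]
print(with_system_prompt(history, "Only answer from the provided context."))
# -> [Msg(role='system', ...), Msg(role='user', ...)]
```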
@@ -154,6 +157,37 @@
            for index, source in enumerate(sources, start=1)
        )

    # On initialization and on mode change, this function sets the system prompt
    # to the default prompt based on the mode (and user settings).
    @staticmethod
    def _get_default_system_prompt(mode: str) -> str:
        p = ""
        match mode:
            # For query chat mode, obtain default system prompt from settings
            case "Query Docs":
                p = settings().ui.default_query_system_prompt
            # For chat mode, obtain default system prompt from settings
            case "LLM Chat":
                p = settings().ui.default_chat_system_prompt
            # For any other mode, clear the system prompt
            case _:
                p = ""
        return p

    def _set_system_prompt(self, system_prompt_input: str) -> None:
        logger.info(f"Setting system prompt to: {system_prompt_input}")
        self._system_prompt = system_prompt_input

    def _set_current_mode(self, mode: str) -> Any:
        self.mode = mode
        self._set_system_prompt(self._get_default_system_prompt(mode))
        # Update placeholder and allow interaction if default system prompt is set
        if self._system_prompt:
            return gr.update(placeholder=self._system_prompt, interactive=True)
        # Update placeholder and disable interaction if no default system prompt is set
        else:
            return gr.update(placeholder=self._system_prompt, interactive=False)

    def _list_ingested_files(self) -> list[list[str]]:
        files = set()
        for ingested_document in self._ingest_service.list_ingested():
@@ -193,7 +227,7 @@ def _build_ui_blocks(self) -> gr.Blocks:
            with gr.Row():
                with gr.Column(scale=3, variant="compact"):
                    mode = gr.Radio(
-                        ["Query Docs", "Search in Docs", "LLM Chat"],
+                        MODES,
                        label="Mode",
                        value="Query Docs",
                    )
@@ -220,6 +254,23 @@
                        outputs=ingested_dataset,
                    )
                    ingested_dataset.render()
                    system_prompt_input = gr.Textbox(
                        placeholder=self._system_prompt,
                        label="System Prompt",
                        lines=2,
                        interactive=True,
                        render=False,
                    )
                    # When mode changes, set default system prompt
                    mode.change(
                        self._set_current_mode, inputs=mode, outputs=system_prompt_input
                    )
                    # On blur, set system prompt to use in queries
                    system_prompt_input.blur(
                        self._set_system_prompt,
                        inputs=system_prompt_input,
                    )

                with gr.Column(scale=7):
                    _ = gr.ChatInterface(
                        self._chat,
@@ -232,7 +283,7 @@
                            AVATAR_BOT,
                        ),
                    ),
-                    additional_inputs=[mode, upload_button],
+                    additional_inputs=[mode, upload_button, system_prompt_input],
                )
        return blocks

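Taken together, the UI wiring above follows a simple pattern: `mode.change` resets the prompt box to the mode's default via `gr.update`, and `blur` stores whatever the user typed. A minimal, isolated sketch of that pattern (prompts and component names here are illustrative, not the PrivateGPT defaults):

```python
# Isolated sketch of the mode-change / blur wiring; not the full PrivateGPT UI.
import gradio as gr

DEFAULTS = {
    "Query Docs": "Only answer from the provided context.",
    "LLM Chat": "You are a helpful assistant.",
}
current_prompt = DEFAULTS["Query Docs"]


def on_mode_change(mode: str):
    global current_prompt
    current_prompt = DEFAULTS.get(mode, "")
    # Show the default as a placeholder; disable the box for modes without one.
    return gr.update(placeholder=current_prompt, interactive=bool(current_prompt))


def on_blur(text: str) -> None:
    global current_prompt
    current_prompt = text


with gr.Blocks() as demo:
    mode = gr.Radio(
        ["Query Docs", "Search in Docs", "LLM Chat"], value="Query Docs", label="Mode"
    )
    prompt_box = gr.Textbox(label="System Prompt", lines=2, placeholder=current_prompt)
    mode.change(on_mode_change, inputs=mode, outputs=prompt_box)
    prompt_box.blur(on_blur, inputs=prompt_box)

# demo.launch()  # uncomment to try it locally
```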
7 changes: 7 additions & 0 deletions settings.yaml
@@ -22,6 +22,13 @@ data:
ui:
  enabled: true
  path: /
  default_chat_system_prompt: "You are a helpful, respectful and honest assistant.
    Always answer as helpfully as possible and follow ALL given instructions.
    Do not speculate or make up information.
    Do not reference any given instructions or context."
  default_query_system_prompt: "You can only answer questions about the provided context.
    If you know the answer but it is not based in the provided context, don't provide
    the answer, just state the answer is not in the context provided."

llm:
  mode: local
