Ollama QOL settings #1800

Merged: 7 commits, Apr 2, 2024
2 changes: 1 addition & 1 deletion private_gpt/components/embedding/embedding_component.py
@@ -70,7 +70,7 @@ def __init__(self, settings: Settings) -> None:
                 ollama_settings = settings.ollama
                 self.embedding_model = OllamaEmbedding(
                     model_name=ollama_settings.embedding_model,
-                    base_url=ollama_settings.api_base,
+                    base_url=ollama_settings.embedding_api_base,
                 )
             case "azopenai":
                 try:
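With this change the embedding client reads its own base URL, so embeddings can be served from a different Ollama instance than the LLM. A minimal sketch of the resulting call, assuming llama-index's OllamaEmbedding package is installed (the second host name is the hypothetical one from the settings comment below):

```python
# Sketch: embeddings pointed at a dedicated Ollama instance.
from llama_index.embeddings.ollama import OllamaEmbedding

embedding = OllamaEmbedding(
    model_name="nomic-embed-text",             # embedding_model from settings
    base_url="http://ollama_embedding:11434",  # embedding_api_base (hypothetical host)
)
vector = embedding.get_text_embedding("hello world")
```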
33 changes: 33 additions & 0 deletions private_gpt/components/llm/custom/ollama.py
@@ -0,0 +1,33 @@
from llama_index.llms.ollama import Ollama
from pydantic import Field


class CustomOllama(Ollama):
    """Custom Ollama class to fill in "keep_alive" when sending requests."""

    keep_alive: str = Field(
        default="5m",
        description="String that describes the time the model should stay in (V)RAM after the last request.",
    )

    def __init__(self, *args, **kwargs) -> None:
        # Fetch keep_alive from kwargs, defaulting to "5m" if not provided.
        keep_alive = kwargs.pop("keep_alive", "5m")
        super().__init__(*args, **kwargs)
        self.keep_alive = keep_alive

    def chat(self, *args, **kwargs):
        kwargs["keep_alive"] = self.keep_alive
        return super().chat(*args, **kwargs)

    def stream_chat(self, *args, **kwargs):
        kwargs["keep_alive"] = self.keep_alive
        return super().stream_chat(*args, **kwargs)

    def complete(self, *args, **kwargs):
        kwargs["keep_alive"] = self.keep_alive
        return super().complete(*args, **kwargs)

    def stream_complete(self, *args, **kwargs):
        kwargs["keep_alive"] = self.keep_alive
        return super().stream_complete(*args, **kwargs)
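A minimal usage sketch (model name and duration are illustrative): keep_alive is popped in __init__ and then re-attached to every chat/completion call, so Ollama keeps the model resident for the configured window.

```python
from private_gpt.components.llm.custom.ollama import CustomOllama

llm = CustomOllama(
    model="mistral",
    base_url="http://localhost:11434",
    keep_alive="10m",  # stored on the instance; popped before llama-index's Ollama __init__ runs
)
# Every call now carries keep_alive="10m" in its kwargs:
response = llm.complete("Why is the sky blue?")
print(response)
```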
7 changes: 5 additions & 2 deletions private_gpt/components/llm/llm_component.py
@@ -108,7 +108,9 @@ def __init__(self, settings: Settings) -> None:
                 )
             case "ollama":
                 try:
-                    from llama_index.llms.ollama import Ollama  # type: ignore
+                    from private_gpt.components.llm.custom.ollama import (
+                        CustomOllama,  # type: ignore
+                    )
                 except ImportError as e:
                     raise ImportError(
                         "Ollama dependencies not found, install with `poetry install --extras llms-ollama`"
@@ -125,13 +127,14 @@ def __init__(self, settings: Settings) -> None:
                     "repeat_penalty": ollama_settings.repeat_penalty,  # ollama llama-cpp
                 }

-                self.llm = Ollama(
+                self.llm = CustomOllama(
                     model=ollama_settings.llm_model,
                     base_url=ollama_settings.api_base,
                     temperature=settings.llm.temperature,
                     context_window=settings.llm.context_window,
                     additional_kwargs=settings_kwargs,
                     request_timeout=ollama_settings.request_timeout,
+                    keep_alive=ollama_settings.keep_alive,
                 )
             case "azopenai":
                 try:
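Because CustomOllama subclasses llama-index's Ollama, it is a drop-in replacement and nothing else in LLMComponent has to change. A quick sketch of that property:

```python
from llama_index.llms.ollama import Ollama
from private_gpt.components.llm.custom.ollama import CustomOllama

llm = CustomOllama(model="mistral", base_url="http://localhost:11434")
assert isinstance(llm, Ollama)  # existing Ollama call sites keep working
```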
8 changes: 8 additions & 0 deletions private_gpt/settings/settings.py
@@ -209,6 +209,10 @@ class OllamaSettings(BaseModel):
         "http://localhost:11434",
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
+    embedding_api_base: str = Field(
+        api_base,  # defaults to the same value as api_base unless specified otherwise
+        description="Base URL of the Ollama embedding API. Defaults to the same value as api_base.",
+    )
     llm_model: str = Field(
         None,
         description="Model to use. Example: 'llama2-uncensored'.",
@@ -217,6 +221,10 @@ class OllamaSettings(BaseModel):
         None,
         description="Model to use. Example: 'nomic-embed-text'.",
     )
+    keep_alive: str = Field(
+        "5m",
+        description="Time the model stays loaded in memory after a request. Examples: '5m', '5h', '-1'.",
+    )
     tfs_z: float = Field(
         1.0,
         description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",
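For reference, Ollama's HTTP API accepts keep_alive as a duration string ('5m', '5h'), a number of seconds, '0' to unload the model right after responding, or a negative value such as '-1' to keep it loaded indefinitely. A sketch of the raw request this setting ultimately influences (endpoint per Ollama's documented API; values illustrative):

```python
import requests

resp = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "mistral",
        "prompt": "Why is the sky blue?",
        "stream": False,
        "keep_alive": "5m",  # how long the model stays in (V)RAM after this request
    },
    timeout=120,
)
print(resp.json()["response"])
```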
2 changes: 2 additions & 0 deletions settings-ollama.yaml
@@ -14,6 +14,8 @@ ollama:
   llm_model: mistral
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another Ollama instance
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
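If the embedding model runs on its own Ollama instance, the override could look like this (host name hypothetical, matching the comment above):

```yaml
ollama:
  llm_model: mistral
  embedding_model: nomic-embed-text
  api_base: http://localhost:11434
  embedding_api_base: http://ollama_embedding:11434  # dedicated embedding instance
  keep_alive: '-1'  # keep models loaded indefinitely
```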
2 changes: 2 additions & 0 deletions settings.yaml
@@ -95,6 +95,8 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434 # uncomment if your embedding model runs on another Ollama instance
   request_timeout: 120.0

 azopenai: