feat(llm): Ollama LLM-Embeddings decouple + longer keep_alive settings
Robinsane committed Apr 2, 2024
1 parent 83adc12 commit b3b0140
Showing 5 changed files with 33 additions and 1 deletion.
2 changes: 1 addition & 1 deletion private_gpt/components/embedding/embedding_component.py
@@ -70,7 +70,7 @@ def __init__(self, settings: Settings) -> None:
                 ollama_settings = settings.ollama
                 self.embedding_model = OllamaEmbedding(
                     model_name=ollama_settings.embedding_model,
-                    base_url=ollama_settings.api_base,
+                    base_url=ollama_settings.embedding_api_base,
                 )
             case "azopenai":
                 try:
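This one-line change decouples embeddings from the LLM: `OllamaEmbedding` now reads its base URL from the new `embedding_api_base` setting instead of reusing `api_base`, so the embedding model can be served by a different Ollama instance than the chat model. A minimal sketch of what this enables (the URL below is illustrative, not part of the commit):

```python
# Hypothetical standalone usage: embeddings come from a second Ollama
# instance, separate from the one answering chat/completion requests.
from llama_index.embeddings.ollama import OllamaEmbedding

embedding_model = OllamaEmbedding(
    model_name="nomic-embed-text",
    base_url="http://ollama_embedding:11434",  # would come from embedding_api_base
)
vector = embedding_model.get_text_embedding("hello world")
print(len(vector))  # embedding dimensionality, e.g. 768 for nomic-embed-text
```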
20 changes: 20 additions & 0 deletions private_gpt/components/llm/llm_component.py
@@ -1,4 +1,6 @@
 import logging
+from collections.abc import Callable
+from typing import Any
 
 from injector import inject, singleton
 from llama_index.core.llms import LLM, MockLLM
@@ -133,6 +135,24 @@ def __init__(self, settings: Settings) -> None:
                     additional_kwargs=settings_kwargs,
                     request_timeout=ollama_settings.request_timeout,
                 )
+
+                if (
+                    ollama_settings.keep_alive
+                    != ollama_settings.model_fields["keep_alive"].default
+                ):
+                    # Modify Ollama methods to use the "keep_alive" field.
+                    def add_keep_alive(func: Callable[..., Any]) -> Callable[..., Any]:
+                        def wrapper(*args: Any, **kwargs: Any) -> Any:
+                            kwargs["keep_alive"] = ollama_settings.keep_alive
+                            return func(*args, **kwargs)
+
+                        return wrapper
+
+                    Ollama.chat = add_keep_alive(Ollama.chat)
+                    Ollama.stream_chat = add_keep_alive(Ollama.stream_chat)
+                    Ollama.complete = add_keep_alive(Ollama.complete)
+                    Ollama.stream_complete = add_keep_alive(Ollama.stream_complete)
+
             case "azopenai":
                 try:
                     from llama_index.llms.azure_openai import (  # type: ignore
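Because the installed llama-index version apparently does not expose `keep_alive` as a first-class parameter, the commit monkey-patches the four `Ollama` call methods so every request forwards the configured value. A standalone sketch of the same wrapping technique (`FakeClient` and its method are hypothetical stand-ins for the `Ollama` class):

```python
from collections.abc import Callable
from typing import Any


def add_keep_alive(func: Callable[..., Any], keep_alive: str) -> Callable[..., Any]:
    """Return a wrapper that injects a keep_alive keyword into every call."""

    def wrapper(*args: Any, **kwargs: Any) -> Any:
        kwargs["keep_alive"] = keep_alive
        return func(*args, **kwargs)

    return wrapper


class FakeClient:
    def chat(self, message: str, **kwargs: Any) -> str:
        return f"message={message!r} kwargs={kwargs}"


# Patch at class level, as the commit does for Ollama.chat and friends.
FakeClient.chat = add_keep_alive(FakeClient.chat, "5h")
print(FakeClient().chat("hi"))  # message='hi' kwargs={'keep_alive': '5h'}
```

Patching the class affects every `Ollama` instance created afterwards in the process, which is the intended scope here since the component constructs a single client. The patch is also gated on the setting differing from its default, so plain `5m` setups keep the library's stock behavior.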
8 changes: 8 additions & 0 deletions private_gpt/settings/settings.py
@@ -209,6 +209,10 @@ class OllamaSettings(BaseModel):
         "http://localhost:11434",
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
+    embedding_api_base: str = Field(
+        api_base,  # default is same as api_base, unless specified differently
+        description="Base URL of Ollama embedding API. Defaults to the same value as api_base.",
+    )
     llm_model: str = Field(
         None,
         description="Model to use. Example: 'llama2-uncensored'.",
@@ -217,6 +221,10 @@
         None,
         description="Model to use. Example: 'nomic-embed-text'.",
     )
+    keep_alive: str = Field(
+        "5m",
+        description="Time the model will stay loaded in memory after a request. Examples: '5m', '5h', '-1' (keep loaded indefinitely).",
+    )
     tfs_z: float = Field(
         1.0,
         description="Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.",
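The new `keep_alive` setting maps onto the `keep_alive` field of Ollama's REST API, which controls how long a model stays loaded in memory after serving a request; Ollama's default is 5 minutes, and `-1` keeps the model loaded indefinitely. A minimal sketch of the same field used directly against the API (model name and prompt are illustrative; assumes a local Ollama server and the `requests` package):

```python
import requests

response = requests.post(
    "http://localhost:11434/api/generate",
    json={
        "model": "mistral",
        "prompt": "Why is the sky blue?",
        "stream": False,
        "keep_alive": "5h",  # stay loaded for 5 hours instead of the 5-minute default
    },
    timeout=120,
)
print(response.json()["response"])
```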
2 changes: 2 additions & 0 deletions settings-ollama.yaml
@@ -14,6 +14,8 @@ ollama:
   llm_model: mistral
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434  # uncomment if your embedding model runs on another Ollama instance
   tfs_z: 1.0 # Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0) will reduce the impact more, while a value of 1.0 disables this setting.
   top_k: 40 # Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)
   top_p: 0.9 # Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)
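Putting the two new settings together, a hypothetical fully decoupled configuration (host names are illustrative) could look like:

```yaml
ollama:
  llm_model: mistral
  embedding_model: nomic-embed-text
  api_base: http://ollama_llm:11434                   # instance serving chat/completions
  embedding_api_base: http://ollama_embedding:11434   # separate instance serving embeddings
  keep_alive: 1h
```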
2 changes: 2 additions & 0 deletions settings.yaml
@@ -99,6 +99,8 @@ ollama:
   llm_model: llama2
   embedding_model: nomic-embed-text
   api_base: http://localhost:11434
+  keep_alive: 5m
+  # embedding_api_base: http://ollama_embedding:11434  # uncomment if your embedding model runs on another Ollama instance
   request_timeout: 120.0
 
 azopenai:
