diff --git a/README.md b/README.md
index 3a6dbf5..3468e66 100644
--- a/README.md
+++ b/README.md
@@ -125,6 +125,7 @@ The official image is available at dockerhub: [ruecat/ollama-telegram](https://h
 | `USER_IDS` | Telegram user IDs of regular users.<br>These only can chat with the bot. | Yes | | 1234567890<br>**OR**<br>1234567890,0987654321, etc. |
 | `INITMODEL` | Default LLM | No | `llama2` | mistral:latest<br>mistral:7b-instruct |
 | `OLLAMA_BASE_URL` | Your OllamaAPI URL | No | | localhost<br>host.docker.internal |
+| `OLLAMA_PORT` | Your OllamaAPI port | No | 11434 | |
 
 ## Credits
diff --git a/bot/func/functions.py b/bot/func/functions.py
index 2388ed3..4518f07 100644
--- a/bot/func/functions.py
+++ b/bot/func/functions.py
@@ -13,6 +13,7 @@
 allowed_ids = list(map(int, os.getenv("USER_IDS", "").split(",")))
 admin_ids = list(map(int, os.getenv("ADMIN_IDS", "").split(",")))
 ollama_base_url = os.getenv("OLLAMA_BASE_URL")
+ollama_port = os.getenv("OLLAMA_PORT", "11434")
 log_level_str = os.getenv("LOG_LEVEL", "INFO")
 
 # --- Other
@@ -32,7 +33,7 @@
 # Model List
 async def model_list():
     async with aiohttp.ClientSession() as session:
-        url = f"http://{ollama_base_url}:11434/api/tags"
+        url = f"http://{ollama_base_url}:{ollama_port}/api/tags"
         async with session.get(url) as response:
             if response.status == 200:
                 data = await response.json()
@@ -42,7 +43,7 @@
 async def generate(payload: dict, modelname: str, prompt: str):
     # try:
     async with aiohttp.ClientSession() as session:
-        url = f"http://{ollama_base_url}:11434/api/chat"
+        url = f"http://{ollama_base_url}:{ollama_port}/api/chat"
         # Stream from API
         async with session.post(url, json=payload) as response:
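For anyone wanting to sanity-check this change locally, below is a minimal standalone sketch (not part of the diff) that builds the same `http://{ollama_base_url}:{ollama_port}/api/tags` URL as `model_list()` and lists the models the Ollama instance serves. It assumes an Ollama server is reachable at `OLLAMA_BASE_URL`:`OLLAMA_PORT`; the `list_models` helper name and the `__main__` runner are illustrative, not part of the bot's code.

```python
import asyncio
import os

import aiohttp

# Same env-var handling as bot/func/functions.py after this PR:
# OLLAMA_PORT falls back to Ollama's default port, 11434.
ollama_base_url = os.getenv("OLLAMA_BASE_URL", "localhost")
ollama_port = os.getenv("OLLAMA_PORT", "11434")


async def list_models() -> list[str]:
    # Identical URL shape to model_list() in the diff above.
    url = f"http://{ollama_base_url}:{ollama_port}/api/tags"
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            response.raise_for_status()
            data = await response.json()
            # /api/tags responds with {"models": [{"name": ...}, ...]}
            return [model["name"] for model in data.get("models", [])]


if __name__ == "__main__":
    print(asyncio.run(list_models()))
```

If this prints your installed models (e.g. with `OLLAMA_PORT=11435` against a remapped container), the new port plumbing is working end to end.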