main.py
from fastapi import FastAPI
from typing import Union
from utils import llm_processor
from model.llm_chat import LLMChatRequest
from huggingface_hub import login
import uvicorn
import os

app = FastAPI()


def pre_config():
    # Log in to the Hugging Face Hub using the token from the HF_TOKEN environment variable.
    login(os.getenv("HF_TOKEN"))


@app.on_event("startup")
async def startup_event():
    # Authenticate before the application starts serving requests.
    pre_config()
    print("FastAPI application is starting...")


@app.get("/health")
def health() -> dict:
    # Simple liveness check.
    return {"status": "ok"}


@app.post("/v0/llmchat")
async def llmchat(llm_chat_request: LLMChatRequest) -> dict:
    # Pull the requested model and query out of the request body and delegate
    # to the shared LLM processing helper.
    chat_model = llm_chat_request.llm_model
    chat_query = llm_chat_request.query
    return llm_processor.llm_common_processor(chat_model, chat_query)
# Run the FastAPI server using uvicorn
if __name__ == "__main__":
    port = int(os.environ.get("PORT", 80))
    uvicorn.run(app, host="0.0.0.0", port=port)
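
The request body for /v0/llmchat is defined by LLMChatRequest, which is imported from model/llm_chat.py but not shown here. Based on the attribute access in the handler (llm_chat_request.llm_model and llm_chat_request.query), a minimal sketch of that module might look like the following; the Pydantic field types are assumptions.

# model/llm_chat.py -- assumed shape of the request model (sketch, not the actual file)
from pydantic import BaseModel


class LLMChatRequest(BaseModel):
    llm_model: str  # identifier of the model to run, e.g. a Hugging Face repo id (assumed)
    query: str      # the user prompt to send to the model

Once the server is running, the endpoint can be exercised with a small client script like the one below; the host, port, and model name are placeholders.

# client_example.py -- hypothetical client for the /v0/llmchat endpoint
import requests

resp = requests.post(
    "http://localhost:80/v0/llmchat",
    json={"llm_model": "example-model", "query": "Hello, what can you do?"},
)
print(resp.json())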