diff --git a/scripts/usecases/llm/llmServer.py b/scripts/usecases/llm/llmServer.py
index 7451fbf60..4083b3531 100644
--- a/scripts/usecases/llm/llmServer.py
+++ b/scripts/usecases/llm/llmServer.py
@@ -10,6 +10,7 @@
 # Global variable to indicate model loading status
 model="tiiuae/falcon-7b-instruct"
+
 model_loaded = False
 llm = None
@@ -29,7 +30,8 @@ async def startup_event():
 @app.get("/status")
 def get_status():
-    # Endpoint to return the model loading status
+    if not model_loaded:
+        return {"model": model, "loaded": model_loaded, "message": "Model is not loaded yet."}
     return {"model": model, "loaded": model_loaded}

 # Common function to generate text based on the prompt
@@ -60,4 +62,4 @@ async def query_post(request: Request) -> JSONResponse:
 if __name__ == "__main__":
     uvicorn.run(app, host="0.0.0.0", port=port)
-    
\ No newline at end of file
+    
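
Not part of the diff: a minimal client-side sketch of how the updated `/status` endpoint could be polled until the model finishes loading. The base URL and port are assumptions (the actual `port` value is configured in llmServer.py); adjust them to your deployment.

```python
# Sketch only: poll GET /status until "loaded" is true or a timeout expires.
# BASE_URL is an assumption; use the host/port that llmServer.py is started with.
import time
import requests

BASE_URL = "http://localhost:5001"  # assumed; not defined by this PR

def wait_until_loaded(timeout_s: int = 600, interval_s: int = 5) -> bool:
    """Return True once /status reports the model as loaded, False on timeout."""
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        status = requests.get(f"{BASE_URL}/status", timeout=10).json()
        if status.get("loaded"):
            return True
        # While the model is still loading, the endpoint now also returns a message field.
        print(status.get("message", "Model is not loaded yet."))
        time.sleep(interval_s)
    return False

if __name__ == "__main__":
    print("model ready:", wait_until_loaded())
```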