Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: hugging face in demo #15

Merged
merged 2 commits into from
Jul 23, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions demo/.env_example
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
HUGGINGFACEHUB_API_TOKEN=<your token>
OPENAI_API_KEY=<your token>
3 changes: 2 additions & 1 deletion examples/README.md → demo/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
pip install -r requirements.txt
```

Optional: Rename `.env_example` to `.env` and fill in the values.

### Running

Expand All @@ -13,4 +14,4 @@ streamlit run main.py

### Usage

Access the application at http://localhost:8501
Access the application at http://localhost:8501
64 changes: 64 additions & 0 deletions demo/main.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from dotenv import load_dotenv
import os
import openai
import streamlit as st

# Load environment variables from a local .env file, if present.
load_dotenv()

# Fallback API keys taken from the environment (.env).
huggingface_api_key = os.getenv("HUGGINGFACEHUB_API_TOKEN")
openai_api_key = os.getenv("OPENAI_API_KEY")

with st.sidebar:
    api_key = st.text_input("API Key", key="chatbot_api_key", type="password")
    model_name = st.selectbox(
        "Chat API Endpoint",
        options=["gpt-4", "hf-gpt2"],
        index=0,
    )

# Genoss exposes an OpenAI-compatible API on this local endpoint.
genoss_endpoint = "http://localhost:4321"

st.title("🐂🌈 Genoss")
if "messages" not in st.session_state:
    st.session_state["messages"] = [
        {"role": "assistant", "content": "How can I help you?"}
    ]

# Replay the conversation so far; every entry is a {"role", "content"} dict.
for msg in st.session_state.messages:
    st.chat_message(msg["role"]).write(msg["content"])

if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
    st.chat_message("user").write(prompt)

    # Prefer the key typed in the sidebar; otherwise fall back to the .env
    # value matching the selected backend ("hf-*" models use Hugging Face).
    if not api_key:
        api_key = (
            huggingface_api_key if model_name.startswith("hf") else openai_api_key
        )
    if not api_key:
        st.error("Please provide an API key")
        st.stop()

    openai.api_key = api_key
    openai.api_base = genoss_endpoint

    try:
        response = openai.ChatCompletion.create(
            model=model_name,
            messages=st.session_state.messages,
        )
        # Normalize to a plain dict so the history schema stays uniform;
        # appending the raw response object (or a bare error string, as the
        # previous code did) breaks msg["role"] on the next rerun.
        reply = response.choices[0].message
        assistant_msg = {"role": "assistant", "content": reply["content"]}
    except Exception as e:
        # Surface any backend failure to the user as an assistant message.
        assistant_msg = {"role": "assistant", "content": f"Error: {e}"}

    st.session_state.messages.append(assistant_msg)
    st.chat_message("assistant").write(assistant_msg["content"])
File renamed without changes.
49 changes: 0 additions & 49 deletions examples/main.py

This file was deleted.

9 changes: 4 additions & 5 deletions genoss/llm/base_genoss.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
from abc import abstractmethod
from ast import Not
import re
from typing import Dict, List

from pydantic import BaseModel
Expand All @@ -10,11 +12,8 @@ class BaseGenossLLM(BaseModel):

@abstractmethod
def generate_answer(self, prompt: str) -> Dict:
    """Generate a chat-completion answer for *prompt*.

    Abstract: concrete LLM subclasses must override this.

    Raises:
        NotImplementedError: always, in this base implementation.
    """
    # `raise`, not `return`: returning the NotImplementedError class hands
    # callers the exception type as a value instead of signaling an error.
    raise NotImplementedError

@abstractmethod
def generate_embedding(self, text: str) -> List[float]:
    """Generate an embedding vector for *text*.

    Abstract: concrete LLM subclasses must override this.

    Raises:
        NotImplementedError: always, in this base implementation.
    """
    # `raise`, not `return`: returning the NotImplementedError class hands
    # callers the exception type as a value instead of signaling an error.
    raise NotImplementedError
4 changes: 0 additions & 4 deletions genoss/llm/fake_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,16 +18,12 @@ class FakeLLM(BaseGenossLLM):
description: str = "Fake LLM for testing purpose"

def generate_answer(self, question: str) -> Dict:
print("Generating Answer")

llm = FakeListLLM(responses=["Hello from FakeLLM!"])

llm_chain = LLMChain(llm=llm, prompt=prompt_template)
response_text = llm_chain(question)

print("###################")
print(response_text)

answer = response_text["text"]
chat_completion = ChatCompletion(
model=self.name, answer=answer, question=question
Expand Down
2 changes: 0 additions & 2 deletions genoss/llm/hf_hub/base_hf_hub.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,8 +32,6 @@ def generate_answer(self, question: str) -> Dict:
Generate answer from prompt
"""

print("Generating Answer...")

llm = HuggingFaceHub(
repo_id=self.repo_id, huggingfacehub_api_token=self.huggingfacehub_api_token
) # type: ignore
Expand Down
4 changes: 1 addition & 3 deletions genoss/llm/local/gpt4all.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,16 +17,14 @@ class Gpt4AllLLM(BaseLocalLLM):
model_path: str = "./local_models/ggml-gpt4all-j-v1.3-groovy.bin"

def generate_answer(self, question: str) -> Dict:
print("Generating Answer")

llm = GPT4All(
model=self.model_path, # pyright: ignore reportPrivateUsage=none
)

llm_chain = LLMChain(llm=llm, prompt=prompt_template)
response_text = llm_chain(question)
print("###################")
print(response_text)

answer = response_text["text"]

chat_completion = ChatCompletion(
Expand Down
7 changes: 2 additions & 5 deletions genoss/llm/openai/openai_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
from genoss.entities.chat.chat_completion import ChatCompletion
from genoss.llm.base_genoss import BaseGenossLLM
from genoss.prompts.prompt_template import prompt_template
from langchain.chat_models import ChatOpenAI


class OpenAILLM(BaseGenossLLM):
Expand All @@ -28,16 +29,12 @@ def __init__(self, model_name: str, api_key, *args, **kwargs):
self.model_name = model_name

def generate_answer(self, question: str) -> Dict:
print("Generating Answer")

llm = OpenAIChat(model_name=self.model_name, openai_api_key=self.openai_api_key)
llm = ChatOpenAI(model_name=self.model_name, openai_api_key=self.openai_api_key)

llm_chain = LLMChain(llm=llm, prompt=prompt_template)
response_text = llm_chain(question)

print("###################")
print(response_text)

answer = response_text["text"]
chat_completion = ChatCompletion(
model=self.name, answer=answer, question=question
Expand Down
Loading