-
Notifications
You must be signed in to change notification settings - Fork 0
/
langchain_RAG.py
67 lines (51 loc) · 2.42 KB
/
langchain_RAG.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import os
import chromadb
import phoenix as px
from langchain import hub
from langchain.llms import ollama
from langchain.chains import RetrievalQA
from langchain.embeddings.sentence_transformer import SentenceTransformerEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from phoenix.trace.langchain import LangChainInstrumentor
# Path configuration.
# NOTE(review): both paths are placeholders — "~" is NOT expanded by Python
# (that would need os.path.expanduser); set real directories before running.
chroma_path = "C:/Users/~/"   # persistence dir for chromadb.PersistentClient
document_dir = "C:/Users/~/"  # directory scanned for *.pdf files by load_documents()
# Start Phoenix server and Instrument LangChain
session = px.launch_app()             # launches the local Phoenix tracing UI
LangChainInstrumentor().instrument()  # auto-traces every LangChain call into Phoenix
def load_documents(document_dir):
    """Load every PDF in *document_dir* and split it into text chunks.

    Args:
        document_dir: Directory scanned (non-recursively) for files with a
            ``.pdf`` extension (case-insensitive).

    Returns:
        A list of LangChain ``Document`` chunks from ALL PDFs found.
        Returns an empty list when no PDF is present or every load fails.
        (Original bug: `chunks` was reassigned per file, so only the last
        PDF's chunks were returned, and the name was unbound when no PDF
        loaded successfully.)
    """
    loader_cls = PyPDFLoader  # Only use PyPDFLoader for this function
    # Splitter is loop-invariant — build it once, not per file.
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=4000, chunk_overlap=300)
    chunks = []  # accumulate across all files; safe default when none load
    for filename in os.listdir(document_dir):
        if os.path.splitext(filename)[1].lower() != ".pdf":
            continue  # skip non-PDF entries
        try:
            docs = loader_cls(os.path.join(document_dir, filename)).load()
            chunks.extend(text_splitter.split_documents(docs))
        except Exception as e:
            # Best-effort: log and keep going (original swallowed errors too,
            # but printed the literal "(unknown)" instead of the filename).
            print(f"Error loading {filename}: {e}")
    return chunks
_qa_chain = None  # lazily-built RetrievalQA chain, shared across calls


def RAG(query: str):
    """Answer *query* via retrieval-augmented generation over the PDF corpus.

    On first call, builds the pipeline once: loads and chunks the PDFs,
    embeds them into a persistent Chroma collection, and wires up the
    Mistral LLM with the rlm/rag-prompt-llama prompt. Subsequent calls
    reuse the cached chain. (Original bug: every call re-created the
    client, re-embedded and re-added the whole corpus — O(corpus) work
    per question and duplicate documents in the persistent collection —
    and re-pulled the hub prompt over the network.)

    Args:
        query: The user's natural-language question.

    Returns:
        The chain's result dict (contains the generated answer).
    """
    global _qa_chain
    if _qa_chain is None:
        chunks = load_documents(document_dir)
        client = chromadb.PersistentClient(path=chroma_path)
        embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
        db = Chroma(client=client, embedding_function=embedding_function,
                    collection_name="KnowledgeBase")
        db.add_documents(chunks)  # one-time indexing of the corpus
        llm = ollama.Ollama(model="mistral", temperature=0.7)  # setup llm
        rag_prompt_llama = hub.pull("rlm/rag-prompt-llama")    # setup prompt
        # Use db.as_retriever() for retrieval
        _qa_chain = RetrievalQA.from_chain_type(llm, retriever=db.as_retriever(),
                                                chain_type_kwargs={"prompt": rag_prompt_llama})
    return _qa_chain.invoke({"query": query})
# Show where the Phoenix trace UI is reachable. (Original bug: the bare
# expression `px.active_session().url` after the loop discarded its value
# and only ran after the user had already exited — it did nothing visible.)
print(f"Phoenix UI: {px.active_session().url}")
# Interactive REPL: forward each user line to the RAG pipeline until "exit".
while True:
    user_input = input("You: ")
    # If the user types "exit", exit the loop
    if user_input == "exit":
        break
    response = RAG(user_input)
    print(response)