feat: Table splitter #78

Merged (3 commits, Mar 6, 2024)
20 changes: 13 additions & 7 deletions api/ingest.py
@@ -35,16 +35,22 @@ async def ingest(payload: RequestPayload) -> Dict:
embedding_service, payload.google_drive
) # type: ignore TODO: Fix typing

-await asyncio.gather(
+tasks = [
     embedding_service.embed_and_upsert(
         chunks=chunks, encoder=encoder, index_name=payload.index_name
     ),
-    embedding_service.embed_and_upsert(
-        chunks=summary_documents,
-        encoder=encoder,
-        index_name=f"{payload.index_name}{SUMMARY_SUFFIX}",
-    ),
-)
+]
+
+if summary_documents and all(item is not None for item in summary_documents):
+    tasks.append(
+        embedding_service.embed_and_upsert(
+            chunks=summary_documents,
+            encoder=encoder,
+            index_name=f"{payload.index_name}{SUMMARY_SUFFIX}",
+        )
+    )
+
+await asyncio.gather(*tasks)

if payload.webhook_url:
async with aiohttp.ClientSession() as session:
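The hunk above stops scheduling the summary upsert unconditionally: it builds a task list, appends the summary embed-and-upsert only when every summary document is present, and then awaits everything together. A minimal, self-contained sketch of that pattern (the function and index names here are placeholders, not this repo's API):

```python
import asyncio
from typing import List, Optional


async def embed_and_upsert(chunks: List[str], index_name: str) -> None:
    # Stand-in for the real embedding + vector-database upsert.
    await asyncio.sleep(0)
    print(f"upserted {len(chunks)} chunks into {index_name!r}")


async def ingest(chunks: List[str], summary_documents: Optional[List[Optional[str]]]) -> None:
    tasks = [embed_and_upsert(chunks, "my-index")]

    # Only schedule the summary upsert when there is actual content to embed.
    if summary_documents and all(item is not None for item in summary_documents):
        tasks.append(embed_and_upsert([s for s in summary_documents if s], "my-index-summary"))

    await asyncio.gather(*tasks)


asyncio.run(ingest(["chunk a", "chunk b"], summary_documents=None))
```

With `summary_documents=None` (e.g. when no summaries were generated), only the main upsert runs instead of gathering an empty summary batch.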
150 changes: 40 additions & 110 deletions dev/walkthrough.ipynb
@@ -72,6 +72,45 @@
"print(response.json())"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"{'success': True, 'index_name': 'simonas-serverless-384'}\n"
]
}
],
"source": [
"# Ingest a file\n",
"url = f\"{API_URL}/api/v1/ingest\"\n",
"\n",
"payload = {\n",
" \"files\": [\n",
" {\n",
" \"name\": \"csv_chunking\",\n",
" \"url\": \"https://raw.githubusercontent.com/datasciencedojo/datasets/master/titanic.csv\"\n",
" }\n",
" ],\n",
" \"vector_database\": {\n",
" \"type\": \"pinecone\",\n",
" \"config\": {\n",
" \"api_key\": PINECONE_API_KEY,\n",
" \"host\": PINECONE_HOST,\n",
" }\n",
" },\n",
" \"index_name\": PINECONE_INDEX,\n",
"}\n",
"\n",
"response = requests.post(url, json=payload)\n",
"\n",
"print(response.json())"
]
},
{
"cell_type": "code",
"execution_count": 3,
@@ -83,116 +122,7 @@
"text": [
"{\n",
" \"success\": true,\n",
" \"data\": [\n",
" {\n",
" \"id\": \"75d3adef-0fec-496e-99a7-0510d9c2ed5d\",\n",
" \"doc_url\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"document_id\": \"doc_fdadb486-da0e-4bc3-ada5-d583831cb112\",\n",
" \"content\": \"2 Related work\\nMore speci\\ufb01cally on document chunking methods for RAG, there are stan- dard approaches being considered such as chunking text into spans of a given token length (e.g. 128 and 256) or chunking based on sentences. Open source projects already allow simple processing of documents (e.g. Unstructured4, Lla- maindex5 or Langchain 6), without explicitly considering the table structure on which these chunking strategies are applied. Even though di\\ufb00erent approaches are available, an exhaustive evaluation of chunking applied to RAG and speci\\ufb01cally to \\ufb01nancial reporting, except for some limited chunking analysis [14,36], is non-existent. In our work, we compare a broad range of chunking approaches in addition to more simple ones and provide an analysis of the outcomes of di\\ufb00erent methods when asking questions about di\\ufb00erent aspects of the reports.\",\n",
" \"source\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"source_type\": \".pdf\",\n",
" \"chunk_index\": null,\n",
" \"title\": \"2 Related work\",\n",
" \"token_count\": null,\n",
" \"page_number\": 3,\n",
" \"metadata\": {\n",
" \"filename\": \"tmpykpa2wwh.pdf\",\n",
" \"filetype\": \"application/pdf\",\n",
" \"languages\": [\n",
" \"eng\"\n",
" ],\n",
" \"parent_id\": \"5cdbed1de9473b8856ab0befd08ff7cb\"\n",
" },\n",
" \"dense_embedding\": null\n",
" },\n",
" {\n",
" \"id\": \"58353d3f-a938-43f7-bde8-0e99125fa2f9\",\n",
" \"doc_url\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"document_id\": \"doc_fdadb486-da0e-4bc3-ada5-d583831cb112\",\n",
" \"content\": \"Table 3. Chunks statistics for basic chunking elements and Unstructured elements\\nResults in table 5 show that element-based chunking strategies o\\ufb00er the best question-answering accuracy, which is consistent with page retrieval and para- graph retrieval accuracy. Lastly, our approach stands out for its e\\ufb03ciency. Not only is element-based chunking generalizable without the need to select the chunk size, but when com- pared to the aggregation results that yield the highest retrieval scores. Element- based chunking achieves the highest retrieval scores with only half the number of chunks required compared to methods that do not consider the structure of the documents (62,529 v.s. 112,155). This can reduce the indexing cost and im- prove query latency because there are only half as many vectors to index for the vectordb that stores the chunks. This underscores the e\\ufb00ectiveness of our solu- tion in optimizing the balance between performance and computational resource requirements.\",\n",
" \"source\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"source_type\": \".pdf\",\n",
" \"chunk_index\": null,\n",
" \"title\": \"Table 3. Chunks statistics for basic chunking elements and Unstructured elements\",\n",
" \"token_count\": null,\n",
" \"page_number\": 9,\n",
" \"metadata\": {\n",
" \"filename\": \"tmpykpa2wwh.pdf\",\n",
" \"filetype\": \"application/pdf\",\n",
" \"languages\": [\n",
" \"eng\"\n",
" ],\n",
" \"parent_id\": \"53ffedc9520f52ef2c8e4568301c8530\"\n",
" },\n",
" \"dense_embedding\": null\n",
" },\n",
" {\n",
" \"id\": \"e3caf266-27a8-4654-94ec-9b82ead3c9ce\",\n",
" \"doc_url\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"document_id\": \"doc_fdadb486-da0e-4bc3-ada5-d583831cb112\",\n",
" \"content\": \"Table 3. Chunks statistics for basic chunking elements and Unstructured elements\\nRetrieval Accuracy Secondly, we evaluate the capabilities of each chunking strategy in terms of retrieval accuracy. We use the page numbers in the ground truth to calculate the page-level retrieval accuracy, and we use ROGUE [24] and BLEU [32] scores to evaluate the accuracy of paragraph-level retrieval compared to the ground truth evidence paragraphs. As shown in Table 4, when compared to Unstructured element-based chunk- ing strategies, basic chunking strategies seem to have higher page-level retrieval accuracy but lower paragraph-level accuracy on average. Additionally, basic chunking strategies also lack consistency between page-level and paragraph-level accuracy; higher page-level accuracy doesn\\u2019t ensure higher paragraph-level ac- curacy. For example, Base 128 has the second highest page-level accuracy but\",\n",
" \"source\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"source_type\": \".pdf\",\n",
" \"chunk_index\": null,\n",
" \"title\": \"Table 3. Chunks statistics for basic chunking elements and Unstructured elements\",\n",
" \"token_count\": null,\n",
" \"page_number\": 9,\n",
" \"metadata\": {\n",
" \"filename\": \"tmpykpa2wwh.pdf\",\n",
" \"filetype\": \"application/pdf\",\n",
" \"languages\": [\n",
" \"eng\"\n",
" ],\n",
" \"parent_id\": \"53ffedc9520f52ef2c8e4568301c8530\"\n",
" },\n",
" \"dense_embedding\": null\n",
" },\n",
" {\n",
" \"id\": \"14257177-480d-45cf-9759-f6e8b1bd60b5\",\n",
" \"doc_url\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"document_id\": \"doc_fdadb486-da0e-4bc3-ada5-d583831cb112\",\n",
" \"content\": \"5 Discussion\\new have observed that using basic 512 chunking strategies produces results most similar to the Unstructured element-based approach, which may be due to the fact that 512 tokens share a similar length with the token size within our element-based chunks and capture a long context, but fail keep a coherent context in some cases, leaving out relevant information required for Q&A. This is further observed when considering the ROGUE and BLEU scores in table 4, where the chunk contexts for the baseline have lower scores. These \\ufb01ndings support existing research stating that the best basic chunk size varies from data to data [3]. These results show, as well, that our method adapts to di\\ufb00erent documents without tuning. Our method relies on the struc-\",\n",
" \"source\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"source_type\": \".pdf\",\n",
" \"chunk_index\": null,\n",
" \"title\": \"5 Discussion\",\n",
" \"token_count\": null,\n",
" \"page_number\": 11,\n",
" \"metadata\": {\n",
" \"filename\": \"tmpykpa2wwh.pdf\",\n",
" \"filetype\": \"application/pdf\",\n",
" \"languages\": [\n",
" \"eng\"\n",
" ],\n",
" \"parent_id\": \"2a6506945581218449cc497a03e8cfcd\"\n",
" },\n",
" \"dense_embedding\": null\n",
" },\n",
" {\n",
" \"id\": \"94411542-6ad8-4454-ad42-d0fbf9f5b4f9\",\n",
" \"doc_url\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"document_id\": \"doc_fdadb486-da0e-4bc3-ada5-d583831cb112\",\n",
" \"content\": \"3.4 Chunking\\nThe list of elements considered are provided by the Unstructured9 open source library. From the set of processing strategies, 9 https://unstructured-io.github.io/unstructured/introduction.html#\",\n",
" \"source\": \"https://arxiv.org/pdf/2402.05131.pdf\",\n",
" \"source_type\": \".pdf\",\n",
" \"chunk_index\": null,\n",
" \"title\": \"3.4 Chunking\",\n",
" \"token_count\": null,\n",
" \"page_number\": 6,\n",
" \"metadata\": {\n",
" \"filename\": \"tmpykpa2wwh.pdf\",\n",
" \"filetype\": \"application/pdf\",\n",
" \"languages\": [\n",
" \"eng\"\n",
" ],\n",
" \"links\": [\n",
" \"{'text': '9https :// unstructured - io . github . io / unstructured / introduction . html', 'url': 'https://unstructured-io.github.io/unstructured/introduction.html#elements', 'start_index': 313}\"\n",
" ],\n",
" \"parent_id\": \"dac017d1d3734f5431cae57dcc72f748\"\n",
" },\n",
" \"dense_embedding\": null\n",
" }\n",
" ]\n",
" \"data\": []\n",
"}\n"
]
}
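The new walkthrough cell above ingests titanic.csv through `/api/v1/ingest`, which is the CSV/table path this PR's splitter targets, and the stored output of the following query cell is trimmed to an empty `data` array. The splitter implementation itself isn't part of the hunks shown here, so purely as an illustration of the general idea (an assumption, not this PR's code), a table splitter could chunk a CSV into row groups that each repeat the header so every chunk stays self-describing:

```python
import csv
import io
from typing import List

import requests


def split_table(csv_text: str, rows_per_chunk: int = 20) -> List[str]:
    """Split CSV text into chunks of rows, repeating the header row in each chunk."""
    rows = list(csv.reader(io.StringIO(csv_text)))
    header, body = rows[0], rows[1:]
    chunks = []
    for start in range(0, len(body), rows_per_chunk):
        buffer = io.StringIO()
        csv.writer(buffer).writerows([header] + body[start : start + rows_per_chunk])
        chunks.append(buffer.getvalue())
    return chunks


csv_text = requests.get(
    "https://raw.githubusercontent.com/datasciencedojo/datasets/master/titanic.csv"
).text
chunks = split_table(csv_text)
print(len(chunks), "chunks; first chunk header:", chunks[0].splitlines()[0])
```

Keeping the header in every chunk means each chunk can be embedded or summarized without losing the column semantics.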
2 changes: 1 addition & 1 deletion models/document.py
@@ -13,9 +13,9 @@ class BaseDocument(BaseModel):

class BaseDocumentChunk(BaseModel):
id: str
-doc_url: str | None = None
 document_id: str
 content: str
+doc_url: str | None = None
source: str | None = None
source_type: str | None = None
chunk_index: int | None = None
1 change: 1 addition & 0 deletions service/code_interpreter.py
@@ -142,6 +142,7 @@ async def generate_code(
) -> str:
content = self.generate_prompt(query=query)
completion = await client.chat.completions.create(
+temperature=0,
messages=[
{
"role": "system",
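Pinning `temperature=0` makes the code-generation completion effectively deterministic (greedy sampling), which is what you want when the model's output is executed as code. A minimal sketch of the same call shape against the OpenAI async client (the model name and prompts are placeholders, not this repo's values):

```python
import asyncio

from openai import AsyncOpenAI

client = AsyncOpenAI()  # expects OPENAI_API_KEY in the environment


async def generate_code(query: str) -> str:
    completion = await client.chat.completions.create(
        model="gpt-3.5-turbo",  # placeholder model name
        temperature=0,  # deterministic output for code generation
        messages=[
            {"role": "system", "content": "Return only runnable Python code."},
            {"role": "user", "content": query},
        ],
    )
    return completion.choices[0].message.content or ""


print(asyncio.run(generate_code("Average the 'Age' column of titanic.csv")))
```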
15 changes: 12 additions & 3 deletions service/embedding.py
@@ -81,7 +81,6 @@ async def _partition_file(
f"Downloading and extracting elements from {file.url}, "
f"using `{strategy}` strategy"
)
-print(file.suffix)
with NamedTemporaryFile(suffix=file.suffix, delete=True) as temp_file:
with requests.get(url=file.url) as response:
temp_file.write(response.content)
@@ -157,6 +156,7 @@ async def generate_chunks(
) -> List[BaseDocumentChunk]:
doc_chunks = []
for file in tqdm(self.files, desc="Generating chunks"):
+logger.info(f"Splitting method: {config.splitter.name}")
try:
chunks = []
if config.splitter.name == "by_title":
@@ -247,8 +247,17 @@ async def embed_batch(
) -> List[BaseDocumentChunk]:
async with sem:
try:
-texts = [chunk.content for chunk in chunks_batch]
-embeddings = encoder(texts)
+chunk_texts = []
+for chunk in chunks_batch:
+    if not chunk:
+        logger.warning("Empty chunk encountered")
+        continue
+    chunk_texts.append(chunk.content)
+
+if not chunk_texts:
+    logger.warning(f"No content to embed in batch {chunks_batch}")
+    return []
+embeddings = encoder(chunk_texts)
for chunk, embedding in zip(chunks_batch, embeddings):
chunk.dense_embedding = np.array(embedding).tolist()
pbar.update(len(chunks_batch)) # Update the progress bar
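The `embed_batch` change guards against empty chunks before calling the encoder and bails out early when nothing is left to embed, instead of passing possibly-empty content straight through. A standalone sketch of the same guard (the dataclass and the toy encoder are stand-ins, not the project's types):

```python
import logging
from dataclasses import dataclass
from typing import Callable, List, Optional

logger = logging.getLogger(__name__)


@dataclass
class Chunk:
    content: str
    dense_embedding: Optional[List[float]] = None


def embed_batch(
    chunks_batch: List[Optional[Chunk]],
    encoder: Callable[[List[str]], List[List[float]]],
) -> List[Chunk]:
    # Keep only non-empty chunks and collect their text.
    valid_chunks = [chunk for chunk in chunks_batch if chunk]
    if not valid_chunks:
        logger.warning("No content to embed in batch")
        return []
    embeddings = encoder([chunk.content for chunk in valid_chunks])
    for chunk, embedding in zip(valid_chunks, embeddings):
        chunk.dense_embedding = embedding
    return valid_chunks


# Toy encoder: one-dimensional embedding of text length.
print(embed_batch([Chunk("hello"), None, Chunk("world")], encoder=lambda texts: [[float(len(t))] for t in texts]))
```

In this sketch the embeddings are zipped against the same filtered list that produced the texts, which keeps chunks and vectors aligned even when some batch entries are skipped.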
1 change: 0 additions & 1 deletion service/ingest.py
@@ -14,7 +14,6 @@ async def handle_urls(
):
embedding_service.files = files
chunks = await embedding_service.generate_chunks(config=config)
-print(chunks)
summary_documents = await embedding_service.generate_summary_documents(
documents=chunks
)