Upgraded llama_index to 0.10.x #1653

Closed
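
This PR bumps llama-index from the pinned 0.9.3 to the 0.10.x line. In 0.10 the package was reorganized into llama-index-core plus per-integration packages, and the old 0.9-style API is kept available under the llama_index.legacy namespace, so the minimal migration applied here is a mechanical rewrite of import paths. A sketch of the pattern, using imports taken from the hunks below:

# llama-index 0.9.x
from llama_index import Document, VectorStoreIndex
from llama_index.llms import ChatMessage, MessageRole

# llama-index 0.10.x, via the legacy compatibility namespace
from llama_index.legacy import Document, VectorStoreIndex
from llama_index.legacy.llms import ChatMessage, MessageRole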
1,342 changes: 635 additions & 707 deletions poetry.lock

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion private_gpt/components/embedding/custom/sagemaker.py
@@ -3,7 +3,7 @@
 from typing import Any

 import boto3
-from llama_index.embeddings.base import BaseEmbedding
+from llama_index.legacy.embeddings.base import BaseEmbedding
 from pydantic import Field, PrivateAttr

6 changes: 3 additions & 3 deletions private_gpt/components/embedding/embedding_component.py
@@ -1,8 +1,8 @@
 import logging

 from injector import inject, singleton
-from llama_index import MockEmbedding
-from llama_index.embeddings.base import BaseEmbedding
+from llama_index.legacy import MockEmbedding
+from llama_index.legacy.embeddings.base import BaseEmbedding

 from private_gpt.paths import models_cache_path
 from private_gpt.settings.settings import Settings
@@ -20,7 +20,7 @@ def __init__(self, settings: Settings) -> None:
         logger.info("Initializing the embedding model in mode=%s", embedding_mode)
         match embedding_mode:
             case "local":
-                from llama_index.embeddings import HuggingFaceEmbedding
+                from llama_index.legacy.embeddings import HuggingFaceEmbedding

                 self.embedding_model = HuggingFaceEmbedding(
                     model_name=settings.local.embedding_hf_model_name,
8 changes: 4 additions & 4 deletions private_gpt/components/ingest/ingest_component.py
@@ -8,16 +8,16 @@
 from pathlib import Path
 from typing import Any

-from llama_index import (
+from llama_index.legacy import (
     Document,
     ServiceContext,
     StorageContext,
     VectorStoreIndex,
     load_index_from_storage,
 )
-from llama_index.data_structs import IndexDict
-from llama_index.indices.base import BaseIndex
-from llama_index.ingestion import run_transformations
+from llama_index.legacy.data_structs import IndexDict
+from llama_index.legacy.indices.base import BaseIndex
+from llama_index.legacy.ingestion import run_transformations

 from private_gpt.components.ingest.ingest_helper import IngestionHelper
 from private_gpt.paths import local_data_path
6 changes: 3 additions & 3 deletions private_gpt/components/ingest/ingest_helper.py
@@ -1,9 +1,9 @@
 import logging
 from pathlib import Path

-from llama_index import Document
-from llama_index.readers import JSONReader, StringIterableReader
-from llama_index.readers.file.base import DEFAULT_FILE_READER_CLS
+from llama_index.legacy import Document
+from llama_index.legacy.readers import JSONReader, StringIterableReader
+from llama_index.legacy.readers.file.base import DEFAULT_FILE_READER_CLS

 logger = logging.getLogger(__name__)

16 changes: 8 additions & 8 deletions private_gpt/components/llm/custom/sagemaker.py
@@ -7,32 +7,32 @@
 from typing import TYPE_CHECKING, Any

 import boto3  # type: ignore
-from llama_index.bridge.pydantic import Field
-from llama_index.llms import (
+from llama_index.legacy.bridge.pydantic import Field
+from llama_index.legacy.llms import (
     CompletionResponse,
     CustomLLM,
     LLMMetadata,
 )
-from llama_index.llms.base import (
+from llama_index.legacy.llms.base import (
     llm_chat_callback,
     llm_completion_callback,
 )
-from llama_index.llms.generic_utils import (
+from llama_index.legacy.llms.generic_utils import (
     completion_response_to_chat_response,
     stream_completion_response_to_chat_response,
 )
-from llama_index.llms.llama_utils import (
+from llama_index.legacy.llms.llama_utils import (
     completion_to_prompt as generic_completion_to_prompt,
 )
-from llama_index.llms.llama_utils import (
+from llama_index.legacy.llms.llama_utils import (
     messages_to_prompt as generic_messages_to_prompt,
 )

 if TYPE_CHECKING:
     from collections.abc import Sequence

-    from llama_index.callbacks import CallbackManager
-    from llama_index.llms import (
+    from llama_index.legacy.callbacks import CallbackManager
+    from llama_index.legacy.llms import (
         ChatMessage,
         ChatResponse,
         ChatResponseGen,
14 changes: 7 additions & 7 deletions private_gpt/components/llm/llm_component.py
@@ -1,9 +1,9 @@
 import logging

 from injector import inject, singleton
-from llama_index import set_global_tokenizer
-from llama_index.llms import MockLLM
-from llama_index.llms.base import LLM
+from llama_index.legacy import set_global_tokenizer
+from llama_index.legacy.llms import MockLLM
+from llama_index.llms.openai.base import LLM
 from transformers import AutoTokenizer  # type: ignore

 from private_gpt.components.llm.prompt_helper import get_prompt_style
@@ -31,7 +31,7 @@ def __init__(self, settings: Settings) -> None:
         logger.info("Initializing the LLM in mode=%s", llm_mode)
         match settings.llm.mode:
             case "local":
-                from llama_index.llms import LlamaCPP
+                from llama_index.legacy.llms import LlamaCPP

                 prompt_style = get_prompt_style(settings.local.prompt_style)

@@ -58,7 +58,7 @@ def __init__(self, settings: Settings) -> None:
                     context_window=settings.llm.context_window,
                 )
             case "openai":
-                from llama_index.llms import OpenAI
+                from llama_index.legacy.llms import OpenAI

                 openai_settings = settings.openai
                 self.llm = OpenAI(
@@ -67,7 +67,7 @@ def __init__(self, settings: Settings) -> None:
                     model=openai_settings.model,
                 )
             case "openailike":
-                from llama_index.llms import OpenAILike
+                from llama_index.legacy.llms import OpenAILike

                 openai_settings = settings.openai
                 self.llm = OpenAILike(
@@ -81,7 +81,7 @@ def __init__(self, settings: Settings) -> None:
             case "mock":
                 self.llm = MockLLM()
             case "ollama":
-                from llama_index.llms import Ollama
+                from llama_index.legacy.llms import Ollama

                 ollama_settings = settings.ollama
                 self.llm = Ollama(
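
The legacy namespace keeps the 0.9 behavior but is a compatibility shim; in 0.10 each LLM backend also ships as its own integration package. A hedged sketch of what the non-legacy equivalents of two of these branches could look like, assuming the llama-index-llms-openai and llama-index-llms-ollama packages are installed (imports follow 0.10 conventions and are not part of this diff; model names and URLs are illustrative):

# pip install llama-index-llms-openai llama-index-llms-ollama
from llama_index.llms.ollama import Ollama
from llama_index.llms.openai import OpenAI

# "openai" mode (illustrative key/model, not from this PR)
llm = OpenAI(api_key="sk-...", model="gpt-3.5-turbo")

# "ollama" mode (default local Ollama endpoint, illustrative)
llm = Ollama(model="llama2", base_url="http://localhost:11434")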
4 changes: 2 additions & 2 deletions private_gpt/components/llm/prompt_helper.py
@@ -3,8 +3,8 @@
 from collections.abc import Sequence
 from typing import Any, Literal

-from llama_index.llms import ChatMessage, MessageRole
-from llama_index.llms.llama_utils import (
+from llama_index.legacy.llms import ChatMessage, MessageRole
+from llama_index.legacy.llms.llama_utils import (
     completion_to_prompt,
     messages_to_prompt,
 )
6 changes: 3 additions & 3 deletions private_gpt/components/node_store/node_store_component.py
@@ -1,9 +1,9 @@
 import logging

 from injector import inject, singleton
-from llama_index.storage.docstore import BaseDocumentStore, SimpleDocumentStore
-from llama_index.storage.index_store import SimpleIndexStore
-from llama_index.storage.index_store.types import BaseIndexStore
+from llama_index.legacy.storage.docstore import BaseDocumentStore, SimpleDocumentStore
+from llama_index.legacy.storage.index_store import SimpleIndexStore
+from llama_index.legacy.storage.index_store.types import BaseIndexStore

 from private_gpt.paths import local_data_path

8 changes: 4 additions & 4 deletions private_gpt/components/vector_store/batched_chroma.py
@@ -1,9 +1,9 @@
 from typing import Any

-from llama_index.schema import BaseNode, MetadataMode
-from llama_index.vector_stores import ChromaVectorStore
-from llama_index.vector_stores.chroma import chunk_list
-from llama_index.vector_stores.utils import node_to_metadata_dict
+from llama_index.legacy.schema import BaseNode, MetadataMode
+from llama_index.legacy.vector_stores import ChromaVectorStore
+from llama_index.legacy.vector_stores.chroma import chunk_list
+from llama_index.legacy.vector_stores.utils import node_to_metadata_dict


 class BatchedChromaVectorStore(ChromaVectorStore):
10 changes: 5 additions & 5 deletions private_gpt/components/vector_store/vector_store_component.py
@@ -2,9 +2,9 @@
 import typing

 from injector import inject, singleton
-from llama_index import VectorStoreIndex
-from llama_index.indices.vector_store import VectorIndexRetriever
-from llama_index.vector_stores.types import VectorStore
+from llama_index.legacy import VectorStoreIndex
+from llama_index.legacy.indices.vector_store import VectorIndexRetriever
+from llama_index.legacy.vector_stores.types import VectorStore

 from private_gpt.components.vector_store.batched_chroma import BatchedChromaVectorStore
 from private_gpt.open_ai.extensions.context_filter import ContextFilter
@@ -41,7 +41,7 @@ class VectorStoreComponent:
     def __init__(self, settings: Settings) -> None:
         match settings.vectorstore.database:
             case "pgvector":
-                from llama_index.vector_stores import PGVectorStore
+                from llama_index.legacy.vector_stores import PGVectorStore

                 if settings.pgvector is None:
                     raise ValueError(
@@ -85,7 +85,7 @@ def __init__(self, settings: Settings) -> None:
                 )

             case "qdrant":
-                from llama_index.vector_stores.qdrant import QdrantVectorStore
+                from llama_index.legacy.vector_stores.qdrant import QdrantVectorStore
                 from qdrant_client import QdrantClient

                 if settings.qdrant is None:
2 changes: 1 addition & 1 deletion private_gpt/main.py
@@ -6,6 +6,6 @@
 from private_gpt.launcher import create_app

 # Add LlamaIndex simple observability
-llama_index.set_global_handler("simple")
+llama_index.legacy.set_global_handler("simple")

 app = create_app(global_injector)
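
One thing to double-check with this hunk: llama_index.legacy.set_global_handler("simple") only resolves if the llama_index.legacy submodule is imported somewhere above, since importing the bare llama_index namespace package does not necessarily expose .legacy as an attribute. A minimal sketch of the assumed surrounding import, not shown in this diff:

import llama_index.legacy  # makes the attribute access below resolve

llama_index.legacy.set_global_handler("simple")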
2 changes: 1 addition & 1 deletion private_gpt/open_ai/openai_models.py
@@ -3,7 +3,7 @@
 from collections.abc import Iterator
 from typing import Literal

-from llama_index.llms import ChatResponse, CompletionResponse
+from llama_index.legacy.llms import ChatResponse, CompletionResponse
 from pydantic import BaseModel, Field

 from private_gpt.server.chunks.chunks_service import Chunk
2 changes: 1 addition & 1 deletion private_gpt/server/chat/chat_router.py
@@ -1,5 +1,5 @@
 from fastapi import APIRouter, Depends, Request
-from llama_index.llms import ChatMessage, MessageRole
+from llama_index.legacy.llms import ChatMessage, MessageRole
 from pydantic import BaseModel
 from starlette.responses import StreamingResponse

12 changes: 6 additions & 6 deletions private_gpt/server/chat/chat_service.py
@@ -1,14 +1,14 @@
 from dataclasses import dataclass

 from injector import inject, singleton
-from llama_index import ServiceContext, StorageContext, VectorStoreIndex
-from llama_index.chat_engine import ContextChatEngine, SimpleChatEngine
-from llama_index.chat_engine.types import (
+from llama_index.legacy import ServiceContext, StorageContext, VectorStoreIndex
+from llama_index.legacy.chat_engine import ContextChatEngine, SimpleChatEngine
+from llama_index.legacy.chat_engine.types import (
     BaseChatEngine,
 )
-from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
-from llama_index.llms import ChatMessage, MessageRole
-from llama_index.types import TokenGen
+from llama_index.legacy.indices.postprocessor import MetadataReplacementPostProcessor
+from llama_index.legacy.llms import ChatMessage, MessageRole
+from llama_index.legacy.types import TokenGen
 from pydantic import BaseModel

 from private_gpt.components.embedding.embedding_component import EmbeddingComponent
6 changes: 3 additions & 3 deletions private_gpt/server/chunks/chunks_service.py
@@ -1,8 +1,8 @@
 from typing import TYPE_CHECKING, Literal

 from injector import inject, singleton
-from llama_index import ServiceContext, StorageContext, VectorStoreIndex
-from llama_index.schema import NodeWithScore
+from llama_index.legacy import ServiceContext, StorageContext, VectorStoreIndex
+from llama_index.legacy.schema import NodeWithScore
 from pydantic import BaseModel, Field

 from private_gpt.components.embedding.embedding_component import EmbeddingComponent
@@ -15,7 +15,7 @@
 from private_gpt.server.ingest.model import IngestedDoc

 if TYPE_CHECKING:
-    from llama_index.schema import RelatedNodeInfo
+    from llama_index.legacy.schema import RelatedNodeInfo


 class Chunk(BaseModel):
4 changes: 2 additions & 2 deletions private_gpt/server/ingest/ingest_service.py
@@ -4,11 +4,11 @@
 from typing import AnyStr, BinaryIO

 from injector import inject, singleton
-from llama_index import (
+from llama_index.legacy import (
     ServiceContext,
     StorageContext,
 )
-from llama_index.node_parser import SentenceWindowNodeParser
+from llama_index.legacy.node_parser import SentenceWindowNodeParser

 from private_gpt.components.embedding.embedding_component import EmbeddingComponent
 from private_gpt.components.ingest.ingest_component import get_ingestion_component
2 changes: 1 addition & 1 deletion private_gpt/server/ingest/model.py
@@ -1,6 +1,6 @@
 from typing import Any, Literal

-from llama_index import Document
+from llama_index.legacy import Document
 from pydantic import BaseModel, Field

2 changes: 1 addition & 1 deletion private_gpt/ui/ui.py
@@ -10,7 +10,7 @@
 from fastapi import FastAPI
 from gradio.themes.utils.colors import slate  # type: ignore
 from injector import inject, singleton
-from llama_index.llms import ChatMessage, ChatResponse, MessageRole
+from llama_index.legacy.llms import ChatMessage, ChatResponse, MessageRole
 from pydantic import BaseModel

 from private_gpt.constants import PROJECT_ROOT_PATH
6 changes: 3 additions & 3 deletions pyproject.toml
@@ -11,10 +11,10 @@ boto3 = "^1.28.56"
 injector = "^0.21.0"
 pyyaml = "^6.0.1"
 python-multipart = "^0.0.6"
-pypdf = "^3.16.2"
-llama-index = { extras = ["local_models"], version = "0.9.3" }
+pypdf = "^4.0.1"
+llama-index = "^0.10.12"
 watchdog = "^3.0.0"
-qdrant-client = "^1.6.9"
+qdrant-client = "^1.7.3"
 chromadb = {version = "^0.4.13", optional = true}
 asyncpg = {version = "^0.29.0", optional = true}
 pgvector = {version = "^0.2.5", optional = true}
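
Worth noting: the old constraint declared the local_models extra, which pulled in the local-inference backends (llama-cpp-python, sentence-transformers, and related packages), while the plain ^0.10.12 constraint installs the 0.10 metapackage without them. Since the code in this PR still uses the legacy LlamaCPP and HuggingFaceEmbedding classes, those backends presumably need to be declared explicitly. A sketch in the same Poetry syntax, with unpinned constraints as placeholders (package choices are an assumption, not part of this diff):

llama-index = "^0.10.12"
# Assumed replacements for the removed "local_models" extra; pin as appropriate
llama-cpp-python = "*"
sentence-transformers = "*"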
2 changes: 1 addition & 1 deletion tests/test_prompt_helper.py
@@ -1,5 +1,5 @@
 import pytest
-from llama_index.llms import ChatMessage, MessageRole
+from llama_index.legacy.llms import ChatMessage, MessageRole

 from private_gpt.components.llm.prompt_helper import (
     ChatMLPromptStyle,