From d0a7d991a27b66bcef3be2eaa223e0853cee2ed8 Mon Sep 17 00:00:00 2001 From: imartinez Date: Wed, 28 Feb 2024 18:45:54 +0100 Subject: [PATCH 01/15] Working refactor. Dependency clean-up pending. --- poetry.lock | 1424 ++++++++--------- .../embedding/embedding_component.py | 7 +- .../components/ingest/ingest_component.py | 76 +- .../components/ingest/ingest_helper.py | 47 +- private_gpt/components/llm/llm_component.py | 16 +- private_gpt/components/llm/prompt_helper.py | 64 +- .../node_store/node_store_component.py | 6 +- .../components/vector_store/batched_chroma.py | 24 +- .../vector_store/vector_store_component.py | 7 +- private_gpt/launcher.py | 7 + private_gpt/main.py | 5 - private_gpt/open_ai/openai_models.py | 2 +- private_gpt/server/chat/chat_router.py | 2 +- private_gpt/server/chat/chat_service.py | 33 +- private_gpt/server/chunks/chunks_service.py | 15 +- private_gpt/server/ingest/ingest_service.py | 20 +- private_gpt/server/ingest/model.py | 2 +- private_gpt/ui/ui.py | 2 +- pyproject.toml | 23 +- tests/test_prompt_helper.py | 2 +- 20 files changed, 877 insertions(+), 907 deletions(-) diff --git a/poetry.lock b/poetry.lock index 8cce782fd..45bb5d40c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,35 +1,5 @@ # This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand. -[[package]] -name = "accelerate" -version = "0.25.0" -description = "Accelerate" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "accelerate-0.25.0-py3-none-any.whl", hash = "sha256:c7bb817eb974bba0ff3ea1ba0f24d55afb86d50e3d4fe98d6922dc69cf2ccff1"}, - {file = "accelerate-0.25.0.tar.gz", hash = "sha256:ecf55b0ab278a1dac8539dde0d276977aff04683f07ede73eaf02478538576a1"}, -] - -[package.dependencies] -huggingface-hub = "*" -numpy = ">=1.17" -packaging = ">=20.0" -psutil = "*" -pyyaml = "*" -safetensors = ">=0.3.1" -torch = ">=1.10.0" - -[package.extras] -dev = ["bitsandbytes", "black (>=23.1,<24.0)", "datasets", "deepspeed", "evaluate", "hf-doc-builder (>=0.3.0)", "parameterized", "pytest", "pytest-subtests", "pytest-xdist", "rich", "ruff (>=0.0.241)", "scikit-learn", "scipy", "timm", "tqdm", "transformers", "urllib3 (<2.0.0)"] -quality = ["black (>=23.1,<24.0)", "hf-doc-builder (>=0.3.0)", "ruff (>=0.0.241)", "urllib3 (<2.0.0)"] -rich = ["rich"] -sagemaker = ["sagemaker"] -test-dev = ["bitsandbytes", "datasets", "deepspeed", "evaluate", "scikit-learn", "scipy", "timm", "tqdm", "transformers"] -test-prod = ["parameterized", "pytest", "pytest-subtests", "pytest-xdist"] -test-trackers = ["comet-ml", "dvclive", "tensorboard", "wandb"] -testing = ["bitsandbytes", "datasets", "deepspeed", "evaluate", "parameterized", "pytest", "pytest-subtests", "pytest-xdist", "scikit-learn", "scipy", "timm", "tqdm", "transformers"] - [[package]] name = "aiofiles" version = "23.2.1" @@ -150,20 +120,6 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" -[[package]] -name = "aiostream" -version = "0.5.2" -description = "Generator-based operators for asynchronous iteration" -optional = false -python-versions = ">=3.8" -files = [ - {file = "aiostream-0.5.2-py3-none-any.whl", hash = "sha256:054660370be9d37f6fe3ece3851009240416bd082e469fd90cc8673d3818cf71"}, - {file = "aiostream-0.5.2.tar.gz", hash = "sha256:b71b519a2d66c38f0872403ab86417955b77352f08d9ad02ad46fc3926b389f4"}, -] - -[package.dependencies] -typing-extensions = "*" - [[package]] name = "altair" version = "5.2.0" @@ -222,7 +178,7 @@ trio = ["trio (<0.22)"] name = "asgiref" version = "3.7.2" description = "ASGI specs, helper 
code, and adapters" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "asgiref-3.7.2-py3-none-any.whl", hash = "sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e"}, @@ -236,7 +192,7 @@ tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] name = "async-timeout" version = "4.0.3" description = "Timeout context manager for asyncio programs" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, @@ -247,7 +203,7 @@ files = [ name = "asyncpg" version = "0.29.0" description = "An asyncio PostgreSQL driver" -optional = true +optional = false python-versions = ">=3.8.0" files = [ {file = "asyncpg-0.29.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72fd0ef9f00aeed37179c62282a3d14262dbbafb74ec0ba16e1b1864d8a12169"}, @@ -322,7 +278,7 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte name = "backoff" version = "2.2.1" description = "Function decoration for backoff and retry" -optional = true +optional = false python-versions = ">=3.7,<4.0" files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, @@ -333,7 +289,7 @@ files = [ name = "bcrypt" version = "4.1.2" description = "Modern password hashing for your software and your servers" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, @@ -371,19 +327,22 @@ typecheck = ["mypy"] [[package]] name = "beautifulsoup4" -version = "4.12.2" +version = "4.12.3" description = "Screen-scraping library" optional = false python-versions = ">=3.6.0" files = [ - {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, - {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, + {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, + {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, ] [package.dependencies] soupsieve = ">1.2" [package.extras] +cchardet = ["cchardet"] +chardet = ["chardet"] +charset-normalizer = ["charset-normalizer"] html5lib = ["html5lib"] lxml = ["lxml"] @@ -422,32 +381,32 @@ uvloop = ["uvloop (>=0.15.2)"] [[package]] name = "boto3" -version = "1.34.2" +version = "1.34.51" description = "The AWS SDK for Python" optional = false python-versions = ">= 3.8" files = [ - {file = "boto3-1.34.2-py3-none-any.whl", hash = "sha256:aad3f305fe3cd4f2bba545c9580cd460c366af56a8aabb6094528dd32317f8d2"}, - {file = "boto3-1.34.2.tar.gz", hash = "sha256:970fd9f9f522eb48f3cd5574e927b369279ebf5bcf0f2fae5ed9cc6306e58558"}, + {file = "boto3-1.34.51-py3-none-any.whl", hash = "sha256:67732634dc7d0afda879bd9a5e2d0818a2c14a98bef766b95a3e253ea5104cb9"}, + {file = "boto3-1.34.51.tar.gz", hash = "sha256:2cd9463e738a184cbce8a6824027c22163c5f73e277a35ff5aa0fb0e845b4301"}, ] [package.dependencies] -botocore = ">=1.34.2,<1.35.0" +botocore = ">=1.34.51,<1.35.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.9.0,<0.10.0" +s3transfer = ">=0.10.0,<0.11.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = 
"botocore" -version = "1.34.2" +version = "1.34.51" description = "Low-level, data-driven core of boto 3." optional = false python-versions = ">= 3.8" files = [ - {file = "botocore-1.34.2-py3-none-any.whl", hash = "sha256:655b1ea2a5d7b989a0eb6006c16137f785bc7334f31378115668c4be5d4b00eb"}, - {file = "botocore-1.34.2.tar.gz", hash = "sha256:8a9f4ad438ba814b9b7a22b24de3004f8aa232e7ae86e0087aea4d7792dc3a2a"}, + {file = "botocore-1.34.51-py3-none-any.whl", hash = "sha256:01d5156247f991b3466a8404e3d7460a9ecbd9b214f9992d6ba797d9ddc6f120"}, + {file = "botocore-1.34.51.tar.gz", hash = "sha256:5086217442e67dd9de36ec7e87a0c663f76b7790d5fb6a12de565af95e87e319"}, ] [package.dependencies] @@ -456,13 +415,49 @@ python-dateutil = ">=2.1,<3.0.0" urllib3 = {version = ">=1.25.4,<2.1", markers = "python_version >= \"3.10\""} [package.extras] -crt = ["awscrt (==0.19.17)"] +crt = ["awscrt (==0.19.19)"] + +[[package]] +name = "bs4" +version = "0.0.2" +description = "Dummy package for Beautiful Soup (beautifulsoup4)" +optional = false +python-versions = "*" +files = [ + {file = "bs4-0.0.2-py2.py3-none-any.whl", hash = "sha256:abf8742c0805ef7f662dce4b51cca104cffe52b835238afc169142ab9b3fbccc"}, + {file = "bs4-0.0.2.tar.gz", hash = "sha256:a48685c58f50fe127722417bae83fe6badf500d54b55f7e39ffe43b798653925"}, +] + +[package.dependencies] +beautifulsoup4 = "*" + +[[package]] +name = "build" +version = "1.0.3" +description = "A simple, correct Python build frontend" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "build-1.0.3-py3-none-any.whl", hash = "sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f"}, + {file = "build-1.0.3.tar.gz", hash = "sha256:538aab1b64f9828977f84bc63ae570b060a8ed1be419e7870b8b4fc5e6ea553b"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "os_name == \"nt\""} +packaging = ">=19.0" +pyproject_hooks = "*" + +[package.extras] +docs = ["furo (>=2023.08.17)", "sphinx (>=7.0,<8.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)", "sphinx-issues (>=3.0.0)"] +test = ["filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2.12)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "setuptools (>=56.0.0)", "setuptools (>=56.0.0)", "setuptools (>=67.8.0)", "wheel (>=0.36.0)"] +typing = ["importlib-metadata (>=5.1)", "mypy (>=1.5.0,<1.6.0)", "tomli", "typing-extensions (>=3.7.4.3)"] +virtualenv = ["virtualenv (>=20.0.35)"] [[package]] name = "cachetools" version = "5.3.2" description = "Extensible memoizing collections and decorators" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, @@ -594,7 +589,7 @@ files = [ name = "chroma-hnswlib" version = "0.7.3" description = "Chromas fork of hnswlib" -optional = true +optional = false python-versions = "*" files = [ {file = "chroma-hnswlib-0.7.3.tar.gz", hash = "sha256:b6137bedde49fffda6af93b0297fe00429fc61e5a072b1ed9377f909ed95a932"}, @@ -629,17 +624,18 @@ numpy = "*" [[package]] name = "chromadb" -version = "0.4.20" +version = "0.4.24" description = "Chroma." 
-optional = true +optional = false python-versions = ">=3.8" files = [ - {file = "chromadb-0.4.20-py3-none-any.whl", hash = "sha256:0b8ca7d22483273b12a282af7a4fbad0a804207d4431324a4673741fe3c92028"}, - {file = "chromadb-0.4.20.tar.gz", hash = "sha256:69eb699ad6a5e46a2b558c8770ab91041dc32061ad66403aa8224fe5750229c5"}, + {file = "chromadb-0.4.24-py3-none-any.whl", hash = "sha256:3a08e237a4ad28b5d176685bd22429a03717fe09d35022fb230d516108da01da"}, + {file = "chromadb-0.4.24.tar.gz", hash = "sha256:a5c80b4e4ad9b236ed2d4899a5b9e8002b489293f2881cb2cadab5b199ee1c72"}, ] [package.dependencies] bcrypt = ">=4.0.1" +build = ">=1.0.3" chroma-hnswlib = "0.7.3" fastapi = ">=0.95.2" grpcio = ">=1.58.0" @@ -652,6 +648,7 @@ opentelemetry-api = ">=1.2.0" opentelemetry-exporter-otlp-proto-grpc = ">=1.2.0" opentelemetry-instrumentation-fastapi = ">=0.41b0" opentelemetry-sdk = ">=1.2.0" +orjson = ">=3.9.12" overrides = ">=7.3.1" posthog = ">=2.4.0" pulsar-client = ">=3.1.0" @@ -865,48 +862,6 @@ files = [ marshmallow = ">=3.18.0,<4.0.0" typing-inspect = ">=0.4.0,<1" -[[package]] -name = "datasets" -version = "2.14.4" -description = "HuggingFace community-driven open-source library of datasets" -optional = false -python-versions = ">=3.8.0" -files = [ - {file = "datasets-2.14.4-py3-none-any.whl", hash = "sha256:29336bd316a7d827ccd4da2236596279b20ca2ac78f64c04c9483da7cbc2459b"}, - {file = "datasets-2.14.4.tar.gz", hash = "sha256:ef29c2b5841de488cd343cfc26ab979bff77efa4d2285af51f1ad7db5c46a83b"}, -] - -[package.dependencies] -aiohttp = "*" -dill = ">=0.3.0,<0.3.8" -fsspec = {version = ">=2021.11.1", extras = ["http"]} -huggingface-hub = ">=0.14.0,<1.0.0" -multiprocess = "*" -numpy = ">=1.17" -packaging = "*" -pandas = "*" -pyarrow = ">=8.0.0" -pyyaml = ">=5.1" -requests = ">=2.19.0" -tqdm = ">=4.62.1" -xxhash = "*" - -[package.extras] -apache-beam = ["apache-beam (>=2.26.0,<2.44.0)"] -audio = ["librosa", "soundfile (>=0.12.1)"] -benchmarks = ["tensorflow (==2.12.0)", "torch (==2.0.1)", "transformers (==4.30.1)"] -dev = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "black (>=23.1,<24.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", "pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "rarfile (>=4.0)", "ruff (>=0.0.241)", "s3fs", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"] -docs = ["s3fs", "tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos", "torch", "transformers"] -jax = ["jax (>=0.2.8,!=0.3.2,<=0.3.25)", "jaxlib (>=0.1.65,<=0.3.25)"] -metrics-tests = ["Werkzeug (>=1.0.1)", "accelerate", "bert-score (>=0.3.6)", "jiwer", "langdetect", "mauve-text", "nltk", "requests-file (>=1.5.1)", "rouge-score", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "spacy (>=3.0.0)", "texttable (>=1.6.3)", "tldextract", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "typer (<0.5.0)"] -quality = ["black (>=23.1,<24.0)", "pyyaml (>=5.3.1)", "ruff (>=0.0.241)"] -s3 = ["s3fs"] -tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)", "tensorflow-macos"] -tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] -tests = ["Pillow (>=6.2.1)", "absl-py", "apache-beam (>=2.26.0,<2.44.0)", "elasticsearch (<8.0.0)", "faiss-cpu (>=1.6.4)", "joblib (<1.3.0)", "joblibspark", "librosa", "lz4", "py7zr", 
"pyspark (>=3.4)", "pytest", "pytest-datadir", "pytest-xdist", "rarfile (>=4.0)", "s3fs (>=2021.11.1)", "soundfile (>=0.12.1)", "sqlalchemy (<2.0.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1)", "tensorflow-macos", "tiktoken", "torch", "transformers", "zstandard"] -torch = ["torch"] -vision = ["Pillow (>=6.2.1)"] - [[package]] name = "deprecated" version = "1.2.14" @@ -925,19 +880,16 @@ wrapt = ">=1.10,<2" dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"] [[package]] -name = "dill" -version = "0.3.7" -description = "serialize all of Python" +name = "dirtyjson" +version = "1.0.8" +description = "JSON decoder for Python that can extract data from the muck" optional = false -python-versions = ">=3.7" +python-versions = "*" files = [ - {file = "dill-0.3.7-py3-none-any.whl", hash = "sha256:76b122c08ef4ce2eedcd4d1abd8e641114bfc6c2867f49f3c41facf65bf19f5e"}, - {file = "dill-0.3.7.tar.gz", hash = "sha256:cc1c8b182eb3013e24bd475ff2e9295af86c1a38eb1aff128dac8962a9ce3c03"}, + {file = "dirtyjson-1.0.8-py3-none-any.whl", hash = "sha256:125e27248435a58acace26d5c2c4c11a1c0de0a9c5124c5a94ba78e517d74f53"}, + {file = "dirtyjson-1.0.8.tar.gz", hash = "sha256:90ca4a18f3ff30ce849d100dcf4a003953c79d3a2348ef056f1d9c22231a25fd"}, ] -[package.extras] -graph = ["objgraph (>=1.7.2)"] - [[package]] name = "diskcache" version = "5.6.3" @@ -1005,55 +957,18 @@ files = [ dnspython = ">=2.0.0" idna = ">=2.0.0" -[[package]] -name = "evaluate" -version = "0.4.1" -description = "HuggingFace community-driven open-source library of evaluation" -optional = false -python-versions = ">=3.7.0" -files = [ - {file = "evaluate-0.4.1-py3-none-any.whl", hash = "sha256:3ff079ab09572c0a2c1e6d749887c19f6783ab993320412cd39f6fe501d28510"}, - {file = "evaluate-0.4.1.tar.gz", hash = "sha256:d721d9f2059ced79770d8a0509e954fbd1bbac96a8f9160e29888d8073cda3d9"}, -] - -[package.dependencies] -datasets = ">=2.0.0" -dill = "*" -fsspec = {version = ">=2021.05.0", extras = ["http"]} -huggingface-hub = ">=0.7.0" -multiprocess = "*" -numpy = ">=1.17" -packaging = "*" -pandas = "*" -requests = ">=2.19.0" -responses = "<0.19" -tqdm = ">=4.62.1" -xxhash = "*" - -[package.extras] -dev = ["Werkzeug (>=1.0.1)", "absl-py", "accelerate", "bert-score (>=0.3.6)", "black (>=22.0,<23.0)", "cer (>=1.2.0)", "charcut (>=1.1.1)", "flake8 (>=3.8.3)", "isort (>=5.0.0)", "jiwer", "mauve-text", "nltk", "pytest", "pytest-datadir", "pytest-xdist", "pyyaml (>=5.3.1)", "requests-file (>=1.5.1)", "rouge-score (>=0.1.2)", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1,<=2.10)", "texttable (>=1.6.3)", "tldextract (>=3.1.0)", "toml (>=0.10.1)", "torch", "transformers", "trectools", "unidecode (>=1.3.4)"] -docs = ["s3fs"] -evaluator = ["scipy (>=1.7.1)", "transformers"] -quality = ["black (>=22.0,<23.0)", "flake8 (>=3.8.3)", "isort (>=5.0.0)", "pyyaml (>=5.3.1)"] -template = ["cookiecutter", "gradio (>=3.0.0)"] -tensorflow = ["tensorflow (>=2.2.0,!=2.6.0,!=2.6.1)"] -tensorflow-gpu = ["tensorflow-gpu (>=2.2.0,!=2.6.0,!=2.6.1)"] -tests = ["Werkzeug (>=1.0.1)", "absl-py", "accelerate", "bert-score (>=0.3.6)", "cer (>=1.2.0)", "charcut (>=1.1.1)", "jiwer", "mauve-text", "nltk", "pytest", "pytest-datadir", "pytest-xdist", "requests-file (>=1.5.1)", "rouge-score (>=0.1.2)", "sacrebleu", "sacremoses", "scikit-learn", "scipy", "sentencepiece", "seqeval", "six (>=1.15.0,<1.16.0)", "tensorflow (>=2.3,!=2.6.0,!=2.6.1,<=2.10)", "texttable (>=1.6.3)", "tldextract (>=3.1.0)", "toml 
(>=0.10.1)", "torch", "transformers", "trectools", "unidecode (>=1.3.4)"] -torch = ["torch"] - [[package]] name = "fastapi" -version = "0.103.2" +version = "0.110.0" description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "fastapi-0.103.2-py3-none-any.whl", hash = "sha256:3270de872f0fe9ec809d4bd3d4d890c6d5cc7b9611d721d6438f9dacc8c4ef2e"}, - {file = "fastapi-0.103.2.tar.gz", hash = "sha256:75a11f6bfb8fc4d2bec0bd710c2d5f2829659c0e8c0afd5560fdda6ce25ec653"}, + {file = "fastapi-0.110.0-py3-none-any.whl", hash = "sha256:87a1f6fb632a218222c5984be540055346a8f5d8a68e8f6fb647b1dc9934de4b"}, + {file = "fastapi-0.110.0.tar.gz", hash = "sha256:266775f0dcc95af9d3ef39bad55cff525329a931d5fd51930aadd4f428bf7ff3"}, ] [package.dependencies] -anyio = ">=3.7.1,<4.0.0" email-validator = {version = ">=2.0.0", optional = true, markers = "extra == \"all\""} httpx = {version = ">=0.23.0", optional = true, markers = "extra == \"all\""} itsdangerous = {version = ">=1.1.0", optional = true, markers = "extra == \"all\""} @@ -1062,15 +977,15 @@ orjson = {version = ">=3.2.1", optional = true, markers = "extra == \"all\""} pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" pydantic-extra-types = {version = ">=2.0.0", optional = true, markers = "extra == \"all\""} pydantic-settings = {version = ">=2.0.0", optional = true, markers = "extra == \"all\""} -python-multipart = {version = ">=0.0.5", optional = true, markers = "extra == \"all\""} +python-multipart = {version = ">=0.0.7", optional = true, markers = "extra == \"all\""} pyyaml = {version = ">=5.3.1", optional = true, markers = "extra == \"all\""} -starlette = ">=0.27.0,<0.28.0" -typing-extensions = ">=4.5.0" +starlette = ">=0.36.3,<0.37.0" +typing-extensions = ">=4.8.0" ujson = {version = ">=4.0.1,<4.0.2 || >4.0.2,<4.1.0 || >4.1.0,<4.2.0 || >4.2.0,<4.3.0 || >4.3.0,<5.0.0 || >5.0.0,<5.1.0 || >5.1.0", optional = true, markers = "extra == \"all\""} uvicorn = {version = ">=0.12.0", extras = ["standard"], optional = true, markers = "extra == \"all\""} [package.extras] -all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.7)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] [[package]] name = "ffmpy" @@ -1271,10 +1186,6 @@ files = [ {file = "fsspec-2023.12.2.tar.gz", hash = "sha256:8548d39e8810b59c38014934f6b31e57f40c1b20f911f4cc2b85389c7e9bf0cb"}, ] -[package.dependencies] -aiohttp = {version = "<4.0.0a0 || >4.0.0a0,<4.0.0a1 || >4.0.0a1", optional = true, markers = "extra == \"http\""} -requests = {version = "*", optional = true, markers = "extra == \"http\""} - [package.extras] abfs = ["adlfs"] adl = ["adlfs"] @@ -1303,7 +1214,7 @@ tqdm = ["tqdm"] name = "google-auth" version = "2.25.2" description = "Google Authentication Library" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = 
"google-auth-2.25.2.tar.gz", hash = "sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40"}, @@ -1326,7 +1237,7 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] name = "googleapis-common-protos" version = "1.62.0" description = "Common protobufs used in Google APIs" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "googleapis-common-protos-1.62.0.tar.gz", hash = "sha256:83f0ece9f94e5672cced82f592d2a5edf527a96ed1794f0bab36d5735c996277"}, @@ -1748,9 +1659,11 @@ files = [ ] [package.dependencies] +aiohttp = {version = "*", optional = true, markers = "extra == \"inference\""} filelock = "*" fsspec = ">=2023.5.0" packaging = ">=20.9" +pydantic = {version = ">1.1,<3.0", optional = true, markers = "python_version > \"3.8\" and extra == \"inference\""} pyyaml = ">=5.1" requests = "*" tqdm = ">=4.42.1" @@ -1823,7 +1736,7 @@ files = [ name = "importlib-metadata" version = "6.11.0" description = "Read metadata from Python packages" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, @@ -2080,7 +1993,7 @@ files = [ name = "kubernetes" version = "28.1.0" description = "Kubernetes python client" -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "kubernetes-28.1.0-py2.py3-none-any.whl", hash = "sha256:10f56f8160dcb73647f15fafda268e7f60cf7dbc9f8e46d52fcd46d3beb0c18d"}, @@ -2104,16 +2017,17 @@ adal = ["adal (>=1.0.2)"] [[package]] name = "llama-cpp-python" -version = "0.2.23" +version = "0.2.53" description = "Python bindings for the llama.cpp library" optional = false python-versions = ">=3.8" files = [ - {file = "llama_cpp_python-0.2.23.tar.gz", hash = "sha256:364b61a13970932ea189b45a1c5dea89797b90e5da00f1fe6e72c47fbc512898"}, + {file = "llama_cpp_python-0.2.53.tar.gz", hash = "sha256:f7ff8eda538ca6c80521a8bbf80d3ef4527ecb28f6d08fa9b3bb1f0cfc3b684e"}, ] [package.dependencies] diskcache = ">=5.6.1" +jinja2 = ">=2.11.3" numpy = ">=1.20.0" typing-extensions = ">=4.5.0" @@ -2121,47 +2035,224 @@ typing-extensions = ">=4.5.0" all = ["llama_cpp_python[dev,server,test]"] dev = ["black (>=23.3.0)", "httpx (>=0.24.1)", "mkdocs (>=1.4.3)", "mkdocs-material (>=9.1.18)", "mkdocstrings[python] (>=0.22.0)", "pytest (>=7.4.0)", "twine (>=4.0.2)"] server = ["fastapi (>=0.100.0)", "pydantic-settings (>=2.0.1)", "sse-starlette (>=1.6.1)", "starlette-context (>=0.3.6,<0.4)", "uvicorn (>=0.22.0)"] -test = ["httpx (>=0.24.1)", "pytest (>=7.4.0)"] +test = ["httpx (>=0.24.1)", "pytest (>=7.4.0)", "scipy (>=1.10)"] [[package]] -name = "llama-index" -version = "0.9.3" +name = "llama-index-core" +version = "0.10.13" description = "Interface between LLMs and your data" optional = false -python-versions = ">=3.8.1,<3.12" +python-versions = ">=3.8.1,<4.0" files = [ - {file = "llama_index-0.9.3-py3-none-any.whl", hash = "sha256:2b4b5be5f1a3b1158e8bcce49b728fee66370c776198c0b35ee22214a36a1e0b"}, - {file = "llama_index-0.9.3.tar.gz", hash = "sha256:158c1feb68786e9134e3f70ab2c8a8e51b0e99f0f91f0a10162bd63c4d33f152"}, + {file = "llama_index_core-0.10.13-py3-none-any.whl", hash = "sha256:40c76fc02be7cd948a333ca541f2ff38cf02774e1c960674e2b68c61943bac90"}, + {file = "llama_index_core-0.10.13.tar.gz", hash = "sha256:826fded00767923fba8aca94f46c32b259e8879f517016ab7a3801b1b37187a1"}, ] [package.dependencies] aiohttp = ">=3.8.6,<4.0.0" -aiostream = ">=0.5.2,<0.6.0" -beautifulsoup4 = ">=4.12.2,<5.0.0" 
-dataclasses-json = ">=0.5.7,<0.6.0" +dataclasses-json = "*" deprecated = ">=1.2.9.3" +dirtyjson = ">=1.0.8,<2.0.0" fsspec = ">=2023.5.0" httpx = "*" +llamaindex-py-client = ">=0.1.13,<0.2.0" nest-asyncio = ">=1.5.8,<2.0.0" +networkx = ">=3.0" nltk = ">=3.8.1,<4.0.0" numpy = "*" openai = ">=1.1.0" -optimum = {version = ">=1.13.2,<2.0.0", extras = ["onnxruntime"], optional = true, markers = "extra == \"local-models\""} -pandas = {version = "*", extras = ["jinja2"]} -sentencepiece = {version = ">=0.1.99,<0.2.0", optional = true, markers = "extra == \"local-models\""} +pandas = "*" +pillow = ">=9.0.0" +PyYAML = ">=6.0.1" +requests = ">=2.31.0" SQLAlchemy = {version = ">=1.4.49", extras = ["asyncio"]} tenacity = ">=8.2.0,<9.0.0" tiktoken = ">=0.3.3" -transformers = {version = ">=4.34.0,<5.0.0", extras = ["torch"], optional = true, markers = "extra == \"local-models\""} +tqdm = ">=4.66.1,<5.0.0" typing-extensions = ">=4.5.0" typing-inspect = ">=0.8.0" -urllib3 = "<2" [package.extras] +gradientai = ["gradientai (>=1.4.0)"] +html = ["beautifulsoup4 (>=4.12.2,<5.0.0)"] langchain = ["langchain (>=0.0.303)"] -local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.34.0,<5.0.0)"] -postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg-binary (>=3.1.12,<4.0.0)"] -query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn (<1.3.0)", "spacy (>=3.7.1,<4.0.0)"] +local-models = ["optimum[onnxruntime] (>=1.13.2,<2.0.0)", "sentencepiece (>=0.1.99,<0.2.0)", "transformers[torch] (>=4.33.1,<5.0.0)"] +postgres = ["asyncpg (>=0.28.0,<0.29.0)", "pgvector (>=0.1.0,<0.2.0)", "psycopg2-binary (>=2.9.9,<3.0.0)"] +query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "lm-format-enforcer (>=0.4.3,<0.5.0)", "rank-bm25 (>=0.2.2,<0.3.0)", "scikit-learn", "spacy (>=3.7.1,<4.0.0)"] + +[[package]] +name = "llama-index-embeddings-huggingface" +version = "0.1.4" +description = "llama-index embeddings huggingface integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_embeddings_huggingface-0.1.4-py3-none-any.whl", hash = "sha256:9c80539f3cbbd7191c219e2cda154b1a7151aa912196bc537c16f40e18e4187c"}, + {file = "llama_index_embeddings_huggingface-0.1.4.tar.gz", hash = "sha256:042d249d91039bc4a531711c0c81ebf4f5c921de98629d2d342979bc4511a639"}, +] + +[package.dependencies] +huggingface-hub = {version = ">=0.19.0", extras = ["inference"]} +llama-index-core = ">=0.10.1,<0.11.0" +torch = ">=2.1.2,<3.0.0" +transformers = ">=4.37.0,<5.0.0" + +[[package]] +name = "llama-index-embeddings-openai" +version = "0.1.6" +description = "llama-index embeddings openai integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_embeddings_openai-0.1.6-py3-none-any.whl", hash = "sha256:f8b2dded0718e9f57c08ce352d186941e6acf7de414c64219210b66f7a6d6d2d"}, + {file = "llama_index_embeddings_openai-0.1.6.tar.gz", hash = "sha256:f12f0ef6f92211efe1a022a97bb68fc8731c93bd20df3b0567dba69c610033db"}, +] + +[package.dependencies] +llama-index-core = ">=0.10.1,<0.11.0" + +[[package]] +name = "llama-index-llms-llama-cpp" +version = "0.1.3" +description = "llama-index llms llama cpp integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_llms_llama_cpp-0.1.3-py3-none-any.whl", hash = 
"sha256:58ab5f492946b46544a057cec6b98268b610dbd99462d749b69a63d11577aa6b"}, + {file = "llama_index_llms_llama_cpp-0.1.3.tar.gz", hash = "sha256:ef7f531ae1f567eb821f6b2662f4bfcfb5301d9f3649973e5c202bbab6979935"}, +] + +[package.dependencies] +llama-cpp-python = ">=0.2.32,<0.3.0" +llama-index-core = ">=0.10.1,<0.11.0" + +[[package]] +name = "llama-index-llms-ollama" +version = "0.1.2" +description = "llama-index llms ollama integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_llms_ollama-0.1.2-py3-none-any.whl", hash = "sha256:967d816e13a6e064f8454b2faf7b70c749a52230258cac67b6025e20db6e988c"}, + {file = "llama_index_llms_ollama-0.1.2.tar.gz", hash = "sha256:19ec727d048c873915d5b037d9a2fe956520c019b11d7ab8c3c406dd11f34e4b"}, +] + +[package.dependencies] +llama-index-core = ">=0.10.1,<0.11.0" + +[[package]] +name = "llama-index-llms-openai" +version = "0.1.6" +description = "llama-index llms openai integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_llms_openai-0.1.6-py3-none-any.whl", hash = "sha256:4260ad31c3444e97ec8a8d061cb6dbf1074262b82341a2b69d2b27e8a23efe62"}, + {file = "llama_index_llms_openai-0.1.6.tar.gz", hash = "sha256:15530dfa3893b15c5576ebc71e01b77acbf47abd689219436fdf7b6ca567a9fd"}, +] + +[package.dependencies] +llama-index-core = ">=0.10.1,<0.11.0" + +[[package]] +name = "llama-index-llms-openai-like" +version = "0.1.3" +description = "llama-index llms openai like integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_llms_openai_like-0.1.3-py3-none-any.whl", hash = "sha256:0cf2c56f027c5e1f17c7fc606ad2b991f61daa75a88ba35bb5ea97de9766d4d3"}, + {file = "llama_index_llms_openai_like-0.1.3.tar.gz", hash = "sha256:c3412325077c75263e37d60d501314b9bdc770f12d8a2b16e756e7d9ac8f9e3f"}, +] + +[package.dependencies] +llama-index-core = ">=0.10.1,<0.11.0" +llama-index-llms-openai = ">=0.1.1,<0.2.0" +transformers = ">=4.37.0,<5.0.0" + +[[package]] +name = "llama-index-readers-file" +version = "0.1.6" +description = "llama-index readers file integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_readers_file-0.1.6-py3-none-any.whl", hash = "sha256:f583bd90353a0c0985213af02c97aa2f2f22e702d4311fe719de91382c9ad8dd"}, + {file = "llama_index_readers_file-0.1.6.tar.gz", hash = "sha256:d9fc0ca84926d04bd757c57fe87841cd9dbc2606aab5f2ce927deec14aaa1a74"}, +] + +[package.dependencies] +beautifulsoup4 = ">=4.12.3,<5.0.0" +bs4 = ">=0.0.2,<0.0.3" +llama-index-core = ">=0.10.1,<0.11.0" +pymupdf = ">=1.23.21,<2.0.0" +pypdf = ">=4.0.1,<5.0.0" + +[[package]] +name = "llama-index-vector-stores-chroma" +version = "0.1.4" +description = "llama-index vector_stores chroma integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_vector_stores_chroma-0.1.4-py3-none-any.whl", hash = "sha256:f475a450431ee4d9b2915ba9da2112dfdfacaee1ea220b8603720be1c116786c"}, + {file = "llama_index_vector_stores_chroma-0.1.4.tar.gz", hash = "sha256:7364f2a3f8a51b83d350da39da7e7046704cfa9c848ebe8fd1c6cb39ad4878f9"}, +] + +[package.dependencies] +chromadb = ">=0.4.22,<0.5.0" +llama-index-core = ">=0.10.1,<0.11.0" +onnxruntime = ">=1.17.0,<2.0.0" +tokenizers = ">=0.15.1,<0.16.0" + +[[package]] +name = "llama-index-vector-stores-postgres" +version = "0.1.2" +description = "llama-index vector_stores postgres integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = 
"llama_index_vector_stores_postgres-0.1.2-py3-none-any.whl", hash = "sha256:75e8c73b553e4ca29ca4e55aedcd6abcdaf5bba7b12baf89a66c81f9b0ef5054"}, + {file = "llama_index_vector_stores_postgres-0.1.2.tar.gz", hash = "sha256:05ec3d091984bfa6ec7055c350136062ead6cc81c06380519680e8ce8dd65ff9"}, +] + +[package.dependencies] +asyncpg = ">=0.29.0,<0.30.0" +llama-index-core = ">=0.10.1,<0.11.0" +pgvector = ">=0.2.4,<0.3.0" +psycopg2-binary = ">=2.9.9,<3.0.0" +sqlalchemy = {version = ">=2.0.25,<3.0.0", extras = ["asyncio"]} + +[[package]] +name = "llama-index-vector-stores-qdrant" +version = "0.1.3" +description = "llama-index vector_stores qdrant integration" +optional = false +python-versions = ">=3.8.1,<4.0" +files = [ + {file = "llama_index_vector_stores_qdrant-0.1.3-py3-none-any.whl", hash = "sha256:15805a37310830085e4e3399c0e4c87bb1dcaae008b89808b2a883726381cbe7"}, + {file = "llama_index_vector_stores_qdrant-0.1.3.tar.gz", hash = "sha256:19c8047e7061b41d1d0f559b3e7aa75dd41ce7eb96a386cdf63fedd350514f75"}, +] + +[package.dependencies] +grpcio = ">=1.60.0,<2.0.0" +llama-index-core = ">=0.10.1,<0.11.0" +qdrant-client = ">=1.7.1,<2.0.0" + +[[package]] +name = "llamaindex-py-client" +version = "0.1.13" +description = "" +optional = false +python-versions = ">=3.8,<4.0" +files = [ + {file = "llamaindex_py_client-0.1.13-py3-none-any.whl", hash = "sha256:02400c90655da80ae373e0455c829465208607d72462f1898fd383fdfe8dabce"}, + {file = "llamaindex_py_client-0.1.13.tar.gz", hash = "sha256:3bd9b435ee0a78171eba412dea5674d813eb5bf36e577d3c7c7e90edc54900d9"}, +] + +[package.dependencies] +httpx = ">=0.20.0" +pydantic = ">=1.10" [[package]] name = "markdown-it-py" @@ -2339,7 +2430,7 @@ files = [ name = "mmh3" version = "4.0.1" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." 
-optional = true +optional = false python-versions = "*" files = [ {file = "mmh3-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b719ba87232749095011d567a36a25e40ed029fc61c47e74a12416d8bb60b311"}, @@ -2415,7 +2506,7 @@ test = ["mypy (>=1.0)", "pytest (>=7.0.0)"] name = "monotonic" version = "1.6" description = "An implementation of time.monotonic() for Python 2 & < 3.3" -optional = true +optional = false python-versions = "*" files = [ {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, @@ -2522,34 +2613,6 @@ files = [ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"}, ] -[[package]] -name = "multiprocess" -version = "0.70.15" -description = "better multiprocessing and multithreading in Python" -optional = false -python-versions = ">=3.7" -files = [ - {file = "multiprocess-0.70.15-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:aa36c7ed16f508091438687fe9baa393a7a8e206731d321e443745e743a0d4e5"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:20e024018c46d0d1602024c613007ac948f9754659e3853b0aa705e83f6931d8"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_i686.whl", hash = "sha256:e576062981c91f0fe8a463c3d52506e598dfc51320a8dd8d78b987dfca91c5db"}, - {file = "multiprocess-0.70.15-pp37-pypy37_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:e73f497e6696a0f5433ada2b3d599ae733b87a6e8b008e387c62ac9127add177"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:73db2e7b32dcc7f9b0f075c2ffa45c90b6729d3f1805f27e88534c8d321a1be5"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_i686.whl", hash = "sha256:4271647bd8a49c28ecd6eb56a7fdbd3c212c45529ad5303b40b3c65fc6928e5f"}, - {file = "multiprocess-0.70.15-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:cf981fb998d6ec3208cb14f0cf2e9e80216e834f5d51fd09ebc937c32b960902"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:18f9f2c7063346d1617bd1684fdcae8d33380ae96b99427260f562e1a1228b67"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_i686.whl", hash = "sha256:0eac53214d664c49a34695e5824872db4006b1a465edd7459a251809c3773370"}, - {file = "multiprocess-0.70.15-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:1a51dd34096db47fb21fa2b839e615b051d51b97af9a67afbcdaa67186b44883"}, - {file = "multiprocess-0.70.15-py310-none-any.whl", hash = "sha256:7dd58e33235e83cf09d625e55cffd7b0f0eede7ee9223cdd666a87624f60c21a"}, - {file = "multiprocess-0.70.15-py311-none-any.whl", hash = "sha256:134f89053d82c9ed3b73edd3a2531eb791e602d4f4156fc92a79259590bd9670"}, - {file = "multiprocess-0.70.15-py37-none-any.whl", hash = "sha256:f7d4a1629bccb433114c3b4885f69eccc200994323c80f6feee73b0edc9199c5"}, - {file = "multiprocess-0.70.15-py38-none-any.whl", hash = "sha256:bee9afba476c91f9ebee7beeee0601face9eff67d822e893f9a893725fbd6316"}, - {file = "multiprocess-0.70.15-py39-none-any.whl", hash = "sha256:3e0953f5d52b4c76f1c973eaf8214554d146f2be5decb48e928e55c7a2d19338"}, - {file = "multiprocess-0.70.15.tar.gz", hash = "sha256:f20eed3036c0ef477b07a4177cf7c1ba520d9a2677870a4f47fe026f0cd6787e"}, -] - -[package.dependencies] -dill = ">=0.3.7" - [[package]] name = "mypy" version = "1.7.1" @@ -2861,7 +2924,7 @@ files = [ name = "oauthlib" version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth 
request-signing logic" -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, @@ -2873,78 +2936,38 @@ rsa = ["cryptography (>=3.0.0)"] signals = ["blinker (>=1.4.0)"] signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] -[[package]] -name = "onnx" -version = "1.15.0" -description = "Open Neural Network Exchange" -optional = false -python-versions = ">=3.8" -files = [ - {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:51cacb6aafba308aaf462252ced562111f6991cdc7bc57a6c554c3519453a8ff"}, - {file = "onnx-1.15.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:0aee26b6f7f7da7e840de75ad9195a77a147d0662c94eaa6483be13ba468ffc1"}, - {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baf6ef6c93b3b843edb97a8d5b3d229a1301984f3f8dee859c29634d2083e6f9"}, - {file = "onnx-1.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ed899fe6000edc05bb2828863d3841cfddd5a7cf04c1a771f112e94de75d9f"}, - {file = "onnx-1.15.0-cp310-cp310-win32.whl", hash = "sha256:f1ad3d77fc2f4b4296f0ac2c8cadd8c1dcf765fc586b737462d3a0fe8f7c696a"}, - {file = "onnx-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:ca4ebc4f47109bfb12c8c9e83dd99ec5c9f07d2e5f05976356c6ccdce3552010"}, - {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:233ffdb5ca8cc2d960b10965a763910c0830b64b450376da59207f454701f343"}, - {file = "onnx-1.15.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:51fa79c9ea9af033638ec51f9177b8e76c55fad65bb83ea96ee88fafade18ee7"}, - {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f277d4861729f5253a51fa41ce91bfec1c4574ee41b5637056b43500917295ce"}, - {file = "onnx-1.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8a7c94d2ebead8f739fdb70d1ce5a71726f4e17b3e5b8ad64455ea1b2801a85"}, - {file = "onnx-1.15.0-cp311-cp311-win32.whl", hash = "sha256:17dcfb86a8c6bdc3971443c29b023dd9c90ff1d15d8baecee0747a6b7f74e650"}, - {file = "onnx-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:60a3e28747e305cd2e766e6a53a0a6d952cf9e72005ec6023ce5e07666676a4e"}, - {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:6b5c798d9e0907eaf319e3d3e7c89a2ed9a854bcb83da5fefb6d4c12d5e90721"}, - {file = "onnx-1.15.0-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:a4f774ff50092fe19bd8f46b2c9b27b1d30fbd700c22abde48a478142d464322"}, - {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2b0e7f3938f2d994c34616bfb8b4b1cebbc4a0398483344fe5e9f2fe95175e6"}, - {file = "onnx-1.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:49cebebd0020a4b12c1dd0909d426631212ef28606d7e4d49463d36abe7639ad"}, - {file = "onnx-1.15.0-cp38-cp38-win32.whl", hash = "sha256:1fdf8a3ff75abc2b32c83bf27fb7c18d6b976c9c537263fadd82b9560fe186fa"}, - {file = "onnx-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:763e55c26e8de3a2dce008d55ae81b27fa8fb4acbb01a29b9f3c01f200c4d676"}, - {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:b2d5e802837629fc9c86f19448d19dd04d206578328bce202aeb3d4bedab43c4"}, - {file = "onnx-1.15.0-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9a9cfbb5e5d5d88f89d0dfc9df5fb858899db874e1d5ed21e76c481f3cafc90d"}, - {file = 
"onnx-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f472bbe5cb670a0a4a4db08f41fde69b187a009d0cb628f964840d3f83524e9"}, - {file = "onnx-1.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf2de9bef64792e5b8080c678023ac7d2b9e05d79a3e17e92cf6a4a624831d2"}, - {file = "onnx-1.15.0-cp39-cp39-win32.whl", hash = "sha256:ef4d9eb44b111e69e4534f3233fc2c13d1e26920d24ae4359d513bd54694bc6d"}, - {file = "onnx-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:95d7a3e2d79d371e272e39ae3f7547e0b116d0c7f774a4004e97febe6c93507f"}, - {file = "onnx-1.15.0.tar.gz", hash = "sha256:b18461a7d38f286618ca2a6e78062a2a9c634ce498e631e708a8041b00094825"}, -] - -[package.dependencies] -numpy = "*" -protobuf = ">=3.20.2" - -[package.extras] -reference = ["Pillow", "google-re2"] - [[package]] name = "onnxruntime" -version = "1.16.3" +version = "1.17.1" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" optional = false python-versions = "*" files = [ - {file = "onnxruntime-1.16.3-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:3bc41f323ac77acfed190be8ffdc47a6a75e4beeb3473fbf55eeb075ccca8df2"}, - {file = "onnxruntime-1.16.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:212741b519ee61a4822c79c47147d63a8b0ffde25cd33988d3d7be9fbd51005d"}, - {file = "onnxruntime-1.16.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f91f5497fe3df4ceee2f9e66c6148d9bfeb320cd6a71df361c66c5b8bac985a"}, - {file = "onnxruntime-1.16.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ef2b1fc269cabd27f129fb9058917d6fdc89b188c49ed8700f300b945c81f889"}, - {file = "onnxruntime-1.16.3-cp310-cp310-win32.whl", hash = "sha256:f36b56a593b49a3c430be008c2aea6658d91a3030115729609ec1d5ffbaab1b6"}, - {file = "onnxruntime-1.16.3-cp310-cp310-win_amd64.whl", hash = "sha256:3c467eaa3d2429c026b10c3d17b78b7f311f718ef9d2a0d6938e5c3c2611b0cf"}, - {file = "onnxruntime-1.16.3-cp311-cp311-macosx_10_15_x86_64.whl", hash = "sha256:a225bb683991001d111f75323d355b3590e75e16b5e0f07a0401e741a0143ea1"}, - {file = "onnxruntime-1.16.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9aded21fe3d898edd86be8aa2eb995aa375e800ad3dfe4be9f618a20b8ee3630"}, - {file = "onnxruntime-1.16.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00cccc37a5195c8fca5011b9690b349db435986bd508eb44c9fce432da9228a4"}, - {file = "onnxruntime-1.16.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e253e572021563226a86f1c024f8f70cdae28f2fb1cc8c3a9221e8b1ce37db5"}, - {file = "onnxruntime-1.16.3-cp311-cp311-win32.whl", hash = "sha256:a82a8f0b4c978d08f9f5c7a6019ae51151bced9fd91e5aaa0c20a9e4ac7a60b6"}, - {file = "onnxruntime-1.16.3-cp311-cp311-win_amd64.whl", hash = "sha256:78d81d9af457a1dc90db9a7da0d09f3ccb1288ea1236c6ab19f0ca61f3eee2d3"}, - {file = "onnxruntime-1.16.3-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:04ebcd29c20473596a1412e471524b2fb88d55e6301c40b98dd2407b5911595f"}, - {file = "onnxruntime-1.16.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9996bab0f202a6435ab867bc55598f15210d0b72794d5de83712b53d564084ae"}, - {file = "onnxruntime-1.16.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b8f5083f903408238883821dd8c775f8120cb4a604166dbdabe97f4715256d5"}, - {file = "onnxruntime-1.16.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c2dcf1b70f8434abb1116fe0975c00e740722aaf321997195ea3618cc00558e"}, - {file = 
"onnxruntime-1.16.3-cp38-cp38-win32.whl", hash = "sha256:d4a0151e1accd04da6711f6fd89024509602f82c65a754498e960b032359b02d"}, - {file = "onnxruntime-1.16.3-cp38-cp38-win_amd64.whl", hash = "sha256:e8aa5bba78afbd4d8a2654b14ec7462ff3ce4a6aad312a3c2d2c2b65009f2541"}, - {file = "onnxruntime-1.16.3-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:6829dc2a79d48c911fedaf4c0f01e03c86297d32718a3fdee7a282766dfd282a"}, - {file = "onnxruntime-1.16.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:76f876c53bfa912c6c242fc38213a6f13f47612d4360bc9d599bd23753e53161"}, - {file = "onnxruntime-1.16.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4137e5d443e2dccebe5e156a47f1d6d66f8077b03587c35f11ee0c7eda98b533"}, - {file = "onnxruntime-1.16.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c56695c1a343c7c008b647fff3df44da63741fbe7b6003ef576758640719be7b"}, - {file = "onnxruntime-1.16.3-cp39-cp39-win32.whl", hash = "sha256:985a029798744ce4743fcf8442240fed35c8e4d4d30ec7d0c2cdf1388cd44408"}, - {file = "onnxruntime-1.16.3-cp39-cp39-win_amd64.whl", hash = "sha256:28ff758b17ce3ca6bcad3d936ec53bd7f5482e7630a13f6dcae518eba8f71d85"}, + {file = "onnxruntime-1.17.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d43ac17ac4fa3c9096ad3c0e5255bb41fd134560212dc124e7f52c3159af5d21"}, + {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:55b5e92a4c76a23981c998078b9bf6145e4fb0b016321a8274b1607bd3c6bd35"}, + {file = "onnxruntime-1.17.1-cp310-cp310-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ebbcd2bc3a066cf54e6f18c75708eb4d309ef42be54606d22e5bdd78afc5b0d7"}, + {file = "onnxruntime-1.17.1-cp310-cp310-win32.whl", hash = "sha256:5e3716b5eec9092e29a8d17aab55e737480487deabfca7eac3cd3ed952b6ada9"}, + {file = "onnxruntime-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:fbb98cced6782ae1bb799cc74ddcbbeeae8819f3ad1d942a74d88e72b6511337"}, + {file = "onnxruntime-1.17.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:36fd6f87a1ecad87e9c652e42407a50fb305374f9a31d71293eb231caae18784"}, + {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:99a8bddeb538edabc524d468edb60ad4722cff8a49d66f4e280c39eace70500b"}, + {file = "onnxruntime-1.17.1-cp311-cp311-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fd7fddb4311deb5a7d3390cd8e9b3912d4d963efbe4dfe075edbaf18d01c024e"}, + {file = "onnxruntime-1.17.1-cp311-cp311-win32.whl", hash = "sha256:606a7cbfb6680202b0e4f1890881041ffc3ac6e41760a25763bd9fe146f0b335"}, + {file = "onnxruntime-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:53e4e06c0a541696ebdf96085fd9390304b7b04b748a19e02cf3b35c869a1e76"}, + {file = "onnxruntime-1.17.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:40f08e378e0f85929712a2b2c9b9a9cc400a90c8a8ca741d1d92c00abec60843"}, + {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac79da6d3e1bb4590f1dad4bb3c2979d7228555f92bb39820889af8b8e6bd472"}, + {file = "onnxruntime-1.17.1-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ae9ba47dc099004e3781f2d0814ad710a13c868c739ab086fc697524061695ea"}, + {file = "onnxruntime-1.17.1-cp312-cp312-win32.whl", hash = "sha256:2dff1a24354220ac30e4a4ce2fb1df38cb1ea59f7dac2c116238d63fe7f4c5ff"}, + {file = "onnxruntime-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:6226a5201ab8cafb15e12e72ff2a4fc8f50654e8fa5737c6f0bd57c5ff66827e"}, + {file = 
"onnxruntime-1.17.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:cd0c07c0d1dfb8629e820b05fda5739e4835b3b82faf43753d2998edf2cf00aa"}, + {file = "onnxruntime-1.17.1-cp38-cp38-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:617ebdf49184efa1ba6e4467e602fbfa029ed52c92f13ce3c9f417d303006381"}, + {file = "onnxruntime-1.17.1-cp38-cp38-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9dae9071e3facdf2920769dceee03b71c684b6439021defa45b830d05e148924"}, + {file = "onnxruntime-1.17.1-cp38-cp38-win32.whl", hash = "sha256:835d38fa1064841679433b1aa8138b5e1218ddf0cfa7a3ae0d056d8fd9cec713"}, + {file = "onnxruntime-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:96621e0c555c2453bf607606d08af3f70fbf6f315230c28ddea91754e17ad4e6"}, + {file = "onnxruntime-1.17.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:7a9539935fb2d78ebf2cf2693cad02d9930b0fb23cdd5cf37a7df813e977674d"}, + {file = "onnxruntime-1.17.1-cp39-cp39-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:45c6a384e9d9a29c78afff62032a46a993c477b280247a7e335df09372aedbe9"}, + {file = "onnxruntime-1.17.1-cp39-cp39-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4e19f966450f16863a1d6182a685ca33ae04d7772a76132303852d05b95411ea"}, + {file = "onnxruntime-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e2ae712d64a42aac29ed7a40a426cb1e624a08cfe9273dcfe681614aa65b07dc"}, + {file = "onnxruntime-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:f7e9f7fb049825cdddf4a923cfc7c649d84d63c0134315f8e0aa9e0c3004672c"}, ] [package.dependencies] @@ -2982,7 +3005,7 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] name = "opentelemetry-api" version = "1.21.0" description = "OpenTelemetry Python API" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, @@ -2997,7 +3020,7 @@ importlib-metadata = ">=6.0,<7.0" name = "opentelemetry-exporter-otlp-proto-common" version = "1.21.0" description = "OpenTelemetry Protobuf encoding" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_exporter_otlp_proto_common-1.21.0-py3-none-any.whl", hash = "sha256:97b1022b38270ec65d11fbfa348e0cd49d12006485c2321ea3b1b7037d42b6ec"}, @@ -3012,7 +3035,7 @@ opentelemetry-proto = "1.21.0" name = "opentelemetry-exporter-otlp-proto-grpc" version = "1.21.0" description = "OpenTelemetry Collector Protobuf over gRPC Exporter" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_exporter_otlp_proto_grpc-1.21.0-py3-none-any.whl", hash = "sha256:ab37c63d6cb58d6506f76d71d07018eb1f561d83e642a8f5aa53dddf306087a4"}, @@ -3036,7 +3059,7 @@ test = ["pytest-grpc"] name = "opentelemetry-instrumentation" version = "0.42b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_instrumentation-0.42b0-py3-none-any.whl", hash = "sha256:65ae54ddb90ca2d05d2d16bf6863173e7141eba1bbbf41fc9bbb02446adbe369"}, @@ -3052,7 +3075,7 @@ wrapt = ">=1.0.0,<2.0.0" name = "opentelemetry-instrumentation-asgi" version = "0.42b0" description = "ASGI instrumentation for OpenTelemetry" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_instrumentation_asgi-0.42b0-py3-none-any.whl", hash = 
"sha256:79b7278fb614aba1bf2211060960d3e8501c1d7d9314b857b30ad80ba34a2805"}, @@ -3074,7 +3097,7 @@ test = ["opentelemetry-instrumentation-asgi[instruments]", "opentelemetry-test-u name = "opentelemetry-instrumentation-fastapi" version = "0.42b0" description = "OpenTelemetry FastAPI Instrumentation" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_instrumentation_fastapi-0.42b0-py3-none-any.whl", hash = "sha256:d53a26c4859767d5ba67109038cabc7165d97a8a8b7654ccde4ce290036d1725"}, @@ -3096,7 +3119,7 @@ test = ["httpx (>=0.22,<1.0)", "opentelemetry-instrumentation-fastapi[instrument name = "opentelemetry-proto" version = "1.21.0" description = "OpenTelemetry Python Proto" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_proto-1.21.0-py3-none-any.whl", hash = "sha256:32fc4248e83eebd80994e13963e683f25f3b443226336bb12b5b6d53638f50ba"}, @@ -3110,7 +3133,7 @@ protobuf = ">=3.19,<5.0" name = "opentelemetry-sdk" version = "1.21.0" description = "OpenTelemetry Python SDK" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, @@ -3126,7 +3149,7 @@ typing-extensions = ">=3.7.4" name = "opentelemetry-semantic-conventions" version = "0.42b0" description = "OpenTelemetry Semantic Conventions" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, @@ -3137,128 +3160,77 @@ files = [ name = "opentelemetry-util-http" version = "0.42b0" description = "Web util for OpenTelemetry" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "opentelemetry_util_http-0.42b0-py3-none-any.whl", hash = "sha256:764069ed2f7e9a98ed1a7a87111f838000484e388e81f467405933be4b0306c6"}, {file = "opentelemetry_util_http-0.42b0.tar.gz", hash = "sha256:665e7d372837811aa08cbb9102d4da862441d1c9b1795d649ef08386c8a3cbbd"}, ] -[[package]] -name = "optimum" -version = "1.16.1" -description = "Optimum Library is an extension of the Hugging Face Transformers library, providing a framework to integrate third-party libraries from Hardware Partners and interface with their specific functionality." 
-optional = false -python-versions = ">=3.7.0" -files = [ - {file = "optimum-1.16.1-py3-none-any.whl", hash = "sha256:c0d0cb5877ae7b8f40289c54885341297f61b30255a93a71f5e979bc86b03598"}, - {file = "optimum-1.16.1.tar.gz", hash = "sha256:9a74bc5cbbe2373fc45f477257d5046fd28b725bce91c1f8846baadba57f2129"}, -] - -[package.dependencies] -coloredlogs = "*" -datasets = [ - {version = "*"}, - {version = ">=1.2.1", optional = true, markers = "extra == \"onnxruntime\""}, -] -evaluate = {version = "*", optional = true, markers = "extra == \"onnxruntime\""} -huggingface-hub = ">=0.8.0" -numpy = "*" -onnx = {version = "*", optional = true, markers = "extra == \"onnxruntime\""} -onnxruntime = {version = ">=1.11.0", optional = true, markers = "extra == \"onnxruntime\""} -packaging = "*" -protobuf = {version = ">=3.20.1", optional = true, markers = "extra == \"onnxruntime\""} -sympy = "*" -torch = ">=1.9" -transformers = {version = ">=4.26.0", extras = ["sentencepiece"]} - -[package.extras] -amd = ["optimum-amd"] -benchmark = ["evaluate (>=0.2.0)", "optuna", "scikit-learn", "seqeval", "torchvision", "tqdm"] -dev = ["Pillow", "accelerate", "black (>=23.1,<24.0)", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest", "pytest-xdist", "requests", "ruff (==0.1.5)", "sacremoses", "torchaudio", "torchvision"] -diffusers = ["diffusers"] -doc-build = ["accelerate"] -exporters = ["onnx", "onnxruntime", "timm"] -exporters-gpu = ["onnx", "onnxruntime-gpu", "timm"] -exporters-tf = ["h5py", "numpy (<1.24.0)", "onnx", "onnxruntime", "tensorflow (>=2.4,<=2.12.1)", "tf2onnx", "timm"] -furiosa = ["optimum-furiosa"] -graphcore = ["optimum-graphcore"] -habana = ["optimum-habana", "transformers (>=4.33.0,<4.35.0)"] -intel = ["optimum-intel (>=1.12.0)"] -neural-compressor = ["optimum-intel[neural-compressor] (>=1.12.0)"] -neuron = ["optimum-neuron[neuron]"] -neuronx = ["optimum-neuron[neuronx]"] -nncf = ["optimum-intel[nncf] (>=1.12.0)"] -onnxruntime = ["datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime (>=1.11.0)", "protobuf (>=3.20.1)"] -onnxruntime-gpu = ["accelerate", "datasets (>=1.2.1)", "evaluate", "onnx", "onnxruntime-gpu (>=1.11.0)", "protobuf (>=3.20.1)"] -openvino = ["optimum-intel[openvino] (>=1.12.0)"] -quality = ["black (>=23.1,<24.0)", "ruff (==0.1.5)"] -tests = ["Pillow", "accelerate", "diffusers (>=0.17.0)", "einops", "invisible-watermark", "parameterized", "pytest", "pytest-xdist", "requests", "sacremoses", "torchaudio", "torchvision"] - [[package]] name = "orjson" -version = "3.9.10" +version = "3.9.15" description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy" optional = false python-versions = ">=3.8" files = [ - {file = "orjson-3.9.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc"}, - {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83"}, - {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d"}, - {file = "orjson-3.9.10-cp310-none-win32.whl", hash = "sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1"}, - {file = "orjson-3.9.10-cp310-none-win_amd64.whl", hash = "sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7"}, - {file = "orjson-3.9.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca"}, - {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499"}, - {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3"}, - {file = "orjson-3.9.10-cp311-none-win32.whl", hash = "sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8"}, - {file = "orjson-3.9.10-cp311-none-win_amd64.whl", hash = "sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616"}, - {file = "orjson-3.9.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d"}, - {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921"}, - {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca"}, - {file = "orjson-3.9.10-cp312-none-win_amd64.whl", hash = "sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d"}, - {file = "orjson-3.9.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c"}, - {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777"}, - {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8"}, - {file = "orjson-3.9.10-cp38-none-win32.whl", hash = "sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643"}, - {file = "orjson-3.9.10-cp38-none-win_amd64.whl", hash = "sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5"}, - {file = "orjson-3.9.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3"}, - {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864"}, - {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade"}, - {file = "orjson-3.9.10-cp39-none-win32.whl", hash = "sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088"}, - {file = "orjson-3.9.10-cp39-none-win_amd64.whl", hash = "sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff"}, - {file = "orjson-3.9.10.tar.gz", hash = 
"sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"}, + {file = "orjson-3.9.15-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d61f7ce4727a9fa7680cd6f3986b0e2c732639f46a5e0156e550e35258aa313a"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4feeb41882e8aa17634b589533baafdceb387e01e117b1ec65534ec724023d04"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fbbeb3c9b2edb5fd044b2a070f127a0ac456ffd079cb82746fc84af01ef021a4"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b66bcc5670e8a6b78f0313bcb74774c8291f6f8aeef10fe70e910b8040f3ab75"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2973474811db7b35c30248d1129c64fd2bdf40d57d84beed2a9a379a6f57d0ab"}, + {file = "orjson-3.9.15-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fe41b6f72f52d3da4db524c8653e46243c8c92df826ab5ffaece2dba9cccd58"}, + {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4228aace81781cc9d05a3ec3a6d2673a1ad0d8725b4e915f1089803e9efd2b99"}, + {file = "orjson-3.9.15-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6f7b65bfaf69493c73423ce9db66cfe9138b2f9ef62897486417a8fcb0a92bfe"}, + {file = "orjson-3.9.15-cp310-none-win32.whl", hash = "sha256:2d99e3c4c13a7b0fb3792cc04c2829c9db07838fb6973e578b85c1745e7d0ce7"}, + {file = "orjson-3.9.15-cp310-none-win_amd64.whl", hash = "sha256:b725da33e6e58e4a5d27958568484aa766e825e93aa20c26c91168be58e08cbb"}, + {file = "orjson-3.9.15-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c8e8fe01e435005d4421f183038fc70ca85d2c1e490f51fb972db92af6e047c2"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87f1097acb569dde17f246faa268759a71a2cb8c96dd392cd25c668b104cad2f"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff0f9913d82e1d1fadbd976424c316fbc4d9c525c81d047bbdd16bd27dd98cfc"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8055ec598605b0077e29652ccfe9372247474375e0e3f5775c91d9434e12d6b1"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6768a327ea1ba44c9114dba5fdda4a214bdb70129065cd0807eb5f010bfcbb5"}, + {file = "orjson-3.9.15-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:12365576039b1a5a47df01aadb353b68223da413e2e7f98c02403061aad34bde"}, + {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:71c6b009d431b3839d7c14c3af86788b3cfac41e969e3e1c22f8a6ea13139404"}, + {file = "orjson-3.9.15-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e18668f1bd39e69b7fed19fa7cd1cd110a121ec25439328b5c89934e6d30d357"}, + {file = "orjson-3.9.15-cp311-none-win32.whl", hash = "sha256:62482873e0289cf7313461009bf62ac8b2e54bc6f00c6fabcde785709231a5d7"}, + {file = "orjson-3.9.15-cp311-none-win_amd64.whl", hash = "sha256:b3d336ed75d17c7b1af233a6561cf421dee41d9204aa3cfcc6c9c65cd5bb69a8"}, + {file = "orjson-3.9.15-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:82425dd5c7bd3adfe4e94c78e27e2fa02971750c2b7ffba648b0f5d5cc016a73"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:2c51378d4a8255b2e7c1e5cc430644f0939539deddfa77f6fac7b56a9784160a"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6ae4e06be04dc00618247c4ae3f7c3e561d5bc19ab6941427f6d3722a0875ef7"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:bcef128f970bb63ecf9a65f7beafd9b55e3aaf0efc271a4154050fc15cdb386e"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b72758f3ffc36ca566ba98a8e7f4f373b6c17c646ff8ad9b21ad10c29186f00d"}, + {file = "orjson-3.9.15-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10c57bc7b946cf2efa67ac55766e41764b66d40cbd9489041e637c1304400494"}, + {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:946c3a1ef25338e78107fba746f299f926db408d34553b4754e90a7de1d44068"}, + {file = "orjson-3.9.15-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2f256d03957075fcb5923410058982aea85455d035607486ccb847f095442bda"}, + {file = "orjson-3.9.15-cp312-none-win_amd64.whl", hash = "sha256:5bb399e1b49db120653a31463b4a7b27cf2fbfe60469546baf681d1b39f4edf2"}, + {file = "orjson-3.9.15-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:b17f0f14a9c0ba55ff6279a922d1932e24b13fc218a3e968ecdbf791b3682b25"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f6cbd8e6e446fb7e4ed5bac4661a29e43f38aeecbf60c4b900b825a353276a1"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:76bc6356d07c1d9f4b782813094d0caf1703b729d876ab6a676f3aaa9a47e37c"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fdfa97090e2d6f73dced247a2f2d8004ac6449df6568f30e7fa1a045767c69a6"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7413070a3e927e4207d00bd65f42d1b780fb0d32d7b1d951f6dc6ade318e1b5a"}, + {file = "orjson-3.9.15-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cf1596680ac1f01839dba32d496136bdd5d8ffb858c280fa82bbfeb173bdd40"}, + {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:809d653c155e2cc4fd39ad69c08fdff7f4016c355ae4b88905219d3579e31eb7"}, + {file = "orjson-3.9.15-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:920fa5a0c5175ab14b9c78f6f820b75804fb4984423ee4c4f1e6d748f8b22bc1"}, + {file = "orjson-3.9.15-cp38-none-win32.whl", hash = "sha256:2b5c0f532905e60cf22a511120e3719b85d9c25d0e1c2a8abb20c4dede3b05a5"}, + {file = "orjson-3.9.15-cp38-none-win_amd64.whl", hash = "sha256:67384f588f7f8daf040114337d34a5188346e3fae6c38b6a19a2fe8c663a2f9b"}, + {file = "orjson-3.9.15-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:6fc2fe4647927070df3d93f561d7e588a38865ea0040027662e3e541d592811e"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:34cbcd216e7af5270f2ffa63a963346845eb71e174ea530867b7443892d77180"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f541587f5c558abd93cb0de491ce99a9ef8d1ae29dd6ab4dbb5a13281ae04cbd"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:92255879280ef9c3c0bcb327c5a1b8ed694c290d61a6a532458264f887f052cb"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:05a1f57fb601c426635fcae9ddbe90dfc1ed42245eb4c75e4960440cac667262"}, + {file = "orjson-3.9.15-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ede0bde16cc6e9b96633df1631fbcd66491d1063667f260a4f2386a098393790"}, + {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e88b97ef13910e5f87bcbc4dd7979a7de9ba8702b54d3204ac587e83639c0c2b"}, + {file = "orjson-3.9.15-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57d5d8cf9c27f7ef6bc56a5925c7fbc76b61288ab674eb352c26ac780caa5b10"}, + {file = "orjson-3.9.15-cp39-none-win32.whl", hash = "sha256:001f4eb0ecd8e9ebd295722d0cbedf0748680fb9998d3993abaed2f40587257a"}, + {file = "orjson-3.9.15-cp39-none-win_amd64.whl", hash = "sha256:ea0b183a5fe6b2b45f3b854b0d19c4e932d6f5934ae1f723b07cf9560edd4ec7"}, + {file = "orjson-3.9.15.tar.gz", hash = "sha256:95cae920959d772f30ab36d3b25f83bb0f3be671e986c72ce22f8fa700dae061"}, ] [[package]] name = "overrides" version = "7.4.0" description = "A decorator to automatically detect mismatch when overriding a method." -optional = true +optional = false python-versions = ">=3.6" files = [ {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, @@ -3355,7 +3327,7 @@ files = [ name = "pgvector" version = "0.2.5" description = "pgvector support for Python" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "pgvector-0.2.5-py2.py3-none-any.whl", hash = "sha256:5e5e93ec4d3c45ab1fa388729d56c602f6966296e19deee8878928c6d567e41b"}, @@ -3484,7 +3456,7 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p name = "posthog" version = "3.1.0" description = "Integrate PostHog into any python application." -optional = true +optional = false python-versions = "*" files = [ {file = "posthog-3.1.0-py2.py3-none-any.whl", hash = "sha256:acd033530bdfc275dce5587f205f62378991ecb9b7cd5479e79c7f4ac575d319"}, @@ -3541,39 +3513,11 @@ files = [ {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"}, ] -[[package]] -name = "psutil" -version = "5.9.6" -description = "Cross-platform lib for process and system monitoring in Python." 
-optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" -files = [ - {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"}, - {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"}, - {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"}, - {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"}, - {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"}, - {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"}, - {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"}, - {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"}, - {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"}, - {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"}, - {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"}, - {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"}, - {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"}, - {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"}, - {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"}, - {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"}, -] - -[package.extras] -test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] - [[package]] name = "psycopg2-binary" version = "2.9.9" description = "psycopg2 - Python-PostgreSQL Database Adapter" -optional = true +optional = false python-versions = ">=3.7" files = [ {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, @@ -3654,7 +3598,7 @@ files = [ name = "pulsar-client" version = "3.3.0" description = "Apache Pulsar Python client library" -optional = true +optional = false python-versions = "*" files = [ {file = "pulsar_client-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:c31afd3e67a044ff93177df89e08febf214cc965e95ede097d9fe8755af00e01"}, @@ -3697,59 +3641,11 @@ all = ["apache-bookkeeper-client (>=4.16.1)", "fastavro (==1.7.3)", "grpcio (>=1 avro = ["fastavro (==1.7.3)"] functions = ["apache-bookkeeper-client (>=4.16.1)", "grpcio (>=1.8.2)", "prometheus-client", "protobuf (>=3.6.1,<=3.20.3)", "ratelimit"] -[[package]] -name = "pyarrow" -version = "14.0.1" -description = "Python library for Apache Arrow" -optional = 
false -python-versions = ">=3.8" -files = [ - {file = "pyarrow-14.0.1-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:96d64e5ba7dceb519a955e5eeb5c9adcfd63f73a56aea4722e2cc81364fc567a"}, - {file = "pyarrow-14.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a8ae88c0038d1bc362a682320112ee6774f006134cd5afc291591ee4bc06505"}, - {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f6f053cb66dc24091f5511e5920e45c83107f954a21032feadc7b9e3a8e7851"}, - {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:906b0dc25f2be12e95975722f1e60e162437023f490dbd80d0deb7375baf3171"}, - {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:78d4a77a46a7de9388b653af1c4ce539350726cd9af62e0831e4f2bd0c95a2f4"}, - {file = "pyarrow-14.0.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:06ca79080ef89d6529bb8e5074d4b4f6086143b2520494fcb7cf8a99079cde93"}, - {file = "pyarrow-14.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:32542164d905002c42dff896efdac79b3bdd7291b1b74aa292fac8450d0e4dcd"}, - {file = "pyarrow-14.0.1-cp311-cp311-macosx_10_14_x86_64.whl", hash = "sha256:c7331b4ed3401b7ee56f22c980608cf273f0380f77d0f73dd3c185f78f5a6220"}, - {file = "pyarrow-14.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:922e8b49b88da8633d6cac0e1b5a690311b6758d6f5d7c2be71acb0f1e14cd61"}, - {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:58c889851ca33f992ea916b48b8540735055201b177cb0dcf0596a495a667b00"}, - {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30d8494870d9916bb53b2a4384948491444741cb9a38253c590e21f836b01222"}, - {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:be28e1a07f20391bb0b15ea03dcac3aade29fc773c5eb4bee2838e9b2cdde0cb"}, - {file = "pyarrow-14.0.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:981670b4ce0110d8dcb3246410a4aabf5714db5d8ea63b15686bce1c914b1f83"}, - {file = "pyarrow-14.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:4756a2b373a28f6166c42711240643fb8bd6322467e9aacabd26b488fa41ec23"}, - {file = "pyarrow-14.0.1-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:cf87e2cec65dd5cf1aa4aba918d523ef56ef95597b545bbaad01e6433851aa10"}, - {file = "pyarrow-14.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:470ae0194fbfdfbf4a6b65b4f9e0f6e1fa0ea5b90c1ee6b65b38aecee53508c8"}, - {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6263cffd0c3721c1e348062997babdf0151301f7353010c9c9a8ed47448f82ab"}, - {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a8089d7e77d1455d529dbd7cff08898bbb2666ee48bc4085203af1d826a33cc"}, - {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fada8396bc739d958d0b81d291cfd201126ed5e7913cb73de6bc606befc30226"}, - {file = "pyarrow-14.0.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:2a145dab9ed7849fc1101bf03bcdc69913547f10513fdf70fc3ab6c0a50c7eee"}, - {file = "pyarrow-14.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:05fe7994745b634c5fb16ce5717e39a1ac1fac3e2b0795232841660aa76647cd"}, - {file = "pyarrow-14.0.1-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:a8eeef015ae69d104c4c3117a6011e7e3ecd1abec79dc87fd2fac6e442f666ee"}, - {file = "pyarrow-14.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:3c76807540989fe8fcd02285dd15e4f2a3da0b09d27781abec3adc265ddbeba1"}, - {file = 
"pyarrow-14.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:450e4605e3c20e558485f9161a79280a61c55efe585d51513c014de9ae8d393f"}, - {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:323cbe60210173ffd7db78bfd50b80bdd792c4c9daca8843ef3cd70b186649db"}, - {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:0140c7e2b740e08c5a459439d87acd26b747fc408bde0a8806096ee0baaa0c15"}, - {file = "pyarrow-14.0.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:e592e482edd9f1ab32f18cd6a716c45b2c0f2403dc2af782f4e9674952e6dd27"}, - {file = "pyarrow-14.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:d264ad13605b61959f2ae7c1d25b1a5b8505b112715c961418c8396433f213ad"}, - {file = "pyarrow-14.0.1-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:01e44de9749cddc486169cb632f3c99962318e9dacac7778315a110f4bf8a450"}, - {file = "pyarrow-14.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d0351fecf0e26e152542bc164c22ea2a8e8c682726fce160ce4d459ea802d69c"}, - {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:33c1f6110c386464fd2e5e4ea3624466055bbe681ff185fd6c9daa98f30a3f9a"}, - {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11e045dfa09855b6d3e7705a37c42e2dc2c71d608fab34d3c23df2e02df9aec3"}, - {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:097828b55321897db0e1dbfc606e3ff8101ae5725673498cbfa7754ee0da80e4"}, - {file = "pyarrow-14.0.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:1daab52050a1c48506c029e6fa0944a7b2436334d7e44221c16f6f1b2cc9c510"}, - {file = "pyarrow-14.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:3f6d5faf4f1b0d5a7f97be987cf9e9f8cd39902611e818fe134588ee99bf0283"}, - {file = "pyarrow-14.0.1.tar.gz", hash = "sha256:b8b3f4fe8d4ec15e1ef9b599b94683c5216adaed78d5cb4c606180546d1e2ee1"}, -] - -[package.dependencies] -numpy = ">=1.16.6" - [[package]] name = "pyasn1" version = "0.5.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, @@ -3760,7 +3656,7 @@ files = [ name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" -optional = true +optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, @@ -3964,6 +3860,64 @@ files = [ plugins = ["importlib-metadata"] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pymupdf" +version = "1.23.25" +description = "A high performance Python library for data extraction, analysis, conversion & manipulation of PDF (and other) documents." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "PyMuPDF-1.23.25-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:6be2b20fbff40602f673fc8e60fde3e5911397f8ca9ed6aa2d15be94b12cc2c4"}, + {file = "PyMuPDF-1.23.25-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:0f6923a44fbeaeefaabb2fa10955dcef3624e8826db661201951f3b3409fed32"}, + {file = "PyMuPDF-1.23.25-cp310-none-manylinux2014_aarch64.whl", hash = "sha256:8eeb2e97347586ec293fddaf61e8dfc58d6b2763406e8f7a6e45b560bf9b15a3"}, + {file = "PyMuPDF-1.23.25-cp310-none-manylinux2014_x86_64.whl", hash = "sha256:dca46799c152051697c5e88d66c17ba6d0244668d0c4dd8a2ba2d8d3cb745988"}, + {file = "PyMuPDF-1.23.25-cp310-none-win32.whl", hash = "sha256:88bfed1bd13ec84869489fc7b97381016cb8b99956073f4c3e8ac8c840bbb15a"}, + {file = "PyMuPDF-1.23.25-cp310-none-win_amd64.whl", hash = "sha256:98a78582c8a0c61b372e2bcd63dc61efc873e40b7d1f0b896a195e1a9ef9ffa7"}, + {file = "PyMuPDF-1.23.25-cp311-none-macosx_10_9_x86_64.whl", hash = "sha256:d7792810634036a745ea3eb3c4ccf2b6adab55ca9644e3352747d2b5aa5327f9"}, + {file = "PyMuPDF-1.23.25-cp311-none-macosx_11_0_arm64.whl", hash = "sha256:03bd1985b0234c3d2b8e26bb3e9ab1d2641dbada1e199b838a6bf884f35224c8"}, + {file = "PyMuPDF-1.23.25-cp311-none-manylinux2014_aarch64.whl", hash = "sha256:638fcb1f7551eb5ab582e412e204e8ded94acbbc37bc7f1e891a5dfc428881ee"}, + {file = "PyMuPDF-1.23.25-cp311-none-manylinux2014_x86_64.whl", hash = "sha256:067c88b4e6609cb7e74d98d0b0a35c11eb8e29f4fc51dc7ed1dd448b81d347c7"}, + {file = "PyMuPDF-1.23.25-cp311-none-win32.whl", hash = "sha256:a694f160d1701285cf3152951430740878d168511cd9ea0a3adcfaf3cac00322"}, + {file = "PyMuPDF-1.23.25-cp311-none-win_amd64.whl", hash = "sha256:514bcb679926b33413637b0bd73b223c90fb0d19352caf3395d0f23b1d47e8af"}, + {file = "PyMuPDF-1.23.25-cp312-none-macosx_10_9_x86_64.whl", hash = "sha256:bba342321e1b5574631894d7d34ec046605d953a23553b7d2f9c0e4d3c27254b"}, + {file = "PyMuPDF-1.23.25-cp312-none-macosx_11_0_arm64.whl", hash = "sha256:b2cb058c8229f9697deebe0574f7d95e4b9a5e295ceafd554346bbd464141e89"}, + {file = "PyMuPDF-1.23.25-cp312-none-manylinux2014_aarch64.whl", hash = "sha256:2479473b533936593428ce78499a1e9901570110ac602f03f1f3174efa0fa6a8"}, + {file = "PyMuPDF-1.23.25-cp312-none-manylinux2014_x86_64.whl", hash = "sha256:a247a4be1e43a6127ee305eae9f65767ee7519a2aa0cb1a2aa6acfd4e7fe7a9b"}, + {file = "PyMuPDF-1.23.25-cp312-none-win32.whl", hash = "sha256:b062be400bbaff6e8b17c0a8da9481e01ec935f97967e0870e9aacd7ba60a52a"}, + {file = "PyMuPDF-1.23.25-cp312-none-win_amd64.whl", hash = "sha256:b12e608761e1586a65f6e96a34417a91f814dbab29f2929b41d825ab32fab6ef"}, + {file = "PyMuPDF-1.23.25-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:ac97691c0e0e23607626d394bd660a46ea33f64921dc9288cf24daee207f9fe3"}, + {file = "PyMuPDF-1.23.25-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:c0a16cda5dc9b59d494ae23bdd9c4a3db53d04f2b6390265f5c0fe6269777975"}, + {file = "PyMuPDF-1.23.25-cp38-none-manylinux2014_aarch64.whl", hash = "sha256:23d735db51722a889bb50636d161d2747f08fa0b82cc2e4a7eb8e228b25d1c4e"}, + {file = "PyMuPDF-1.23.25-cp38-none-manylinux2014_x86_64.whl", hash = "sha256:cbc1407dcf01b2e3e547b2d7643b97cc44c0950d2bb4b12c74322664c5cb37d7"}, + {file = "PyMuPDF-1.23.25-cp38-none-win32.whl", hash = "sha256:c29518701d6360beb01c25cf69a77b6426db90a9e7cd11179b3bd783c7fb4cb1"}, + {file = "PyMuPDF-1.23.25-cp38-none-win_amd64.whl", hash = "sha256:c1bb6fa9e00c846e6829dec2bee8326754adaef5c80626b99233c01923f0342c"}, + {file = "PyMuPDF-1.23.25-cp39-none-macosx_10_9_x86_64.whl", hash = 
"sha256:514b272bfcd897f9ae29384da04167dcdea3b13ce0f2b9099b645314355d037d"}, + {file = "PyMuPDF-1.23.25-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:ef345a5b050d0869ef404845075edd5f4bd7fd99e235f4d32ce85f423779a120"}, + {file = "PyMuPDF-1.23.25-cp39-none-manylinux2014_aarch64.whl", hash = "sha256:b3ade5b349c38ddffb24f8c266fbcd7161f488c43960ff0f03f977d40d4df967"}, + {file = "PyMuPDF-1.23.25-cp39-none-manylinux2014_x86_64.whl", hash = "sha256:111d795a3e840aec2ad66beebd90a5327994ec85ed56fd68312f5463062dbbfa"}, + {file = "PyMuPDF-1.23.25-cp39-none-win32.whl", hash = "sha256:2237ce9897771f4af686cc0c81517ffb020fc1a011b95ccf5ccf05383492bd6d"}, + {file = "PyMuPDF-1.23.25-cp39-none-win_amd64.whl", hash = "sha256:251c9c321a2112716068d5ae11deedd1911d0387cbdd0ef19adb216a3adf882c"}, + {file = "PyMuPDF-1.23.25.tar.gz", hash = "sha256:eb414e92f08107f43576a1fedea28aa837220b15ad58c8e32015435fe96cc03e"}, +] + +[package.dependencies] +PyMuPDFb = "1.23.22" + +[[package]] +name = "pymupdfb" +version = "1.23.22" +description = "MuPDF shared libraries for PyMuPDF." +optional = false +python-versions = ">=3.8" +files = [ + {file = "PyMuPDFb-1.23.22-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9085a1e2fbf16f2820f9f7ad3d25e85f81d9b9eb0409110c1670d4cf5a27a678"}, + {file = "PyMuPDFb-1.23.22-py3-none-macosx_11_0_arm64.whl", hash = "sha256:01016dd33220cef4ecaf929d09fd27a584dc3ec3e5c9f4112dfe63613ea35135"}, + {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf50e814db91f2a2325219302fbac229a23682c372cf8232aabd51ea3f18210e"}, + {file = "PyMuPDFb-1.23.22-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3ffa713ad18e816e584c8a5f569995c32d22f8ac76ab6e4a61f2d2983c4b73d9"}, + {file = "PyMuPDFb-1.23.22-py3-none-win32.whl", hash = "sha256:d00e372452845aea624659c302d25e935052269fd3aafe26948301576d6f2ee8"}, + {file = "PyMuPDFb-1.23.22-py3-none-win_amd64.whl", hash = "sha256:7c9c157281fdee9f296e666a323307dbf74cb38f017921bb131fa7bfcd39c2bd"}, +] + [[package]] name = "pyparsing" version = "3.1.1" @@ -3980,13 +3934,13 @@ diagrams = ["jinja2", "railroad-diagrams"] [[package]] name = "pypdf" -version = "3.17.2" +version = "4.0.2" description = "A pure-python PDF library capable of splitting, merging, cropping, and transforming PDF files" optional = false python-versions = ">=3.6" files = [ - {file = "pypdf-3.17.2-py3-none-any.whl", hash = "sha256:e149ed50aa41e04b176246714806cd8d6c6c6d68b528508f849642959041963a"}, - {file = "pypdf-3.17.2.tar.gz", hash = "sha256:d6f077060912f8292d7db3da04f7bf2428ac974781e11eef219193a22120f649"}, + {file = "pypdf-4.0.2-py3-none-any.whl", hash = "sha256:a62daa2a24d5a608ba1b6284dde185317ce3644f89b9ebe5314d0c5d1c9f257d"}, + {file = "pypdf-4.0.2.tar.gz", hash = "sha256:3316d9ddfcff5df67ae3cdfe8b945c432aa43e7f970bae7c2a4ab4fe129cd937"}, ] [package.extras] @@ -4000,12 +3954,23 @@ image = ["Pillow (>=8.0.0)"] name = "pypika" version = "0.48.9" description = "A SQL query builder API for Python" -optional = true +optional = false python-versions = "*" files = [ {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, ] +[[package]] +name = "pyproject-hooks" +version = "1.0.0" +description = "Wrappers to call pyproject.toml-based build backend hooks." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"}, + {file = "pyproject_hooks-1.0.0.tar.gz", hash = "sha256:f271b298b97f5955d53fb12b72c1fb1948c22c1a6b70b315c54cedaca0264ef5"}, +] + [[package]] name = "pyreadline3" version = "3.4.1" @@ -4103,17 +4068,17 @@ cli = ["click (>=5.0)"] [[package]] name = "python-multipart" -version = "0.0.6" +version = "0.0.9" description = "A streaming multipart parser for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"}, - {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"}, + {file = "python_multipart-0.0.9-py3-none-any.whl", hash = "sha256:97ca7b8ea7b05f977dc3849c3ba99d51689822fab725c3703af7c866a0c2b215"}, + {file = "python_multipart-0.0.9.tar.gz", hash = "sha256:03f54688c663f1b7977105f021043b0793151e4cb1c1a9d4a11fc13d622c4026"}, ] [package.extras] -dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"] +dev = ["atomicwrites (==1.4.1)", "attrs (==23.2.0)", "coverage (==7.4.1)", "hatch", "invoke (==2.2.0)", "more-itertools (==10.2.0)", "pbr (==6.0.0)", "pluggy (==1.4.0)", "py (==1.11.0)", "pytest (==8.0.0)", "pytest-cov (==4.1.0)", "pytest-timeout (==2.2.0)", "pyyaml (==6.0.1)", "ruff (==0.2.1)"] [[package]] name = "pytz" @@ -4210,13 +4175,13 @@ files = [ [[package]] name = "qdrant-client" -version = "1.7.0" +version = "1.7.3" description = "Client library for the Qdrant vector search engine" optional = false -python-versions = ">=3.8,<3.13" +python-versions = ">=3.8" files = [ - {file = "qdrant_client-1.7.0-py3-none-any.whl", hash = "sha256:ab5779cf3f008da2a801c943413423f1ff434128dfaeda031f037453e1fa8306"}, - {file = "qdrant_client-1.7.0.tar.gz", hash = "sha256:bbe0656020c2f11061d7836b87e99ba6b50a028f5318459cc1fddf4ef73d9a8b"}, + {file = "qdrant_client-1.7.3-py3-none-any.whl", hash = "sha256:b062420ba55eb847652c7d2a26404fb1986bea13aa785763024013f96a7a915c"}, + {file = "qdrant_client-1.7.3.tar.gz", hash = "sha256:7b809be892cdc5137ae80ea3335da40c06499ad0b0072b5abc6bad79da1d29fc"}, ] [package.dependencies] @@ -4226,7 +4191,7 @@ httpx = {version = ">=0.14.0", extras = ["http2"]} numpy = {version = ">=1.21", markers = "python_version >= \"3.8\" and python_version < \"3.12\""} portalocker = ">=2.7.0,<3.0.0" pydantic = ">=1.10.8" -urllib3 = ">=1.26.14,<2.0.0" +urllib3 = ">=1.26.14,<3" [package.extras] fastembed = ["fastembed (==0.1.1)"] @@ -4368,7 +4333,7 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "requests-oauthlib" version = "1.3.1" description = "OAuthlib authentication support for Requests." -optional = true +optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, @@ -4382,24 +4347,6 @@ requests = ">=2.0.0" [package.extras] rsa = ["oauthlib[signedtoken] (>=3.0.0)"] -[[package]] -name = "responses" -version = "0.18.0" -description = "A utility library for mocking out the `requests` Python library." 
-optional = false -python-versions = ">=3.7" -files = [ - {file = "responses-0.18.0-py3-none-any.whl", hash = "sha256:15c63ad16de13ee8e7182d99c9334f64fd81f1ee79f90748d527c28f7ca9dd51"}, - {file = "responses-0.18.0.tar.gz", hash = "sha256:380cad4c1c1dc942e5e8a8eaae0b4d4edf708f4f010db8b7bcfafad1fcd254ff"}, -] - -[package.dependencies] -requests = ">=2.0,<3.0" -urllib3 = ">=1.25.10" - -[package.extras] -tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=4.6)", "pytest-cov", "pytest-localserver", "types-mock", "types-requests"] - [[package]] name = "rich" version = "13.7.0" @@ -4530,7 +4477,7 @@ files = [ name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" -optional = true +optional = false python-versions = ">=3.6,<4" files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, @@ -4568,13 +4515,13 @@ files = [ [[package]] name = "s3transfer" -version = "0.9.0" +version = "0.10.0" description = "An Amazon S3 Transfer Manager" optional = false python-versions = ">= 3.8" files = [ - {file = "s3transfer-0.9.0-py3-none-any.whl", hash = "sha256:01d4d2c35a016db8cb14f9a4d5e84c1f8c96e7ffc211422555eed45c11fa7eb1"}, - {file = "s3transfer-0.9.0.tar.gz", hash = "sha256:9e1b186ec8bb5907a1e82b51237091889a9973a2bb799a924bcd9f301ff79d3d"}, + {file = "s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e"}, + {file = "s3transfer-0.10.0.tar.gz", hash = "sha256:d0c8bbf672d5eebbe4e57945e23b972d963f07d82f661cabf678a5c88831595b"}, ] [package.dependencies] @@ -5031,20 +4978,20 @@ sqlcipher = ["sqlcipher3_binary"] [[package]] name = "starlette" -version = "0.27.0" +version = "0.36.3" description = "The little ASGI library that shines." 
optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"}, - {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"}, + {file = "starlette-0.36.3-py3-none-any.whl", hash = "sha256:13d429aa93a61dc40bf503e8c801db1f1bca3dc706b10ef2434a36123568f044"}, + {file = "starlette-0.36.3.tar.gz", hash = "sha256:90a671733cfb35771d8cc605e0b679d23b992f8dcfad48cc60b38cb29aeb7080"}, ] [package.dependencies] anyio = ">=3.4.0,<5" [package.extras] -full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"] +full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7)", "pyyaml"] [[package]] name = "sympy" @@ -5139,109 +5086,121 @@ blobfile = ["blobfile (>=2)"] [[package]] name = "tokenizers" -version = "0.15.0" +version = "0.15.2" description = "" optional = false python-versions = ">=3.7" files = [ - {file = "tokenizers-0.15.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:cd3cd0299aaa312cd2988957598f80becd04d5a07338741eca076057a2b37d6e"}, - {file = "tokenizers-0.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a922c492c721744ee175f15b91704be2d305569d25f0547c77cd6c9f210f9dc"}, - {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:331dd786d02fc38698f835fff61c99480f98b73ce75a4c65bd110c9af5e4609a"}, - {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88dd0961c437d413ab027f8b115350c121d49902cfbadf08bb8f634b15fa1814"}, - {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6fdcc55339df7761cd52e1fbe8185d3b3963bc9e3f3545faa6c84f9e8818259a"}, - {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f1480b0051d8ab5408e8e4db2dc832f7082ea24aa0722c427bde2418c6f3bd07"}, - {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9855e6c258918f9cf62792d4f6ddfa6c56dccd8c8118640f867f6393ecaf8bd7"}, - {file = "tokenizers-0.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de9529fe75efcd54ba8d516aa725e1851df9199f0669b665c55e90df08f5af86"}, - {file = "tokenizers-0.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:8edcc90a36eab0705fe9121d6c77c6e42eeef25c7399864fd57dfb27173060bf"}, - {file = "tokenizers-0.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ae17884aafb3e94f34fb7cfedc29054f5f54e142475ebf8a265a4e388fee3f8b"}, - {file = "tokenizers-0.15.0-cp310-none-win32.whl", hash = "sha256:9a3241acdc9b44cff6e95c4a55b9be943ef3658f8edb3686034d353734adba05"}, - {file = "tokenizers-0.15.0-cp310-none-win_amd64.whl", hash = "sha256:4b31807cb393d6ea31926b307911c89a1209d5e27629aa79553d1599c8ffdefe"}, - {file = "tokenizers-0.15.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:af7e9be8c05d30bb137b9fd20f9d99354816599e5fd3d58a4b1e28ba3b36171f"}, - {file = "tokenizers-0.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c3d7343fa562ea29661783344a2d83662db0d3d17a6fa6a403cac8e512d2d9fd"}, - {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:32371008788aeeb0309a9244809a23e4c0259625e6b74a103700f6421373f395"}, - {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ca9db64c7c9954fbae698884c5bb089764edc549731e5f9b7fa1dd4e4d78d77f"}, - {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:dbed5944c31195514669cf6381a0d8d47f164943000d10f93d6d02f0d45c25e0"}, - {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aab16c4a26d351d63e965b0c792f5da7227a37b69a6dc6d922ff70aa595b1b0c"}, - {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3c2b60b12fdd310bf85ce5d7d3f823456b9b65eed30f5438dd7761879c495983"}, - {file = "tokenizers-0.15.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0344d6602740e44054a9e5bbe9775a5e149c4dddaff15959bb07dcce95a5a859"}, - {file = "tokenizers-0.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4525f6997d81d9b6d9140088f4f5131f6627e4c960c2c87d0695ae7304233fc3"}, - {file = "tokenizers-0.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:65975094fef8cc68919644936764efd2ce98cf1bacbe8db2687155d2b0625bee"}, - {file = "tokenizers-0.15.0-cp311-none-win32.whl", hash = "sha256:ff5d2159c5d93015f5a4542aac6c315506df31853123aa39042672031768c301"}, - {file = "tokenizers-0.15.0-cp311-none-win_amd64.whl", hash = "sha256:2dd681b53cf615e60a31a115a3fda3980e543d25ca183797f797a6c3600788a3"}, - {file = "tokenizers-0.15.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:c9cce6ee149a3d703f86877bc2a6d997e34874b2d5a2d7839e36b2273f31d3d9"}, - {file = "tokenizers-0.15.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a0a94bc3370e6f1cc8a07a8ae867ce13b7c1b4291432a773931a61f256d44ea"}, - {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:309cfcccfc7e502cb1f1de2c9c1c94680082a65bfd3a912d5a5b2c90c677eb60"}, - {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8413e994dd7d875ab13009127fc85633916c71213917daf64962bafd488f15dc"}, - {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d0ebf9430f901dbdc3dcb06b493ff24a3644c9f88c08e6a1d6d0ae2228b9b818"}, - {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10361e9c7864b22dd791ec5126327f6c9292fb1d23481d4895780688d5e298ac"}, - {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:babe42635b8a604c594bdc56d205755f73414fce17ba8479d142a963a6c25cbc"}, - {file = "tokenizers-0.15.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3768829861e964c7a4556f5f23307fce6a23872c2ebf030eb9822dbbbf7e9b2a"}, - {file = "tokenizers-0.15.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9c91588a630adc88065e1c03ac6831e3e2112558869b9ebcb2b8afd8a14c944d"}, - {file = "tokenizers-0.15.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:77606994e793ca54ecf3a3619adc8a906a28ca223d9354b38df41cb8766a0ed6"}, - {file = "tokenizers-0.15.0-cp37-cp37m-macosx_10_7_x86_64.whl", hash = "sha256:6fe143939f3b596681922b2df12a591a5b010e7dcfbee2202482cd0c1c2f2459"}, - {file = "tokenizers-0.15.0-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:b7bee0f1795e3e3561e9a557061b1539e5255b8221e3f928f58100282407e090"}, - {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5d37e7f4439b4c46192ab4f2ff38ab815e4420f153caa13dec9272ef14403d34"}, - {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:caadf255cf7f951b38d10097836d1f3bcff4aeaaffadfdf748bab780bf5bff95"}, - {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:05accb9162bf711a941b1460b743d62fec61c160daf25e53c5eea52c74d77814"}, - {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:26a2ef890740127cb115ee5260878f4a677e36a12831795fd7e85887c53b430b"}, - {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e54c5f26df14913620046b33e822cb3bcd091a332a55230c0e63cc77135e2169"}, - {file = "tokenizers-0.15.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:669b8ed653a578bcff919566631156f5da3aab84c66f3c0b11a6281e8b4731c7"}, - {file = "tokenizers-0.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:0ea480d943297df26f06f508dab6e012b07f42bf3dffdd36e70799368a5f5229"}, - {file = "tokenizers-0.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:bc80a0a565ebfc7cd89de7dd581da8c2b3238addfca6280572d27d763f135f2f"}, - {file = "tokenizers-0.15.0-cp37-none-win32.whl", hash = "sha256:cdd945e678bbdf4517d5d8de66578a5030aeefecdb46f5320b034de9cad8d4dd"}, - {file = "tokenizers-0.15.0-cp37-none-win_amd64.whl", hash = "sha256:1ab96ab7dc706e002c32b2ea211a94c1c04b4f4de48354728c3a6e22401af322"}, - {file = "tokenizers-0.15.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:f21c9eb71c9a671e2a42f18b456a3d118e50c7f0fc4dd9fa8f4eb727fea529bf"}, - {file = "tokenizers-0.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a5f4543a35889679fc3052086e69e81880b2a5a28ff2a52c5a604be94b77a3f"}, - {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f8aa81afec893e952bd39692b2d9ef60575ed8c86fce1fd876a06d2e73e82dca"}, - {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1574a5a4af22c3def93fe8fe4adcc90a39bf5797ed01686a4c46d1c3bc677d2f"}, - {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7c7982fd0ec9e9122d03b209dac48cebfea3de0479335100ef379a9a959b9a5a"}, - {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d16b647032df2ce2c1f9097236e046ea9fedd969b25637b9d5d734d78aa53b"}, - {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b3cdf29e6f9653da330515dc8fa414be5a93aae79e57f8acc50d4028dd843edf"}, - {file = "tokenizers-0.15.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7286f3df10de840867372e3e64b99ef58c677210e3ceb653cd0e740a5c53fe78"}, - {file = "tokenizers-0.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aabc83028baa5a36ce7a94e7659250f0309c47fa4a639e5c2c38e6d5ea0de564"}, - {file = "tokenizers-0.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:72f78b0e0e276b1fc14a672fa73f3acca034ba8db4e782124a2996734a9ba9cf"}, - {file = "tokenizers-0.15.0-cp38-none-win32.whl", hash = "sha256:9680b0ecc26e7e42f16680c1aa62e924d58d1c2dd992707081cc10a374896ea2"}, - {file = "tokenizers-0.15.0-cp38-none-win_amd64.whl", hash = "sha256:f17cbd88dab695911cbdd385a5a7e3709cc61dff982351f5d1b5939f074a2466"}, - {file = "tokenizers-0.15.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:3661862df7382c5eb23ac4fbf7c75e69b02dc4f5784e4c5a734db406b5b24596"}, - {file = "tokenizers-0.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c3045d191dad49647f5a5039738ecf1c77087945c7a295f7bcf051c37067e883"}, - {file = 
"tokenizers-0.15.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9fcaad9ab0801f14457d7c820d9f246b5ab590c407fc6b073819b1573097aa7"}, - {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a79f17027f24fe9485701c8dbb269b9c713954ec3bdc1e7075a66086c0c0cd3c"}, - {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:01a3aa332abc4bee7640563949fcfedca4de8f52691b3b70f2fc6ca71bfc0f4e"}, - {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05b83896a893cdfedad8785250daa3ba9f0504848323471524d4783d7291661e"}, - {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cbbf2489fcf25d809731ba2744ff278dd07d9eb3f8b7482726bd6cae607073a4"}, - {file = "tokenizers-0.15.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ab806ad521a5e9de38078b7add97589c313915f6f5fec6b2f9f289d14d607bd6"}, - {file = "tokenizers-0.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4a522612d5c88a41563e3463226af64e2fa00629f65cdcc501d1995dd25d23f5"}, - {file = "tokenizers-0.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e58a38c4e6075810bdfb861d9c005236a72a152ebc7005941cc90d1bbf16aca9"}, - {file = "tokenizers-0.15.0-cp39-none-win32.whl", hash = "sha256:b8034f1041fd2bd2b84ff9f4dc4ae2e1c3b71606820a9cd5c562ebd291a396d1"}, - {file = "tokenizers-0.15.0-cp39-none-win_amd64.whl", hash = "sha256:edde9aa964145d528d0e0dbf14f244b8a85ebf276fb76869bc02e2530fa37a96"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:309445d10d442b7521b98083dc9f0b5df14eca69dbbfebeb98d781ee2cef5d30"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d3125a6499226d4d48efc54f7498886b94c418e93a205b673bc59364eecf0804"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ed56ddf0d54877bb9c6d885177db79b41576e61b5ef6defeb579dcb803c04ad5"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b22cd714706cc5b18992a232b023f736e539495f5cc61d2d28d176e55046f6c"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fac2719b1e9bc8e8e7f6599b99d0a8e24f33d023eb8ef644c0366a596f0aa926"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:85ddae17570ec7e5bfaf51ffa78d044f444a8693e1316e1087ee6150596897ee"}, - {file = "tokenizers-0.15.0-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:76f1bed992e396bf6f83e3df97b64ff47885e45e8365f8983afed8556a0bc51f"}, - {file = "tokenizers-0.15.0-pp37-pypy37_pp73-macosx_10_7_x86_64.whl", hash = "sha256:3bb0f4df6dce41a1c7482087b60d18c372ef4463cb99aa8195100fcd41e0fd64"}, - {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:22c27672c27a059a5f39ff4e49feed8c7f2e1525577c8a7e3978bd428eb5869d"}, - {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78104f5d035c9991f92831fc0efe9e64a05d4032194f2a69f67aaa05a4d75bbb"}, - {file = "tokenizers-0.15.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a40b73dc19d82c3e3ffb40abdaacca8fbc95eeb26c66b7f9f860aebc07a73998"}, - {file = "tokenizers-0.15.0-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = 
"sha256:d801d1368188c74552cd779b1286e67cb9fd96f4c57a9f9a2a09b6def9e1ab37"}, - {file = "tokenizers-0.15.0-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82641ffb13a4da1293fcc9f437d457647e60ed0385a9216cd135953778b3f0a1"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:160f9d1810f2c18fffa94aa98bf17632f6bd2dabc67fcb01a698ca80c37d52ee"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:8d7d6eea831ed435fdeeb9bcd26476226401d7309d115a710c65da4088841948"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:f6456bec6c557d63d8ec0023758c32f589e1889ed03c055702e84ce275488bed"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1eef39a502fad3bf104b9e1906b4fb0cee20e44e755e51df9a98f8922c3bf6d4"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1e4664c5b797e093c19b794bbecc19d2367e782b4a577d8b7c1821db5dc150d"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:ca003fb5f3995ff5cf676db6681b8ea5d54d3b30bea36af1120e78ee1a4a4cdf"}, - {file = "tokenizers-0.15.0-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7f17363141eb0c53752c89e10650b85ef059a52765d0802ba9613dbd2d21d425"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:8a765db05581c7d7e1280170f2888cda351760d196cc059c37ea96f121125799"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2a0dd641a72604486cd7302dd8f87a12c8a9b45e1755e47d2682733f097c1af5"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0a1a3c973e4dc97797fc19e9f11546c95278ffc55c4492acb742f69e035490bc"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4fab75642aae4e604e729d6f78e0addb9d7e7d49e28c8f4d16b24da278e5263"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65f80be77f6327a86d8fd35a4467adcfe6174c159b4ab52a1a8dd4c6f2d7d9e1"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:a8da7533dbe66b88afd430c56a2f2ce1fd82e2681868f857da38eeb3191d7498"}, - {file = "tokenizers-0.15.0-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa8eb4584fc6cbe6a84d7a7864be3ed28e23e9fd2146aa8ef1814d579df91958"}, - {file = "tokenizers-0.15.0.tar.gz", hash = "sha256:10c7e6e7b4cabd757da59e93f5f8d1126291d16f8b54f28510825ef56a3e5d0e"}, + {file = "tokenizers-0.15.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52f6130c9cbf70544287575a985bf44ae1bda2da7e8c24e97716080593638012"}, + {file = "tokenizers-0.15.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:054c1cc9c6d68f7ffa4e810b3d5131e0ba511b6e4be34157aa08ee54c2f8d9ee"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a9b9b070fdad06e347563b88c278995735292ded1132f8657084989a4c84a6d5"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea621a7eef4b70e1f7a4e84dd989ae3f0eeb50fc8690254eacc08acb623e82f1"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:cf7fd9a5141634fa3aa8d6b7be362e6ae1b4cda60da81388fa533e0b552c98fd"}, + {file = 
"tokenizers-0.15.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:44f2a832cd0825295f7179eaf173381dc45230f9227ec4b44378322d900447c9"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8b9ec69247a23747669ec4b0ca10f8e3dfb3545d550258129bd62291aabe8605"}, + {file = "tokenizers-0.15.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40b6a4c78da863ff26dbd5ad9a8ecc33d8a8d97b535172601cf00aee9d7ce9ce"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5ab2a4d21dcf76af60e05af8063138849eb1d6553a0d059f6534357bce8ba364"}, + {file = "tokenizers-0.15.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a47acfac7e511f6bbfcf2d3fb8c26979c780a91e06fb5b9a43831b2c0153d024"}, + {file = "tokenizers-0.15.2-cp310-none-win32.whl", hash = "sha256:064ff87bb6acdbd693666de9a4b692add41308a2c0ec0770d6385737117215f2"}, + {file = "tokenizers-0.15.2-cp310-none-win_amd64.whl", hash = "sha256:3b919afe4df7eb6ac7cafd2bd14fb507d3f408db7a68c43117f579c984a73843"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:89cd1cb93e4b12ff39bb2d626ad77e35209de9309a71e4d3d4672667b4b256e7"}, + {file = "tokenizers-0.15.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:cfed5c64e5be23d7ee0f0e98081a25c2a46b0b77ce99a4f0605b1ec43dd481fa"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:a907d76dcfda37023ba203ab4ceeb21bc5683436ebefbd895a0841fd52f6f6f2"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20ea60479de6fc7b8ae756b4b097572372d7e4032e2521c1bbf3d90c90a99ff0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:48e2b9335be2bc0171df9281385c2ed06a15f5cf121c44094338306ab7b33f2c"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:112a1dd436d2cc06e6ffdc0b06d55ac019a35a63afd26475205cb4b1bf0bfbff"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4620cca5c2817177ee8706f860364cc3a8845bc1e291aaf661fb899e5d1c45b0"}, + {file = "tokenizers-0.15.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ccd73a82751c523b3fc31ff8194702e4af4db21dc20e55b30ecc2079c5d43cb7"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:107089f135b4ae7817affe6264f8c7a5c5b4fd9a90f9439ed495f54fcea56fb4"}, + {file = "tokenizers-0.15.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0ff110ecc57b7aa4a594396525a3451ad70988e517237fe91c540997c4e50e29"}, + {file = "tokenizers-0.15.2-cp311-none-win32.whl", hash = "sha256:6d76f00f5c32da36c61f41c58346a4fa7f0a61be02f4301fd30ad59834977cc3"}, + {file = "tokenizers-0.15.2-cp311-none-win_amd64.whl", hash = "sha256:cc90102ed17271cf0a1262babe5939e0134b3890345d11a19c3145184b706055"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f86593c18d2e6248e72fb91c77d413a815153b8ea4e31f7cd443bdf28e467670"}, + {file = "tokenizers-0.15.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0774bccc6608eca23eb9d620196687c8b2360624619623cf4ba9dc9bd53e8b51"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d0222c5b7c9b26c0b4822a82f6a7011de0a9d3060e1da176f66274b70f846b98"}, + {file = 
"tokenizers-0.15.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3835738be1de66624fff2f4f6f6684775da4e9c00bde053be7564cbf3545cc66"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0143e7d9dcd811855c1ce1ab9bf5d96d29bf5e528fd6c7824d0465741e8c10fd"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db35825f6d54215f6b6009a7ff3eedee0848c99a6271c870d2826fbbedf31a38"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3f5e64b0389a2be47091d8cc53c87859783b837ea1a06edd9d8e04004df55a5c"}, + {file = "tokenizers-0.15.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e0480c452217edd35eca56fafe2029fb4d368b7c0475f8dfa3c5c9c400a7456"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:a33ab881c8fe70474980577e033d0bc9a27b7ab8272896e500708b212995d834"}, + {file = "tokenizers-0.15.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a308a607ca9de2c64c1b9ba79ec9a403969715a1b8ba5f998a676826f1a7039d"}, + {file = "tokenizers-0.15.2-cp312-none-win32.whl", hash = "sha256:b8fcfa81bcb9447df582c5bc96a031e6df4da2a774b8080d4f02c0c16b42be0b"}, + {file = "tokenizers-0.15.2-cp312-none-win_amd64.whl", hash = "sha256:38d7ab43c6825abfc0b661d95f39c7f8af2449364f01d331f3b51c94dcff7221"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:38bfb0204ff3246ca4d5e726e8cc8403bfc931090151e6eede54d0e0cf162ef0"}, + {file = "tokenizers-0.15.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c861d35e8286a53e06e9e28d030b5a05bcbf5ac9d7229e561e53c352a85b1fc"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:936bf3842db5b2048eaa53dade907b1160f318e7c90c74bfab86f1e47720bdd6"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:620beacc3373277700d0e27718aa8b25f7b383eb8001fba94ee00aeea1459d89"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2735ecbbf37e52db4ea970e539fd2d450d213517b77745114f92867f3fc246eb"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:473c83c5e2359bb81b0b6fde870b41b2764fcdd36d997485e07e72cc3a62264a"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968fa1fb3c27398b28a4eca1cbd1e19355c4d3a6007f7398d48826bbe3a0f728"}, + {file = "tokenizers-0.15.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:865c60ae6eaebdde7da66191ee9b7db52e542ed8ee9d2c653b6d190a9351b980"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7c0d8b52664ab2d4a8d6686eb5effc68b78608a9008f086a122a7b2996befbab"}, + {file = "tokenizers-0.15.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:f33dfbdec3784093a9aebb3680d1f91336c56d86cc70ddf88708251da1fe9064"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_10_12_x86_64.whl", hash = "sha256:d44ba80988ff9424e33e0a49445072ac7029d8c0e1601ad25a0ca5f41ed0c1d6"}, + {file = "tokenizers-0.15.2-cp37-cp37m-macosx_11_0_arm64.whl", hash = "sha256:dce74266919b892f82b1b86025a613956ea0ea62a4843d4c4237be2c5498ed3a"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0ef06b9707baeb98b316577acb04f4852239d856b93e9ec3a299622f6084e4be"}, + {file = 
"tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c73e2e74bbb07910da0d37c326869f34113137b23eadad3fc00856e6b3d9930c"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4eeb12daf02a59e29f578a865f55d87cd103ce62bd8a3a5874f8fdeaa82e336b"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9ba9f6895af58487ca4f54e8a664a322f16c26bbb442effd01087eba391a719e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccec77aa7150e38eec6878a493bf8c263ff1fa8a62404e16c6203c64c1f16a26"}, + {file = "tokenizers-0.15.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3f40604f5042ff210ba82743dda2b6aa3e55aa12df4e9f2378ee01a17e2855e"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:5645938a42d78c4885086767c70923abad047163d809c16da75d6b290cb30bbe"}, + {file = "tokenizers-0.15.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:05a77cbfebe28a61ab5c3891f9939cc24798b63fa236d84e5f29f3a85a200c00"}, + {file = "tokenizers-0.15.2-cp37-none-win32.whl", hash = "sha256:361abdc068e8afe9c5b818769a48624687fb6aaed49636ee39bec4e95e1a215b"}, + {file = "tokenizers-0.15.2-cp37-none-win_amd64.whl", hash = "sha256:7ef789f83eb0f9baeb4d09a86cd639c0a5518528f9992f38b28e819df397eb06"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:4fe1f74a902bee74a3b25aff180fbfbf4f8b444ab37c4d496af7afd13a784ed2"}, + {file = "tokenizers-0.15.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c4b89038a684f40a6b15d6b09f49650ac64d951ad0f2a3ea9169687bbf2a8ba"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:d05a1b06f986d41aed5f2de464c003004b2df8aaf66f2b7628254bcbfb72a438"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:508711a108684111ec8af89d3a9e9e08755247eda27d0ba5e3c50e9da1600f6d"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:daa348f02d15160cb35439098ac96e3a53bacf35885072611cd9e5be7d333daa"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:494fdbe5932d3416de2a85fc2470b797e6f3226c12845cadf054dd906afd0442"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c2d60f5246f4da9373f75ff18d64c69cbf60c3bca597290cea01059c336d2470"}, + {file = "tokenizers-0.15.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93268e788825f52de4c7bdcb6ebc1fcd4a5442c02e730faa9b6b08f23ead0e24"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6fc7083ab404019fc9acafe78662c192673c1e696bd598d16dc005bd663a5cf9"}, + {file = "tokenizers-0.15.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:41e39b41e5531d6b2122a77532dbea60e171ef87a3820b5a3888daa847df4153"}, + {file = "tokenizers-0.15.2-cp38-none-win32.whl", hash = "sha256:06cd0487b1cbfabefb2cc52fbd6b1f8d4c37799bd6c6e1641281adaa6b2504a7"}, + {file = "tokenizers-0.15.2-cp38-none-win_amd64.whl", hash = "sha256:5179c271aa5de9c71712e31cb5a79e436ecd0d7532a408fa42a8dbfa4bc23fd9"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:82f8652a74cc107052328b87ea8b34291c0f55b96d8fb261b3880216a9f9e48e"}, + {file = "tokenizers-0.15.2-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:02458bee6f5f3139f1ebbb6d042b283af712c0981f5bc50edf771d6b762d5e4f"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:c9a09cd26cca2e1c349f91aa665309ddb48d71636370749414fbf67bc83c5343"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:158be8ea8554e5ed69acc1ce3fbb23a06060bd4bbb09029431ad6b9a466a7121"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ddba9a2b0c8c81633eca0bb2e1aa5b3a15362b1277f1ae64176d0f6eba78ab1"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3ef5dd1d39797044642dbe53eb2bc56435308432e9c7907728da74c69ee2adca"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:454c203164e07a860dbeb3b1f4a733be52b0edbb4dd2e5bd75023ffa8b49403a"}, + {file = "tokenizers-0.15.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0cf6b7f1d4dc59af960e6ffdc4faffe6460bbfa8dce27a58bf75755ffdb2526d"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2ef09bbc16519f6c25d0c7fc0c6a33a6f62923e263c9d7cca4e58b8c61572afb"}, + {file = "tokenizers-0.15.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c9a2ebdd2ad4ec7a68e7615086e633857c85e2f18025bd05d2a4399e6c5f7169"}, + {file = "tokenizers-0.15.2-cp39-none-win32.whl", hash = "sha256:918fbb0eab96fe08e72a8c2b5461e9cce95585d82a58688e7f01c2bd546c79d0"}, + {file = "tokenizers-0.15.2-cp39-none-win_amd64.whl", hash = "sha256:524e60da0135e106b254bd71f0659be9f89d83f006ea9093ce4d1fab498c6d0d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:6a9b648a58281c4672212fab04e60648fde574877d0139cd4b4f93fe28ca8944"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7c7d18b733be6bbca8a55084027f7be428c947ddf871c500ee603e375013ffba"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:13ca3611de8d9ddfbc4dc39ef54ab1d2d4aaa114ac8727dfdc6a6ec4be017378"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:237d1bf3361cf2e6463e6c140628e6406766e8b27274f5fcc62c747ae3c6f094"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67a0fe1e49e60c664915e9fb6b0cb19bac082ab1f309188230e4b2920230edb3"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:4e022fe65e99230b8fd89ebdfea138c24421f91c1a4f4781a8f5016fd5cdfb4d"}, + {file = "tokenizers-0.15.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:d857be2df69763362ac699f8b251a8cd3fac9d21893de129bc788f8baaef2693"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-macosx_10_12_x86_64.whl", hash = "sha256:708bb3e4283177236309e698da5fcd0879ce8fd37457d7c266d16b550bcbbd18"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c35e09e9899b72a76e762f9854e8750213f67567787d45f37ce06daf57ca78"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c1257f4394be0d3b00de8c9e840ca5601d0a4a8438361ce9c2b05c7d25f6057b"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02272fe48280e0293a04245ca5d919b2c94a48b408b55e858feae9618138aeda"}, + {file = 
"tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:dc3ad9ebc76eabe8b1d7c04d38be884b8f9d60c0cdc09b0aa4e3bcf746de0388"}, + {file = "tokenizers-0.15.2-pp37-pypy37_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:32e16bdeffa7c4f46bf2152172ca511808b952701d13e7c18833c0b73cb5c23f"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fb16ba563d59003028b678d2361a27f7e4ae0ab29c7a80690efa20d829c81fdb"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:2277c36d2d6cdb7876c274547921a42425b6810d38354327dd65a8009acf870c"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cf75d32e8d250781940d07f7eece253f2fe9ecdb1dc7ba6e3833fa17b82fcbc"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1b3b31884dc8e9b21508bb76da80ebf7308fdb947a17affce815665d5c4d028"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b10122d8d8e30afb43bb1fe21a3619f62c3e2574bff2699cf8af8b0b6c5dc4a3"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d88b96ff0fe8e91f6ef01ba50b0d71db5017fa4e3b1d99681cec89a85faf7bf7"}, + {file = "tokenizers-0.15.2-pp38-pypy38_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:37aaec5a52e959892870a7c47cef80c53797c0db9149d458460f4f31e2fb250e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:e2ea752f2b0fe96eb6e2f3adbbf4d72aaa1272079b0dfa1145507bd6a5d537e6"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:4b19a808d8799fda23504a5cd31d2f58e6f52f140380082b352f877017d6342b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:64c86e5e068ac8b19204419ed8ca90f9d25db20578f5881e337d203b314f4104"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de19c4dc503c612847edf833c82e9f73cd79926a384af9d801dcf93f110cea4e"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea09acd2fe3324174063d61ad620dec3bcf042b495515f27f638270a7d466e8b"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:cf27fd43472e07b57cf420eee1e814549203d56de00b5af8659cb99885472f1f"}, + {file = "tokenizers-0.15.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:7ca22bd897537a0080521445d91a58886c8c04084a6a19e6c78c586e0cfa92a5"}, + {file = "tokenizers-0.15.2.tar.gz", hash = "sha256:e6e9c6e019dd5484be5beafc775ae6c925f4c69a3487040ed09b45e13df2cb91"}, ] [package.dependencies] @@ -5387,43 +5346,39 @@ telegram = ["requests"] [[package]] name = "transformers" -version = "4.36.1" +version = "4.38.1" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" files = [ - {file = "transformers-4.36.1-py3-none-any.whl", hash = "sha256:0e309d03634885f02d46801ec4f2c3fc1d614a5b9ebde608181f3e842bac53b8"}, - {file = "transformers-4.36.1.tar.gz", hash = "sha256:28e55952d9bed68f06cf45a3d29cc480679b528afe944e68f8cf6c799e428759"}, + {file = "transformers-4.38.1-py3-none-any.whl", hash = "sha256:a7a9265fb060183e9d975cbbadc4d531b10281589c43f6d07563f86322728973"}, + {file = "transformers-4.38.1.tar.gz", hash = "sha256:86dc84ccbe36123647e84cbd50fc31618c109a41e6be92514b064ab55bf1304c"}, ] [package.dependencies] 
-accelerate = {version = ">=0.21.0", optional = true, markers = "extra == \"torch\""} filelock = "*" huggingface-hub = ">=0.19.3,<1.0" numpy = ">=1.17" packaging = ">=20.0" -protobuf = {version = "*", optional = true, markers = "extra == \"sentencepiece\""} pyyaml = ">=5.1" regex = "!=2019.12.17" requests = "*" -safetensors = ">=0.3.1" -sentencepiece = {version = ">=0.1.91,<0.1.92 || >0.1.92", optional = true, markers = "extra == \"sentencepiece\""} +safetensors = ">=0.4.1" tokenizers = ">=0.14,<0.19" -torch = {version = ">=1.10,<1.12.0 || >1.12.0", optional = true, markers = "extra == \"torch\""} tqdm = ">=4.27" [package.extras] accelerate = ["accelerate (>=0.21.0)"] -agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch (>=1.10,!=1.12.0)"] -all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] +agents = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "datasets (!=2.5.0)", "diffusers", "opencv-python", "sentencepiece (>=0.1.91,!=0.1.92)", "torch"] +all = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] audio = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] codecarbon = ["codecarbon (==1.2.0)"] deepspeed = ["accelerate (>=0.21.0)", "deepspeed (>=0.9.3)"] -deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic (<2)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] -dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic 
(<2)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic (<2)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"] -dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic (<2)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] -docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "torchaudio", "torchvision"] +deepspeed-testing = ["GitPython (<3.1.19)", "accelerate (>=0.21.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "deepspeed (>=0.9.3)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "optuna", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score 
(!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "timeout-decorator"] +dev = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "decord (==0.6.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "flax (>=0.4.1,<=0.7.0)", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +dev-tensorflow = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "isort (>=5.5.4)", "kenlm", "keras-nlp (>=0.3.1)", "librosa", "nltk", "onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "tensorboard", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timeout-decorator", "tokenizers (>=0.14,<0.19)", "urllib3 (<2.0.0)"] +dev-torch = ["GitPython (<3.1.19)", "Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "beautifulsoup4", "codecarbon (==1.2.0)", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "fugashi (>=1.0)", "hf-doc-builder", "hf-doc-builder (>=0.3.0)", "ipadic (>=1.0.0,<2.0)", "isort (>=5.5.4)", "kenlm", "librosa", "nltk", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "optuna", "parameterized", "phonemizer", "protobuf", "psutil", "pyctcdecode (>=0.4.0)", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "ray[tune] (>=2.7.0)", "rhoknp (>=1.1.0,<1.3.1)", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "scikit-learn", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "tensorboard", "timeout-decorator", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)", "urllib3 (<2.0.0)"] +docs = ["Pillow (>=10.0.1,<=15.0)", "accelerate (>=0.21.0)", "av (==9.2.0)", "codecarbon (==1.2.0)", "decord (==0.6.0)", "flax (>=0.4.1,<=0.7.0)", "hf-doc-builder", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", 
"kenlm", "keras-nlp (>=0.3.1)", "librosa", "onnxconverter-common", "optax (>=0.0.8,<=0.1.4)", "optuna", "phonemizer", "protobuf", "pyctcdecode (>=0.4.0)", "ray[tune] (>=2.7.0)", "sentencepiece (>=0.1.91,!=0.1.92)", "sigopt", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx", "timm", "tokenizers (>=0.14,<0.19)", "torch", "torchaudio", "torchvision"] docs-specific = ["hf-doc-builder"] flax = ["flax (>=0.4.1,<=0.7.0)", "jax (>=0.4.1,<=0.4.13)", "jaxlib (>=0.4.1,<=0.4.13)", "optax (>=0.0.8,<=0.1.4)"] flax-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] @@ -5431,7 +5386,7 @@ ftfy = ["ftfy"] integrations = ["optuna", "ray[tune] (>=2.7.0)", "sigopt"] ja = ["fugashi (>=1.0)", "ipadic (>=1.0.0,<2.0)", "rhoknp (>=1.1.0,<1.3.1)", "sudachidict-core (>=20220729)", "sudachipy (>=0.6.6)", "unidic (>=1.0.2)", "unidic-lite (>=1.0.7)"] modelcreation = ["cookiecutter (==1.7.3)"] -natten = ["natten (>=0.14.6)"] +natten = ["natten (>=0.14.6,<0.15.0)"] onnx = ["onnxconverter-common", "onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)", "tf2onnx"] onnxruntime = ["onnxruntime (>=1.4.0)", "onnxruntime-tools (>=1.4.2)"] optuna = ["optuna"] @@ -5440,20 +5395,20 @@ ray = ["ray[tune] (>=2.7.0)"] retrieval = ["datasets (!=2.5.0)", "faiss-cpu"] sagemaker = ["sagemaker (>=2.31.0)"] sentencepiece = ["protobuf", "sentencepiece (>=0.1.91,!=0.1.92)"] -serving = ["fastapi", "pydantic (<2)", "starlette", "uvicorn"] +serving = ["fastapi", "pydantic", "starlette", "uvicorn"] sigopt = ["sigopt"] sklearn = ["scikit-learn"] speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] -testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pydantic (<2)", "pytest (>=7.2.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] +testing = ["GitPython (<3.1.19)", "beautifulsoup4", "cookiecutter (==1.7.3)", "datasets (!=2.5.0)", "dill (<0.3.5)", "evaluate (>=0.2.0)", "faiss-cpu", "hf-doc-builder (>=0.3.0)", "nltk", "parameterized", "protobuf", "psutil", "pydantic", "pytest (>=7.2.0,<8.0.0)", "pytest-timeout", "pytest-xdist", "rjieba", "rouge-score (!=0.0.7,!=0.0.8,!=0.1,!=0.1.1)", "ruff (==0.1.5)", "sacrebleu (>=1.4.12,<2.0.0)", "sacremoses", "tensorboard", "timeout-decorator"] tf = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] tf-cpu = ["keras-nlp (>=0.3.1)", "onnxconverter-common", "tensorflow-cpu (>=2.6,<2.16)", "tensorflow-text (<2.16)", "tf2onnx"] tf-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)"] timm = ["timm"] tokenizers = ["tokenizers (>=0.14,<0.19)"] -torch = ["accelerate (>=0.21.0)", "torch (>=1.10,!=1.12.0)"] +torch = ["accelerate (>=0.21.0)", "torch"] torch-speech = ["kenlm", "librosa", "phonemizer", "pyctcdecode (>=0.4.0)", "torchaudio"] torch-vision = ["Pillow (>=10.0.1,<=15.0)", "torchvision"] -torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", "packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch (>=1.10,!=1.12.0)", "tqdm (>=4.27)"] +torchhub = ["filelock", "huggingface-hub (>=0.19.3,<1.0)", "importlib-metadata", "numpy (>=1.17)", 
"packaging (>=20.0)", "protobuf", "regex (!=2019.12.17)", "requests", "sentencepiece (>=0.1.91,!=0.1.92)", "tokenizers (>=0.14,<0.19)", "torch", "tqdm (>=4.27)"] video = ["av (==9.2.0)", "decord (==0.6.0)"] vision = ["Pillow (>=10.0.1,<=15.0)"] @@ -5735,38 +5690,40 @@ test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess [[package]] name = "watchdog" -version = "3.0.0" +version = "4.0.0" description = "Filesystem events monitoring" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:336adfc6f5cc4e037d52db31194f7581ff744b67382eb6021c868322e32eef41"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a70a8dcde91be523c35b2bf96196edc5730edb347e374c7de7cd20c43ed95397"}, - {file = "watchdog-3.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:adfdeab2da79ea2f76f87eb42a3ab1966a5313e5a69a0213a3cc06ef692b0e96"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2b57a1e730af3156d13b7fdddfc23dea6487fceca29fc75c5a868beed29177ae"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7ade88d0d778b1b222adebcc0927428f883db07017618a5e684fd03b83342bd9"}, - {file = "watchdog-3.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7e447d172af52ad204d19982739aa2346245cc5ba6f579d16dac4bfec226d2e7"}, - {file = "watchdog-3.0.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9fac43a7466eb73e64a9940ac9ed6369baa39b3bf221ae23493a9ec4d0022674"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8ae9cda41fa114e28faf86cb137d751a17ffd0316d1c34ccf2235e8a84365c7f"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:25f70b4aa53bd743729c7475d7ec41093a580528b100e9a8c5b5efe8899592fc"}, - {file = "watchdog-3.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4f94069eb16657d2c6faada4624c39464f65c05606af50bb7902e036e3219be3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:7c5f84b5194c24dd573fa6472685b2a27cc5a17fe5f7b6fd40345378ca6812e3"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3aa7f6a12e831ddfe78cdd4f8996af9cf334fd6346531b16cec61c3b3c0d8da0"}, - {file = "watchdog-3.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:233b5817932685d39a7896b1090353fc8efc1ef99c9c054e46c8002561252fb8"}, - {file = "watchdog-3.0.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:13bbbb462ee42ec3c5723e1205be8ced776f05b100e4737518c67c8325cf6100"}, - {file = "watchdog-3.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:8f3ceecd20d71067c7fd4c9e832d4e22584318983cabc013dbf3f70ea95de346"}, - {file = "watchdog-3.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c9d8c8ec7efb887333cf71e328e39cffbf771d8f8f95d308ea4125bf5f90ba64"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0e06ab8858a76e1219e68c7573dfeba9dd1c0219476c5a44d5333b01d7e1743a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:d00e6be486affb5781468457b21a6cbe848c33ef43f9ea4a73b4882e5f188a44"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:c07253088265c363d1ddf4b3cdb808d59a0468ecd017770ed716991620b8f77a"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:5113334cf8cf0ac8cd45e1f8309a603291b614191c9add34d33075727a967709"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_ppc64le.whl", hash = 
"sha256:51f90f73b4697bac9c9a78394c3acbbd331ccd3655c11be1a15ae6fe289a8c83"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:ba07e92756c97e3aca0912b5cbc4e5ad802f4557212788e72a72a47ff376950d"}, - {file = "watchdog-3.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:d429c2430c93b7903914e4db9a966c7f2b068dd2ebdd2fa9b9ce094c7d459f33"}, - {file = "watchdog-3.0.0-py3-none-win32.whl", hash = "sha256:3ed7c71a9dccfe838c2f0b6314ed0d9b22e77d268c67e015450a29036a81f60f"}, - {file = "watchdog-3.0.0-py3-none-win_amd64.whl", hash = "sha256:4c9956d27be0bb08fc5f30d9d0179a855436e655f046d288e2bcc11adfae893c"}, - {file = "watchdog-3.0.0-py3-none-win_ia64.whl", hash = "sha256:5d9f3a10e02d7371cd929b5d8f11e87d4bad890212ed3901f9b4d68767bee759"}, - {file = "watchdog-3.0.0.tar.gz", hash = "sha256:4d98a320595da7a7c5a18fc48cb633c2e73cda78f93cac2ef42d42bf609a33f9"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:39cb34b1f1afbf23e9562501673e7146777efe95da24fab5707b88f7fb11649b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c522392acc5e962bcac3b22b9592493ffd06d1fc5d755954e6be9f4990de932b"}, + {file = "watchdog-4.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6c47bdd680009b11c9ac382163e05ca43baf4127954c5f6d0250e7d772d2b80c"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8350d4055505412a426b6ad8c521bc7d367d1637a762c70fdd93a3a0d595990b"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c17d98799f32e3f55f181f19dd2021d762eb38fdd381b4a748b9f5a36738e935"}, + {file = "watchdog-4.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4986db5e8880b0e6b7cd52ba36255d4793bf5cdc95bd6264806c233173b1ec0b"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:11e12fafb13372e18ca1bbf12d50f593e7280646687463dd47730fd4f4d5d257"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:5369136a6474678e02426bd984466343924d1df8e2fd94a9b443cb7e3aa20d19"}, + {file = "watchdog-4.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:76ad8484379695f3fe46228962017a7e1337e9acadafed67eb20aabb175df98b"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:45cc09cc4c3b43fb10b59ef4d07318d9a3ecdbff03abd2e36e77b6dd9f9a5c85"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eed82cdf79cd7f0232e2fdc1ad05b06a5e102a43e331f7d041e5f0e0a34a51c4"}, + {file = "watchdog-4.0.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba30a896166f0fee83183cec913298151b73164160d965af2e93a20bbd2ab605"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d18d7f18a47de6863cd480734613502904611730f8def45fc52a5d97503e5101"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2895bf0518361a9728773083908801a376743bcc37dfa252b801af8fd281b1ca"}, + {file = "watchdog-4.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:87e9df830022488e235dd601478c15ad73a0389628588ba0b028cb74eb72fed8"}, + {file = "watchdog-4.0.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6e949a8a94186bced05b6508faa61b7adacc911115664ccb1923b9ad1f1ccf7b"}, + {file = "watchdog-4.0.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6a4db54edea37d1058b08947c789a2354ee02972ed5d1e0dca9b0b820f4c7f92"}, + {file = "watchdog-4.0.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d31481ccf4694a8416b681544c23bd271f5a123162ab603c7d7d2dd7dd901a07"}, + {file = 
"watchdog-4.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8fec441f5adcf81dd240a5fe78e3d83767999771630b5ddfc5867827a34fa3d3"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:6a9c71a0b02985b4b0b6d14b875a6c86ddea2fdbebd0c9a720a806a8bbffc69f"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:557ba04c816d23ce98a06e70af6abaa0485f6d94994ec78a42b05d1c03dcbd50"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:d0f9bd1fd919134d459d8abf954f63886745f4660ef66480b9d753a7c9d40927"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:f9b2fdca47dc855516b2d66eef3c39f2672cbf7e7a42e7e67ad2cbfcd6ba107d"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:73c7a935e62033bd5e8f0da33a4dcb763da2361921a69a5a95aaf6c93aa03a87"}, + {file = "watchdog-4.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6a80d5cae8c265842c7419c560b9961561556c4361b297b4c431903f8c33b269"}, + {file = "watchdog-4.0.0-py3-none-win32.whl", hash = "sha256:8f9a542c979df62098ae9c58b19e03ad3df1c9d8c6895d96c0d51da17b243b1c"}, + {file = "watchdog-4.0.0-py3-none-win_amd64.whl", hash = "sha256:f970663fa4f7e80401a7b0cbeec00fa801bf0287d93d48368fc3e6fa32716245"}, + {file = "watchdog-4.0.0-py3-none-win_ia64.whl", hash = "sha256:9a03e16e55465177d416699331b0f3564138f1807ecc5f2de9d55d8f188d08c7"}, + {file = "watchdog-4.0.0.tar.gz", hash = "sha256:e3e7065cbdabe6183ab82199d7a4f6b3ba0a438c5a512a68559846ccb76a78ec"}, ] [package.extras] @@ -5863,7 +5820,7 @@ anyio = ">=3.0.0" name = "websocket-client" version = "1.7.0" description = "WebSocket client for Python with low level API options" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "websocket-client-1.7.0.tar.gz", hash = "sha256:10e511ea3a8c744631d3bd77e61eb17ed09304c413ad42cf6ddfa4c7787e8fe6"}, @@ -6033,123 +5990,6 @@ files = [ {file = "wrapt-1.16.0.tar.gz", hash = "sha256:5f370f952971e7d17c7d1ead40e49f32345a7f7a5373571ef44d800d06b1899d"}, ] -[[package]] -name = "xxhash" -version = "3.4.1" -description = "Python binding for xxHash" -optional = false -python-versions = ">=3.7" -files = [ - {file = "xxhash-3.4.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:91dbfa55346ad3e18e738742236554531a621042e419b70ad8f3c1d9c7a16e7f"}, - {file = "xxhash-3.4.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:665a65c2a48a72068fcc4d21721510df5f51f1142541c890491afc80451636d2"}, - {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb11628470a6004dc71a09fe90c2f459ff03d611376c1debeec2d648f44cb693"}, - {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5bef2a7dc7b4f4beb45a1edbba9b9194c60a43a89598a87f1a0226d183764189"}, - {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c0f7b2d547d72c7eda7aa817acf8791f0146b12b9eba1d4432c531fb0352228"}, - {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:00f2fdef6b41c9db3d2fc0e7f94cb3db86693e5c45d6de09625caad9a469635b"}, - {file = "xxhash-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:23cfd9ca09acaf07a43e5a695143d9a21bf00f5b49b15c07d5388cadf1f9ce11"}, - {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:6a9ff50a3cf88355ca4731682c168049af1ca222d1d2925ef7119c1a78e95b3b"}, - {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_i686.whl", 
hash = "sha256:f1d7c69a1e9ca5faa75546fdd267f214f63f52f12692f9b3a2f6467c9e67d5e7"}, - {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:672b273040d5d5a6864a36287f3514efcd1d4b1b6a7480f294c4b1d1ee1b8de0"}, - {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:4178f78d70e88f1c4a89ff1ffe9f43147185930bb962ee3979dba15f2b1cc799"}, - {file = "xxhash-3.4.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9804b9eb254d4b8cc83ab5a2002128f7d631dd427aa873c8727dba7f1f0d1c2b"}, - {file = "xxhash-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c09c49473212d9c87261d22c74370457cfff5db2ddfc7fd1e35c80c31a8c14ce"}, - {file = "xxhash-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:ebbb1616435b4a194ce3466d7247df23499475c7ed4eb2681a1fa42ff766aff6"}, - {file = "xxhash-3.4.1-cp310-cp310-win_arm64.whl", hash = "sha256:25dc66be3db54f8a2d136f695b00cfe88018e59ccff0f3b8f545869f376a8a46"}, - {file = "xxhash-3.4.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:58c49083801885273e262c0f5bbeac23e520564b8357fbb18fb94ff09d3d3ea5"}, - {file = "xxhash-3.4.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b526015a973bfbe81e804a586b703f163861da36d186627e27524f5427b0d520"}, - {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36ad4457644c91a966f6fe137d7467636bdc51a6ce10a1d04f365c70d6a16d7e"}, - {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:248d3e83d119770f96003271fe41e049dd4ae52da2feb8f832b7a20e791d2920"}, - {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2070b6d5bbef5ee031666cf21d4953c16e92c2f8a24a94b5c240f8995ba3b1d0"}, - {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b2746035f518f0410915e247877f7df43ef3372bf36cfa52cc4bc33e85242641"}, - {file = "xxhash-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2a8ba6181514681c2591840d5632fcf7356ab287d4aff1c8dea20f3c78097088"}, - {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0aac5010869240e95f740de43cd6a05eae180c59edd182ad93bf12ee289484fa"}, - {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4cb11d8debab1626181633d184b2372aaa09825bde709bf927704ed72765bed1"}, - {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b29728cff2c12f3d9f1d940528ee83918d803c0567866e062683f300d1d2eff3"}, - {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:a15cbf3a9c40672523bdb6ea97ff74b443406ba0ab9bca10ceccd9546414bd84"}, - {file = "xxhash-3.4.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6e66df260fed01ed8ea790c2913271641c58481e807790d9fca8bfd5a3c13844"}, - {file = "xxhash-3.4.1-cp311-cp311-win32.whl", hash = "sha256:e867f68a8f381ea12858e6d67378c05359d3a53a888913b5f7d35fbf68939d5f"}, - {file = "xxhash-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:200a5a3ad9c7c0c02ed1484a1d838b63edcf92ff538770ea07456a3732c577f4"}, - {file = "xxhash-3.4.1-cp311-cp311-win_arm64.whl", hash = "sha256:1d03f1c0d16d24ea032e99f61c552cb2b77d502e545187338bea461fde253583"}, - {file = "xxhash-3.4.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c4bbba9b182697a52bc0c9f8ec0ba1acb914b4937cd4a877ad78a3b3eeabefb3"}, - {file = "xxhash-3.4.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9fd28a9da300e64e434cfc96567a8387d9a96e824a9be1452a1e7248b7763b78"}, - {file = 
"xxhash-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6066d88c9329ab230e18998daec53d819daeee99d003955c8db6fc4971b45ca3"}, - {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:93805bc3233ad89abf51772f2ed3355097a5dc74e6080de19706fc447da99cd3"}, - {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64da57d5ed586ebb2ecdde1e997fa37c27fe32fe61a656b77fabbc58e6fbff6e"}, - {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7a97322e9a7440bf3c9805cbaac090358b43f650516486746f7fa482672593df"}, - {file = "xxhash-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbe750d512982ee7d831838a5dee9e9848f3fb440e4734cca3f298228cc957a6"}, - {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fd79d4087727daf4d5b8afe594b37d611ab95dc8e29fe1a7517320794837eb7d"}, - {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:743612da4071ff9aa4d055f3f111ae5247342931dedb955268954ef7201a71ff"}, - {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:b41edaf05734092f24f48c0958b3c6cbaaa5b7e024880692078c6b1f8247e2fc"}, - {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:a90356ead70d715fe64c30cd0969072de1860e56b78adf7c69d954b43e29d9fa"}, - {file = "xxhash-3.4.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ac56eebb364e44c85e1d9e9cc5f6031d78a34f0092fea7fc80478139369a8b4a"}, - {file = "xxhash-3.4.1-cp312-cp312-win32.whl", hash = "sha256:911035345932a153c427107397c1518f8ce456f93c618dd1c5b54ebb22e73747"}, - {file = "xxhash-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:f31ce76489f8601cc7b8713201ce94b4bd7b7ce90ba3353dccce7e9e1fee71fa"}, - {file = "xxhash-3.4.1-cp312-cp312-win_arm64.whl", hash = "sha256:b5beb1c6a72fdc7584102f42c4d9df232ee018ddf806e8c90906547dfb43b2da"}, - {file = "xxhash-3.4.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6d42b24d1496deb05dee5a24ed510b16de1d6c866c626c2beb11aebf3be278b9"}, - {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b685fab18876b14a8f94813fa2ca80cfb5ab6a85d31d5539b7cd749ce9e3624"}, - {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:419ffe34c17ae2df019a4685e8d3934d46b2e0bbe46221ab40b7e04ed9f11137"}, - {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e041ce5714f95251a88670c114b748bca3bf80cc72400e9f23e6d0d59cf2681"}, - {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc860d887c5cb2f524899fb8338e1bb3d5789f75fac179101920d9afddef284b"}, - {file = "xxhash-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:312eba88ffe0a05e332e3a6f9788b73883752be63f8588a6dc1261a3eaaaf2b2"}, - {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:e01226b6b6a1ffe4e6bd6d08cfcb3ca708b16f02eb06dd44f3c6e53285f03e4f"}, - {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9f3025a0d5d8cf406a9313cd0d5789c77433ba2004b1c75439b67678e5136537"}, - {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:6d3472fd4afef2a567d5f14411d94060099901cd8ce9788b22b8c6f13c606a93"}, - {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = 
"sha256:43984c0a92f06cac434ad181f329a1445017c33807b7ae4f033878d860a4b0f2"}, - {file = "xxhash-3.4.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:a55e0506fdb09640a82ec4f44171273eeabf6f371a4ec605633adb2837b5d9d5"}, - {file = "xxhash-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:faec30437919555b039a8bdbaba49c013043e8f76c999670aef146d33e05b3a0"}, - {file = "xxhash-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:c9e1b646af61f1fc7083bb7b40536be944f1ac67ef5e360bca2d73430186971a"}, - {file = "xxhash-3.4.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:961d948b7b1c1b6c08484bbce3d489cdf153e4122c3dfb07c2039621243d8795"}, - {file = "xxhash-3.4.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:719a378930504ab159f7b8e20fa2aa1896cde050011af838af7e7e3518dd82de"}, - {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74fb5cb9406ccd7c4dd917f16630d2e5e8cbbb02fc2fca4e559b2a47a64f4940"}, - {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5dab508ac39e0ab988039bc7f962c6ad021acd81fd29145962b068df4148c476"}, - {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c59f3e46e7daf4c589e8e853d700ef6607afa037bfad32c390175da28127e8c"}, - {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cc07256eff0795e0f642df74ad096f8c5d23fe66bc138b83970b50fc7f7f6c5"}, - {file = "xxhash-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e9f749999ed80f3955a4af0eb18bb43993f04939350b07b8dd2f44edc98ffee9"}, - {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:7688d7c02149a90a3d46d55b341ab7ad1b4a3f767be2357e211b4e893efbaaf6"}, - {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a8b4977963926f60b0d4f830941c864bed16aa151206c01ad5c531636da5708e"}, - {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:8106d88da330f6535a58a8195aa463ef5281a9aa23b04af1848ff715c4398fb4"}, - {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:4c76a77dbd169450b61c06fd2d5d436189fc8ab7c1571d39265d4822da16df22"}, - {file = "xxhash-3.4.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:11f11357c86d83e53719c592021fd524efa9cf024dc7cb1dfb57bbbd0d8713f2"}, - {file = "xxhash-3.4.1-cp38-cp38-win32.whl", hash = "sha256:0c786a6cd74e8765c6809892a0d45886e7c3dc54de4985b4a5eb8b630f3b8e3b"}, - {file = "xxhash-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:aabf37fb8fa27430d50507deeab2ee7b1bcce89910dd10657c38e71fee835594"}, - {file = "xxhash-3.4.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6127813abc1477f3a83529b6bbcfeddc23162cece76fa69aee8f6a8a97720562"}, - {file = "xxhash-3.4.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef2e194262f5db16075caea7b3f7f49392242c688412f386d3c7b07c7733a70a"}, - {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71be94265b6c6590f0018bbf73759d21a41c6bda20409782d8117e76cd0dfa8b"}, - {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:10e0a619cdd1c0980e25eb04e30fe96cf8f4324758fa497080af9c21a6de573f"}, - {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fa122124d2e3bd36581dd78c0efa5f429f5220313479fb1072858188bc2d5ff1"}, - {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:e17032f5a4fea0a074717fe33477cb5ee723a5f428de7563e75af64bfc1b1e10"}, - {file = "xxhash-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca7783b20e3e4f3f52f093538895863f21d18598f9a48211ad757680c3bd006f"}, - {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d77d09a1113899fad5f354a1eb4f0a9afcf58cefff51082c8ad643ff890e30cf"}, - {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:21287bcdd299fdc3328cc0fbbdeaa46838a1c05391264e51ddb38a3f5b09611f"}, - {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:dfd7a6cc483e20b4ad90224aeb589e64ec0f31e5610ab9957ff4314270b2bf31"}, - {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:543c7fcbc02bbb4840ea9915134e14dc3dc15cbd5a30873a7a5bf66039db97ec"}, - {file = "xxhash-3.4.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fe0a98d990e433013f41827b62be9ab43e3cf18e08b1483fcc343bda0d691182"}, - {file = "xxhash-3.4.1-cp39-cp39-win32.whl", hash = "sha256:b9097af00ebf429cc7c0e7d2fdf28384e4e2e91008130ccda8d5ae653db71e54"}, - {file = "xxhash-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:d699b921af0dcde50ab18be76c0d832f803034d80470703700cb7df0fbec2832"}, - {file = "xxhash-3.4.1-cp39-cp39-win_arm64.whl", hash = "sha256:2be491723405e15cc099ade1280133ccfbf6322d2ef568494fb7d07d280e7eee"}, - {file = "xxhash-3.4.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:431625fad7ab5649368c4849d2b49a83dc711b1f20e1f7f04955aab86cd307bc"}, - {file = "xxhash-3.4.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc6dbd5fc3c9886a9e041848508b7fb65fd82f94cc793253990f81617b61fe49"}, - {file = "xxhash-3.4.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f3ff8dbd0ec97aec842476cb8ccc3e17dd288cd6ce3c8ef38bff83d6eb927817"}, - {file = "xxhash-3.4.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef73a53fe90558a4096e3256752268a8bdc0322f4692ed928b6cd7ce06ad4fe3"}, - {file = "xxhash-3.4.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:450401f42bbd274b519d3d8dcf3c57166913381a3d2664d6609004685039f9d3"}, - {file = "xxhash-3.4.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a162840cf4de8a7cd8720ff3b4417fbc10001eefdd2d21541a8226bb5556e3bb"}, - {file = "xxhash-3.4.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b736a2a2728ba45017cb67785e03125a79d246462dfa892d023b827007412c52"}, - {file = "xxhash-3.4.1-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0ae4c2e7698adef58710d6e7a32ff518b66b98854b1c68e70eee504ad061d8"}, - {file = "xxhash-3.4.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6322c4291c3ff174dcd104fae41500e75dad12be6f3085d119c2c8a80956c51"}, - {file = "xxhash-3.4.1-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:dd59ed668801c3fae282f8f4edadf6dc7784db6d18139b584b6d9677ddde1b6b"}, - {file = "xxhash-3.4.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92693c487e39523a80474b0394645b393f0ae781d8db3474ccdcead0559ccf45"}, - {file = "xxhash-3.4.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4603a0f642a1e8d7f3ba5c4c25509aca6a9c1cc16f85091004a7028607ead663"}, - {file = "xxhash-3.4.1-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fa45e8cbfbadb40a920fe9ca40c34b393e0b067082d94006f7f64e70c7490a6"}, - {file = "xxhash-3.4.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:595b252943b3552de491ff51e5bb79660f84f033977f88f6ca1605846637b7c6"}, - {file = "xxhash-3.4.1-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:562d8b8f783c6af969806aaacf95b6c7b776929ae26c0cd941d54644ea7ef51e"}, - {file = "xxhash-3.4.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:41ddeae47cf2828335d8d991f2d2b03b0bdc89289dc64349d712ff8ce59d0647"}, - {file = "xxhash-3.4.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c44d584afdf3c4dbb3277e32321d1a7b01d6071c1992524b6543025fb8f4206f"}, - {file = "xxhash-3.4.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7bddb3a5b86213cc3f2c61500c16945a1b80ecd572f3078ddbbe68f9dabdfb"}, - {file = "xxhash-3.4.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9ecb6c987b62437c2f99c01e97caf8d25660bf541fe79a481d05732e5236719c"}, - {file = "xxhash-3.4.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:696b4e18b7023527d5c50ed0626ac0520edac45a50ec7cf3fc265cd08b1f4c03"}, - {file = "xxhash-3.4.1.tar.gz", hash = "sha256:0379d6cf1ff987cd421609a264ce025e74f346e3e145dd106c0cc2e3ec3f99a9"}, -] - [[package]] name = "yarl" version = "1.9.4" @@ -6257,7 +6097,7 @@ multidict = ">=4.0" name = "zipp" version = "3.17.0" description = "Backport of pathlib-compatible object wrapper for zip files" -optional = true +optional = false python-versions = ">=3.8" files = [ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, @@ -6275,4 +6115,4 @@ pgvector = ["asyncpg", "pgvector", "psycopg2-binary", "sqlalchemy"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "ce0641f693c3fc99cfd560d9c2cbfc6fba83e12bbaecbbdea33eddcda9c286c3" +content-hash = "61b133c46c9589c72010ebc29989fe6a01df7ce80f1b49e65dbfb5b1cf759a27" diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index e60c7af63..182b8184c 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -1,8 +1,7 @@ import logging from injector import inject, singleton -from llama_index import MockEmbedding -from llama_index.embeddings.base import BaseEmbedding +from llama_index.core.embeddings import BaseEmbedding, MockEmbedding from private_gpt.paths import models_cache_path from private_gpt.settings.settings import Settings @@ -20,7 +19,7 @@ def __init__(self, settings: Settings) -> None: logger.info("Initializing the embedding model in mode=%s", embedding_mode) match embedding_mode: case "local": - from llama_index.embeddings import HuggingFaceEmbedding + from llama_index.embeddings.huggingface import HuggingFaceEmbedding self.embedding_model = HuggingFaceEmbedding( model_name=settings.local.embedding_hf_model_name, @@ -36,7 +35,7 @@ def __init__(self, settings: Settings) -> None: endpoint_name=settings.sagemaker.embedding_endpoint_name, ) case "openai": - from llama_index import OpenAIEmbedding + from llama_index.embeddings.openai import OpenAIEmbedding openai_settings = settings.openai.api_key self.embedding_model = OpenAIEmbedding(api_key=openai_settings) diff --git a/private_gpt/components/ingest/ingest_component.py 
b/private_gpt/components/ingest/ingest_component.py index e8ec1e8fb..e21b6c237 100644 --- a/private_gpt/components/ingest/ingest_component.py +++ b/private_gpt/components/ingest/ingest_component.py @@ -8,16 +8,13 @@ from pathlib import Path from typing import Any -from llama_index import ( - Document, - ServiceContext, - StorageContext, - VectorStoreIndex, - load_index_from_storage, -) -from llama_index.data_structs import IndexDict -from llama_index.indices.base import BaseIndex -from llama_index.ingestion import run_transformations +from llama_index.core.data_structs import IndexDict +from llama_index.core.embeddings.utils import EmbedType +from llama_index.core.indices import VectorStoreIndex, load_index_from_storage +from llama_index.core.indices.base import BaseIndex +from llama_index.core.ingestion import run_transformations +from llama_index.core.schema import Document, TransformComponent +from llama_index.core.storage import StorageContext from private_gpt.components.ingest.ingest_helper import IngestionHelper from private_gpt.paths import local_data_path @@ -30,13 +27,15 @@ class BaseIngestComponent(abc.ABC): def __init__( self, storage_context: StorageContext, - service_context: ServiceContext, + embed_model: EmbedType, + transformations: list[TransformComponent], *args: Any, **kwargs: Any, ) -> None: logger.debug("Initializing base ingest component type=%s", type(self).__name__) self.storage_context = storage_context - self.service_context = service_context + self.embed_model = embed_model + self.transformations = transformations @abc.abstractmethod def ingest(self, file_name: str, file_data: Path) -> list[Document]: @@ -55,11 +54,12 @@ class BaseIngestComponentWithIndex(BaseIngestComponent, abc.ABC): def __init__( self, storage_context: StorageContext, - service_context: ServiceContext, + embed_model: EmbedType, + transformations: list[TransformComponent], *args: Any, **kwargs: Any, ) -> None: - super().__init__(storage_context, service_context, *args, **kwargs) + super().__init__(storage_context, embed_model, transformations, *args, **kwargs) self.show_progress = True self._index_thread_lock = ( @@ -73,9 +73,10 @@ def _initialize_index(self) -> BaseIndex[IndexDict]: # Load the index with store_nodes_override=True to be able to delete them index = load_index_from_storage( storage_context=self.storage_context, - service_context=self.service_context, store_nodes_override=True, # Force store nodes in index and document stores show_progress=self.show_progress, + embed_model=self.embed_model, + transformations=self.transformations, ) except ValueError: # There are no index in the storage context, creating a new one @@ -83,9 +84,10 @@ def _initialize_index(self) -> BaseIndex[IndexDict]: index = VectorStoreIndex.from_documents( [], storage_context=self.storage_context, - service_context=self.service_context, store_nodes_override=True, # Force store nodes in index and document stores show_progress=self.show_progress, + embed_model=self.embed_model, + transformations=self.transformations, ) index.storage_context.persist(persist_dir=local_data_path) return index @@ -106,11 +108,12 @@ class SimpleIngestComponent(BaseIngestComponentWithIndex): def __init__( self, storage_context: StorageContext, - service_context: ServiceContext, + embed_model: EmbedType, + transformations: list[TransformComponent], *args: Any, **kwargs: Any, ) -> None: - super().__init__(storage_context, service_context, *args, **kwargs) + super().__init__(storage_context, embed_model, transformations, *args, **kwargs) 
def ingest(self, file_name: str, file_data: Path) -> list[Document]: logger.info("Ingesting file_name=%s", file_name) @@ -151,16 +154,17 @@ class BatchIngestComponent(BaseIngestComponentWithIndex): def __init__( self, storage_context: StorageContext, - service_context: ServiceContext, + embed_model: EmbedType, + transformations: list[TransformComponent], count_workers: int, *args: Any, **kwargs: Any, ) -> None: - super().__init__(storage_context, service_context, *args, **kwargs) + super().__init__(storage_context, embed_model, transformations, *args, **kwargs) # Make an efficient use of the CPU and GPU, the embedding # must be in the transformations assert ( - len(self.service_context.transformations) >= 2 + len(self.transformations) >= 2 ), "Embeddings must be in the transformations" assert count_workers > 0, "count_workers must be > 0" self.count_workers = count_workers @@ -197,7 +201,7 @@ def _save_docs(self, documents: list[Document]) -> list[Document]: logger.debug("Transforming count=%s documents into nodes", len(documents)) nodes = run_transformations( documents, # type: ignore[arg-type] - self.service_context.transformations, + self.transformations, show_progress=self.show_progress, ) # Locking the index to avoid concurrent writes @@ -225,16 +229,17 @@ class ParallelizedIngestComponent(BaseIngestComponentWithIndex): def __init__( self, storage_context: StorageContext, - service_context: ServiceContext, + embed_model: EmbedType, + transformations: list[TransformComponent], count_workers: int, *args: Any, **kwargs: Any, ) -> None: - super().__init__(storage_context, service_context, *args, **kwargs) + super().__init__(storage_context, embed_model, transformations, *args, **kwargs) # To make an efficient use of the CPU and GPU, the embeddings # must be in the transformations (to be computed in batches) assert ( - len(self.service_context.transformations) >= 2 + len(self.transformations) >= 2 ), "Embeddings must be in the transformations" assert count_workers > 0, "count_workers must be > 0" self.count_workers = count_workers @@ -278,7 +283,7 @@ def _save_docs(self, documents: list[Document]) -> list[Document]: logger.debug("Transforming count=%s documents into nodes", len(documents)) nodes = run_transformations( documents, # type: ignore[arg-type] - self.service_context.transformations, + self.transformations, show_progress=self.show_progress, ) # Locking the index to avoid concurrent writes @@ -311,18 +316,29 @@ def __del__(self) -> None: def get_ingestion_component( storage_context: StorageContext, - service_context: ServiceContext, + embed_model: EmbedType, + transformations: list[TransformComponent], settings: Settings, ) -> BaseIngestComponent: """Get the ingestion component for the given configuration.""" ingest_mode = settings.embedding.ingest_mode if ingest_mode == "batch": return BatchIngestComponent( - storage_context, service_context, settings.embedding.count_workers + storage_context=storage_context, + embed_model=embed_model, + transformations=transformations, + count_workers=settings.embedding.count_workers, ) elif ingest_mode == "parallel": return ParallelizedIngestComponent( - storage_context, service_context, settings.embedding.count_workers + storage_context=storage_context, + embed_model=embed_model, + transformations=transformations, + count_workers=settings.embedding.count_workers, ) else: - return SimpleIngestComponent(storage_context, service_context) + return SimpleIngestComponent( + storage_context=storage_context, + embed_model=embed_model, + 
transformations=transformations, + ) diff --git a/private_gpt/components/ingest/ingest_helper.py b/private_gpt/components/ingest/ingest_helper.py index 1de2f7fa4..15920c3b2 100644 --- a/private_gpt/components/ingest/ingest_helper.py +++ b/private_gpt/components/ingest/ingest_helper.py @@ -1,14 +1,53 @@ import logging from pathlib import Path +from typing import Dict, Type -from llama_index import Document -from llama_index.readers import JSONReader, StringIterableReader -from llama_index.readers.file.base import DEFAULT_FILE_READER_CLS +from llama_index.core.readers import StringIterableReader +from llama_index.core.readers.base import BaseReader +from llama_index.core.readers.json import JSONReader +from llama_index.core.schema import Document logger = logging.getLogger(__name__) + +# Inspired by the `llama_index.core.readers.file.base` module +def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]: + try: + from llama_index.readers.file.docs import DocxReader, HWPReader, PDFReader + from llama_index.readers.file.epub import EpubReader + from llama_index.readers.file.image import ImageReader + from llama_index.readers.file.ipynb import IPYNBReader + from llama_index.readers.file.markdown import MarkdownReader + from llama_index.readers.file.mbox import MboxReader + from llama_index.readers.file.tabular import PandasCSVReader + from llama_index.readers.file.slides import PptxReader + from llama_index.readers.file.video_audio import VideoAudioReader + except ImportError: + raise ImportError("`llama-index-readers-file` package not found") + + default_file_reader_cls: Dict[str, Type[BaseReader]] = { + ".hwp": HWPReader, + ".pdf": PDFReader, + ".docx": DocxReader, + ".pptx": PptxReader, + ".ppt": PptxReader, + ".pptm": PptxReader, + ".jpg": ImageReader, + ".png": ImageReader, + ".jpeg": ImageReader, + ".mp3": VideoAudioReader, + ".mp4": VideoAudioReader, + ".csv": PandasCSVReader, + ".epub": EpubReader, + ".md": MarkdownReader, + ".mbox": MboxReader, + ".ipynb": IPYNBReader, + } + return default_file_reader_cls + + # Patching the default file reader to support other file types -FILE_READER_CLS = DEFAULT_FILE_READER_CLS.copy() +FILE_READER_CLS = _try_loading_included_file_formats() FILE_READER_CLS.update( { ".json": JSONReader, diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index eebbdff0b..b2ffd0489 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -1,15 +1,16 @@ import logging from injector import inject, singleton -from llama_index import set_global_tokenizer -from llama_index.llms import MockLLM -from llama_index.llms.base import LLM +from llama_index.core.llms import LLM, MockLLM +from llama_index.core.utils import set_global_tokenizer +from llama_index.core.settings import Settings as LlamaIndexSettings from transformers import AutoTokenizer # type: ignore from private_gpt.components.llm.prompt_helper import get_prompt_style from private_gpt.paths import models_cache_path, models_path from private_gpt.settings.settings import Settings + logger = logging.getLogger(__name__) @@ -31,7 +32,7 @@ def __init__(self, settings: Settings) -> None: logger.info("Initializing the LLM in mode=%s", llm_mode) match settings.llm.mode: case "local": - from llama_index.llms import LlamaCPP + from llama_index.llms.llama_cpp import LlamaCPP prompt_style = get_prompt_style(settings.local.prompt_style) @@ -41,6 +42,7 @@ def __init__(self, settings: Settings) -> None: 
max_new_tokens=settings.llm.max_new_tokens, context_window=settings.llm.context_window, generate_kwargs={}, + callback_manager=LlamaIndexSettings.callback_manager, # All to GPU model_kwargs={"n_gpu_layers": -1, "offload_kqv": True}, # transform inputs into Llama2 format @@ -58,7 +60,7 @@ def __init__(self, settings: Settings) -> None: context_window=settings.llm.context_window, ) case "openai": - from llama_index.llms import OpenAI + from llama_index.llms.openai import OpenAI openai_settings = settings.openai self.llm = OpenAI( @@ -67,7 +69,7 @@ def __init__(self, settings: Settings) -> None: model=openai_settings.model, ) case "openailike": - from llama_index.llms import OpenAILike + from llama_index.llms.openai_like import OpenAILike openai_settings = settings.openai self.llm = OpenAILike( @@ -81,7 +83,7 @@ def __init__(self, settings: Settings) -> None: case "mock": self.llm = MockLLM() case "ollama": - from llama_index.llms import Ollama + from llama_index.llms.ollama import Ollama ollama_settings = settings.ollama self.llm = Ollama( diff --git a/private_gpt/components/llm/prompt_helper.py b/private_gpt/components/llm/prompt_helper.py index d1df9b814..985d217bd 100644 --- a/private_gpt/components/llm/prompt_helper.py +++ b/private_gpt/components/llm/prompt_helper.py @@ -3,11 +3,7 @@ from collections.abc import Sequence from typing import Any, Literal -from llama_index.llms import ChatMessage, MessageRole -from llama_index.llms.llama_utils import ( - completion_to_prompt, - messages_to_prompt, -) +from llama_index.core.llms import ChatMessage, MessageRole logger = logging.getLogger(__name__) @@ -73,7 +69,9 @@ def _completion_to_prompt(self, completion: str) -> str: class Llama2PromptStyle(AbstractPromptStyle): - """Simple prompt style that just uses the default llama_utils functions. + """Simple prompt style that uses llama 2 prompt style. + + Inspired by llama_index/legacy/llms/llama_utils.py It transforms the sequence of messages into a prompt that should look like: ```text @@ -83,11 +81,61 @@ class Llama2PromptStyle(AbstractPromptStyle): <s> [INST] <<SYS>> your system prompt here. <</SYS>> user message here [/INST] assistant (model) response here </s> ``` """ + BOS, EOS = "<s>", "</s>" + B_INST, E_INST = "[INST]", "[/INST]" + B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n" + DEFAULT_SYSTEM_PROMPT = """\ + You are a helpful, respectful and honest assistant. \ + Always answer as helpfully as possible and follow ALL given instructions. \ + Do not speculate or make up information. \ + Do not reference any given instructions or context. 
\ + """ + def _messages_to_prompt(self, messages: Sequence[ChatMessage]) -> str: - return messages_to_prompt(messages) + string_messages: list[str] = [] + if messages[0].role == MessageRole.SYSTEM: + # pull out the system message (if it exists in messages) + system_message_str = messages[0].content or "" + messages = messages[1:] + else: + system_message_str = self.DEFAULT_SYSTEM_PROMPT + + system_message_str = f"{self.B_SYS} {system_message_str.strip()} {self.E_SYS}" + + for i in range(0, len(messages), 2): + # first message should always be a user + user_message = messages[i] + assert user_message.role == MessageRole.USER + + if i == 0: + # make sure system prompt is included at the start + str_message = f"{self.BOS} {self.B_INST} {system_message_str} " + else: + # end previous user-assistant interaction + string_messages[-1] += f" {self.EOS}" + # no need to include system prompt + str_message = f"{self.BOS} {self.B_INST} " + + # include user message content + str_message += f"{user_message.content} {self.E_INST}" + + if len(messages) > (i + 1): + # if assistant message exists, add to str_message + assistant_message = messages[i + 1] + assert assistant_message.role == MessageRole.ASSISTANT + str_message += f" {assistant_message.content}" + + string_messages.append(str_message) + + return "".join(string_messages) def _completion_to_prompt(self, completion: str) -> str: - return completion_to_prompt(completion) + system_prompt_str = self.DEFAULT_SYSTEM_PROMPT + + return ( + f"{self.BOS} {self.B_INST} {self.B_SYS} {system_prompt_str.strip()} {self.E_SYS} " + f"{completion.strip()} {self.E_INST}" + ) class TagPromptStyle(AbstractPromptStyle): diff --git a/private_gpt/components/node_store/node_store_component.py b/private_gpt/components/node_store/node_store_component.py index c039bf502..4383cf935 100644 --- a/private_gpt/components/node_store/node_store_component.py +++ b/private_gpt/components/node_store/node_store_component.py @@ -1,9 +1,9 @@ import logging from injector import inject, singleton -from llama_index.storage.docstore import BaseDocumentStore, SimpleDocumentStore -from llama_index.storage.index_store import SimpleIndexStore -from llama_index.storage.index_store.types import BaseIndexStore +from llama_index.core.storage.docstore import BaseDocumentStore, SimpleDocumentStore +from llama_index.core.storage.index_store import SimpleIndexStore +from llama_index.core.storage.index_store.types import BaseIndexStore from private_gpt.paths import local_data_path diff --git a/private_gpt/components/vector_store/batched_chroma.py b/private_gpt/components/vector_store/batched_chroma.py index f2cd9addf..eac536505 100644 --- a/private_gpt/components/vector_store/batched_chroma.py +++ b/private_gpt/components/vector_store/batched_chroma.py @@ -1,9 +1,25 @@ +from collections.abc import Generator from typing import Any -from llama_index.schema import BaseNode, MetadataMode -from llama_index.vector_stores import ChromaVectorStore -from llama_index.vector_stores.chroma import chunk_list -from llama_index.vector_stores.utils import node_to_metadata_dict +from llama_index.core.schema import BaseNode, MetadataMode +from llama_index.core.vector_stores.utils import node_to_metadata_dict +from llama_index.vector_stores.chroma import ChromaVectorStore + + +def chunk_list( + lst: list[BaseNode], max_chunk_size: int +) -> Generator[list[BaseNode], None, None]: + """Yield successive max_chunk_size-sized chunks from lst. 
+ + Args: + lst (List[BaseNode]): list of nodes with embeddings + max_chunk_size (int): max chunk size + + Yields: + Generator[List[BaseNode], None, None]: list of nodes with embeddings + """ + for i in range(0, len(lst), max_chunk_size): + yield lst[i : i + max_chunk_size] class BatchedChromaVectorStore(ChromaVectorStore): diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py index 4e3ed1145..0e965f8b4 100644 --- a/private_gpt/components/vector_store/vector_store_component.py +++ b/private_gpt/components/vector_store/vector_store_component.py @@ -2,9 +2,8 @@ import typing from injector import inject, singleton -from llama_index import VectorStoreIndex -from llama_index.indices.vector_store import VectorIndexRetriever -from llama_index.vector_stores.types import VectorStore +from llama_index.core.indices.vector_store import VectorIndexRetriever, VectorStoreIndex +from llama_index.core.vector_stores.types import VectorStore from private_gpt.components.vector_store.batched_chroma import BatchedChromaVectorStore from private_gpt.open_ai.extensions.context_filter import ContextFilter @@ -41,7 +40,7 @@ class VectorStoreComponent: def __init__(self, settings: Settings) -> None: match settings.vectorstore.database: case "pgvector": - from llama_index.vector_stores import PGVectorStore + from llama_index.vector_stores.postgres import PGVectorStore if settings.pgvector is None: raise ValueError( diff --git a/private_gpt/launcher.py b/private_gpt/launcher.py index 791e841a6..3857a8cdc 100644 --- a/private_gpt/launcher.py +++ b/private_gpt/launcher.py @@ -4,6 +4,8 @@ from fastapi import Depends, FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from injector import Injector +from llama_index.core.callbacks import CallbackManager +from llama_index.core.callbacks.global_handlers import create_global_handler from private_gpt.server.chat.chat_router import chat_router from private_gpt.server.chunks.chunks_router import chunks_router @@ -12,6 +14,7 @@ from private_gpt.server.health.health_router import health_router from private_gpt.server.ingest.ingest_router import ingest_router from private_gpt.settings.settings import Settings +from llama_index.core.settings import Settings as LlamaIndexSettings logger = logging.getLogger(__name__) @@ -31,6 +34,10 @@ async def bind_injector_to_request(request: Request) -> None: app.include_router(embeddings_router) app.include_router(health_router) + # Add LlamaIndex simple observability + global_handler = create_global_handler("simple") + LlamaIndexSettings.callback_manager = CallbackManager([global_handler]) + settings = root_injector.get(Settings) if settings.server.cors.enabled: logger.debug("Setting up CORS middleware") diff --git a/private_gpt/main.py b/private_gpt/main.py index d249fa6cb..58c4006cf 100644 --- a/private_gpt/main.py +++ b/private_gpt/main.py @@ -1,11 +1,6 @@ """FastAPI app creation, logger configuration and main API routes.""" -import llama_index - from private_gpt.di import global_injector from private_gpt.launcher import create_app -# Add LlamaIndex simple observability -llama_index.set_global_handler("simple") - app = create_app(global_injector) diff --git a/private_gpt/open_ai/openai_models.py b/private_gpt/open_ai/openai_models.py index dd78daf35..67b67dd4c 100644 --- a/private_gpt/open_ai/openai_models.py +++ b/private_gpt/open_ai/openai_models.py @@ -3,7 +3,7 @@ from collections.abc import Iterator from typing import Literal -from 
llama_index.llms import ChatResponse, CompletionResponse +from llama_index.core.llms import ChatResponse, CompletionResponse from pydantic import BaseModel, Field from private_gpt.server.chunks.chunks_service import Chunk diff --git a/private_gpt/server/chat/chat_router.py b/private_gpt/server/chat/chat_router.py index 385ba1736..ed86cdbbb 100644 --- a/private_gpt/server/chat/chat_router.py +++ b/private_gpt/server/chat/chat_router.py @@ -1,5 +1,5 @@ from fastapi import APIRouter, Depends, Request -from llama_index.llms import ChatMessage, MessageRole +from llama_index.core.llms import ChatMessage, MessageRole from pydantic import BaseModel from starlette.responses import StreamingResponse diff --git a/private_gpt/server/chat/chat_service.py b/private_gpt/server/chat/chat_service.py index ffdb3f90f..6b94f352c 100644 --- a/private_gpt/server/chat/chat_service.py +++ b/private_gpt/server/chat/chat_service.py @@ -1,14 +1,15 @@ from dataclasses import dataclass from injector import inject, singleton -from llama_index import ServiceContext, StorageContext, VectorStoreIndex -from llama_index.chat_engine import ContextChatEngine, SimpleChatEngine -from llama_index.chat_engine.types import ( +from llama_index.core.chat_engine import ContextChatEngine, SimpleChatEngine +from llama_index.core.chat_engine.types import ( BaseChatEngine, ) -from llama_index.indices.postprocessor import MetadataReplacementPostProcessor -from llama_index.llms import ChatMessage, MessageRole -from llama_index.types import TokenGen +from llama_index.core.indices import VectorStoreIndex +from llama_index.core.indices.postprocessor import MetadataReplacementPostProcessor +from llama_index.core.llms import ChatMessage, MessageRole +from llama_index.core.storage import StorageContext +from llama_index.core.types import TokenGen from pydantic import BaseModel from private_gpt.components.embedding.embedding_component import EmbeddingComponent @@ -75,20 +76,19 @@ def __init__( embedding_component: EmbeddingComponent, node_store_component: NodeStoreComponent, ) -> None: - self.llm_service = llm_component + self.llm_component = llm_component + self.embedding_component = embedding_component self.vector_store_component = vector_store_component self.storage_context = StorageContext.from_defaults( vector_store=vector_store_component.vector_store, docstore=node_store_component.doc_store, index_store=node_store_component.index_store, ) - self.service_context = ServiceContext.from_defaults( - llm=llm_component.llm, embed_model=embedding_component.embedding_model - ) self.index = VectorStoreIndex.from_vector_store( vector_store_component.vector_store, storage_context=self.storage_context, - service_context=self.service_context, + llm=llm_component.llm, + embed_model=embedding_component.embedding_model, show_progress=True, ) @@ -102,10 +102,17 @@ def _chat_engine( vector_index_retriever = self.vector_store_component.get_retriever( index=self.index, context_filter=context_filter ) + # TODO ContextChatEngine is still not migrated by LlamaIndex to accept + # llm directly, so we are passing legacy ServiceContext until it is fixed. 
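The TODO above marks the one spot where the old API survives: `ContextChatEngine` has not yet been migrated by LlamaIndex to take an `llm` directly, so a legacy `ServiceContext` is rebuilt at the call site, as the hunk below shows. In isolation the shim pattern is roughly the following sketch, assuming the deprecated `ServiceContext` still ships in llama-index-core 0.10:

```python
# Hypothetical call-site shim, not part of the patch: build the legacy
# ServiceContext only where a not-yet-migrated API still demands one,
# instead of storing it on the service as the old code did.
from llama_index.core import ServiceContext


def legacy_service_context(llm, embed_model) -> ServiceContext:
    return ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
```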
+ from llama_index.core import ServiceContext return ContextChatEngine.from_defaults( system_prompt=system_prompt, retriever=vector_index_retriever, - service_context=self.service_context, + llm=self.llm_component.llm, # Takes no effect at the moment + service_context=ServiceContext.from_defaults( + llm=self.llm_component.llm, + embed_model=self.embedding_component.embedding_model, + ), node_postprocessors=[ MetadataReplacementPostProcessor(target_metadata_key="window"), ], @@ -113,7 +120,7 @@ def _chat_engine( else: return SimpleChatEngine.from_defaults( system_prompt=system_prompt, - service_context=self.service_context, + llm=self.llm_component.llm, ) def stream_chat( diff --git a/private_gpt/server/chunks/chunks_service.py b/private_gpt/server/chunks/chunks_service.py index 7fbe8550f..7bda5d904 100644 --- a/private_gpt/server/chunks/chunks_service.py +++ b/private_gpt/server/chunks/chunks_service.py @@ -1,8 +1,9 @@ from typing import TYPE_CHECKING, Literal from injector import inject, singleton -from llama_index import ServiceContext, StorageContext, VectorStoreIndex -from llama_index.schema import NodeWithScore +from llama_index.core.indices import VectorStoreIndex +from llama_index.core.schema import NodeWithScore +from llama_index.core.storage import StorageContext from pydantic import BaseModel, Field from private_gpt.components.embedding.embedding_component import EmbeddingComponent @@ -15,7 +16,7 @@ from private_gpt.server.ingest.model import IngestedDoc if TYPE_CHECKING: - from llama_index.schema import RelatedNodeInfo + from llama_index.core.schema import RelatedNodeInfo class Chunk(BaseModel): @@ -63,14 +64,13 @@ def __init__( node_store_component: NodeStoreComponent, ) -> None: self.vector_store_component = vector_store_component + self.llm_component = llm_component + self.embedding_component = embedding_component self.storage_context = StorageContext.from_defaults( vector_store=vector_store_component.vector_store, docstore=node_store_component.doc_store, index_store=node_store_component.index_store, ) - self.query_service_context = ServiceContext.from_defaults( - llm=llm_component.llm, embed_model=embedding_component.embedding_model - ) def _get_sibling_nodes_text( self, node_with_score: NodeWithScore, related_number: int, forward: bool = True @@ -103,7 +103,8 @@ def retrieve_relevant( index = VectorStoreIndex.from_vector_store( self.vector_store_component.vector_store, storage_context=self.storage_context, - service_context=self.query_service_context, + llm=self.llm_component.llm, + embed_model=self.embedding_component.embedding_model, show_progress=True, ) vector_index_retriever = self.vector_store_component.get_retriever( diff --git a/private_gpt/server/ingest/ingest_service.py b/private_gpt/server/ingest/ingest_service.py index aa2f73c39..1d6f5ba26 100644 --- a/private_gpt/server/ingest/ingest_service.py +++ b/private_gpt/server/ingest/ingest_service.py @@ -4,11 +4,8 @@ from typing import AnyStr, BinaryIO from injector import inject, singleton -from llama_index import ( - ServiceContext, - StorageContext, -) -from llama_index.node_parser import SentenceWindowNodeParser +from llama_index.core.node_parser import SentenceWindowNodeParser +from llama_index.core.storage import StorageContext from private_gpt.components.embedding.embedding_component import EmbeddingComponent from private_gpt.components.ingest.ingest_component import get_ingestion_component @@ -40,17 +37,12 @@ def __init__( index_store=node_store_component.index_store, ) node_parser = 
SentenceWindowNodeParser.from_defaults() - self.ingest_service_context = ServiceContext.from_defaults( - llm=self.llm_service.llm, - embed_model=embedding_component.embedding_model, - node_parser=node_parser, - # Embeddings done early in the pipeline of node transformations, right - # after the node parsing - transformations=[node_parser, embedding_component.embedding_model], - ) self.ingest_component = get_ingestion_component( - self.storage_context, self.ingest_service_context, settings=settings() + self.storage_context, + embed_model=embedding_component.embedding_model, + transformations=[node_parser, embedding_component.embedding_model], + settings=settings(), ) def _ingest_data(self, file_name: str, file_data: AnyStr) -> list[IngestedDoc]: diff --git a/private_gpt/server/ingest/model.py b/private_gpt/server/ingest/model.py index eb957ee02..9f4139e68 100644 --- a/private_gpt/server/ingest/model.py +++ b/private_gpt/server/ingest/model.py @@ -1,6 +1,6 @@ from typing import Any, Literal -from llama_index import Document +from llama_index.core.schema import Document from pydantic import BaseModel, Field diff --git a/private_gpt/ui/ui.py b/private_gpt/ui/ui.py index a4b131fe8..bcc1f6758 100644 --- a/private_gpt/ui/ui.py +++ b/private_gpt/ui/ui.py @@ -10,7 +10,7 @@ from fastapi import FastAPI from gradio.themes.utils.colors import slate # type: ignore from injector import inject, singleton -from llama_index.llms import ChatMessage, ChatResponse, MessageRole +from llama_index.core.llms import ChatMessage, ChatResponse, MessageRole from pydantic import BaseModel from private_gpt.constants import PROJECT_ROOT_PATH diff --git a/pyproject.toml b/pyproject.toml index 1b380e207..59f8ff701 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,15 +6,24 @@ authors = ["Zylon "] [tool.poetry.dependencies] python = ">=3.11,<3.12" -fastapi = { extras = ["all"], version = "^0.103.1" } -boto3 = "^1.28.56" +fastapi = { extras = ["all"], version = "^0.110.0" } +boto3 = "^1.34.51" injector = "^0.21.0" pyyaml = "^6.0.1" -python-multipart = "^0.0.6" -pypdf = "^3.16.2" -llama-index = { extras = ["local_models"], version = "0.9.3" } -watchdog = "^3.0.0" -qdrant-client = "^1.6.9" +python-multipart = "^0.0.9" +llama-index-core = "^0.10.13" +llama-index-readers-file = "^0.1.6" +llama-index-embeddings-huggingface = "^0.1.4" +llama-index-embeddings-openai = "^0.1.6" +llama-index-vector-stores-qdrant = "^0.1.3" +llama-index-vector-stores-chroma = "^0.1.4" +llama-index-llms-llama-cpp = "^0.1.3" +llama-index-llms-openai = "^0.1.6" +llama-index-llms-openai-like = "^0.1.3" +llama-index-llms-ollama = "^0.1.2" +llama-index-vector-stores-postgres = "^0.1.2" +watchdog = "^4.0.0" +qdrant-client = "^1.7.3" chromadb = {version = "^0.4.13", optional = true} asyncpg = {version = "^0.29.0", optional = true} pgvector = {version = "^0.2.5", optional = true} diff --git a/tests/test_prompt_helper.py b/tests/test_prompt_helper.py index 48597698b..3b5af9146 100644 --- a/tests/test_prompt_helper.py +++ b/tests/test_prompt_helper.py @@ -1,5 +1,5 @@ import pytest -from llama_index.llms import ChatMessage, MessageRole +from llama_index.core.llms import ChatMessage, MessageRole from private_gpt.components.llm.prompt_helper import ( ChatMLPromptStyle, From 3373e80850c6fa061f2e1f71d2ca2b082235323a Mon Sep 17 00:00:00 2001 From: imartinez Date: Wed, 28 Feb 2024 20:28:30 +0100 Subject: [PATCH 02/15] Extract optional dependencies --- poetry.lock | 505 ++++++------------ .../components/embedding/custom/sagemaker.py | 2 +- 
.../embedding/embedding_component.py | 26 +- private_gpt/components/llm/llm_component.py | 39 +- .../vector_store/vector_store_component.py | 23 +- private_gpt/launcher.py | 7 +- pyproject.toml | 69 ++- 7 files changed, 255 insertions(+), 416 deletions(-) diff --git a/poetry.lock b/poetry.lock index 45bb5d40c..74713ef86 100644 --- a/poetry.lock +++ b/poetry.lock @@ -4,7 +4,7 @@ name = "aiofiles" version = "23.2.1" description = "File support for asyncio." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107"}, @@ -124,7 +124,7 @@ frozenlist = ">=1.1.0" name = "altair" version = "5.2.0" description = "Vega-Altair: A declarative statistical visualization library for Python." -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "altair-5.2.0-py3-none-any.whl", hash = "sha256:8c4888ad11db7c39f3f17aa7f4ea985775da389d79ac30a6c22856ab238df399"}, @@ -178,7 +178,7 @@ trio = ["trio (<0.22)"] name = "asgiref" version = "3.7.2" description = "ASGI specs, helper code, and adapters" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "asgiref-3.7.2-py3-none-any.whl", hash = "sha256:89b2ef2247e3b562a16eef663bc0e2e703ec6468e2fa8a5cd61cd449786d4f6e"}, @@ -192,7 +192,7 @@ tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] name = "async-timeout" version = "4.0.3" description = "Timeout context manager for asyncio programs" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"}, @@ -203,7 +203,7 @@ files = [ name = "asyncpg" version = "0.29.0" description = "An asyncio PostgreSQL driver" -optional = false +optional = true python-versions = ">=3.8.0" files = [ {file = "asyncpg-0.29.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:72fd0ef9f00aeed37179c62282a3d14262dbbafb74ec0ba16e1b1864d8a12169"}, @@ -278,7 +278,7 @@ tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pyte name = "backoff" version = "2.2.1" description = "Function decoration for backoff and retry" -optional = false +optional = true python-versions = ">=3.7,<4.0" files = [ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"}, @@ -289,7 +289,7 @@ files = [ name = "bcrypt" version = "4.1.2" description = "Modern password hashing for your software and your servers" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "bcrypt-4.1.2-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:ac621c093edb28200728a9cca214d7e838529e557027ef0581685909acd28b5e"}, @@ -383,7 +383,7 @@ uvloop = ["uvloop (>=0.15.2)"] name = "boto3" version = "1.34.51" description = "The AWS SDK for Python" -optional = false +optional = true python-versions = ">= 3.8" files = [ {file = "boto3-1.34.51-py3-none-any.whl", hash = "sha256:67732634dc7d0afda879bd9a5e2d0818a2c14a98bef766b95a3e253ea5104cb9"}, @@ -402,7 +402,7 @@ crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] name = "botocore" version = "1.34.51" description = "Low-level, data-driven core of boto 3." 
-optional = false +optional = true python-versions = ">= 3.8" files = [ {file = "botocore-1.34.51-py3-none-any.whl", hash = "sha256:01d5156247f991b3466a8404e3d7460a9ecbd9b214f9992d6ba797d9ddc6f120"}, @@ -435,7 +435,7 @@ beautifulsoup4 = "*" name = "build" version = "1.0.3" description = "A simple, correct Python build frontend" -optional = false +optional = true python-versions = ">= 3.7" files = [ {file = "build-1.0.3-py3-none-any.whl", hash = "sha256:589bf99a67df7c9cf07ec0ac0e5e2ea5d4b37ac63301c4986d1acb126aa83f8f"}, @@ -457,7 +457,7 @@ virtualenv = ["virtualenv (>=20.0.35)"] name = "cachetools" version = "5.3.2" description = "Extensible memoizing collections and decorators" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "cachetools-5.3.2-py3-none-any.whl", hash = "sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1"}, @@ -589,7 +589,7 @@ files = [ name = "chroma-hnswlib" version = "0.7.3" description = "Chromas fork of hnswlib" -optional = false +optional = true python-versions = "*" files = [ {file = "chroma-hnswlib-0.7.3.tar.gz", hash = "sha256:b6137bedde49fffda6af93b0297fe00429fc61e5a072b1ed9377f909ed95a932"}, @@ -626,7 +626,7 @@ numpy = "*" name = "chromadb" version = "0.4.24" description = "Chroma." -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "chromadb-0.4.24-py3-none-any.whl", hash = "sha256:3a08e237a4ad28b5d176685bd22429a03717fe09d35022fb230d516108da01da"}, @@ -692,7 +692,7 @@ files = [ name = "coloredlogs" version = "15.0.1" description = "Colored terminal output for Python's logging module" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "coloredlogs-15.0.1-py2.py3-none-any.whl", hash = "sha256:612ee75c546f53e92e70049c9dbfcc18c935a2b9a53b66085ce9ef6a6e5c0934"}, @@ -709,7 +709,7 @@ cron = ["capturer (>=2.4)"] name = "contourpy" version = "1.2.0" description = "Python library for calculating contours of 2D quadrilateral grids" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "contourpy-1.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0274c1cb63625972c0c007ab14dd9ba9e199c36ae1a231ce45d725cbcbfd10a8"}, @@ -836,7 +836,7 @@ toml = ["tomli"] name = "cycler" version = "0.12.1" description = "Composable style cycles" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30"}, @@ -894,7 +894,7 @@ files = [ name = "diskcache" version = "5.6.3" description = "Disk Cache -- Disk and file backed persistent cache." 
-optional = false +optional = true python-versions = ">=3" files = [ {file = "diskcache-5.6.3-py3-none-any.whl", hash = "sha256:5e31b2d5fbad117cc363ebaf6b689474db18a1f6438bc82358b024abd4c2ca19"}, @@ -991,7 +991,7 @@ all = ["email-validator (>=2.0.0)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)" name = "ffmpy" version = "0.3.1" description = "A simple Python wrapper for ffmpeg" -optional = false +optional = true python-versions = "*" files = [ {file = "ffmpy-0.3.1.tar.gz", hash = "sha256:a173b8f42c7c669ff722df7fb31e1e870067713697f745224fa6e621b82f0004"}, @@ -1017,7 +1017,7 @@ typing = ["typing-extensions (>=4.8)"] name = "flatbuffers" version = "23.5.26" description = "The FlatBuffers serialization format for Python" -optional = false +optional = true python-versions = "*" files = [ {file = "flatbuffers-23.5.26-py2.py3-none-any.whl", hash = "sha256:c0ff356da363087b915fde4b8b45bdda73432fc17cddb3c8157472eab1422ad1"}, @@ -1028,7 +1028,7 @@ files = [ name = "fonttools" version = "4.46.0" description = "Tools to manipulate font files" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "fonttools-4.46.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d4e69e2c7f93b695d2e6f18f709d501d945f65c1d237dafaabdd23cd935a5276"}, @@ -1214,7 +1214,7 @@ tqdm = ["tqdm"] name = "google-auth" version = "2.25.2" description = "Google Authentication Library" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "google-auth-2.25.2.tar.gz", hash = "sha256:42f707937feb4f5e5a39e6c4f343a17300a459aaf03141457ba505812841cc40"}, @@ -1237,7 +1237,7 @@ requests = ["requests (>=2.20.0,<3.0.0.dev0)"] name = "googleapis-common-protos" version = "1.62.0" description = "Common protobufs used in Google APIs" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "googleapis-common-protos-1.62.0.tar.gz", hash = "sha256:83f0ece9f94e5672cced82f592d2a5edf527a96ed1794f0bab36d5735c996277"}, @@ -1252,13 +1252,13 @@ grpc = ["grpcio (>=1.44.0,<2.0.0.dev0)"] [[package]] name = "gradio" -version = "4.19.0" +version = "4.19.2" description = "Python library for easily interacting with trained machine learning models" -optional = false +optional = true python-versions = ">=3.8" files = [ - {file = "gradio-4.19.0-py3-none-any.whl", hash = "sha256:d09732190acc0f33b5e7ea3235d267472bf74beeea62dabb7a82f93193155e09"}, - {file = "gradio-4.19.0.tar.gz", hash = "sha256:e77e3ce8a4113865abd1dcf92cc9426d9da4896e0a6fd2824a0c90ec751dd442"}, + {file = "gradio-4.19.2-py3-none-any.whl", hash = "sha256:acab4a35f556dbc3ae637469312738d154bcb73f0b8d5f4f65e4d067ecb1e0b1"}, + {file = "gradio-4.19.2.tar.gz", hash = "sha256:6fe5815bb4dfaeed1fc74223bffd91da70a1b463158af8c5e03d01bb09068a1d"}, ] [package.dependencies] @@ -1266,8 +1266,8 @@ aiofiles = ">=22.0,<24.0" altair = ">=4.2.0,<6.0" fastapi = "*" ffmpy = "*" -gradio-client = "0.10.0" -httpx = "*" +gradio-client = "0.10.1" +httpx = ">=0.24.1" huggingface-hub = ">=0.19.3" importlib-resources = ">=1.3,<7.0" jinja2 = "<4.0" @@ -1280,9 +1280,9 @@ pandas = ">=1.0,<3.0" pillow = ">=8.0,<11.0" pydantic = ">=2.0" pydub = "*" -python-multipart = "*" +python-multipart = ">=0.0.9" pyyaml = ">=5.0,<7.0" -ruff = ">=0.1.7" +ruff = ">=0.2.2" semantic-version = ">=2.0,<3.0" tomlkit = "0.12.0" typer = {version = ">=0.9,<1.0", extras = ["all"]} @@ -1294,18 +1294,18 @@ oauth = ["authlib", "itsdangerous"] [[package]] name = "gradio-client" -version = "0.10.0" +version = "0.10.1" description = "Python library for easily interacting with trained machine 
learning models" -optional = false +optional = true python-versions = ">=3.8" files = [ - {file = "gradio_client-0.10.0-py3-none-any.whl", hash = "sha256:2bcfe61710f9f1c8f336fa9ff0f5c5f0ea52079233196cd753ad30cccdfd585c"}, - {file = "gradio_client-0.10.0.tar.gz", hash = "sha256:feaee70f18363d76f81a7d25fc3456f40ed5f92417e642c8f1bf86dc65e3a981"}, + {file = "gradio_client-0.10.1-py3-none-any.whl", hash = "sha256:a0413fffdde3360e0f6aaec8b8c23d8a320049a571de2d111d85ebd295002165"}, + {file = "gradio_client-0.10.1.tar.gz", hash = "sha256:879eb56fae5d6b1603bb9375b88d1de0d034f3dac4b3afc8dbc66f36f6e54d5d"}, ] [package.dependencies] fsspec = "*" -httpx = "*" +httpx = ">=0.24.1" huggingface-hub = ">=0.19.3" packaging = "*" typing-extensions = ">=4.0,<5.0" @@ -1386,7 +1386,7 @@ test = ["objgraph", "psutil"] name = "grpcio" version = "1.60.0" description = "HTTP/2-based RPC framework" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "grpcio-1.60.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:d020cfa595d1f8f5c6b343530cd3ca16ae5aefdd1e832b777f9f0eb105f5b139"}, @@ -1452,7 +1452,7 @@ protobuf = ["grpcio-tools (>=1.60.0)"] name = "grpcio-tools" version = "1.60.0" description = "Protobuf code generator for gRPC" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "grpcio-tools-1.60.0.tar.gz", hash = "sha256:ed30499340228d733ff69fcf4a66590ed7921f94eb5a2bf692258b1280b9dac7"}, @@ -1531,7 +1531,7 @@ files = [ name = "h2" version = "4.1.0" description = "HTTP/2 State-Machine based protocol implementation" -optional = false +optional = true python-versions = ">=3.6.1" files = [ {file = "h2-4.1.0-py3-none-any.whl", hash = "sha256:03a46bcf682256c95b5fd9e9a99c1323584c3eec6440d379b9903d709476bc6d"}, @@ -1546,7 +1546,7 @@ hyperframe = ">=6.0,<7" name = "hpack" version = "4.0.0" description = "Pure-Python HPACK header compression" -optional = false +optional = true python-versions = ">=3.6.1" files = [ {file = "hpack-4.0.0-py3-none-any.whl", hash = "sha256:84a076fad3dc9a9f8063ccb8041ef100867b1878b25ef0ee63847a5d53818a6c"}, @@ -1686,7 +1686,7 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t name = "humanfriendly" version = "10.0" description = "Human friendly output for text interfaces using Python" -optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" files = [ {file = "humanfriendly-10.0-py2.py3-none-any.whl", hash = "sha256:1697e1a8a8f550fd43c2865cd84542fc175a61dcb779b6fee18cf6b6ccba1477"}, @@ -1700,7 +1700,7 @@ pyreadline3 = {version = "*", markers = "sys_platform == \"win32\" and python_ve name = "hyperframe" version = "6.0.1" description = "HTTP/2 framing layer for Python" -optional = false +optional = true python-versions = ">=3.6.1" files = [ {file = "hyperframe-6.0.1-py3-none-any.whl", hash = "sha256:0ec6bafd80d8ad2195c4f03aacba3a8265e57bc4cff261e802bf39970ed02a15"}, @@ -1736,7 +1736,7 @@ files = [ name = "importlib-metadata" version = "6.11.0" description = "Read metadata from Python packages" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "importlib_metadata-6.11.0-py3-none-any.whl", hash = "sha256:f0afba6205ad8f8947c7d338b5342d5db2afbfd82f9cbef7879a9539cc12eb9b"}, @@ -1755,7 +1755,7 @@ testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs name = "importlib-resources" version = "6.1.1" description = "Read resources from Python packages" -optional = false +optional = true python-versions = ">=3.8" files = [ {file 
= "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"}, @@ -1823,7 +1823,7 @@ i18n = ["Babel (>=2.7)"] name = "jmespath" version = "1.0.1" description = "JSON Matching Expressions" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, @@ -1845,7 +1845,7 @@ files = [ name = "jsonschema" version = "4.20.0" description = "An implementation of JSON Schema validation for Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "jsonschema-4.20.0-py3-none-any.whl", hash = "sha256:ed6231f0429ecf966f5bc8dfef245998220549cbbcf140f913b7464c52c3b6b3"}, @@ -1866,7 +1866,7 @@ format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339- name = "jsonschema-specifications" version = "2023.11.2" description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "jsonschema_specifications-2023.11.2-py3-none-any.whl", hash = "sha256:e74ba7c0a65e8cb49dc26837d6cfe576557084a8b423ed16a420984228104f93"}, @@ -1880,7 +1880,7 @@ referencing = ">=0.31.0" name = "kiwisolver" version = "1.4.5" description = "A fast implementation of the Cassowary constraint solver" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "kiwisolver-1.4.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:05703cf211d585109fcd72207a31bb170a0f22144d68298dc5e61b3c946518af"}, @@ -1993,7 +1993,7 @@ files = [ name = "kubernetes" version = "28.1.0" description = "Kubernetes python client" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "kubernetes-28.1.0-py2.py3-none-any.whl", hash = "sha256:10f56f8160dcb73647f15fafda268e7f60cf7dbc9f8e46d52fcd46d3beb0c18d"}, @@ -2019,7 +2019,7 @@ adal = ["adal (>=1.0.2)"] name = "llama-cpp-python" version = "0.2.53" description = "Python bindings for the llama.cpp library" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "llama_cpp_python-0.2.53.tar.gz", hash = "sha256:f7ff8eda538ca6c80521a8bbf80d3ef4527ecb28f6d08fa9b3bb1f0cfc3b684e"}, @@ -2084,7 +2084,7 @@ query-tools = ["guidance (>=0.0.64,<0.0.65)", "jsonpath-ng (>=1.6.0,<2.0.0)", "l name = "llama-index-embeddings-huggingface" version = "0.1.4" description = "llama-index embeddings huggingface integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_embeddings_huggingface-0.1.4-py3-none-any.whl", hash = "sha256:9c80539f3cbbd7191c219e2cda154b1a7151aa912196bc537c16f40e18e4187c"}, @@ -2101,7 +2101,7 @@ transformers = ">=4.37.0,<5.0.0" name = "llama-index-embeddings-openai" version = "0.1.6" description = "llama-index embeddings openai integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_embeddings_openai-0.1.6-py3-none-any.whl", hash = "sha256:f8b2dded0718e9f57c08ce352d186941e6acf7de414c64219210b66f7a6d6d2d"}, @@ -2115,7 +2115,7 @@ llama-index-core = ">=0.10.1,<0.11.0" name = "llama-index-llms-llama-cpp" version = "0.1.3" description = "llama-index llms llama cpp integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_llms_llama_cpp-0.1.3-py3-none-any.whl", hash = "sha256:58ab5f492946b46544a057cec6b98268b610dbd99462d749b69a63d11577aa6b"}, @@ -2130,7 
+2130,7 @@ llama-index-core = ">=0.10.1,<0.11.0" name = "llama-index-llms-ollama" version = "0.1.2" description = "llama-index llms ollama integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_llms_ollama-0.1.2-py3-none-any.whl", hash = "sha256:967d816e13a6e064f8454b2faf7b70c749a52230258cac67b6025e20db6e988c"}, @@ -2144,7 +2144,7 @@ llama-index-core = ">=0.10.1,<0.11.0" name = "llama-index-llms-openai" version = "0.1.6" description = "llama-index llms openai integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_llms_openai-0.1.6-py3-none-any.whl", hash = "sha256:4260ad31c3444e97ec8a8d061cb6dbf1074262b82341a2b69d2b27e8a23efe62"}, @@ -2158,7 +2158,7 @@ llama-index-core = ">=0.10.1,<0.11.0" name = "llama-index-llms-openai-like" version = "0.1.3" description = "llama-index llms openai like integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_llms_openai_like-0.1.3-py3-none-any.whl", hash = "sha256:0cf2c56f027c5e1f17c7fc606ad2b991f61daa75a88ba35bb5ea97de9766d4d3"}, @@ -2192,7 +2192,7 @@ pypdf = ">=4.0.1,<5.0.0" name = "llama-index-vector-stores-chroma" version = "0.1.4" description = "llama-index vector_stores chroma integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_vector_stores_chroma-0.1.4-py3-none-any.whl", hash = "sha256:f475a450431ee4d9b2915ba9da2112dfdfacaee1ea220b8603720be1c116786c"}, @@ -2209,7 +2209,7 @@ tokenizers = ">=0.15.1,<0.16.0" name = "llama-index-vector-stores-postgres" version = "0.1.2" description = "llama-index vector_stores postgres integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_vector_stores_postgres-0.1.2-py3-none-any.whl", hash = "sha256:75e8c73b553e4ca29ca4e55aedcd6abcdaf5bba7b12baf89a66c81f9b0ef5054"}, @@ -2227,7 +2227,7 @@ sqlalchemy = {version = ">=2.0.25,<3.0.0", extras = ["asyncio"]} name = "llama-index-vector-stores-qdrant" version = "0.1.3" description = "llama-index vector_stores qdrant integration" -optional = false +optional = true python-versions = ">=3.8.1,<4.0" files = [ {file = "llama_index_vector_stores_qdrant-0.1.3-py3-none-any.whl", hash = "sha256:15805a37310830085e4e3399c0e4c87bb1dcaae008b89808b2a883726381cbe7"}, @@ -2258,7 +2258,7 @@ pydantic = ">=1.10" name = "markdown-it-py" version = "3.0.0" description = "Python port of markdown-it. Markdown parsing, done right!" 
-optional = false +optional = true python-versions = ">=3.8" files = [ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, @@ -2371,7 +2371,7 @@ tests = ["pytest", "pytz", "simplejson"] name = "matplotlib" version = "3.8.2" description = "Python plotting package" -optional = false +optional = true python-versions = ">=3.9" files = [ {file = "matplotlib-3.8.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:09796f89fb71a0c0e1e2f4bdaf63fb2cefc84446bb963ecdeb40dfee7dfa98c7"}, @@ -2419,7 +2419,7 @@ python-dateutil = ">=2.7" name = "mdurl" version = "0.1.2" description = "Markdown URL utilities" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, @@ -2430,7 +2430,7 @@ files = [ name = "mmh3" version = "4.0.1" description = "Python extension for MurmurHash (MurmurHash3), a set of fast and robust hash functions." -optional = false +optional = true python-versions = "*" files = [ {file = "mmh3-4.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b719ba87232749095011d567a36a25e40ed029fc61c47e74a12416d8bb60b311"}, @@ -2506,7 +2506,7 @@ test = ["mypy (>=1.0)", "pytest (>=7.0.0)"] name = "monotonic" version = "1.6" description = "An implementation of time.monotonic() for Python 2 & < 3.3" -optional = false +optional = true python-versions = "*" files = [ {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"}, @@ -2517,7 +2517,7 @@ files = [ name = "mpmath" version = "1.3.0" description = "Python library for arbitrary-precision floating-point arithmetic" -optional = false +optional = true python-versions = "*" files = [ {file = "mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c"}, @@ -2783,7 +2783,7 @@ files = [ name = "nvidia-cublas-cu12" version = "12.1.3.1" description = "CUBLAS native runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cublas_cu12-12.1.3.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:ee53ccca76a6fc08fb9701aa95b6ceb242cdaab118c3bb152af4e579af792728"}, @@ -2794,7 +2794,7 @@ files = [ name = "nvidia-cuda-cupti-cu12" version = "12.1.105" description = "CUDA profiling tools runtime libs." 
-optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cuda_cupti_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:e54fde3983165c624cb79254ae9818a456eb6e87a7fd4d56a2352c24ee542d7e"}, @@ -2805,7 +2805,7 @@ files = [ name = "nvidia-cuda-nvrtc-cu12" version = "12.1.105" description = "NVRTC native runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cuda_nvrtc_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:339b385f50c309763ca65456ec75e17bbefcbbf2893f462cb8b90584cd27a1c2"}, @@ -2816,7 +2816,7 @@ files = [ name = "nvidia-cuda-runtime-cu12" version = "12.1.105" description = "CUDA Runtime native Libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cuda_runtime_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:6e258468ddf5796e25f1dc591a31029fa317d97a0a94ed93468fc86301d61e40"}, @@ -2827,7 +2827,7 @@ files = [ name = "nvidia-cudnn-cu12" version = "8.9.2.26" description = "cuDNN runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cudnn_cu12-8.9.2.26-py3-none-manylinux1_x86_64.whl", hash = "sha256:5ccb288774fdfb07a7e7025ffec286971c06d8d7b4fb162525334616d7629ff9"}, @@ -2840,7 +2840,7 @@ nvidia-cublas-cu12 = "*" name = "nvidia-cufft-cu12" version = "11.0.2.54" description = "CUFFT native runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cufft_cu12-11.0.2.54-py3-none-manylinux1_x86_64.whl", hash = "sha256:794e3948a1aa71fd817c3775866943936774d1c14e7628c74f6f7417224cdf56"}, @@ -2851,7 +2851,7 @@ files = [ name = "nvidia-curand-cu12" version = "10.3.2.106" description = "CURAND native runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_curand_cu12-10.3.2.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:9d264c5036dde4e64f1de8c50ae753237c12e0b1348738169cd0f8a536c0e1e0"}, @@ -2862,7 +2862,7 @@ files = [ name = "nvidia-cusolver-cu12" version = "11.4.5.107" description = "CUDA solver native runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cusolver_cu12-11.4.5.107-py3-none-manylinux1_x86_64.whl", hash = "sha256:8a7ec542f0412294b15072fa7dab71d31334014a69f953004ea7a118206fe0dd"}, @@ -2878,7 +2878,7 @@ nvidia-nvjitlink-cu12 = "*" name = "nvidia-cusparse-cu12" version = "12.1.0.106" description = "CUSPARSE native runtime libraries" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_cusparse_cu12-12.1.0.106-py3-none-manylinux1_x86_64.whl", hash = "sha256:f3b50f42cf363f86ab21f720998517a659a48131e8d538dc02f8768237bd884c"}, @@ -2892,7 +2892,7 @@ nvidia-nvjitlink-cu12 = "*" name = "nvidia-nccl-cu12" version = "2.18.1" description = "NVIDIA Collective Communication Library (NCCL) Runtime" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_nccl_cu12-2.18.1-py3-none-manylinux1_x86_64.whl", hash = "sha256:1a6c4acefcbebfa6de320f412bf7866de856e786e0462326ba1bac40de0b5e71"}, @@ -2902,7 +2902,7 @@ files = [ name = "nvidia-nvjitlink-cu12" version = "12.3.101" description = "Nvidia JIT LTO Library" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_nvjitlink_cu12-12.3.101-py3-none-manylinux1_x86_64.whl", hash = "sha256:64335a8088e2b9d196ae8665430bc6a2b7e6ef2eb877a9c735c804bd4ff6467c"}, @@ -2913,7 +2913,7 @@ files = [ name = "nvidia-nvtx-cu12" version = 
"12.1.105" description = "NVIDIA Tools Extension" -optional = false +optional = true python-versions = ">=3" files = [ {file = "nvidia_nvtx_cu12-12.1.105-py3-none-manylinux1_x86_64.whl", hash = "sha256:dc21cf308ca5691e7c04d962e213f8a4aa9bbfa23d95412f452254c2caeb09e5"}, @@ -2924,7 +2924,7 @@ files = [ name = "oauthlib" version = "3.2.2" description = "A generic, spec-compliant, thorough implementation of the OAuth request-signing logic" -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "oauthlib-3.2.2-py3-none-any.whl", hash = "sha256:8139f29aac13e25d502680e9e19963e83f16838d48a0d71c287fe40e7067fbca"}, @@ -2940,7 +2940,7 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"] name = "onnxruntime" version = "1.17.1" description = "ONNX Runtime is a runtime accelerator for Machine Learning models" -optional = false +optional = true python-versions = "*" files = [ {file = "onnxruntime-1.17.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:d43ac17ac4fa3c9096ad3c0e5255bb41fd134560212dc124e7f52c3159af5d21"}, @@ -3005,7 +3005,7 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] name = "opentelemetry-api" version = "1.21.0" description = "OpenTelemetry Python API" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_api-1.21.0-py3-none-any.whl", hash = "sha256:4bb86b28627b7e41098f0e93280fe4892a1abed1b79a19aec6f928f39b17dffb"}, @@ -3020,7 +3020,7 @@ importlib-metadata = ">=6.0,<7.0" name = "opentelemetry-exporter-otlp-proto-common" version = "1.21.0" description = "OpenTelemetry Protobuf encoding" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_exporter_otlp_proto_common-1.21.0-py3-none-any.whl", hash = "sha256:97b1022b38270ec65d11fbfa348e0cd49d12006485c2321ea3b1b7037d42b6ec"}, @@ -3035,7 +3035,7 @@ opentelemetry-proto = "1.21.0" name = "opentelemetry-exporter-otlp-proto-grpc" version = "1.21.0" description = "OpenTelemetry Collector Protobuf over gRPC Exporter" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_exporter_otlp_proto_grpc-1.21.0-py3-none-any.whl", hash = "sha256:ab37c63d6cb58d6506f76d71d07018eb1f561d83e642a8f5aa53dddf306087a4"}, @@ -3059,7 +3059,7 @@ test = ["pytest-grpc"] name = "opentelemetry-instrumentation" version = "0.42b0" description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_instrumentation-0.42b0-py3-none-any.whl", hash = "sha256:65ae54ddb90ca2d05d2d16bf6863173e7141eba1bbbf41fc9bbb02446adbe369"}, @@ -3075,7 +3075,7 @@ wrapt = ">=1.0.0,<2.0.0" name = "opentelemetry-instrumentation-asgi" version = "0.42b0" description = "ASGI instrumentation for OpenTelemetry" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_instrumentation_asgi-0.42b0-py3-none-any.whl", hash = "sha256:79b7278fb614aba1bf2211060960d3e8501c1d7d9314b857b30ad80ba34a2805"}, @@ -3097,7 +3097,7 @@ test = ["opentelemetry-instrumentation-asgi[instruments]", "opentelemetry-test-u name = "opentelemetry-instrumentation-fastapi" version = "0.42b0" description = "OpenTelemetry FastAPI Instrumentation" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_instrumentation_fastapi-0.42b0-py3-none-any.whl", hash = "sha256:d53a26c4859767d5ba67109038cabc7165d97a8a8b7654ccde4ce290036d1725"}, @@ -3119,7 +3119,7 @@ test = 
["httpx (>=0.22,<1.0)", "opentelemetry-instrumentation-fastapi[instrument name = "opentelemetry-proto" version = "1.21.0" description = "OpenTelemetry Python Proto" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_proto-1.21.0-py3-none-any.whl", hash = "sha256:32fc4248e83eebd80994e13963e683f25f3b443226336bb12b5b6d53638f50ba"}, @@ -3133,7 +3133,7 @@ protobuf = ">=3.19,<5.0" name = "opentelemetry-sdk" version = "1.21.0" description = "OpenTelemetry Python SDK" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_sdk-1.21.0-py3-none-any.whl", hash = "sha256:9fe633243a8c655fedace3a0b89ccdfc654c0290ea2d8e839bd5db3131186f73"}, @@ -3149,7 +3149,7 @@ typing-extensions = ">=3.7.4" name = "opentelemetry-semantic-conventions" version = "0.42b0" description = "OpenTelemetry Semantic Conventions" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_semantic_conventions-0.42b0-py3-none-any.whl", hash = "sha256:5cd719cbfec448af658860796c5d0fcea2fdf0945a2bed2363f42cb1ee39f526"}, @@ -3160,7 +3160,7 @@ files = [ name = "opentelemetry-util-http" version = "0.42b0" description = "Web util for OpenTelemetry" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "opentelemetry_util_http-0.42b0-py3-none-any.whl", hash = "sha256:764069ed2f7e9a98ed1a7a87111f838000484e388e81f467405933be4b0306c6"}, @@ -3230,7 +3230,7 @@ files = [ name = "overrides" version = "7.4.0" description = "A decorator to automatically detect mismatch when overriding a method." -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"}, @@ -3327,7 +3327,7 @@ files = [ name = "pgvector" version = "0.2.5" description = "pgvector support for Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "pgvector-0.2.5-py2.py3-none-any.whl", hash = "sha256:5e5e93ec4d3c45ab1fa388729d56c602f6966296e19deee8878928c6d567e41b"}, @@ -3437,7 +3437,7 @@ testing = ["pytest", "pytest-benchmark"] name = "portalocker" version = "2.8.2" description = "Wraps the portalocker recipe for easy usage" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "portalocker-2.8.2-py3-none-any.whl", hash = "sha256:cfb86acc09b9aa7c3b43594e19be1345b9d16af3feb08bf92f23d4dce513a28e"}, @@ -3456,7 +3456,7 @@ tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "p name = "posthog" version = "3.1.0" description = "Integrate PostHog into any python application." 
-optional = false +optional = true python-versions = "*" files = [ {file = "posthog-3.1.0-py2.py3-none-any.whl", hash = "sha256:acd033530bdfc275dce5587f205f62378991ecb9b7cd5479e79c7f4ac575d319"}, @@ -3497,7 +3497,7 @@ virtualenv = ">=20.10.0" name = "protobuf" version = "4.25.1" description = "" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"}, @@ -3517,7 +3517,7 @@ files = [ name = "psycopg2-binary" version = "2.9.9" description = "psycopg2 - Python-PostgreSQL Database Adapter" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "psycopg2-binary-2.9.9.tar.gz", hash = "sha256:7f01846810177d829c7692f1f5ada8096762d9172af1b1a28d4ab5b77c923c1c"}, @@ -3598,7 +3598,7 @@ files = [ name = "pulsar-client" version = "3.3.0" description = "Apache Pulsar Python client library" -optional = false +optional = true python-versions = "*" files = [ {file = "pulsar_client-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:c31afd3e67a044ff93177df89e08febf214cc965e95ede097d9fe8755af00e01"}, @@ -3645,7 +3645,7 @@ functions = ["apache-bookkeeper-client (>=4.16.1)", "grpcio (>=1.8.2)", "prometh name = "pyasn1" version = "0.5.1" description = "Pure-Python implementation of ASN.1 types and DER/BER/CER codecs (X.208)" -optional = false +optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pyasn1-0.5.1-py2.py3-none-any.whl", hash = "sha256:4439847c58d40b1d0a573d07e3856e95333f1976294494c325775aeca506eb58"}, @@ -3656,7 +3656,7 @@ files = [ name = "pyasn1-modules" version = "0.3.0" description = "A collection of ASN.1-based protocols modules" -optional = false +optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" files = [ {file = "pyasn1_modules-0.3.0-py2.py3-none-any.whl", hash = "sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d"}, @@ -3838,7 +3838,7 @@ python-dotenv = ">=0.21.0" name = "pydub" version = "0.25.1" description = "Manipulate audio with an simple and easy high level interface" -optional = false +optional = true python-versions = "*" files = [ {file = "pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6"}, @@ -3849,7 +3849,7 @@ files = [ name = "pygments" version = "2.17.2" description = "Pygments is a syntax highlighting package written in Python." 
-optional = false +optional = true python-versions = ">=3.7" files = [ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, @@ -3922,7 +3922,7 @@ files = [ name = "pyparsing" version = "3.1.1" description = "pyparsing module - Classes and methods to define and execute parsing grammars" -optional = false +optional = true python-versions = ">=3.6.8" files = [ {file = "pyparsing-3.1.1-py3-none-any.whl", hash = "sha256:32c7c0b711493c72ff18a981d24f28aaf9c1fb7ed5e9667c9e84e3db623bdbfb"}, @@ -3954,7 +3954,7 @@ image = ["Pillow (>=8.0.0)"] name = "pypika" version = "0.48.9" description = "A SQL query builder API for Python" -optional = false +optional = true python-versions = "*" files = [ {file = "PyPika-0.48.9.tar.gz", hash = "sha256:838836a61747e7c8380cd1b7ff638694b7a7335345d0f559b04b2cd832ad5378"}, @@ -3964,7 +3964,7 @@ files = [ name = "pyproject-hooks" version = "1.0.0" description = "Wrappers to call pyproject.toml-based build backend hooks." -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "pyproject_hooks-1.0.0-py3-none-any.whl", hash = "sha256:283c11acd6b928d2f6a7c73fa0d01cb2bdc5f07c57a2eeb6e83d5e56b97976f8"}, @@ -3975,7 +3975,7 @@ files = [ name = "pyreadline3" version = "3.4.1" description = "A python implementation of GNU readline." -optional = false +optional = true python-versions = "*" files = [ {file = "pyreadline3-3.4.1-py3-none-any.whl", hash = "sha256:b0efb6516fd4fb07b45949053826a62fa4cb353db5be2bbb4a7aa1fdd1e345fb"}, @@ -4095,7 +4095,7 @@ files = [ name = "pywin32" version = "306" description = "Python for Window Extensions" -optional = false +optional = true python-versions = "*" files = [ {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, @@ -4177,7 +4177,7 @@ files = [ name = "qdrant-client" version = "1.7.3" description = "Client library for the Qdrant vector search engine" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "qdrant_client-1.7.3-py3-none-any.whl", hash = "sha256:b062420ba55eb847652c7d2a26404fb1986bea13aa785763024013f96a7a915c"}, @@ -4200,7 +4200,7 @@ fastembed = ["fastembed (==0.1.1)"] name = "referencing" version = "0.32.0" description = "JSON Referencing + Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "referencing-0.32.0-py3-none-any.whl", hash = "sha256:bdcd3efb936f82ff86f993093f6da7435c7de69a3b3a5a06678a6050184bee99"}, @@ -4333,7 +4333,7 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "requests-oauthlib" version = "1.3.1" description = "OAuthlib authentication support for Requests." 
-optional = false +optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" files = [ {file = "requests-oauthlib-1.3.1.tar.gz", hash = "sha256:75beac4a47881eeb94d5ea5d6ad31ef88856affe2332b9aafb52c6452ccf0d7a"}, @@ -4351,7 +4351,7 @@ rsa = ["oauthlib[signedtoken] (>=3.0.0)"] name = "rich" version = "13.7.0" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" -optional = false +optional = true python-versions = ">=3.7.0" files = [ {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"}, @@ -4369,7 +4369,7 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] name = "rpds-py" version = "0.14.1" description = "Python bindings to Rust's persistent data structures (rpds)" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "rpds_py-0.14.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:47fce2015dcdb2bdf7184d1855ca57b4c54a50b717f38097270be7574a648a69"}, @@ -4477,7 +4477,7 @@ files = [ name = "rsa" version = "4.9" description = "Pure-Python RSA implementation" -optional = false +optional = true python-versions = ">=3.6,<4" files = [ {file = "rsa-4.9-py3-none-any.whl", hash = "sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7"}, @@ -4489,35 +4489,35 @@ pyasn1 = ">=0.1.3" [[package]] name = "ruff" -version = "0.1.8" +version = "0.2.2" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.1.8-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7de792582f6e490ae6aef36a58d85df9f7a0cfd1b0d4fe6b4fb51803a3ac96fa"}, - {file = "ruff-0.1.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:c8e3255afd186c142eef4ec400d7826134f028a85da2146102a1172ecc7c3696"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ff78a7583020da124dd0deb835ece1d87bb91762d40c514ee9b67a087940528b"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd8ee69b02e7bdefe1e5da2d5b6eaaddcf4f90859f00281b2333c0e3a0cc9cd6"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a05b0ddd7ea25495e4115a43125e8a7ebed0aa043c3d432de7e7d6e8e8cd6448"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e6f08ca730f4dc1b76b473bdf30b1b37d42da379202a059eae54ec7fc1fbcfed"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f35960b02df6b827c1b903091bb14f4b003f6cf102705efc4ce78132a0aa5af3"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7d076717c67b34c162da7c1a5bda16ffc205e0e0072c03745275e7eab888719f"}, - {file = "ruff-0.1.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6a21ab023124eafb7cef6d038f835cb1155cd5ea798edd8d9eb2f8b84be07d9"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:ce697c463458555027dfb194cb96d26608abab920fa85213deb5edf26e026664"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:db6cedd9ffed55548ab313ad718bc34582d394e27a7875b4b952c2d29c001b26"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:05ffe9dbd278965271252704eddb97b4384bf58b971054d517decfbf8c523f05"}, - {file = "ruff-0.1.8-py3-none-musllinux_1_2_x86_64.whl", hash = 
"sha256:5daaeaf00ae3c1efec9742ff294b06c3a2a9db8d3db51ee4851c12ad385cda30"}, - {file = "ruff-0.1.8-py3-none-win32.whl", hash = "sha256:e49fbdfe257fa41e5c9e13c79b9e79a23a79bd0e40b9314bc53840f520c2c0b3"}, - {file = "ruff-0.1.8-py3-none-win_amd64.whl", hash = "sha256:f41f692f1691ad87f51708b823af4bb2c5c87c9248ddd3191c8f088e66ce590a"}, - {file = "ruff-0.1.8-py3-none-win_arm64.whl", hash = "sha256:aa8ee4f8440023b0a6c3707f76cadce8657553655dcbb5fc9b2f9bb9bee389f6"}, - {file = "ruff-0.1.8.tar.gz", hash = "sha256:f7ee467677467526cfe135eab86a40a0e8db43117936ac4f9b469ce9cdb3fb62"}, + {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:0a9efb032855ffb3c21f6405751d5e147b0c6b631e3ca3f6b20f917572b97eb6"}, + {file = "ruff-0.2.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d450b7fbff85913f866a5384d8912710936e2b96da74541c82c1b458472ddb39"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ecd46e3106850a5c26aee114e562c329f9a1fbe9e4821b008c4404f64ff9ce73"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e22676a5b875bd72acd3d11d5fa9075d3a5f53b877fe7b4793e4673499318ba"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1695700d1e25a99d28f7a1636d85bafcc5030bba9d0578c0781ba1790dbcf51c"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:b0c232af3d0bd8f521806223723456ffebf8e323bd1e4e82b0befb20ba18388e"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f63d96494eeec2fc70d909393bcd76c69f35334cdbd9e20d089fb3f0640216ca"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6a61ea0ff048e06de273b2e45bd72629f470f5da8f71daf09fe481278b175001"}, + {file = "ruff-0.2.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e1439c8f407e4f356470e54cdecdca1bd5439a0673792dbe34a2b0a551a2fe3"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:940de32dc8853eba0f67f7198b3e79bc6ba95c2edbfdfac2144c8235114d6726"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:0c126da55c38dd917621552ab430213bdb3273bb10ddb67bc4b761989210eb6e"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3b65494f7e4bed2e74110dac1f0d17dc8e1f42faaa784e7c58a98e335ec83d7e"}, + {file = "ruff-0.2.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:1ec49be4fe6ddac0503833f3ed8930528e26d1e60ad35c2446da372d16651ce9"}, + {file = "ruff-0.2.2-py3-none-win32.whl", hash = "sha256:d920499b576f6c68295bc04e7b17b6544d9d05f196bb3aac4358792ef6f34325"}, + {file = "ruff-0.2.2-py3-none-win_amd64.whl", hash = "sha256:cc9a91ae137d687f43a44c900e5d95e9617cb37d4c989e462980ba27039d239d"}, + {file = "ruff-0.2.2-py3-none-win_arm64.whl", hash = "sha256:c9d15fc41e6054bfc7200478720570078f0b41c9ae4f010bcc16bd6f4d1aacdd"}, + {file = "ruff-0.2.2.tar.gz", hash = "sha256:e62ed7f36b3068a30ba39193a14274cd706bc486fad521276458022f7bccb31d"}, ] [[package]] name = "s3transfer" version = "0.10.0" description = "An Amazon S3 Transfer Manager" -optional = false +optional = true python-versions = ">= 3.8" files = [ {file = "s3transfer-0.10.0-py3-none-any.whl", hash = "sha256:3cdb40f5cfa6966e812209d0994f2a4709b561c88e90cf00c2696d2df4e56b2e"}, @@ -4649,100 +4649,11 @@ tensorflow = ["safetensors[numpy]", "tensorflow (>=2.11.0)"] testing = ["h5py (>=3.7.0)", "huggingface_hub (>=0.12.1)", "hypothesis 
(>=6.70.2)", "pytest (>=7.2.0)", "pytest-benchmark (>=4.0.0)", "safetensors[numpy]", "setuptools_rust (>=1.5.2)"] torch = ["safetensors[numpy]", "torch (>=1.10)"] -[[package]] -name = "scikit-learn" -version = "1.3.2" -description = "A set of python modules for machine learning and data mining" -optional = false -python-versions = ">=3.8" -files = [ - {file = "scikit-learn-1.3.2.tar.gz", hash = "sha256:a2f54c76accc15a34bfb9066e6c7a56c1e7235dda5762b990792330b52ccfb05"}, - {file = "scikit_learn-1.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e326c0eb5cf4d6ba40f93776a20e9a7a69524c4db0757e7ce24ba222471ee8a1"}, - {file = "scikit_learn-1.3.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:535805c2a01ccb40ca4ab7d081d771aea67e535153e35a1fd99418fcedd1648a"}, - {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1215e5e58e9880b554b01187b8c9390bf4dc4692eedeaf542d3273f4785e342c"}, - {file = "scikit_learn-1.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ee107923a623b9f517754ea2f69ea3b62fc898a3641766cb7deb2f2ce450161"}, - {file = "scikit_learn-1.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:35a22e8015048c628ad099da9df5ab3004cdbf81edc75b396fd0cff8699ac58c"}, - {file = "scikit_learn-1.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6fb6bc98f234fda43163ddbe36df8bcde1d13ee176c6dc9b92bb7d3fc842eb66"}, - {file = "scikit_learn-1.3.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:18424efee518a1cde7b0b53a422cde2f6625197de6af36da0b57ec502f126157"}, - {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3271552a5eb16f208a6f7f617b8cc6d1f137b52c8a1ef8edf547db0259b2c9fb"}, - {file = "scikit_learn-1.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc4144a5004a676d5022b798d9e573b05139e77f271253a4703eed295bde0433"}, - {file = "scikit_learn-1.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:67f37d708f042a9b8d59551cf94d30431e01374e00dc2645fa186059c6c5d78b"}, - {file = "scikit_learn-1.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:8db94cd8a2e038b37a80a04df8783e09caac77cbe052146432e67800e430c028"}, - {file = "scikit_learn-1.3.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:61a6efd384258789aa89415a410dcdb39a50e19d3d8410bd29be365bcdd512d5"}, - {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb06f8dce3f5ddc5dee1715a9b9f19f20d295bed8e3cd4fa51e1d050347de525"}, - {file = "scikit_learn-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b2de18d86f630d68fe1f87af690d451388bb186480afc719e5f770590c2ef6c"}, - {file = "scikit_learn-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:0402638c9a7c219ee52c94cbebc8fcb5eb9fe9c773717965c1f4185588ad3107"}, - {file = "scikit_learn-1.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a19f90f95ba93c1a7f7924906d0576a84da7f3b2282ac3bfb7a08a32801add93"}, - {file = "scikit_learn-1.3.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:b8692e395a03a60cd927125eef3a8e3424d86dde9b2370d544f0ea35f78a8073"}, - {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15e1e94cc23d04d39da797ee34236ce2375ddea158b10bee3c343647d615581d"}, - {file = "scikit_learn-1.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:785a2213086b7b1abf037aeadbbd6d67159feb3e30263434139c98425e3dcfcf"}, - {file = "scikit_learn-1.3.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:64381066f8aa63c2710e6b56edc9f0894cc7bf59bd71b8ce5613a4559b6145e0"}, - {file = "scikit_learn-1.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6c43290337f7a4b969d207e620658372ba3c1ffb611f8bc2b6f031dc5c6d1d03"}, - {file = "scikit_learn-1.3.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:dc9002fc200bed597d5d34e90c752b74df516d592db162f756cc52836b38fe0e"}, - {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1d08ada33e955c54355d909b9c06a4789a729977f165b8bae6f225ff0a60ec4a"}, - {file = "scikit_learn-1.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:763f0ae4b79b0ff9cca0bf3716bcc9915bdacff3cebea15ec79652d1cc4fa5c9"}, - {file = "scikit_learn-1.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:ed932ea780517b00dae7431e031faae6b49b20eb6950918eb83bd043237950e0"}, -] - -[package.dependencies] -joblib = ">=1.1.1" -numpy = ">=1.17.3,<2.0" -scipy = ">=1.5.0" -threadpoolctl = ">=2.0.0" - -[package.extras] -benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] -docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=6.0.0)", "sphinx-copybutton (>=0.5.2)", "sphinx-gallery (>=0.10.1)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] -examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.14.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] -tests = ["black (>=23.3.0)", "matplotlib (>=3.1.3)", "mypy (>=1.3)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=7.1.2)", "pytest-cov (>=2.9.0)", "ruff (>=0.0.272)", "scikit-image (>=0.16.2)"] - -[[package]] -name = "scipy" -version = "1.11.4" -description = "Fundamental algorithms for scientific computing in Python" -optional = false -python-versions = ">=3.9" -files = [ - {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"}, - {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"}, - {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"}, - {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"}, - {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"}, - {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"}, - {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"}, - {file = 
"scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"}, - {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"}, - {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"}, - {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"}, - {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"}, - {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"}, - {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"}, - {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"}, - {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"}, - {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"}, - {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"}, - {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"}, -] - -[package.dependencies] -numpy = ">=1.21.6,<1.28.0" - -[package.extras] -dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"] -doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] -test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] - [[package]] name = "semantic-version" version = "2.10.0" description = "A library implementing the 'SemVer' scheme." 
-optional = false +optional = true python-versions = ">=2.7" files = [ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, @@ -4753,82 +4664,6 @@ files = [ dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] doc = ["Sphinx", "sphinx-rtd-theme"] -[[package]] -name = "sentence-transformers" -version = "2.2.2" -description = "Multilingual text embeddings" -optional = false -python-versions = ">=3.6.0" -files = [ - {file = "sentence-transformers-2.2.2.tar.gz", hash = "sha256:dbc60163b27de21076c9a30d24b5b7b6fa05141d68cf2553fa9a77bf79a29136"}, -] - -[package.dependencies] -huggingface-hub = ">=0.4.0" -nltk = "*" -numpy = "*" -scikit-learn = "*" -scipy = "*" -sentencepiece = "*" -torch = ">=1.6.0" -torchvision = "*" -tqdm = "*" -transformers = ">=4.6.0,<5.0.0" - -[[package]] -name = "sentencepiece" -version = "0.1.99" -description = "SentencePiece python wrapper" -optional = false -python-versions = "*" -files = [ - {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0eb528e70571b7c02723e5804322469b82fe7ea418c96051d0286c0fa028db73"}, - {file = "sentencepiece-0.1.99-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:77d7fafb2c4e4659cbdf303929503f37a26eabc4ff31d3a79bf1c5a1b338caa7"}, - {file = "sentencepiece-0.1.99-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:be9cf5b9e404c245aeb3d3723c737ba7a8f5d4ba262ef233a431fa6c45f732a0"}, - {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:baed1a26464998f9710d20e52607c29ffd4293e7c71c6a1f83f51ad0911ec12c"}, - {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9832f08bb372d4c8b567612f8eab9e36e268dff645f1c28f9f8e851be705f6d1"}, - {file = "sentencepiece-0.1.99-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:019e7535108e309dae2b253a75834fc3128240aa87c00eb80732078cdc182588"}, - {file = "sentencepiece-0.1.99-cp310-cp310-win32.whl", hash = "sha256:fa16a830416bb823fa2a52cbdd474d1f7f3bba527fd2304fb4b140dad31bb9bc"}, - {file = "sentencepiece-0.1.99-cp310-cp310-win_amd64.whl", hash = "sha256:14b0eccb7b641d4591c3e12ae44cab537d68352e4d3b6424944f0c447d2348d5"}, - {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6d3c56f24183a1e8bd61043ff2c58dfecdc68a5dd8955dc13bab83afd5f76b81"}, - {file = "sentencepiece-0.1.99-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ed6ea1819fd612c989999e44a51bf556d0ef6abfb553080b9be3d347e18bcfb7"}, - {file = "sentencepiece-0.1.99-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a2a0260cd1fb7bd8b4d4f39dc2444a8d5fd4e0a0c4d5c899810ef1abf99b2d45"}, - {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8a1abff4d1ff81c77cac3cc6fefa34fa4b8b371e5ee51cb7e8d1ebc996d05983"}, - {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:004e6a621d4bc88978eecb6ea7959264239a17b70f2cbc348033d8195c9808ec"}, - {file = "sentencepiece-0.1.99-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db361e03342c41680afae5807590bc88aa0e17cfd1a42696a160e4005fcda03b"}, - {file = "sentencepiece-0.1.99-cp311-cp311-win32.whl", hash = "sha256:2d95e19168875b70df62916eb55428a0cbcb834ac51d5a7e664eda74def9e1e0"}, - {file = 
"sentencepiece-0.1.99-cp311-cp311-win_amd64.whl", hash = "sha256:f90d73a6f81248a909f55d8e6ef56fec32d559e1e9af045f0b0322637cb8e5c7"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:62e24c81e74bd87a6e0d63c51beb6527e4c0add67e1a17bac18bcd2076afcfeb"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57efcc2d51caff20d9573567d9fd3f854d9efe613ed58a439c78c9f93101384a"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a904c46197993bd1e95b93a6e373dca2f170379d64441041e2e628ad4afb16f"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d89adf59854741c0d465f0e1525b388c0d174f611cc04af54153c5c4f36088c4"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-win32.whl", hash = "sha256:47c378146928690d1bc106fdf0da768cebd03b65dd8405aa3dd88f9c81e35dba"}, - {file = "sentencepiece-0.1.99-cp36-cp36m-win_amd64.whl", hash = "sha256:9ba142e7a90dd6d823c44f9870abdad45e6c63958eb60fe44cca6828d3b69da2"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b7b1a9ae4d7c6f1f867e63370cca25cc17b6f4886729595b885ee07a58d3cec3"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0f644c9d4d35c096a538507b2163e6191512460035bf51358794a78515b74f7"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c8843d23a0f686d85e569bd6dcd0dd0e0cbc03731e63497ca6d5bacd18df8b85"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33e6f690a1caebb4867a2e367afa1918ad35be257ecdb3455d2bbd787936f155"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-win32.whl", hash = "sha256:8a321866c2f85da7beac74a824b4ad6ddc2a4c9bccd9382529506d48f744a12c"}, - {file = "sentencepiece-0.1.99-cp37-cp37m-win_amd64.whl", hash = "sha256:c42f753bcfb7661c122a15b20be7f684b61fc8592c89c870adf52382ea72262d"}, - {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:85b476406da69c70586f0bb682fcca4c9b40e5059814f2db92303ea4585c650c"}, - {file = "sentencepiece-0.1.99-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cfbcfe13c69d3f87b7fcd5da168df7290a6d006329be71f90ba4f56bc77f8561"}, - {file = "sentencepiece-0.1.99-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:445b0ec381af1cd4eef95243e7180c63d9c384443c16c4c47a28196bd1cda937"}, - {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c6890ea0f2b4703f62d0bf27932e35808b1f679bdb05c7eeb3812b935ba02001"}, - {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fb71af492b0eefbf9f2501bec97bcd043b6812ab000d119eaf4bd33f9e283d03"}, - {file = "sentencepiece-0.1.99-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27b866b5bd3ddd54166bbcbf5c8d7dd2e0b397fac8537991c7f544220b1f67bc"}, - {file = "sentencepiece-0.1.99-cp38-cp38-win32.whl", hash = "sha256:b133e8a499eac49c581c3c76e9bdd08c338cc1939e441fee6f92c0ccb5f1f8be"}, - {file = "sentencepiece-0.1.99-cp38-cp38-win_amd64.whl", hash = "sha256:0eaf3591dd0690a87f44f4df129cf8d05d8a4029b5b6709b489b8e27f9a9bcff"}, - {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38efeda9bbfb55052d482a009c6a37e52f42ebffcea9d3a98a61de7aee356a28"}, - {file = "sentencepiece-0.1.99-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:6c030b081dc1e1bcc9fadc314b19b740715d3d566ad73a482da20d7d46fd444c"}, - {file = "sentencepiece-0.1.99-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:84dbe53e02e4f8a2e45d2ac3e430d5c83182142658e25edd76539b7648928727"}, - {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b0f55d0a0ee1719b4b04221fe0c9f0c3461dc3dabd77a035fa2f4788eb3ef9a"}, - {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:18e800f206cd235dc27dc749299e05853a4e4332e8d3dfd81bf13d0e5b9007d9"}, - {file = "sentencepiece-0.1.99-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ae1c40cda8f9d5b0423cfa98542735c0235e7597d79caf318855cdf971b2280"}, - {file = "sentencepiece-0.1.99-cp39-cp39-win32.whl", hash = "sha256:c84ce33af12ca222d14a1cdd37bd76a69401e32bc68fe61c67ef6b59402f4ab8"}, - {file = "sentencepiece-0.1.99-cp39-cp39-win_amd64.whl", hash = "sha256:350e5c74d739973f1c9643edb80f7cc904dc948578bcb1d43c6f2b173e5d18dd"}, - {file = "sentencepiece-0.1.99.tar.gz", hash = "sha256:189c48f5cb2949288f97ccdb97f0473098d9c3dcf5a3d99d4eabe719ec27297f"}, -] - [[package]] name = "setuptools" version = "69.0.2" @@ -4849,7 +4684,7 @@ testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jar name = "shellingham" version = "1.5.4" description = "Tool to Detect Surrounding Shell" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "shellingham-1.5.4-py2.py3-none-any.whl", hash = "sha256:7ecfff8f2fd72616f7481040475a65b2bf8af90a56c89140852d1120324e8686"}, @@ -4997,7 +4832,7 @@ full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.7 name = "sympy" version = "1.12" description = "Computer algebra system (CAS) in Python" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "sympy-1.12-py3-none-any.whl", hash = "sha256:c3588cd4295d0c0f603d0f2ae780587e64e2efeedb3521e46b9bb1d08d184fa5"}, @@ -5021,17 +4856,6 @@ files = [ [package.extras] doc = ["reno", "sphinx", "tornado (>=4.5)"] -[[package]] -name = "threadpoolctl" -version = "3.2.0" -description = "threadpoolctl" -optional = false -python-versions = ">=3.8" -files = [ - {file = "threadpoolctl-3.2.0-py3-none-any.whl", hash = "sha256:2b7818516e423bdaebb97c723f86a7c6b0a83d3f3b0970328d66f4d9104dc032"}, - {file = "threadpoolctl-3.2.0.tar.gz", hash = "sha256:c96a0ba3bdddeaca37dc4cc7344aafad41cdb8c313f74fdfe387a867bba93355"}, -] - [[package]] name = "tiktoken" version = "0.5.2" @@ -5215,7 +5039,7 @@ testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] name = "tomlkit" version = "0.12.0" description = "Style preserving TOML library" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "tomlkit-0.12.0-py3-none-any.whl", hash = "sha256:926f1f37a1587c7a4f6c7484dae538f1345d96d793d9adab5d3675957b1d0766"}, @@ -5226,7 +5050,7 @@ files = [ name = "toolz" version = "0.12.0" description = "List processing tools and functional utilities" -optional = false +optional = true python-versions = ">=3.5" files = [ {file = "toolz-0.12.0-py3-none-any.whl", hash = "sha256:2059bd4148deb1884bb0eb770a3cde70e7f954cfbbdc2285f1f2de01fd21eb6f"}, @@ -5237,7 +5061,7 @@ files = [ name = "torch" version = "2.1.2" description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration" -optional = false +optional = true python-versions = ">=3.8.0" files = [ {file = "torch-2.1.2-cp310-cp310-manylinux1_x86_64.whl", hash = 
"sha256:3a871edd6c02dae77ad810335c0833391c1a4ce49af21ea8cf0f6a5d2096eea8"}, @@ -5286,44 +5110,6 @@ typing-extensions = "*" dynamo = ["jinja2"] opt-einsum = ["opt-einsum (>=3.3)"] -[[package]] -name = "torchvision" -version = "0.16.2" -description = "image and video datasets and models for torch deep learning" -optional = false -python-versions = ">=3.8" -files = [ - {file = "torchvision-0.16.2-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:bc86f2800cb2c0c1a09c581409cdd6bff66e62f103dc83fc63f73346264c3756"}, - {file = "torchvision-0.16.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b024bd412df6d3a007dcebf311a894eb3c5c21e1af80d12be382bbcb097a7c3a"}, - {file = "torchvision-0.16.2-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:e89f10f3c8351972b6e3fda95bc3e479ea8dbfc9dfcfd2c32902dbad4ba5cfc5"}, - {file = "torchvision-0.16.2-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:96c7583700112a410bdc4e1e4f118c429dab49c29c9a31a2cc3579bc9b08b19d"}, - {file = "torchvision-0.16.2-cp310-cp310-win_amd64.whl", hash = "sha256:9f4032ebb3277fb07ff6a9b818d50a547fb8fcd89d958cfd9e773322454bb688"}, - {file = "torchvision-0.16.2-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:67b1aaf8b8cb02ce75dd445f291a27c8036a502f8c0aa76e28c37a0faac2e153"}, - {file = "torchvision-0.16.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bef30d03e1d1c629761f4dca51d3b7d8a0dc0acce6f4068ab2a1634e8e7b64e0"}, - {file = "torchvision-0.16.2-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:e59cc7b2bd1ab5c0ce4ae382e4e37be8f1c174e8b5de2f6a23c170de9ae28495"}, - {file = "torchvision-0.16.2-cp311-cp311-manylinux2014_aarch64.whl", hash = "sha256:e130b08cc9b3cc73a6c59d6edf032394a322f9579bfd21d14bc2e1d0999aa758"}, - {file = "torchvision-0.16.2-cp311-cp311-win_amd64.whl", hash = "sha256:8692ab1e48807e9604046a6f4beeb67b523294cee1b00828654bb0df2cfce2b2"}, - {file = "torchvision-0.16.2-cp38-cp38-macosx_10_13_x86_64.whl", hash = "sha256:b82732dcf876a37c852772342aa6ee3480c03bb3e2a802ae109fc5f7e28d26e9"}, - {file = "torchvision-0.16.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4b065143d1a720fe8a9077fd4be35d491f98819ec80b3dbbc3ec64d0b707a906"}, - {file = "torchvision-0.16.2-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:bc5f274e4ecd1b86062063cdf4fd385a1d39d147a3a2685fbbde9ff08bb720b8"}, - {file = "torchvision-0.16.2-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:335959c43b371c0474af34c1ef2a52efdc7603c45700d29e4475eeb02984170c"}, - {file = "torchvision-0.16.2-cp38-cp38-win_amd64.whl", hash = "sha256:7fd22d86e08eba321af70cad291020c2cdeac069b00ce88b923ca52e06174769"}, - {file = "torchvision-0.16.2-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:56115268b37f0b75364e3654e47ad9abc66ac34c1f9e5e3dfa89a22d6a40017a"}, - {file = "torchvision-0.16.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:82805f8445b094f9d1e770390ee6cc86855e89955e08ce34af2e2274fc0e5c45"}, - {file = "torchvision-0.16.2-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:3f4bd5fcbc361476e2e78016636ac7d5509e59d9962521f06eb98e6803898182"}, - {file = "torchvision-0.16.2-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:8199acdf8ab066a28b84a5b6f4d97b58976d9e164b1acc3a9d14fccfaf74bb3a"}, - {file = "torchvision-0.16.2-cp39-cp39-win_amd64.whl", hash = "sha256:41dd4fa9f176d563fe9f1b9adef3b7e582cdfb60ce8c9bc51b094a025be687c9"}, -] - -[package.dependencies] -numpy = "*" -pillow = ">=5.3.0,<8.3.dev0 || >=8.4.dev0" -requests = "*" -torch = "2.1.2" - -[package.extras] -scipy = ["scipy"] - [[package]] name = "tqdm" version = "4.66.1" @@ -5416,7 +5202,7 @@ vision = 
["Pillow (>=10.0.1,<=15.0)"] name = "triton" version = "2.1.0" description = "A language and compiler for custom Deep Learning operations" -optional = false +optional = true python-versions = "*" files = [ {file = "triton-2.1.0-0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:66439923a30d5d48399b08a9eae10370f6c261a5ec864a64983bae63152d39d7"}, @@ -5441,7 +5227,7 @@ tutorials = ["matplotlib", "pandas", "tabulate"] name = "typer" version = "0.9.0" description = "Typer, build great CLIs. Easy to code. Based on Python type hints." -optional = false +optional = true python-versions = ">=3.6" files = [ {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"}, @@ -5820,7 +5606,7 @@ anyio = ">=3.0.0" name = "websocket-client" version = "1.7.0" description = "WebSocket client for Python with low level API options" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "websocket-client-1.7.0.tar.gz", hash = "sha256:10e511ea3a8c744631d3bd77e61eb17ed09304c413ad42cf6ddfa4c7787e8fe6"}, @@ -6097,7 +5883,7 @@ multidict = ">=4.0" name = "zipp" version = "3.17.0" description = "Backport of pathlib-compatible object wrapper for zip files" -optional = false +optional = true python-versions = ">=3.8" files = [ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"}, @@ -6109,10 +5895,17 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -chroma = ["chromadb"] -pgvector = ["asyncpg", "pgvector", "psycopg2-binary", "sqlalchemy"] +chroma = ["llama-index-vector-stores-chroma"] +local = ["llama-index-embeddings-huggingface", "llama-index-llms-llama-cpp"] +ollama = ["llama-index-llms-ollama"] +openai = ["llama-index-embeddings-openai", "llama-index-llms-openai"] +openai-like = ["llama-index-llms-openai-like"] +postgres = ["llama-index-vector-stores-postgres"] +qdrant = ["llama-index-vector-stores-qdrant"] +sagemaker = ["boto3"] +ui = ["gradio"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "61b133c46c9589c72010ebc29989fe6a01df7ce80f1b49e65dbfb5b1cf759a27" +content-hash = "c2e3a4c948a9a49cc11ea085a20dc82b73dc0516fefa11ee941389b6f1ca1f3e" diff --git a/private_gpt/components/embedding/custom/sagemaker.py b/private_gpt/components/embedding/custom/sagemaker.py index a1dddd0a4..be6b296db 100644 --- a/private_gpt/components/embedding/custom/sagemaker.py +++ b/private_gpt/components/embedding/custom/sagemaker.py @@ -3,7 +3,7 @@ from typing import Any import boto3 -from llama_index.embeddings.base import BaseEmbedding +from llama_index.core.base.embeddings.base import BaseEmbedding from pydantic import Field, PrivateAttr diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index 182b8184c..a5bf56f57 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -19,23 +19,37 @@ def __init__(self, settings: Settings) -> None: logger.info("Initializing the embedding model in mode=%s", embedding_mode) match embedding_mode: case "local": - from llama_index.embeddings.huggingface 
import HuggingFaceEmbedding
+ try:
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ except ImportError as e:
+ raise ImportError(
+ "Local dependencies not found, install with `poetry install --extras local`"
+ ) from e
self.embedding_model = HuggingFaceEmbedding(
model_name=settings.local.embedding_hf_model_name,
cache_folder=str(models_cache_path),
)
case "sagemaker":
-
- from private_gpt.components.embedding.custom.sagemaker import (
- SagemakerEmbedding,
- )
+ try:
+ from private_gpt.components.embedding.custom.sagemaker import (
+ SagemakerEmbedding,
+ )
+ except ImportError as e:
+ raise ImportError(
+ "Sagemaker dependencies not found, install with `poetry install --extras sagemaker`"
+ ) from e
self.embedding_model = SagemakerEmbedding(
endpoint_name=settings.sagemaker.embedding_endpoint_name,
)
case "openai":
- from llama_index.embeddings.openai import OpenAIEmbedding
+ try:
+ from llama_index.embeddings.openai import OpenAIEmbedding
+ except ImportError as e:
+ raise ImportError(
+ "OpenAI dependencies not found, install with `poetry install --extras openai`"
+ ) from e
openai_settings = settings.openai.api_key
self.embedding_model = OpenAIEmbedding(api_key=openai_settings)
diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index b2ffd0489..d9d5eceb1 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -32,7 +32,12 @@ def __init__(self, settings: Settings) -> None:
logger.info("Initializing the LLM in mode=%s", llm_mode)
match settings.llm.mode:
case "local":
- from llama_index.llms.llama_cpp import LlamaCPP
+ try:
+ from llama_index.llms.llama_cpp import LlamaCPP
+ except ImportError as e:
+ raise ImportError(
+ "Local dependencies not found, install with `poetry install --extras local`"
+ ) from e
prompt_style = get_prompt_style(settings.local.prompt_style)
@@ -52,7 +57,12 @@ def __init__(self, settings: Settings) -> None:
)
case "sagemaker":
- from private_gpt.components.llm.custom.sagemaker import SagemakerLLM
+ try:
+ from private_gpt.components.llm.custom.sagemaker import SagemakerLLM
+ except ImportError as e:
+ raise ImportError(
+ "Sagemaker dependencies not found, install with `poetry install --extras sagemaker`"
+ ) from e
self.llm = SagemakerLLM(
endpoint_name=settings.sagemaker.llm_endpoint_name,
@@ -60,7 +70,12 @@ def __init__(self, settings: Settings) -> None:
context_window=settings.llm.context_window,
)
case "openai":
- from llama_index.llms.openai import OpenAI
+ try:
+ from llama_index.llms.openai import OpenAI
+ except ImportError as e:
+ raise ImportError(
+ "OpenAI dependencies not found, install with `poetry install --extras openai`"
+ ) from e
openai_settings = settings.openai
self.llm = OpenAI(
@@ -69,7 +84,12 @@ def __init__(self, settings: Settings) -> None:
api_key=openai_settings.api_key,
model=openai_settings.model,
)
case "openailike":
- from llama_index.llms.openai_like import OpenAILike
+ try:
+ from llama_index.llms.openai_like import OpenAILike
+ except ImportError as e:
+ raise ImportError(
+ "OpenAILike dependencies not found, install with `poetry install --extras openai-like`"
+ ) from e
openai_settings = settings.openai
self.llm = OpenAILike(
@@ -80,12 +100,17 @@ def __init__(self, settings: Settings) -> None:
max_tokens=None,
api_version="",
)
- case "mock":
- self.llm = MockLLM()
case "ollama":
- from llama_index.llms.ollama import Ollama
+ try:
+ from llama_index.llms.ollama import Ollama
+ except ImportError as e:
+ raise ImportError(
+
"Ollama dependencies not found, install with `poetry install --extras ollama`" + ) from e ollama_settings = settings.ollama self.llm = Ollama( model=ollama_settings.model, base_url=ollama_settings.api_base ) + case "mock": + self.llm = MockLLM() diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py index 0e965f8b4..ca60b4b44 100644 --- a/private_gpt/components/vector_store/vector_store_component.py +++ b/private_gpt/components/vector_store/vector_store_component.py @@ -5,7 +5,6 @@ from llama_index.core.indices.vector_store import VectorIndexRetriever, VectorStoreIndex from llama_index.core.vector_stores.types import VectorStore -from private_gpt.components.vector_store.batched_chroma import BatchedChromaVectorStore from private_gpt.open_ai.extensions.context_filter import ContextFilter from private_gpt.paths import local_data_path from private_gpt.settings.settings import Settings @@ -40,7 +39,12 @@ class VectorStoreComponent: def __init__(self, settings: Settings) -> None: match settings.vectorstore.database: case "pgvector": - from llama_index.vector_stores.postgres import PGVectorStore + try: + from llama_index.vector_stores.postgres import PGVectorStore + except ImportError as e: + raise ImportError( + "Postgres dependencies not found, install with `poetry install --extras postgres`" + ) from e if settings.pgvector is None: raise ValueError( @@ -56,15 +60,15 @@ def __init__(self, settings: Settings) -> None: case "chroma": try: + from private_gpt.components.vector_store.batched_chroma import \ + BatchedChromaVectorStore import chromadb # type: ignore from chromadb.config import ( # type: ignore Settings as ChromaSettings, ) except ImportError as e: raise ImportError( - "'chromadb' is not installed." - "To use PrivateGPT with Chroma, install the 'chroma' extra." 
- "`poetry install --extras chroma`"
+ "ChromaDB dependencies not found, install with `poetry install --extras chroma`"
) from e
chroma_settings = ChromaSettings(anonymized_telemetry=False)
@@ -84,8 +88,13 @@ def __init__(self, settings: Settings) -> None:
)
case "qdrant":
- from llama_index.vector_stores.qdrant import QdrantVectorStore
- from qdrant_client import QdrantClient
+ try:
+ from llama_index.vector_stores.qdrant import QdrantVectorStore
+ from qdrant_client import QdrantClient
+ except ImportError as e:
+ raise ImportError(
+ "Qdrant dependencies not found, install with `poetry install --extras qdrant`"
+ ) from e
if settings.qdrant is None:
logger.info(
diff --git a/private_gpt/launcher.py b/private_gpt/launcher.py
index 3857a8cdc..1be2b838a 100644
--- a/private_gpt/launcher.py
+++ b/private_gpt/launcher.py
@@ -52,7 +52,12 @@ async def bind_injector_to_request(request: Request) -> None:
if settings.ui.enabled:
logger.debug("Importing the UI module")
- from private_gpt.ui.ui import PrivateGptUi
+ try:
+ from private_gpt.ui.ui import PrivateGptUi
+ except ImportError as e:
+ raise ImportError(
+ "UI dependencies not found, install with `poetry install --extras ui`"
+ ) from e
ui = root_injector.get(PrivateGptUi)
ui.mount_in_app(app, settings.ui.path)
diff --git a/pyproject.toml b/pyproject.toml
index 59f8ff701..e495c3d6f 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -6,29 +6,42 @@ authors = ["Zylon "]
[tool.poetry.dependencies]
python = ">=3.11,<3.12"
+# PrivateGPT
fastapi = { extras = ["all"], version = "^0.110.0" }
-boto3 = "^1.34.51"
+python-multipart = "^0.0.9"
injector = "^0.21.0"
pyyaml = "^6.0.1"
-python-multipart = "^0.0.9"
+watchdog = "^4.0.0"
+transformers = "^4.38.1"
+# LlamaIndex core libs
llama-index-core = "^0.10.13"
llama-index-readers-file = "^0.1.6"
-llama-index-embeddings-huggingface = "^0.1.4"
-llama-index-embeddings-openai = "^0.1.6"
-llama-index-vector-stores-qdrant = "^0.1.3"
-llama-index-vector-stores-chroma = "^0.1.4"
-llama-index-llms-llama-cpp = "^0.1.3"
-llama-index-llms-openai = "^0.1.6"
-llama-index-llms-openai-like = "^0.1.3"
-llama-index-llms-ollama = "^0.1.2"
-llama-index-vector-stores-postgres = "^0.1.2"
-watchdog = "^4.0.0"
-qdrant-client = "^1.7.3"
-chromadb = {version = "^0.4.13", optional = true}
-asyncpg = {version = "^0.29.0", optional = true}
-pgvector = {version = "^0.2.5", optional = true}
-psycopg2-binary = {version = "^2.9.9", optional = true}
-sqlalchemy = {version = "^2.0.27", optional = true}
+# Optional LlamaIndex integration libs
+llama-index-llms-llama-cpp = {version = "^0.1.3", optional = true}
+llama-index-llms-openai = {version = "^0.1.6", optional = true}
+llama-index-llms-openai-like = {version = "^0.1.3", optional = true}
+llama-index-llms-ollama = {version = "^0.1.2", optional = true}
+llama-index-embeddings-huggingface = {version = "^0.1.4", optional = true}
+llama-index-embeddings-openai = {version = "^0.1.6", optional = true}
+llama-index-vector-stores-qdrant = {version = "^0.1.3", optional = true}
+llama-index-vector-stores-chroma = {version = "^0.1.4", optional = true}
+llama-index-vector-stores-postgres = {version = "^0.1.2", optional = true}
+# Optional Sagemaker dependency
+boto3 = {version = "^1.34.51", optional = true}
+# Optional UI
+gradio = {version = "^4.19.2", optional = true}
+
+[tool.poetry.extras]
+ui = ["gradio"]
+local = ["llama-index-llms-llama-cpp", "llama-index-embeddings-huggingface"]
+openai = ["llama-index-llms-openai", "llama-index-embeddings-openai"]
+openai-like = ["llama-index-llms-openai-like"]
+ollama = ["llama-index-llms-ollama"]
+sagemaker = ["boto3"]
+qdrant = ["llama-index-vector-stores-qdrant"]
+chroma = ["llama-index-vector-stores-chroma"]
+postgres = ["llama-index-vector-stores-postgres"]
+
[tool.poetry.group.dev.dependencies]
black = "^22"
@@ -40,26 +53,6 @@ ruff = "^0"
pytest-asyncio = "^0.21.1"
types-pyyaml = "^6.0.12.12"
-# Dependencies for gradio UI
-[tool.poetry.group.ui]
-optional = true
-[tool.poetry.group.ui.dependencies]
-gradio = "^4.19.0"
-
-[tool.poetry.group.local]
-optional = true
-[tool.poetry.group.local.dependencies]
-llama-cpp-python = "^0.2.23"
-numpy = "1.26.0"
-sentence-transformers = "^2.2.2"
-# https://stackoverflow.com/questions/76327419/valueerror-libcublas-so-0-9-not-found-in-the-system-path
-torch = ">=2.0.0, !=2.0.1, !=2.1.0"
-transformers = "^4.34.0"
-
-[tool.poetry.extras]
-chroma = ["chromadb"]
-pgvector = ["sqlalchemy", "pgvector", "psycopg2-binary", "asyncpg"]
-
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

From 8c390812ff86de3066a20d5c4cfec8b39ec55b32 Mon Sep 17 00:00:00 2001
From: imartinez
Date: Thu, 29 Feb 2024 14:48:55 +0100
Subject: [PATCH 03/15] Documentation updates and default settings reviewed

---
 fern/docs.yml | 6 +-
 fern/docs/pages/installation/concepts.mdx | 56 +++++++
 fern/docs/pages/installation/installation.mdx | 153 +++++++++++-------
 fern/docs/pages/manual/llms.mdx | 24 +++
 fern/docs/pages/overview/quickstart.mdx | 21 ---
 fern/docs/pages/overview/welcome.mdx | 18 ++-
 settings-local.yaml | 19 +++
 settings-ollama.yaml | 24 +++
 settings-sagemaker.yaml | 3 +
 settings.yaml | 1 -
 10 files changed, 233 insertions(+), 92 deletions(-)
 create mode 100644 fern/docs/pages/installation/concepts.mdx
 delete mode 100644 fern/docs/pages/overview/quickstart.mdx
 create mode 100644 settings-ollama.yaml

diff --git a/fern/docs.yml b/fern/docs.yml
index 67021673b..c22784dd2 100644
--- a/fern/docs.yml
+++ b/fern/docs.yml
@@ -30,15 +30,15 @@ navigation:
layout:
- section: Welcome
contents:
- - page: Welcome
+ - page: Introduction
path: ./docs/pages/overview/welcome.mdx
- - page: Quickstart
- path: ./docs/pages/overview/quickstart.mdx
# How to install privateGPT, with FAQ and troubleshooting
- tab: installation
layout:
- section: Getting started
contents:
+ - page: Main Concepts
+ path: ./docs/pages/installation/concepts.mdx
- page: Installation
path: ./docs/pages/installation/installation.mdx
# Manual of privateGPT: how to use it and configure it
diff --git a/fern/docs/pages/installation/concepts.mdx b/fern/docs/pages/installation/concepts.mdx
new file mode 100644
index 000000000..cb26ed7f2
--- /dev/null
+++ b/fern/docs/pages/installation/concepts.mdx
@@ -0,0 +1,56 @@
+PrivateGPT is a service that wraps a set of AI RAG primitives in a comprehensive set of APIs, providing a private, secure, customizable, and easy-to-use GenAI development framework.
+
+It uses FastAPI and LlamaIndex as its core frameworks. Those can be customized by changing the codebase itself.
+
+It supports a variety of LLM providers, embeddings providers, and vector stores, both local and remote. Those can be easily changed without changing the codebase.
+
+# Supported Setups
+
+## Setup configurations available
+You get to decide the setup for these three main components:
+- LLM: the large language model provider used for inference. It can be local or remote, or even OpenAI.
+- Embeddings: the embeddings provider used to encode the input, the documents, and the users' queries.
Same as the LLM, it can be local or remote, or even OpenAI.
+- Vector store: the store used to index and retrieve the documents.
+
+There is an extra component that can be enabled or disabled: the UI. It is a Gradio UI that allows you to interact with the API in a more user-friendly way.
+
+### Setups and Dependencies
+Your setup will be the combination of the different options available. You'll find recommended setups in the [installation](/installation) section.
+PrivateGPT uses Poetry to manage its dependencies. You can install the dependencies for the different setups by running `poetry install --extras "<extra1> <extra2>..."`.
+Extras are the different options available for each component. For example, to install the dependencies for a local setup with the UI and Qdrant as the vector database, you would run `poetry install --extras "ui local qdrant"`.
+Refer to the [installation](/installation) section for more details.
+
+### Setups and Configuration
+PrivateGPT uses YAML to define its configuration in files named `settings-<profile>.yaml`.
+Different configuration files can be created in the root directory of the project.
+PrivateGPT will load the configuration at startup from the profile specified in the `PGPT_PROFILES` environment variable.
+For example, running:
+```bash
+PGPT_PROFILES=ollama make run
+```
+will load the configuration from `settings.yaml` and `settings-ollama.yaml`.
+- `settings.yaml` is always loaded and contains the default configuration.
+- `settings-ollama.yaml` is loaded if the `ollama` profile is specified in the `PGPT_PROFILES` environment variable. It can override configuration from the default `settings.yaml`.
+
+## About Fully Local Setups
+In order to run PrivateGPT in a fully local setup, you will need to run the LLM, the embeddings, and the vector store locally.
+### Vector stores
+All three supported vector stores (Qdrant, ChromaDB, and Postgres) run locally by default.
+### Embeddings
+For local embeddings you need to install the 'local' extra dependencies. PrivateGPT will then use HuggingFace embeddings.
+
+Note: Ollama is expected to support embeddings in the short term, which will make installation easier, but it doesn't as of today.
+
+In order for local embeddings to work, you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
+```bash
+poetry run python scripts/setup
+```
+### LLM
+For a local LLM there are two options:
+* (Recommended) You can use the 'ollama' option in PrivateGPT, which will connect to your local Ollama instance. Ollama greatly simplifies the installation of local LLMs.
+* You can use the 'local' option in PrivateGPT, which will use LlamaCPP. It works great on Mac with Metal most of the time (it leverages the Metal GPU), but it can be tricky on certain Linux and Windows distributions, depending on the GPU. In the installation document you'll find guides and troubleshooting.
+
+In order for the local LLM to work (the second option), you need to download the LLM model to the `models` folder. You can do so by running the `setup` script:
+```bash
+poetry run python scripts/setup
+```
\ No newline at end of file
diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx
index c45bb0db0..da693ffdb 100644
--- a/fern/docs/pages/installation/installation.mdx
+++ b/fern/docs/pages/installation/installation.mdx
@@ -1,8 +1,8 @@
-## Installation and Settings
+It is important that you review the Main Concepts before you start the installation process.
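To make the profile mechanics above concrete, here is a minimal sketch of what a profile override file could look like. The key names (`llm.mode`, `vectorstore.database`, `ollama.model`) are taken from the settings referenced elsewhere in this patch; the values are illustrative placeholders, and the `settings-ollama.yaml` file added by this commit remains the source of truth.

```yaml
# Minimal sketch of a profile override file such as settings-ollama.yaml.
# It is loaded on top of settings.yaml when PGPT_PROFILES=ollama is set,
# and only needs to contain the keys being overridden.
llm:
  mode: ollama       # switch the LLM component to the Ollama provider
vectorstore:
  database: qdrant   # illustrative choice; any supported vector store works
ollama:
  model: mistral     # placeholder model name served by your local Ollama
```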
-### Base requirements to run PrivateGPT
+## Base requirements to run PrivateGPT
-* Git clone PrivateGPT repository, and navigate to it:
+* Clone the PrivateGPT repository and navigate to it:
```bash
git clone https://github.com/imartinez/privateGPT
cd privateGPT
```
@@ -21,93 +21,128 @@ pyenv local 3.11
```
* Install [Poetry](https://python-poetry.org/docs/#installing-with-the-official-installer) for dependency management:
-* Have a valid C++ compiler like gcc. See [Troubleshooting: C++ Compiler](#troubleshooting-c-compiler) for more details.
-
-* Install `make` for scripts:
+* Install `make` to be able to run the different scripts:
* osx: (Using homebrew): `brew install make`
* windows: (Using chocolatey) `choco install make`
-### Install dependencies
+## Install and run your desired setup
+
+PrivateGPT allows you to customize the setup, from fully local to cloud-based, by deciding which modules to use.
+Here are the different options available:
+
+- LLM: "local" (uses LlamaCPP), "ollama", "sagemaker", "openai", "openailike"
+- Embeddings: "local" (uses HuggingFace embeddings), "openai", "sagemaker"
+- Vector stores: "qdrant", "chroma", "postgres"
+- UI: whether to enable the UI (Gradio) or just go with the API
-Install the dependencies:
+To install only the required dependencies, PrivateGPT offers different `extras` that can be combined during the installation process:
```bash
-poetry install --with ui
+poetry install --extras "<extra1> <extra2>..."
```
-Verify everything is working by running `make run` (or `poetry run python -m private_gpt`) and navigate to
-http://localhost:8001. You should see a [Gradio UI](https://gradio.app/) **configured with a mock LLM** that will
-echo back the input. Below we'll see how to configure a real LLM.
+Where `<extra>` can be any of the following:
+
+- ui: adds support for UI using Gradio
+- local: adds support for local LLM and embeddings using LlamaCPP (expect a messy installation process on some platforms)
+- openai: adds support for OpenAI LLM and Embeddings, requires an OpenAI API key
+- sagemaker: adds support for Amazon Sagemaker LLM and Embeddings, requires Sagemaker inference endpoints
+- ollama: adds support for Ollama LLM, the easiest way to get a local LLM running
+- openai-like: adds support for 3rd party LLM providers that are compatible with OpenAI's API
+- qdrant: adds support for Qdrant vector store
+- chroma: adds support for Chroma DB vector store
+- postgres: adds support for Postgres vector store
+
+## Recommended Setups
-### Settings
+These are just some examples of recommended setups. You can mix and match the different options to fit your needs.
+You'll find more information in the Manual section of the documentation.
-
-The default settings of PrivateGPT should work out-of-the-box for a 100% local setup. **However**, as is, it runs exclusively on your CPU.
-Skip this section if you just want to test PrivateGPT locally, and come back later to learn about more configuration options (and have better performances).
-
+### Local, Ollama-powered setup
-
+The easiest way to run PrivateGPT fully locally is to depend on Ollama for the LLM. Ollama provides a local LLM that is easy to install and use.
+
+Go to [ollama.ai](https://ollama.ai/) and follow the instructions to install Ollama on your machine.
+
+Once done, you can install PrivateGPT with the following command:
+```bash
+poetry install --extras "ui local ollama qdrant"
+```
-### Local LLM requirements
+We are installing the "local" extra to support local embeddings, because Ollama doesn't support embeddings just yet (they are working on it).
+In order for local embeddings to work, you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
+```bash
+poetry run python scripts/setup
+```
-Install extra dependencies for local execution:
+Once installed, you can run PrivateGPT. Make sure you have a working Ollama running locally before running the following command.
```bash
-poetry install --with local
+PGPT_PROFILES=ollama make run
```
-For PrivateGPT to run fully locally GPU acceleration is required
-(CPU execution is possible, but very slow), however,
-typical Macbook laptops or window desktops with mid-range GPUs lack VRAM to run
-even the smallest LLMs. For that reason
-**local execution is only supported for models compatible with [llama.cpp](https://github.com/ggerganov/llama.cpp)**
+PrivateGPT will use the already existing `settings-ollama.yaml` settings file, which is already configured to use Ollama LLM, local embeddings, and Qdrant. Review it and adapt it to your needs (different LLM model, different Ollama port, etc.).
+
+The UI will be available at http://localhost:8001
+
+### Private, Sagemaker-powered setup
-These two models are known to work well:
+If you need more performance, you can run a version of PrivateGPT that relies on powerful AWS Sagemaker machines to serve the LLM and Embeddings.
-* https://huggingface.co/TheBloke/Llama-2-7B-chat-GGUF
-* https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF (recommended)
+You need to have access to Sagemaker inference endpoints for the LLM and/or the embeddings, and have AWS credentials properly configured.
-To ease the installation process, use the `setup` script that will download both
-the embedding and the LLM model and place them in the correct location (under `models` folder):
+Edit the `settings-sagemaker.yaml` file to include the correct Sagemaker endpoints.
+Then, install PrivateGPT with the following command:
```bash
-poetry run python scripts/setup
+poetry install --extras "ui sagemaker qdrant"
```
-If you are ok with CPU execution, you can skip the rest of this section.
+Once installed, you can run PrivateGPT. Make sure your Sagemaker endpoints are reachable and your AWS credentials are configured before running the following command.
-As stated before, llama.cpp is required and in
-particular [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)
-is used.
+```bash
+PGPT_PROFILES=sagemaker make run
+```
-> It's highly encouraged that you fully read llama-cpp and llama-cpp-python documentation relevant to your platform.
-> Running into installation issues is very likely, and you'll need to troubleshoot them yourself.
+PrivateGPT will use the already existing `settings-sagemaker.yaml` settings file, which is already configured to use Sagemaker LLM and Embeddings endpoints, and Qdrant.
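For reference, a Sagemaker profile boils down to a handful of settings. The sketch below is an assumption pieced together from the component code in this patch (`settings.sagemaker.llm_endpoint_name`, `settings.sagemaker.embedding_endpoint_name`, and the "sagemaker" LLM and embedding modes); the endpoint names are placeholders, and the actual `settings-sagemaker.yaml` added by this commit is the source of truth.

```yaml
# Illustrative sketch of a Sagemaker profile; endpoint names are placeholders.
llm:
  mode: sagemaker
embedding:
  mode: sagemaker    # key name inferred from the embedding component's mode handling
sagemaker:
  llm_endpoint_name: your-llm-endpoint-name
  embedding_endpoint_name: your-embedding-endpoint-name
```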
-#### Customizing low level parameters +The UI will be available at http://localhost:8001 -Currently, not all the parameters of `llama.cpp` and `llama-cpp-python` are available at PrivateGPT's `settings.yaml` file. -In case you need to customize parameters such as the number of layers loaded into the GPU, you might change -these at the `llm_component.py` file under the `private_gpt/components/llm/llm_component.py`. +### Local, Llama-CPP powered setup -##### Available LLM config options +If you want to run PrivateGPT fully locally without relying on Ollama, you can run the following command: -The `llm` section of the settings allows for the following configurations: +```bash +poetry install --extras "ui local qdrant" +``` -- `mode`: how to run your llm -- `max_new_tokens`: this lets you configure the number of new tokens the LLM will generate and add to the context window (by default Llama.cpp uses `256`) +In order for local LLM and embeddings to work, you need to download the models to the `models` folder. You can do so by running the `setup` script: +```bash +poetry run python scripts/setup +``` -Example: +Once installed, you can run PrivateGPT with the following command: -```yaml -llm: - mode: local - max_new_tokens: 256 +```bash +PGPT_PROFILES=local make run ``` -If you are getting an out of memory error, you might also try a smaller model or stick to the proposed -recommended models, instead of custom tuning the parameters. +PrivateGPT will load the already existing `settings-local.yaml` file, which is already configured to use LlamaCPP and Qdrant. + +The UI will be available at http://localhost:8001 + +#### Llama-CPP support + +For PrivateGPT to run fully locally without Ollama, Llama.cpp is required and in +particular [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) +is used. + +You'll need to have a valid C++ compiler like gcc installed. See [Troubleshooting: C++ Compiler](#troubleshooting-c-compiler) for more details. + +> It's highly encouraged that you fully read llama-cpp and llama-cpp-python documentation relevant to your platform. +> Running into installation issues is very likely, and you'll need to troubleshoot them yourself. -#### OSX GPU support +##### Llama-CPP OSX GPU support You will need to build [llama.cpp](https://github.com/ggerganov/llama.cpp) with metal support. @@ -127,7 +162,7 @@ More information is available in the documentation of the libraries themselves: * [llama-cpp-python's documentation](https://llama-cpp-python.readthedocs.io/en/latest/#installation-with-hardware-acceleration) * [llama.cpp](https://github.com/ggerganov/llama.cpp#build) -#### Windows NVIDIA GPU support +##### Llama-CPP Windows NVIDIA GPU support Windows GPU support is done through CUDA. Follow the instructions on the original [llama.cpp](https://github.com/ggerganov/llama.cpp) repo to install the required @@ -160,7 +195,7 @@ Note that llama.cpp offloads matrix calculations to the GPU but the performance still hit heavily due to latency between CPU and GPU communication. You might need to tweak batch sizes and other parameters to get the best performance for your particular system. -#### Linux NVIDIA GPU support and Windows-WSL +##### Llama-CPP Linux NVIDIA GPU support and Windows-WSL Linux GPU support is done through CUDA. 
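Before following the build instructions below, it can save time to confirm that the CUDA toolkit and driver are actually visible on the machine. A quick sanity check (both commands ship with a standard CUDA toolkit and driver install):

```bash
# Both should succeed and report compatible versions before attempting
# a CUDA-enabled llama-cpp-python build.
nvcc --version   # CUDA compiler, part of the CUDA toolkit
nvidia-smi       # driver version and GPU visibility
```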
Follow the instructions on the original [llama.cpp](https://github.com/ggerganov/llama.cpp) repo to install the required
@@ -188,7 +223,7 @@ llama_new_context_with_model: total VRAM used: 4857.93 MB (model: 4095.05 MB, co
 AVX = 1 | AVX2 = 1 | AVX512 = 0 | AVX512_VBMI = 0 | AVX512_VNNI = 0 | FMA = 1 | NEON = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 0 | VSX = 0 |
 ```
 
-### Known issues and Troubleshooting
+##### Llama-CPP Known issues and Troubleshooting
 
 Execution of LLMs locally still has a lot of sharp edges, especially when running on non-Linux platforms.
 You might encounter several issues:
@@ -205,7 +240,7 @@ If, during your installation, something does not go as planned, retry in *verbos
 
 For example, when installing packages with `pip install`, you can add the option `-vvv` to show the details of the installation.
 
-#### Troubleshooting: C++ Compiler
+##### Llama-CPP Troubleshooting: C++ Compiler
 
 If you encounter an error while building a wheel during the `pip install` process, you may need to install a C++
 compiler on your computer.
@@ -227,7 +262,7 @@ To install a C++ compiler on Windows 10/11, follow these steps:
    Store and search for Xcode and install it. **Or** you can install the command line tools by running `xcode-select --install`.
 2. If not, you can install clang or gcc with homebrew `brew install gcc`
 
-#### Troubleshooting: Mac Running Intel
+##### Llama-CPP Troubleshooting: Mac Running Intel
 
 When running a Mac with Intel hardware (not M1), you may run into _clang: error: the clang compiler does not support '-march=native'_ during pip install.
 
diff --git a/fern/docs/pages/manual/llms.mdx b/fern/docs/pages/manual/llms.mdx
index 7445bff19..3013a0e62 100644
--- a/fern/docs/pages/manual/llms.mdx
+++ b/fern/docs/pages/manual/llms.mdx
@@ -25,6 +25,30 @@ When the server is started it will print a log *Application startup complete*.
 Navigate to http://localhost:8001 to use the Gradio UI or to http://localhost:8001/docs (API section) to try the API
 using Swagger UI.
 
+#### Customizing low level parameters
+
+Currently, not all the parameters of `llama.cpp` and `llama-cpp-python` are available in PrivateGPT's `settings.yaml` file.
+In case you need to customize parameters such as the number of layers loaded into the GPU, you might change
+these in `llm_component.py` (`private_gpt/components/llm/llm_component.py`).
+
+##### Available LLM config options
+
+The `llm` section of the settings allows for the following configurations:
+
+- `mode`: how to run your LLM
+- `max_new_tokens`: this lets you configure the number of new tokens the LLM will generate and add to the context window (by default Llama.cpp uses `256`)
+
+Example:
+
+```yaml
+llm:
+  mode: local
+  max_new_tokens: 256
+```
+
+If you are getting an out of memory error, you might also try a smaller model or stick to the
+recommended models, instead of custom-tuning the parameters.
+
 ### Using OpenAI
 
 If you cannot run a local model (because you don't have a GPU, for example) or for testing purposes, you may
diff --git a/fern/docs/pages/overview/quickstart.mdx b/fern/docs/pages/overview/quickstart.mdx
deleted file mode 100644
index d22aff1a3..000000000
--- a/fern/docs/pages/overview/quickstart.mdx
+++ /dev/null
@@ -1,21 +0,0 @@
-## Local Installation steps
-
-The steps in [Installation](/installation) section are better explained and cover more
-setup scenarios (macOS, Windows, Linux).
-But if you like one-liners, have python3.11 installed, and you are running a UNIX (macOS or Linux) -system, you can get up and running on CPU in few lines: - -```bash -git clone https://github.com/imartinez/privateGPT && cd privateGPT && \ -python3.11 -m venv .venv && source .venv/bin/activate && \ -pip install --upgrade pip poetry && poetry install --with ui,local && ./scripts/setup - -# Launch the privateGPT API server **and** the gradio UI -poetry run python3.11 -m private_gpt - -# In another terminal, create a new browser window on your private GPT! -open http://127.0.0.1:8001/ -``` - -The above is not working, or it is too slow, so **you want to run it on GPU(s)**? -Please check the more detailed [installation guide](/installation). diff --git a/fern/docs/pages/overview/welcome.mdx b/fern/docs/pages/overview/welcome.mdx index 8507e33bd..b825a6548 100644 --- a/fern/docs/pages/overview/welcome.mdx +++ b/fern/docs/pages/overview/welcome.mdx @@ -1,20 +1,19 @@ -## Introduction 👋 - PrivateGPT provides an **API** containing all the building blocks required to build **private, context-aware AI applications**. The API follows and extends OpenAI API standard, and supports both normal and streaming responses. That means that, if you can use OpenAI API in one of your tools, you can use your own PrivateGPT API instead, -with no code changes, **and for free** if you are running privateGPT in `local` mode. - -Looking for the installation quickstart? [Quickstart installation guide for Linux and macOS](/overview/welcome/quickstart). - -Do you want to install it on Windows? Or do you want to take full advantage of your hardware for better performances? -The installation guide will help you in the [Installation section](/installation). +with no code changes, **and for free** if you are running privateGPT in a `local` setup. +Get started by understanding the [Main Concepts and Installation](/installation) and then dive into the [API Reference](/api-reference). ## Frequently Visited Resources + +
+ + A working **Gradio UI client** is provided to test the API, together with a set of useful tools such as bulk model download script, ingestion script, documents folder watch, etc. diff --git a/settings-local.yaml b/settings-local.yaml index d4c7f1bd8..8ba8a6861 100644 --- a/settings-local.yaml +++ b/settings-local.yaml @@ -3,3 +3,22 @@ server: llm: mode: local + # Should be matching the selected model + max_new_tokens: 512 + context_window: 3900 + tokenizer: mistralai/Mistral-7B-Instruct-v0.2 + +embedding: + mode: local + +vectorstore: + database: qdrant + +qdrant: + path: local_data/private_gpt/qdrant + +local: + prompt_style: "mistral" + llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF + llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf + embedding_hf_model_name: BAAI/bge-small-en-v1.5 \ No newline at end of file diff --git a/settings-ollama.yaml b/settings-ollama.yaml new file mode 100644 index 000000000..b5060f6cc --- /dev/null +++ b/settings-ollama.yaml @@ -0,0 +1,24 @@ +server: + env_name: ${APP_ENV:ollama} + +llm: + mode: ollama + max_new_tokens: 512 + context_window: 3900 + +ollama: + model: llama2 + api_base: http://localhost:11434 + +embedding: + mode: local + +vectorstore: + database: qdrant + +qdrant: + path: local_data/private_gpt/qdrant + +local: + prompt_style: "llama2" + embedding_hf_model_name: BAAI/bge-small-en-v1.5 \ No newline at end of file diff --git a/settings-sagemaker.yaml b/settings-sagemaker.yaml index 774b8cb39..face354df 100644 --- a/settings-sagemaker.yaml +++ b/settings-sagemaker.yaml @@ -9,6 +9,9 @@ ui: llm: mode: sagemaker +embedding: + mode: sagemaker + sagemaker: llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140 embedding_endpoint_name: huggingface-pytorch-inference-2023-11-03-07-41-36-479 \ No newline at end of file diff --git a/settings.yaml b/settings.yaml index b9dcbaa78..32147c87d 100644 --- a/settings.yaml +++ b/settings.yaml @@ -34,7 +34,6 @@ ui: delete_file_button_enabled: true delete_all_files_button_enabled: true - llm: mode: local # Should be matching the selected model From 34d48d7b4d7f7c3a31fa954adc507992ea9df9a6 Mon Sep 17 00:00:00 2001 From: imartinez Date: Thu, 29 Feb 2024 14:50:47 +0100 Subject: [PATCH 04/15] Format fixes --- private_gpt/components/ingest/ingest_helper.py | 11 +++++------ private_gpt/components/llm/llm_component.py | 3 +-- .../components/vector_store/vector_store_component.py | 6 ++++-- private_gpt/launcher.py | 2 +- private_gpt/server/chat/chat_service.py | 1 + 5 files changed, 12 insertions(+), 11 deletions(-) diff --git a/private_gpt/components/ingest/ingest_helper.py b/private_gpt/components/ingest/ingest_helper.py index 15920c3b2..9178e783d 100644 --- a/private_gpt/components/ingest/ingest_helper.py +++ b/private_gpt/components/ingest/ingest_helper.py @@ -1,6 +1,5 @@ import logging from pathlib import Path -from typing import Dict, Type from llama_index.core.readers import StringIterableReader from llama_index.core.readers.base import BaseReader @@ -11,7 +10,7 @@ # Inspired by the `llama_index.core.readers.file.base` module -def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]: +def _try_loading_included_file_formats() -> dict[str, type[BaseReader]]: try: from llama_index.readers.file.docs import DocxReader, HWPReader, PDFReader from llama_index.readers.file.epub import EpubReader @@ -19,13 +18,13 @@ def _try_loading_included_file_formats() -> Dict[str, Type[BaseReader]]: from llama_index.readers.file.ipynb import IPYNBReader from 
llama_index.readers.file.markdown import MarkdownReader from llama_index.readers.file.mbox import MboxReader - from llama_index.readers.file.tabular import PandasCSVReader from llama_index.readers.file.slides import PptxReader + from llama_index.readers.file.tabular import PandasCSVReader from llama_index.readers.file.video_audio import VideoAudioReader - except ImportError: - raise ImportError("`llama-index-readers-file` package not found") + except ImportError as e: + raise ImportError("`llama-index-readers-file` package not found") from e - default_file_reader_cls: Dict[str, Type[BaseReader]] = { + default_file_reader_cls: dict[str, type[BaseReader]] = { ".hwp": HWPReader, ".pdf": PDFReader, ".docx": DocxReader, diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index d9d5eceb1..5956182da 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -2,15 +2,14 @@ from injector import inject, singleton from llama_index.core.llms import LLM, MockLLM -from llama_index.core.utils import set_global_tokenizer from llama_index.core.settings import Settings as LlamaIndexSettings +from llama_index.core.utils import set_global_tokenizer from transformers import AutoTokenizer # type: ignore from private_gpt.components.llm.prompt_helper import get_prompt_style from private_gpt.paths import models_cache_path, models_path from private_gpt.settings.settings import Settings - logger = logging.getLogger(__name__) diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py index ca60b4b44..b5541d657 100644 --- a/private_gpt/components/vector_store/vector_store_component.py +++ b/private_gpt/components/vector_store/vector_store_component.py @@ -60,12 +60,14 @@ def __init__(self, settings: Settings) -> None: case "chroma": try: - from private_gpt.components.vector_store.batched_chroma import \ - BatchedChromaVectorStore import chromadb # type: ignore from chromadb.config import ( # type: ignore Settings as ChromaSettings, ) + + from private_gpt.components.vector_store.batched_chroma import ( + BatchedChromaVectorStore, + ) except ImportError as e: raise ImportError( "ChromaDB dependencies not found, install with `poetry install --extras chroma`" diff --git a/private_gpt/launcher.py b/private_gpt/launcher.py index 1be2b838a..5cce8c72a 100644 --- a/private_gpt/launcher.py +++ b/private_gpt/launcher.py @@ -6,6 +6,7 @@ from injector import Injector from llama_index.core.callbacks import CallbackManager from llama_index.core.callbacks.global_handlers import create_global_handler +from llama_index.core.settings import Settings as LlamaIndexSettings from private_gpt.server.chat.chat_router import chat_router from private_gpt.server.chunks.chunks_router import chunks_router @@ -14,7 +15,6 @@ from private_gpt.server.health.health_router import health_router from private_gpt.server.ingest.ingest_router import ingest_router from private_gpt.settings.settings import Settings -from llama_index.core.settings import Settings as LlamaIndexSettings logger = logging.getLogger(__name__) diff --git a/private_gpt/server/chat/chat_service.py b/private_gpt/server/chat/chat_service.py index 6b94f352c..4fcd30cb9 100644 --- a/private_gpt/server/chat/chat_service.py +++ b/private_gpt/server/chat/chat_service.py @@ -105,6 +105,7 @@ def _chat_engine( # TODO ContextChatEngine is still not migrated by LlamaIndex to accept # llm directly, so we are passing 
legacy ServiceContext until it is fixed. from llama_index.core import ServiceContext + return ContextChatEngine.from_defaults( system_prompt=system_prompt, retriever=vector_index_retriever, From 63d3b9f93692d00a7957cb6ca2555ba55112edb5 Mon Sep 17 00:00:00 2001 From: imartinez Date: Thu, 29 Feb 2024 15:20:26 +0100 Subject: [PATCH 05/15] Fix mypy --- .../embedding/embedding_component.py | 8 +++++-- .../components/ingest/ingest_helper.py | 24 ++++++++++++------- private_gpt/components/llm/llm_component.py | 8 +++---- .../components/vector_store/batched_chroma.py | 4 ++-- .../vector_store/vector_store_component.py | 8 +++++-- private_gpt/server/ingest/ingest_watcher.py | 7 +++--- pyproject.toml | 3 +++ 7 files changed, 39 insertions(+), 23 deletions(-) diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index a5bf56f57..781f3e934 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -20,7 +20,9 @@ def __init__(self, settings: Settings) -> None: match embedding_mode: case "local": try: - from llama_index.embeddings.huggingface import HuggingFaceEmbedding + from llama_index.embeddings.huggingface import ( # type: ignore + HuggingFaceEmbedding, + ) except ImportError as e: raise ImportError( "Local dependencies not found, install with `poetry install --extras local`" @@ -45,7 +47,9 @@ def __init__(self, settings: Settings) -> None: ) case "openai": try: - from llama_index.embeddings.openai import OpenAIEmbedding + from llama_index.embeddings.openai import ( # type: ignore + OpenAIEmbedding, + ) except ImportError as e: raise ImportError( "OpenAI dependencies not found, install with `poetry install --extras openai`" diff --git a/private_gpt/components/ingest/ingest_helper.py b/private_gpt/components/ingest/ingest_helper.py index 9178e783d..a11090702 100644 --- a/private_gpt/components/ingest/ingest_helper.py +++ b/private_gpt/components/ingest/ingest_helper.py @@ -12,15 +12,21 @@ # Inspired by the `llama_index.core.readers.file.base` module def _try_loading_included_file_formats() -> dict[str, type[BaseReader]]: try: - from llama_index.readers.file.docs import DocxReader, HWPReader, PDFReader - from llama_index.readers.file.epub import EpubReader - from llama_index.readers.file.image import ImageReader - from llama_index.readers.file.ipynb import IPYNBReader - from llama_index.readers.file.markdown import MarkdownReader - from llama_index.readers.file.mbox import MboxReader - from llama_index.readers.file.slides import PptxReader - from llama_index.readers.file.tabular import PandasCSVReader - from llama_index.readers.file.video_audio import VideoAudioReader + from llama_index.readers.file.docs import ( # type: ignore + DocxReader, + HWPReader, + PDFReader, + ) + from llama_index.readers.file.epub import EpubReader # type: ignore + from llama_index.readers.file.image import ImageReader # type: ignore + from llama_index.readers.file.ipynb import IPYNBReader # type: ignore + from llama_index.readers.file.markdown import MarkdownReader # type: ignore + from llama_index.readers.file.mbox import MboxReader # type: ignore + from llama_index.readers.file.slides import PptxReader # type: ignore + from llama_index.readers.file.tabular import PandasCSVReader # type: ignore + from llama_index.readers.file.video_audio import ( # type: ignore + VideoAudioReader, + ) except ImportError as e: raise ImportError("`llama-index-readers-file` package not found") 
from e diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 5956182da..1d14c333e 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -32,7 +32,7 @@ def __init__(self, settings: Settings) -> None: match settings.llm.mode: case "local": try: - from llama_index.llms.llama_cpp import LlamaCPP + from llama_index.llms.llama_cpp import LlamaCPP # type: ignore except ImportError as e: raise ImportError( "Local dependencies not found, install with `poetry install --extras local`" @@ -70,7 +70,7 @@ def __init__(self, settings: Settings) -> None: ) case "openai": try: - from llama_index.llms.openai import OpenAI + from llama_index.llms.openai import OpenAI # type: ignore except ImportError as e: raise ImportError( "OpenAI dependencies not found, install with `poetry install --extras openai`" @@ -84,7 +84,7 @@ def __init__(self, settings: Settings) -> None: ) case "openailike": try: - from llama_index.llms.openai_like import OpenAILike + from llama_index.llms.openai_like import OpenAILike # type: ignore except ImportError as e: raise ImportError( "OpenAILike dependencies not found, install with `poetry install --extras openailike`" @@ -101,7 +101,7 @@ def __init__(self, settings: Settings) -> None: ) case "ollama": try: - from llama_index.llms.ollama import Ollama + from llama_index.llms.ollama import Ollama # type: ignore except ImportError as e: raise ImportError( "Ollama dependencies not found, install with `poetry install --extras ollama`" diff --git a/private_gpt/components/vector_store/batched_chroma.py b/private_gpt/components/vector_store/batched_chroma.py index eac536505..4f9ea25bc 100644 --- a/private_gpt/components/vector_store/batched_chroma.py +++ b/private_gpt/components/vector_store/batched_chroma.py @@ -3,7 +3,7 @@ from llama_index.core.schema import BaseNode, MetadataMode from llama_index.core.vector_stores.utils import node_to_metadata_dict -from llama_index.vector_stores.chroma import ChromaVectorStore +from llama_index.vector_stores.chroma import ChromaVectorStore # type: ignore def chunk_list( @@ -22,7 +22,7 @@ def chunk_list( yield lst[i : i + max_chunk_size] -class BatchedChromaVectorStore(ChromaVectorStore): +class BatchedChromaVectorStore(ChromaVectorStore): # type: ignore """Chroma vector store, batching additions to avoid reaching the max batch limit. In this vector store, embeddings are stored within a ChromaDB collection. 
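These hunks mostly add `# type: ignore` markers for third-party imports that ship without type stubs. After changes like this, the type check can be re-run locally; a sketch of the invocation, assuming the dev dependencies are installed (the project may also expose a make target for this):

```bash
# Re-run static type checking over the package
poetry run mypy private_gpt
```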
diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py index b5541d657..d260ea3e4 100644 --- a/private_gpt/components/vector_store/vector_store_component.py +++ b/private_gpt/components/vector_store/vector_store_component.py @@ -40,7 +40,9 @@ def __init__(self, settings: Settings) -> None: match settings.vectorstore.database: case "pgvector": try: - from llama_index.vector_stores.postgres import PGVectorStore + from llama_index.vector_stores.postgres import ( # type: ignore + PGVectorStore, + ) except ImportError as e: raise ImportError( "Postgres dependencies not found, install with `poetry install --extras postgres`" @@ -91,7 +93,9 @@ def __init__(self, settings: Settings) -> None: case "qdrant": try: - from llama_index.vector_stores.qdrant import QdrantVectorStore + from llama_index.vector_stores.qdrant import ( # type: ignore + QdrantVectorStore, + ) from qdrant_client import QdrantClient except ImportError as e: raise ImportError( diff --git a/private_gpt/server/ingest/ingest_watcher.py b/private_gpt/server/ingest/ingest_watcher.py index 51bba54a4..8bf4d1756 100644 --- a/private_gpt/server/ingest/ingest_watcher.py +++ b/private_gpt/server/ingest/ingest_watcher.py @@ -3,10 +3,9 @@ from typing import Any from watchdog.events import ( - DirCreatedEvent, - DirModifiedEvent, FileCreatedEvent, FileModifiedEvent, + FileSystemEvent, FileSystemEventHandler, ) from watchdog.observers import Observer @@ -20,11 +19,11 @@ def __init__( self.on_file_changed = on_file_changed class Handler(FileSystemEventHandler): - def on_modified(self, event: DirModifiedEvent | FileModifiedEvent) -> None: + def on_modified(self, event: FileSystemEvent) -> None: if isinstance(event, FileModifiedEvent): on_file_changed(Path(event.src_path)) - def on_created(self, event: DirCreatedEvent | FileCreatedEvent) -> None: + def on_created(self, event: FileSystemEvent) -> None: if isinstance(event, FileCreatedEvent): on_file_changed(Path(event.src_path)) diff --git a/pyproject.toml b/pyproject.toml index e495c3d6f..f4b7785d5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -145,6 +145,9 @@ explicit_package_bases = true warn_unused_ignores = false exclude = ["tests"] +[tool.mypy-llama-index] +ignore_missing_imports = true + [tool.pytest.ini_options] asyncio_mode = "auto" testpaths = ["tests"] From 85276893a3157c8e27ae142197a81bc49971a656 Mon Sep 17 00:00:00 2001 From: imartinez Date: Thu, 29 Feb 2024 15:38:32 +0100 Subject: [PATCH 06/15] Fix actions and dockerfiles --- .github/workflows/actions/install_dependencies/action.yml | 2 +- Dockerfile.external | 2 +- Dockerfile.local | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/workflows/actions/install_dependencies/action.yml b/.github/workflows/actions/install_dependencies/action.yml index e16c63370..68978d991 100644 --- a/.github/workflows/actions/install_dependencies/action.yml +++ b/.github/workflows/actions/install_dependencies/action.yml @@ -25,6 +25,6 @@ runs: python-version: ${{ inputs.python_version }} cache: "poetry" - name: Install Dependencies - run: poetry install --with ui --no-root + run: poetry install --extras "ui qdrant" --no-root shell: bash diff --git a/Dockerfile.external b/Dockerfile.external index 59ef285b3..7203a3b0c 100644 --- a/Dockerfile.external +++ b/Dockerfile.external @@ -14,7 +14,7 @@ FROM base as dependencies WORKDIR /home/worker/app COPY pyproject.toml poetry.lock ./ -RUN poetry install --with ui +RUN poetry install --extras "ui 
qdrant" FROM base as app diff --git a/Dockerfile.local b/Dockerfile.local index 66590fdbc..de02223db 100644 --- a/Dockerfile.local +++ b/Dockerfile.local @@ -24,8 +24,7 @@ FROM base as dependencies WORKDIR /home/worker/app COPY pyproject.toml poetry.lock ./ -RUN poetry install --with local -RUN poetry install --with ui +RUN poetry install --extras "ui local qdrant" FROM base as app From c3fe36e0708d447251e8cbcfa4bf76b685e2a461 Mon Sep 17 00:00:00 2001 From: imartinez Date: Thu, 29 Feb 2024 16:40:11 +0100 Subject: [PATCH 07/15] Separate local mode into llms-llama-cpp and embeddings-huggingface for clarity --- .../actions/install_dependencies/action.yml | 2 +- Dockerfile.external | 2 +- Dockerfile.local | 2 +- fern/docs/pages/installation/concepts.mdx | 11 ++- fern/docs/pages/installation/installation.mdx | 79 +++++++++++++++---- poetry.lock | 21 ++--- .../embedding/embedding_component.py | 10 +-- .../components/llm/custom/sagemaker.py | 24 +++--- private_gpt/components/llm/llm_component.py | 16 ++-- .../vector_store/vector_store_component.py | 6 +- private_gpt/settings/settings.py | 18 +++-- pyproject.toml | 19 +++-- settings-docker.yaml | 4 +- settings-local.yaml | 20 ++--- settings-mock.yaml | 3 +- settings-ollama.yaml | 8 +- settings-openai.yaml | 12 +++ settings-sagemaker.yaml | 6 +- settings-test.yaml | 3 + settings-vllm.yaml | 7 +- settings.yaml | 19 ++--- 21 files changed, 186 insertions(+), 106 deletions(-) create mode 100644 settings-openai.yaml diff --git a/.github/workflows/actions/install_dependencies/action.yml b/.github/workflows/actions/install_dependencies/action.yml index 68978d991..29234eeff 100644 --- a/.github/workflows/actions/install_dependencies/action.yml +++ b/.github/workflows/actions/install_dependencies/action.yml @@ -25,6 +25,6 @@ runs: python-version: ${{ inputs.python_version }} cache: "poetry" - name: Install Dependencies - run: poetry install --extras "ui qdrant" --no-root + run: poetry install --extras "ui vector-stores-qdrant" --no-root shell: bash diff --git a/Dockerfile.external b/Dockerfile.external index 7203a3b0c..b56af5018 100644 --- a/Dockerfile.external +++ b/Dockerfile.external @@ -14,7 +14,7 @@ FROM base as dependencies WORKDIR /home/worker/app COPY pyproject.toml poetry.lock ./ -RUN poetry install --extras "ui qdrant" +RUN poetry install --extras "ui vector-stores-qdrant" FROM base as app diff --git a/Dockerfile.local b/Dockerfile.local index de02223db..a52221164 100644 --- a/Dockerfile.local +++ b/Dockerfile.local @@ -24,7 +24,7 @@ FROM base as dependencies WORKDIR /home/worker/app COPY pyproject.toml poetry.lock ./ -RUN poetry install --extras "ui local qdrant" +RUN poetry install --extras "ui embeddings-huggingface llms-llama-cpp vector-stores-qdrant" FROM base as app diff --git a/fern/docs/pages/installation/concepts.mdx b/fern/docs/pages/installation/concepts.mdx index cb26ed7f2..b26029cb9 100644 --- a/fern/docs/pages/installation/concepts.mdx +++ b/fern/docs/pages/installation/concepts.mdx @@ -17,7 +17,10 @@ There is an extra component that can be enabled or disabled: the UI. It is a Gra ### Setups and Dependencies Your setup will be the combination of the different options available. You'll find recommended setups in the [installation](/installation) section. PrivateGPT uses poetry to manage its dependencies. You can install the dependencies for the different setups by running `poetry install --extras " ..."`. -Extras are the different options available for each component. 
For example, to install the dependencies for a local setup with UI and qdrant as vector database, you would run `poetry install --extras "ui local qdrant"`.
+Extras are the different options available for each component. For example, to install the dependencies for a local setup with UI and qdrant as vector database, Ollama as LLM and HuggingFace as local embeddings, you would run
+
+`poetry install --extras "ui vector-stores-qdrant llms-ollama embeddings-huggingface"`.
+
 Refer to the [installation](/installation) section for more details.
 
 ### Setups and Configuration
@@ -35,9 +38,9 @@ will load the configuration from `settings.yaml` and `settings-ollama.yaml`.
 ## About Fully Local Setups
 In order to run PrivateGPT in a fully local setup, you will need to run the LLM, Embeddings and Vector Store locally.
 ### Vector stores
-The 3 vector stores supported (Qdrant, ChromaDB and Postgres) run locally by default.
+The vector stores supported (Qdrant, ChromaDB and Postgres) run locally by default.
 ### Embeddings
-For local embeddings you need to install the 'local' extra dependencies. It will use Huggingface Embeddings.
+For local embeddings you need to install the 'embeddings-huggingface' extra dependencies. It will use Huggingface Embeddings.
 
 Note: Ollama will support Embeddings in the short term for easier installation, but it doesn't support them as of today.
 
@@ -48,7 +51,7 @@ poetry run python scripts/setup
 ### LLM
 For local LLM there are two options:
 * (Recommended) You can use the 'ollama' option in PrivateGPT, which will connect to your local Ollama instance. Ollama greatly simplifies the installation of local LLMs.
-* You can use the 'local' option in PrivateGPT, which will use LlamaCPP. It works great on Mac with Metal most of the times (leverages Metal GPU), but it can be tricky in certain Linux and Windows distributions, depending on the GPU. In the installation document you'll find guides and troubleshooting.
+* You can use the 'llms-llama-cpp' option in PrivateGPT, which will use LlamaCPP. It works great on Mac with Metal most of the time (it leverages the Metal GPU), but it can be tricky in certain Linux and Windows distributions, depending on the GPU. In the installation document you'll find guides and troubleshooting.
 
 In order for local LLM to work (the second option), you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
 ```bash
 poetry run python scripts/setup
 ```
diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx
index da693ffdb..4ff8b2eeb 100644
--- a/fern/docs/pages/installation/installation.mdx
+++ b/fern/docs/pages/installation/installation.mdx
@@ -30,8 +30,8 @@ pyenv local 3.11
 PrivateGPT allows you to customize the setup (from fully local to cloud-based) by deciding which modules to use.
 Here are the different options available:
 
-- LLM: "local" (uses LlamaCPP), "ollama", "sagemaker", "openai", "openailike"
-- Embeddings: "local" (uses HuggingFace embeddings), "openai", "sagemaker"
+- LLM: "llama-cpp", "ollama", "sagemaker", "openai", "openailike"
+- Embeddings: "huggingface", "openai", "sagemaker"
 - Vector stores: "qdrant", "chroma", "postgres"
 - UI: whether or not to enable UI (Gradio) or just go with the API
 
@@ -44,14 +44,17 @@ poetry install --extras "<extra1> <extra2>..."
Where `<extra>` can be any of the following:
 
 - ui: adds support for UI using Gradio
-- local: adds support for local LLM and Embeddings using LlamaCPP - expect a messy installation process on some platforms
-- openai: adds support for OpenAI LLM and Embeddings, requires OpenAI API key
-- sagemaker: adds support for Amazon Sagemaker LLM and Embeddings, requires Sagemaker endpoints
-- ollama: adds support for Ollama LLM, the easiest way to get a local LLM running
-- openai-like: adds support for 3rd party LLM providers that are compatible with OpenAI's API
-- qdrant: adds support for Qdrant vector store
-- chroma: adds support for Chroma DB vector store
-- postgres: adds support for Postgres vector store
+- llms-ollama: adds support for Ollama LLM, the easiest way to get a local LLM running
+- llms-llama-cpp: adds support for local LLM using LlamaCPP - expect a messy installation process on some platforms
+- llms-sagemaker: adds support for Amazon Sagemaker LLM, requires Sagemaker inference endpoints
+- llms-openai: adds support for OpenAI LLM, requires OpenAI API key
+- llms-openai-like: adds support for 3rd party LLM providers that are compatible with OpenAI's API
+- embeddings-huggingface: adds support for local Embeddings using HuggingFace
+- embeddings-sagemaker: adds support for Amazon Sagemaker Embeddings, requires Sagemaker inference endpoints
+- embeddings-openai: adds support for OpenAI Embeddings, requires OpenAI API key
+- vector-stores-qdrant: adds support for Qdrant vector store
+- vector-stores-chroma: adds support for Chroma DB vector store
+- vector-stores-postgres: adds support for Postgres vector store
 
 ## Recommended Setups
 
@@ -66,10 +69,10 @@ Go to [ollama.ai](https://ollama.ai/) and follow the instructions to install Oll
 
 Once done, you can install PrivateGPT with the following command:
 ```bash
-poetry install --extras "ui local ollama qdrant"
+poetry install --extras "ui llms-ollama embeddings-huggingface vector-stores-qdrant"
 ```
 
-We are installing the "local" extra to support local embeddings, because Ollama doesn't support embeddings just yet. But they're working on it!
+We are installing the "embeddings-huggingface" extra to support local embeddings, because Ollama doesn't support embeddings just yet. But they're working on it!
 In order for local embeddings to work, you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
 ```bash
 poetry run python scripts/setup
@@ -95,7 +98,7 @@ Edit the `settings-sagemaker.yaml` file to include the correct Sagemaker endpoin
 
 Then, install PrivateGPT with the following command:
 ```bash
-poetry install --extras "ui sagemaker qdrant"
+poetry install --extras "ui llms-sagemaker embeddings-sagemaker vector-stores-qdrant"
 ```
 
 Once installed, you can run PrivateGPT. Make sure your AWS credentials are properly configured before running the following command.
 
@@ -113,7 +116,7 @@ The UI will be available at http://localhost:8001
 
 If you want to run PrivateGPT fully locally without relying on Ollama, you can run the following command:
 
 ```bash
-poetry install --extras "ui local qdrant"
+poetry install --extras "ui llms-llama-cpp embeddings-huggingface vector-stores-qdrant"
 ```
 
 In order for local LLM and embeddings to work, you need to download the models to the `models` folder.
You can do so by running the `setup` script:
@@ -127,7 +130,53 @@ Once installed, you can run PrivateGPT with the following command:
 PGPT_PROFILES=local make run
 ```
 
-PrivateGPT will load the already existing `settings-local.yaml` file, which is already configured to use LlamaCPP and Qdrant.
+PrivateGPT will load the already existing `settings-local.yaml` file, which is already configured to use LlamaCPP LLM, HuggingFace embeddings and Qdrant.
+
+The UI will be available at http://localhost:8001
+
+### Non-Private, OpenAI-powered test setup
+
+If you want to test PrivateGPT with OpenAI's LLM and Embeddings (taking into account that your data will be sent to OpenAI!), you can use this setup.
+
+You need an OpenAI API key to run this setup.
+
+Edit the `settings-openai.yaml` file to include the correct API key. Never commit it! It's a secret! As an alternative to editing `settings-openai.yaml`, you can just set the env var OPENAI_API_KEY.
+
+Then, install PrivateGPT with the following command:
+```bash
+poetry install --extras "ui llms-openai embeddings-openai vector-stores-qdrant"
+```
+
+Once installed, you can run PrivateGPT.
+
+```bash
+PGPT_PROFILES=openai make run
+```
+
+PrivateGPT will use the already existing `settings-openai.yaml` settings file, which is already configured to use OpenAI LLM and Embeddings endpoints, and Qdrant.
+
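Since `settings-openai.yaml` reads the key from the environment (the `${OPENAI_API_KEY:}` placeholder appears in the new settings file later in this patch), the env-var route looks roughly like this (the key below is a placeholder; never commit a real one):

```bash
# Keep the secret out of the repo: export it instead of editing the settings file
export OPENAI_API_KEY="<your-openai-api-key>"  # placeholder value
PGPT_PROFILES=openai make run
```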
The UI will be available at http://localhost:8001 diff --git a/poetry.lock b/poetry.lock index 74713ef86..714399e26 100644 --- a/poetry.lock +++ b/poetry.lock @@ -5895,17 +5895,20 @@ docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.link testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] [extras] -chroma = ["llama-index-vector-stores-chroma"] -local = ["llama-index-embeddings-huggingface", "llama-index-llms-llama-cpp"] -ollama = ["llama-index-llms-ollama"] -openai = ["llama-index-embeddings-openai", "llama-index-llms-openai"] -openai-like = ["llama-index-llms-openai-like"] -postgres = ["llama-index-vector-stores-postgres"] -qdrant = ["llama-index-vector-stores-qdrant"] -sagemaker = ["boto3"] +embeddings-huggingface = ["llama-index-embeddings-huggingface"] +embeddings-openai = ["llama-index-embeddings-openai"] +embeddings-sagemaker = ["boto3"] +llms-llama-cpp = ["llama-index-llms-llama-cpp"] +llms-ollama = ["llama-index-llms-ollama"] +llms-openai = ["llama-index-llms-openai"] +llms-openai-like = ["llama-index-llms-openai-like"] +llms-sagemaker = ["boto3"] ui = ["gradio"] +vector-stores-chroma = ["llama-index-vector-stores-chroma"] +vector-stores-postgres = ["llama-index-vector-stores-postgres"] +vector-stores-qdrant = ["llama-index-vector-stores-qdrant"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "c2e3a4c948a9a49cc11ea085a20dc82b73dc0516fefa11ee941389b6f1ca1f3e" +content-hash = "0249c25c783180d0c483c533d9102e3885e4a4f5261dc331a41323bd79d446f3" diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index 781f3e934..7b035162a 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -18,18 +18,18 @@ def __init__(self, settings: Settings) -> None: embedding_mode = settings.embedding.mode logger.info("Initializing the embedding model in mode=%s", embedding_mode) match embedding_mode: - case "local": + case "huggingface": try: from llama_index.embeddings.huggingface import ( # type: ignore HuggingFaceEmbedding, ) except ImportError as e: raise ImportError( - "Local dependencies not found, install with `poetry install --extras local`" + "Local dependencies not found, install with `poetry install --extras embeddings-huggingface`" ) from e self.embedding_model = HuggingFaceEmbedding( - model_name=settings.local.embedding_hf_model_name, + model_name=settings.huggingface.embedding_hf_model_name, cache_folder=str(models_cache_path), ) case "sagemaker": @@ -39,7 +39,7 @@ def __init__(self, settings: Settings) -> None: ) except ImportError as e: raise ImportError( - "Sagemaker dependencies not found, install with `poetry install --extras sagemaker`" + "Sagemaker dependencies not found, install with `poetry install --extras embeddings-sagemaker`" ) from e self.embedding_model = SagemakerEmbedding( @@ -52,7 +52,7 @@ def __init__(self, settings: Settings) -> None: ) except ImportError as e: raise ImportError( - "OpenAI dependencies not found, install with `poetry install --extras openai`" + "OpenAI dependencies not found, install with `poetry install --extras embeddings-openai`" ) from e openai_settings = settings.openai.api_key diff --git a/private_gpt/components/llm/custom/sagemaker.py 
b/private_gpt/components/llm/custom/sagemaker.py index 2eedb1dab..7c46111fb 100644 --- a/private_gpt/components/llm/custom/sagemaker.py +++ b/private_gpt/components/llm/custom/sagemaker.py @@ -7,26 +7,20 @@ from typing import TYPE_CHECKING, Any import boto3 # type: ignore -from llama_index.bridge.pydantic import Field -from llama_index.llms import ( +from llama_index.core.base.llms.generic_utils import ( + completion_response_to_chat_response, + stream_completion_response_to_chat_response, +) +from llama_index.core.bridge.pydantic import Field +from llama_index.core.llms import ( CompletionResponse, CustomLLM, LLMMetadata, ) -from llama_index.llms.base import ( +from llama_index.core.llms.callbacks import ( llm_chat_callback, llm_completion_callback, ) -from llama_index.llms.generic_utils import ( - completion_response_to_chat_response, - stream_completion_response_to_chat_response, -) -from llama_index.llms.llama_utils import ( - completion_to_prompt as generic_completion_to_prompt, -) -from llama_index.llms.llama_utils import ( - messages_to_prompt as generic_messages_to_prompt, -) if TYPE_CHECKING: from collections.abc import Sequence @@ -161,8 +155,8 @@ def __init__( model_kwargs = model_kwargs or {} model_kwargs.update({"n_ctx": context_window, "verbose": verbose}) - messages_to_prompt = messages_to_prompt or generic_messages_to_prompt - completion_to_prompt = completion_to_prompt or generic_completion_to_prompt + messages_to_prompt = messages_to_prompt or {} + completion_to_prompt = completion_to_prompt or {} generate_kwargs = generate_kwargs or {} generate_kwargs.update( diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py index 1d14c333e..232d5b283 100644 --- a/private_gpt/components/llm/llm_component.py +++ b/private_gpt/components/llm/llm_component.py @@ -30,18 +30,18 @@ def __init__(self, settings: Settings) -> None: logger.info("Initializing the LLM in mode=%s", llm_mode) match settings.llm.mode: - case "local": + case "llamacpp": try: from llama_index.llms.llama_cpp import LlamaCPP # type: ignore except ImportError as e: raise ImportError( - "Local dependencies not found, install with `poetry install --extras local`" + "Local dependencies not found, install with `poetry install --extras llms-llama-cpp`" ) from e - prompt_style = get_prompt_style(settings.local.prompt_style) + prompt_style = get_prompt_style(settings.llamacpp.prompt_style) self.llm = LlamaCPP( - model_path=str(models_path / settings.local.llm_hf_model_file), + model_path=str(models_path / settings.llamacpp.llm_hf_model_file), temperature=0.1, max_new_tokens=settings.llm.max_new_tokens, context_window=settings.llm.context_window, @@ -60,7 +60,7 @@ def __init__(self, settings: Settings) -> None: from private_gpt.components.llm.custom.sagemaker import SagemakerLLM except ImportError as e: raise ImportError( - "Sagemaker dependencies not found, install with `poetry install --extras sagemaker`" + "Sagemaker dependencies not found, install with `poetry install --extras llms-sagemaker`" ) from e self.llm = SagemakerLLM( @@ -73,7 +73,7 @@ def __init__(self, settings: Settings) -> None: from llama_index.llms.openai import OpenAI # type: ignore except ImportError as e: raise ImportError( - "OpenAI dependencies not found, install with `poetry install --extras openai`" + "OpenAI dependencies not found, install with `poetry install --extras llms-openai`" ) from e openai_settings = settings.openai @@ -87,7 +87,7 @@ def __init__(self, settings: Settings) -> None: from 
llama_index.llms.openai_like import OpenAILike # type: ignore except ImportError as e: raise ImportError( - "OpenAILike dependencies not found, install with `poetry install --extras openailike`" + "OpenAILike dependencies not found, install with `poetry install --extras llms-openailike`" ) from e openai_settings = settings.openai @@ -104,7 +104,7 @@ def __init__(self, settings: Settings) -> None: from llama_index.llms.ollama import Ollama # type: ignore except ImportError as e: raise ImportError( - "Ollama dependencies not found, install with `poetry install --extras ollama`" + "Ollama dependencies not found, install with `poetry install --extras llms-ollama`" ) from e ollama_settings = settings.ollama diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py index d260ea3e4..283f6c399 100644 --- a/private_gpt/components/vector_store/vector_store_component.py +++ b/private_gpt/components/vector_store/vector_store_component.py @@ -45,7 +45,7 @@ def __init__(self, settings: Settings) -> None: ) except ImportError as e: raise ImportError( - "Postgres dependencies not found, install with `poetry install --extras postgres`" + "Postgres dependencies not found, install with `poetry install --extras vector-stores-postgres`" ) from e if settings.pgvector is None: @@ -72,7 +72,7 @@ def __init__(self, settings: Settings) -> None: ) except ImportError as e: raise ImportError( - "ChromaDB dependencies not found, install with `poetry install --extras chroma`" + "ChromaDB dependencies not found, install with `poetry install --extras vector-stores-chroma`" ) from e chroma_settings = ChromaSettings(anonymized_telemetry=False) @@ -99,7 +99,7 @@ def __init__(self, settings: Settings) -> None: from qdrant_client import QdrantClient except ImportError as e: raise ImportError( - "Qdrant dependencies not found, install with `poetry install --extras qdrant`" + "Qdrant dependencies not found, install with `poetry install --extras vector-stores-qdrant`" ) from e if settings.qdrant is None: diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py index 415acf4e5..4493f90fc 100644 --- a/private_gpt/settings/settings.py +++ b/private_gpt/settings/settings.py @@ -81,7 +81,7 @@ class DataSettings(BaseModel): class LLMSettings(BaseModel): - mode: Literal["local", "openai", "openailike", "sagemaker", "mock", "ollama"] + mode: Literal["llamacpp", "openai", "openailike", "sagemaker", "mock", "ollama"] max_new_tokens: int = Field( 256, description="The maximum number of token that the LLM is authorized to generate in one completion.", @@ -104,12 +104,9 @@ class VectorstoreSettings(BaseModel): database: Literal["chroma", "qdrant", "pgvector"] -class LocalSettings(BaseModel): +class LlamaCPPSettings(BaseModel): llm_hf_repo_id: str llm_hf_model_file: str - embedding_hf_model_name: str = Field( - description="Name of the HuggingFace model to use for embeddings" - ) prompt_style: Literal["default", "llama2", "tag", "mistral", "chatml"] = Field( "llama2", description=( @@ -123,8 +120,14 @@ class LocalSettings(BaseModel): ) +class HuggingFaceSettings(BaseModel): + embedding_hf_model_name: str = Field( + description="Name of the HuggingFace model to use for embeddings" + ) + + class EmbeddingSettings(BaseModel): - mode: Literal["local", "openai", "sagemaker", "mock"] + mode: Literal["huggingface", "openai", "sagemaker", "mock"] ingest_mode: Literal["simple", "batch", "parallel"] = Field( "simple", description=( @@ -292,7 +295,8 
@@ class Settings(BaseModel): ui: UISettings llm: LLMSettings embedding: EmbeddingSettings - local: LocalSettings + llamacpp: LlamaCPPSettings + huggingface: HuggingFaceSettings sagemaker: SagemakerSettings openai: OpenAISettings ollama: OllamaSettings diff --git a/pyproject.toml b/pyproject.toml index f4b7785d5..67961fe73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,14 +33,17 @@ gradio = {version ="^4.19.2", optional = true} [tool.poetry.extras] ui = ["gradio"] -local = ["llama-index-llms-llama-cpp", "llama-index-embeddings-huggingface"] -openai = ["llama-index-llms-openai", "llama-index-embeddings-openai"] -openai-like = ["llama-index-llms-openai-like"] -ollama = ["llama-index-llms-ollama"] -sagemaker = ["boto3"] -qdrant = ["llama-index-vector-stores-qdrant"] -chroma = ["llama-index-vector-stores-chroma"] -postgres = ["llama-index-vector-stores-postgres"] +llms-llama-cpp = ["llama-index-llms-llama-cpp"] +llms-openai = ["llama-index-llms-openai"] +llms-openai-like = ["llama-index-llms-openai-like"] +llms-ollama = ["llama-index-llms-ollama"] +llms-sagemaker = ["boto3"] +embeddings-huggingface = ["llama-index-embeddings-huggingface"] +embeddings-openai = ["llama-index-embeddings-openai"] +embeddings-sagemaker = ["boto3"] +vector-stores-qdrant = ["llama-index-vector-stores-qdrant"] +vector-stores-chroma = ["llama-index-vector-stores-chroma"] +vector-stores-postgres = ["llama-index-vector-stores-postgres"] [tool.poetry.group.dev.dependencies] diff --git a/settings-docker.yaml b/settings-docker.yaml index 6b915cca4..d71c4070a 100644 --- a/settings-docker.yaml +++ b/settings-docker.yaml @@ -8,9 +8,11 @@ llm: embedding: mode: ${PGPT_MODE:sagemaker} -local: +llamacpp: llm_hf_repo_id: ${PGPT_HF_REPO_ID:TheBloke/Mistral-7B-Instruct-v0.1-GGUF} llm_hf_model_file: ${PGPT_HF_MODEL_FILE:mistral-7b-instruct-v0.1.Q4_K_M.gguf} + +huggingface: embedding_hf_model_name: ${PGPT_EMBEDDING_HF_MODEL_NAME:BAAI/bge-small-en-v1.5} sagemaker: diff --git a/settings-local.yaml b/settings-local.yaml index 8ba8a6861..2c1995bc3 100644 --- a/settings-local.yaml +++ b/settings-local.yaml @@ -2,23 +2,25 @@ server: env_name: ${APP_ENV:local} llm: - mode: local + mode: llamacpp # Should be matching the selected model max_new_tokens: 512 context_window: 3900 tokenizer: mistralai/Mistral-7B-Instruct-v0.2 +llamacpp: + prompt_style: "mistral" + llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF + llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf + embedding: - mode: local + mode: huggingface + +huggingface: + embedding_hf_model_name: BAAI/bge-small-en-v1.5 vectorstore: database: qdrant qdrant: - path: local_data/private_gpt/qdrant - -local: - prompt_style: "mistral" - llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF - llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf - embedding_hf_model_name: BAAI/bge-small-en-v1.5 \ No newline at end of file + path: local_data/private_gpt/qdrant \ No newline at end of file diff --git a/settings-mock.yaml b/settings-mock.yaml index 8f9c01f7c..3c63f7fed 100644 --- a/settings-mock.yaml +++ b/settings-mock.yaml @@ -4,5 +4,6 @@ server: # This configuration allows you to use GPU for creating embeddings while avoiding loading LLM into vRAM llm: mode: mock + embedding: - mode: local + mode: huggingface diff --git a/settings-ollama.yaml b/settings-ollama.yaml index b5060f6cc..93fbf526a 100644 --- a/settings-ollama.yaml +++ b/settings-ollama.yaml @@ -11,7 +11,10 @@ ollama: api_base: http://localhost:11434 embedding: - mode: local + mode: huggingface + +huggingface: + 
embedding_hf_model_name: BAAI/bge-small-en-v1.5 vectorstore: database: qdrant @@ -19,6 +22,3 @@ vectorstore: qdrant: path: local_data/private_gpt/qdrant -local: - prompt_style: "llama2" - embedding_hf_model_name: BAAI/bge-small-en-v1.5 \ No newline at end of file diff --git a/settings-openai.yaml b/settings-openai.yaml new file mode 100644 index 000000000..093fcea93 --- /dev/null +++ b/settings-openai.yaml @@ -0,0 +1,12 @@ +server: + env_name: ${APP_ENV:openai} + +llm: + mode: openai + +embedding: + mode: openai + +openai: + api_key: ${OPENAI_API_KEY:} + model: gpt-3.5-turbo diff --git a/settings-sagemaker.yaml b/settings-sagemaker.yaml index face354df..678fb8b5c 100644 --- a/settings-sagemaker.yaml +++ b/settings-sagemaker.yaml @@ -1,5 +1,5 @@ server: - env_name: ${APP_ENV:prod} + env_name: ${APP_ENV:sagemaker} port: ${PORT:8001} ui: @@ -13,5 +13,5 @@ embedding: mode: sagemaker sagemaker: - llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140 - embedding_endpoint_name: huggingface-pytorch-inference-2023-11-03-07-41-36-479 \ No newline at end of file + llm_endpoint_name: llm + embedding_endpoint_name: embedding \ No newline at end of file diff --git a/settings-test.yaml b/settings-test.yaml index e1164b532..b6ca869da 100644 --- a/settings-test.yaml +++ b/settings-test.yaml @@ -14,5 +14,8 @@ qdrant: llm: mode: mock +embedding: + mode: mock + ui: enabled: false \ No newline at end of file diff --git a/settings-vllm.yaml b/settings-vllm.yaml index c3907f29d..5a0a68c67 100644 --- a/settings-vllm.yaml +++ b/settings-vllm.yaml @@ -1,11 +1,14 @@ +server: + env_name: ${APP_ENV:vllm} + llm: mode: openailike embedding: - mode: local + mode: huggingface ingest_mode: simple -local: +huggingface: embedding_hf_model_name: BAAI/bge-small-en-v1.5 openai: diff --git a/settings.yaml b/settings.yaml index 32147c87d..8c581a903 100644 --- a/settings.yaml +++ b/settings.yaml @@ -35,17 +35,24 @@ ui: delete_all_files_button_enabled: true llm: - mode: local + mode: llamacpp # Should be matching the selected model max_new_tokens: 512 context_window: 3900 - tokenizer: mistralai/Mistral-7B-Instruct-v0.2 + +llamacpp: + prompt_style: "mistral" + llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF + llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf embedding: # Should be matching the value above in most cases - mode: local + mode: huggingface ingest_mode: simple +huggingface: + embedding_hf_model_name: BAAI/bge-small-en-v1.5 + vectorstore: database: qdrant @@ -62,12 +69,6 @@ pgvector: schema_name: private_gpt table_name: embeddings -local: - prompt_style: "mistral" - llm_hf_repo_id: TheBloke/Mistral-7B-Instruct-v0.2-GGUF - llm_hf_model_file: mistral-7b-instruct-v0.2.Q4_K_M.gguf - embedding_hf_model_name: BAAI/bge-small-en-v1.5 - sagemaker: llm_endpoint_name: huggingface-pytorch-tgi-inference-2023-09-25-19-53-32-140 embedding_endpoint_name: huggingface-pytorch-inference-2023-11-03-07-41-36-479 From 8fad1966f078f15036d27dd68ab65f64a7a4d6fe Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 09:15:43 +0100 Subject: [PATCH 08/15] Update setup script to point to the new settings --- scripts/setup | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/setup b/scripts/setup index e08516a2b..3e02e6413 100755 --- a/scripts/setup +++ b/scripts/setup @@ -19,19 +19,19 @@ os.makedirs(models_path, exist_ok=True) # Download Embedding model embedding_path = models_path / "embedding" -print(f"Downloading embedding {settings().local.embedding_hf_model_name}") 
+print(f"Downloading embedding {settings().huggingface.embedding_hf_model_name}") snapshot_download( - repo_id=settings().local.embedding_hf_model_name, + repo_id=settings().huggingface.embedding_hf_model_name, cache_dir=models_cache_path, local_dir=embedding_path, ) print("Embedding model downloaded!") # Download LLM and create a symlink to the model file -print(f"Downloading LLM {settings().local.llm_hf_model_file}") +print(f"Downloading LLM {settings().llamacpp.llm_hf_model_file}") hf_hub_download( - repo_id=settings().local.llm_hf_repo_id, - filename=settings().local.llm_hf_model_file, + repo_id=settings().llamacpp.llm_hf_repo_id, + filename=settings().llamacpp.llm_hf_model_file, cache_dir=models_cache_path, local_dir=models_path, resume_download=resume_download, From e1456c13fe36b1a6d290dd50a7094878dbbd87a7 Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 09:31:08 +0100 Subject: [PATCH 09/15] Windows note for setting env vars --- fern/docs/pages/installation/installation.mdx | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx index 4ff8b2eeb..1d2975627 100644 --- a/fern/docs/pages/installation/installation.mdx +++ b/fern/docs/pages/installation/installation.mdx @@ -61,6 +61,23 @@ Where `` can be any of the following: There are just some examples of recommended setups. You can mix and match the different options to fit your needs. You'll find more information in the Manual section of the documentation. +> **Important for Windows**: In the examples below or how to run PrivateGPT with `make run`, `PGPT_PROFILES` env var is being set inline following Unix command line syntax (works on MacOS and Linux). +If you are using Windows, you'll need to set the env var in a different way, for example: + +```powershell +# Powershell +$env:PGPT_PROFILES="ollama" +make run +``` + +or + +```cmd +# CMD +set PGPT_PROFILES=ollama +make run +``` + ### Local, Ollama-powered setup The easiest way to run PrivateGPT fully locally is to depend on Ollama for the LLM. Ollama provides a local LLM that is easy to install and use. 
From 274c3863127e8911428ffd8521f9103efc2fad62 Mon Sep 17 00:00:00 2001
From: imartinez
Date: Fri, 1 Mar 2024 09:33:11 +0100
Subject: [PATCH 10/15] Fix error comment for openailike

---
 private_gpt/components/llm/llm_component.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index 232d5b283..b553e2d9c 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -87,7 +87,7 @@ def __init__(self, settings: Settings) -> None:
                 from llama_index.llms.openai_like import OpenAILike  # type: ignore
             except ImportError as e:
                 raise ImportError(
-                    "OpenAILike dependencies not found, install with `poetry install --extras llms-openailike`"
+                    "OpenAILike dependencies not found, install with `poetry install --extras llms-openai-like`"
                 ) from e
 
             openai_settings = settings.openai

From f6ff280482c93f07bfec059f790ed9f8e1652966 Mon Sep 17 00:00:00 2001
From: imartinez
Date: Fri, 1 Mar 2024 13:00:25 +0100
Subject: [PATCH 11/15] Support Ollama embeddings

---
 fern/docs/pages/installation/concepts.mdx     | 11 ++--
 fern/docs/pages/installation/installation.mdx | 50 +++++++------------
 poetry.lock                                   | 17 ++++++-
 .../embedding/embedding_component.py          | 15 ++++++
 private_gpt/components/llm/llm_component.py   |  2 +-
 private_gpt/settings/settings.py              |  8 ++-
 pyproject.toml                                |  2 +
 settings-ollama.yaml                          | 12 ++---
 settings.yaml                                 |  4 +-
 9 files changed, 72 insertions(+), 49 deletions(-)

diff --git a/fern/docs/pages/installation/concepts.mdx b/fern/docs/pages/installation/concepts.mdx
index b26029cb9..1ccb44682 100644
--- a/fern/docs/pages/installation/concepts.mdx
+++ b/fern/docs/pages/installation/concepts.mdx
@@ -40,20 +40,21 @@ In order to run PrivateGPT in a fully local setup, you will need to run the LLM,
 ### Vector stores
 The vector stores supported (Qdrant, ChromaDB and Postgres) run locally by default.
 ### Embeddings
-For local embeddings you need to install the 'embeddings-huggingface' extra dependencies. It will use Huggingface Embeddings.
-
-Note: Ollama will support Embeddings in the short term for easier installation, but it doesn't as of today.
+For local Embeddings there are two options:
+* (Recommended) You can use the 'ollama' option in PrivateGPT, which will connect to your local Ollama instance. Ollama greatly simplifies the installation of local LLMs.
+* You can use the 'embeddings-huggingface' option in PrivateGPT, which will run the Embeddings locally through HuggingFace.
 
-In order for local embeddings to work, you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
+In order for HuggingFace Embeddings to work (the second option), you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
 ```bash
 poetry run python scripts/setup
 ```
+
 ### LLM
 For local LLM there are two options:
 * (Recommended) You can use the 'ollama' option in PrivateGPT, which will connect to your local Ollama instance. Ollama greatly simplifies the installation of local LLMs.
 * You can use the 'llms-llama-cpp' option in PrivateGPT, which will use LlamaCPP. It works great on Mac with Metal most of the time (leverages the Metal GPU), but it can be tricky in certain Linux and Windows distributions, depending on the GPU. In the installation document you'll find guides and troubleshooting.
 
-In order for local LLM to work (the second option), you need to download the embeddings model to the `models` folder. 
You can do so by running the `setup` script:
+In order for LlamaCPP-powered LLM to work (the second option), you need to download the LLM model to the `models` folder. You can do so by running the `setup` script:
 ```bash
 poetry run python scripts/setup
 ```
\ No newline at end of file
diff --git a/fern/docs/pages/installation/installation.mdx b/fern/docs/pages/installation/installation.mdx
index 1d2975627..67431ebdb 100644
--- a/fern/docs/pages/installation/installation.mdx
+++ b/fern/docs/pages/installation/installation.mdx
@@ -44,11 +44,12 @@ poetry install --extras "<extra1> <extra2>..."
 
 Where `<extra>` can be any of the following:
 - ui: adds support for UI using Gradio
-- llms-ollama: adds support for Ollama LLM, the easiest way to get a local LLM running
+- llms-ollama: adds support for Ollama LLM, the easiest way to get a local LLM running, requires Ollama running locally
 - llms-llama-cpp: adds support for local LLM using LlamaCPP - expect a messy installation process on some platforms
 - llms-sagemaker: adds support for Amazon Sagemaker LLM, requires Sagemaker inference endpoints
 - llms-openai: adds support for OpenAI LLM, requires OpenAI API key
 - llms-openai-like: adds support for 3rd party LLM providers that are compatible with OpenAI's API
+- embeddings-ollama: adds support for Ollama Embeddings, requires Ollama running locally
 - embeddings-huggingface: adds support for local Embeddings using HuggingFace
 - embeddings-sagemaker: adds support for Amazon Sagemaker Embeddings, requires Sagemaker inference endpoints
 - embeddings-openai: adds support for OpenAI Embeddings, requires OpenAI API key
@@ -78,21 +79,29 @@ set PGPT_PROFILES=ollama
 make run
 ```
 
-### Local, Ollama-powered setup
+### Local, Ollama-powered setup - RECOMMENDED
 
-The easiest way to run PrivateGPT fully locally is to depend on Ollama for the LLM. Ollama provides a local LLM that is easy to install and use.
+**The easiest way to run PrivateGPT fully locally** is to depend on Ollama for the LLM. Ollama makes local LLMs and Embeddings super easy to install and use, abstracting away the complexity of GPU support. It's the recommended setup for local development.
 
 Go to [ollama.ai](https://ollama.ai/) and follow the instructions to install Ollama on your machine.
 
-Once done, you can install PrivateGPT with the following command:
+After the installation, make sure the Ollama desktop app is closed.
+
+Install the models to be used; the default settings-ollama.yaml is configured to use the `mistral 7b` LLM (~4GB) and `nomic-embed-text` Embeddings (~275MB). Therefore:
+
 ```bash
-poetry install --extras "ui llms-ollama embeddings-huggingface vector-stores-qdrant"
+ollama pull mistral
+ollama pull nomic-embed-text
 ```
 
-We are installing "embeddings-huggingface" dependency to support local embeddings, because Ollama doesn't support embeddings just yet. But they working on it!
-In order for local embeddings to work, you need to download the embeddings model to the `models` folder. You can do so by running the `setup` script:
+Now, start the Ollama service (it will start a local inference server, serving both the LLM and the Embeddings):
 ```bash
-poetry run python scripts/setup
+ollama serve
+```
+
+Once done, in a different terminal, you can install PrivateGPT with the following command:
+```bash
+poetry install --extras "ui llms-ollama embeddings-ollama vector-stores-qdrant"
 ```
 
 Once installed, you can run PrivateGPT. Make sure you have a working Ollama running locally before running the following command.
@@ -101,7 +110,7 @@ Once installed, you can run PrivateGPT. Make sure you have a working Ollama runn
 PGPT_PROFILES=ollama make run
 ```
 
-PrivateGPT will use the already existing `settings-ollama.yaml` settings file, which is already configured to use Ollama LLM, local Embeddings, and Qdrant. Review it and adapt it to your needs (different LLM model, different Ollama port, etc.)
+PrivateGPT will use the existing `settings-ollama.yaml` settings file, which is already configured to use Ollama for both the LLM and Embeddings, and Qdrant. Review it and adapt it to your needs (different models, different Ollama port, etc.).
 
 The UI will be available at http://localhost:8001
 
@@ -128,29 +137,6 @@ PrivateGPT will use the already existing `settings-sagemaker.yaml` settings file
 The UI will be available at http://localhost:8001
 
-### Local, Llama-CPP powered setup
-
-If you want to run PrivateGPT fully locally without relying on Ollama, you can run the following command:
-
-```bash
-poetry install --extras "ui llms-llama-cpp embeddings-huggingface vector-stores-qdrant"
-```
-
-In order for local LLM and embeddings to work, you need to download the models to the `models` folder. You can do so by running the `setup` script:
-```bash
-poetry run python scripts/setup
-```
-
-Once installed, you can run PrivateGPT with the following command:
-
-```bash
-PGPT_PROFILES=local make run
-```
-
-PrivateGPT will load the already existing `settings-local.yaml` file, which is already configured to use LlamaCPP LLM, HuggingFace embeddings and Qdrant.
-
-The UI will be available at http://localhost:8001
-
 ### Non-Private, OpenAI-powered test setup
 
 If you want to test PrivateGPT with OpenAI's LLM and Embeddings (bear in mind that your data will be sent to OpenAI!), you can run the following command:

diff --git a/poetry.lock b/poetry.lock
index 714399e26..2a9361e34 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2097,6 +2097,20 @@ llama-index-core = ">=0.10.1,<0.11.0"
 torch = ">=2.1.2,<3.0.0"
 transformers = ">=4.37.0,<5.0.0"
 
+[[package]]
+name = "llama-index-embeddings-ollama"
+version = "0.1.2"
+description = "llama-index embeddings ollama integration"
+optional = true
+python-versions = ">=3.8.1,<4.0"
+files = [
+    {file = "llama_index_embeddings_ollama-0.1.2-py3-none-any.whl", hash = "sha256:ac7afabfa1134059af351b021e05e256bf86dd15e5176ffa5ab0305bcf03b33f"},
+    {file = "llama_index_embeddings_ollama-0.1.2.tar.gz", hash = "sha256:a9e0809bddd2e4ad888f249519edc7e3d339c74e4e03fc5a40c3060dc41d47a9"},
+]
+
+[package.dependencies]
+llama-index-core = ">=0.10.1,<0.11.0"
+
 [[package]]
 name = "llama-index-embeddings-openai"
 version = "0.1.6"
@@ -5896,6 +5910,7 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [extras]
 embeddings-huggingface = ["llama-index-embeddings-huggingface"]
+embeddings-ollama = ["llama-index-embeddings-ollama"]
 embeddings-openai = ["llama-index-embeddings-openai"]
 embeddings-sagemaker = ["boto3"]
 llms-llama-cpp = ["llama-index-llms-llama-cpp"]
@@ -5911,4 +5926,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"]
 [metadata]
 lock-version = "2.0"
 python-versions = ">=3.11,<3.12"
-content-hash = "0249c25c783180d0c483c533d9102e3885e4a4f5261dc331a41323bd79d446f3"
+content-hash = "c24bce23b7deccc8958979e2836151a7f5e5c08424d12945785a512119ac0f9c"

diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py
index 7b035162a..2a5dd48b6 100644
--- a/private_gpt/components/embedding/embedding_component.py
+++ 
b/private_gpt/components/embedding/embedding_component.py
@@ -57,6 +57,21 @@ def __init__(self, settings: Settings) -> None:
                 openai_settings = settings.openai.api_key
                 self.embedding_model = OpenAIEmbedding(api_key=openai_settings)
+            case "ollama":
+                try:
+                    from llama_index.embeddings.ollama import (  # type: ignore
+                        OllamaEmbedding,
+                    )
+                except ImportError as e:
+                    raise ImportError(
+                        "Ollama dependencies not found, install with `poetry install --extras embeddings-ollama`"
+                    ) from e
+
+                ollama_settings = settings.ollama
+                self.embedding_model = OllamaEmbedding(
+                    model_name=ollama_settings.embedding_model,
+                    base_url=ollama_settings.api_base
+                )
             case "mock":
                 # Not a random number; it is the dimensionality used by
                 # the default embedding model
diff --git a/private_gpt/components/llm/llm_component.py b/private_gpt/components/llm/llm_component.py
index b553e2d9c..351513e46 100644
--- a/private_gpt/components/llm/llm_component.py
+++ b/private_gpt/components/llm/llm_component.py
@@ -109,7 +109,7 @@ def __init__(self, settings: Settings) -> None:
                 ollama_settings = settings.ollama
                 self.llm = Ollama(
-                    model=ollama_settings.model, base_url=ollama_settings.api_base
+                    model=ollama_settings.llm_model, base_url=ollama_settings.api_base
                 )
             case "mock":
                 self.llm = MockLLM()
diff --git a/private_gpt/settings/settings.py b/private_gpt/settings/settings.py
index 4493f90fc..cbb890237 100644
--- a/private_gpt/settings/settings.py
+++ b/private_gpt/settings/settings.py
@@ -127,7 +127,7 @@ class HuggingFaceSettings(BaseModel):
 
 class EmbeddingSettings(BaseModel):
-    mode: Literal["huggingface", "openai", "sagemaker", "mock"]
+    mode: Literal["huggingface", "openai", "sagemaker", "ollama", "mock"]
     ingest_mode: Literal["simple", "batch", "parallel"] = Field(
         "simple",
         description=(
@@ -176,10 +176,14 @@ class OllamaSettings(BaseModel):
         "http://localhost:11434",
         description="Base URL of Ollama API. Example: 'https://localhost:11434'.",
     )
-    model: str = Field(
+    llm_model: str = Field(
         None,
         description="Model to use. Example: 'llama2-uncensored'.",
     )
+    embedding_model: str = Field(
+        None,
+        description="Model to use. 
Example: 'nomic-embed-text'.", + ) class UISettings(BaseModel): diff --git a/pyproject.toml b/pyproject.toml index 67961fe73..105be9f7b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,6 +21,7 @@ llama-index-llms-llama-cpp = {version = "^0.1.3", optional = true} llama-index-llms-openai = {version = "^0.1.6", optional = true} llama-index-llms-openai-like = {version ="^0.1.3", optional = true} llama-index-llms-ollama = {version ="^0.1.2", optional = true} +llama-index-embeddings-ollama = {version ="^0.1.2", optional = true} llama-index-embeddings-huggingface = {version ="^0.1.4", optional = true} llama-index-embeddings-openai = {version ="^0.1.6", optional = true} llama-index-vector-stores-qdrant = {version ="^0.1.3", optional = true} @@ -38,6 +39,7 @@ llms-openai = ["llama-index-llms-openai"] llms-openai-like = ["llama-index-llms-openai-like"] llms-ollama = ["llama-index-llms-ollama"] llms-sagemaker = ["boto3"] +embeddings-ollama = ["llama-index-embeddings-ollama"] embeddings-huggingface = ["llama-index-embeddings-huggingface"] embeddings-openai = ["llama-index-embeddings-openai"] embeddings-sagemaker = ["boto3"] diff --git a/settings-ollama.yaml b/settings-ollama.yaml index 93fbf526a..4f2cab4d8 100644 --- a/settings-ollama.yaml +++ b/settings-ollama.yaml @@ -6,15 +6,13 @@ llm: max_new_tokens: 512 context_window: 3900 -ollama: - model: llama2 - api_base: http://localhost:11434 - embedding: - mode: huggingface + mode: ollama -huggingface: - embedding_hf_model_name: BAAI/bge-small-en-v1.5 +ollama: + llm_model: mistral + embedding_model: nomic-embed-text + api_base: http://localhost:11434 vectorstore: database: qdrant diff --git a/settings.yaml b/settings.yaml index 8c581a903..9d3cd0737 100644 --- a/settings.yaml +++ b/settings.yaml @@ -78,4 +78,6 @@ openai: model: gpt-3.5-turbo ollama: - model: llama2-uncensored + llm_model: llama2 + embedding_model: nomic-embed-text + api_base: http://localhost:11434 From 89c43f51a010f4a91098a63571c242df2c01e5a7 Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 13:00:45 +0100 Subject: [PATCH 12/15] Fix lint --- private_gpt/components/embedding/embedding_component.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/private_gpt/components/embedding/embedding_component.py b/private_gpt/components/embedding/embedding_component.py index 2a5dd48b6..f384262db 100644 --- a/private_gpt/components/embedding/embedding_component.py +++ b/private_gpt/components/embedding/embedding_component.py @@ -70,7 +70,7 @@ def __init__(self, settings: Settings) -> None: ollama_settings = settings.ollama self.embedding_model = OllamaEmbedding( model_name=ollama_settings.embedding_model, - base_url=ollama_settings.api_base + base_url=ollama_settings.api_base, ) case "mock": # Not a random number, is the dimensionality used by From 823a0c0d5f09d00d01a331822b1fd1362166dbdd Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 16:31:44 +0100 Subject: [PATCH 13/15] Upgrade to llamaindex 0.10.14. 
Remove legacy use of ServiceContext in ContextChatEngine --- poetry.lock | 14 +++++++------- private_gpt/server/chat/chat_service.py | 8 -------- pyproject.toml | 4 ++-- 3 files changed, 9 insertions(+), 17 deletions(-) diff --git a/poetry.lock b/poetry.lock index 2a9361e34..c9d0057e7 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2039,13 +2039,13 @@ test = ["httpx (>=0.24.1)", "pytest (>=7.4.0)", "scipy (>=1.10)"] [[package]] name = "llama-index-core" -version = "0.10.13" +version = "0.10.14.post1" description = "Interface between LLMs and your data" optional = false python-versions = ">=3.8.1,<4.0" files = [ - {file = "llama_index_core-0.10.13-py3-none-any.whl", hash = "sha256:40c76fc02be7cd948a333ca541f2ff38cf02774e1c960674e2b68c61943bac90"}, - {file = "llama_index_core-0.10.13.tar.gz", hash = "sha256:826fded00767923fba8aca94f46c32b259e8879f517016ab7a3801b1b37187a1"}, + {file = "llama_index_core-0.10.14.post1-py3-none-any.whl", hash = "sha256:7b12ebebe023e8f5e50c0fcff4af7a67e4842b2e1ca6a84b09442394d2689de6"}, + {file = "llama_index_core-0.10.14.post1.tar.gz", hash = "sha256:adb931fced7bff092b26599e7f89952c171bf2994872906b5712ecc8107d4727"}, ] [package.dependencies] @@ -5146,13 +5146,13 @@ telegram = ["requests"] [[package]] name = "transformers" -version = "4.38.1" +version = "4.38.2" description = "State-of-the-art Machine Learning for JAX, PyTorch and TensorFlow" optional = false python-versions = ">=3.8.0" files = [ - {file = "transformers-4.38.1-py3-none-any.whl", hash = "sha256:a7a9265fb060183e9d975cbbadc4d531b10281589c43f6d07563f86322728973"}, - {file = "transformers-4.38.1.tar.gz", hash = "sha256:86dc84ccbe36123647e84cbd50fc31618c109a41e6be92514b064ab55bf1304c"}, + {file = "transformers-4.38.2-py3-none-any.whl", hash = "sha256:c4029cb9f01b3dd335e52f364c52d2b37c65b4c78e02e6a08b1919c5c928573e"}, + {file = "transformers-4.38.2.tar.gz", hash = "sha256:c5fc7ad682b8a50a48b2a4c05d4ea2de5567adb1bdd00053619dbe5960857dd5"}, ] [package.dependencies] @@ -5926,4 +5926,4 @@ vector-stores-qdrant = ["llama-index-vector-stores-qdrant"] [metadata] lock-version = "2.0" python-versions = ">=3.11,<3.12" -content-hash = "c24bce23b7deccc8958979e2836151a7f5e5c08424d12945785a512119ac0f9c" +content-hash = "41849a9d15848a354fd4cc0ca9d752148e76fee64d8bb5b881210c2290fc8072" diff --git a/private_gpt/server/chat/chat_service.py b/private_gpt/server/chat/chat_service.py index 4fcd30cb9..5369200b0 100644 --- a/private_gpt/server/chat/chat_service.py +++ b/private_gpt/server/chat/chat_service.py @@ -102,18 +102,10 @@ def _chat_engine( vector_index_retriever = self.vector_store_component.get_retriever( index=self.index, context_filter=context_filter ) - # TODO ContextChatEngine is still not migrated by LlamaIndex to accept - # llm directly, so we are passing legacy ServiceContext until it is fixed. 
- from llama_index.core import ServiceContext - return ContextChatEngine.from_defaults( system_prompt=system_prompt, retriever=vector_index_retriever, llm=self.llm_component.llm, # Takes no effect at the moment - service_context=ServiceContext.from_defaults( - llm=self.llm_component.llm, - embed_model=self.embedding_component.embedding_model, - ), node_postprocessors=[ MetadataReplacementPostProcessor(target_metadata_key="window"), ], diff --git a/pyproject.toml b/pyproject.toml index 105be9f7b..cc668b672 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -12,9 +12,9 @@ python-multipart = "^0.0.9" injector = "^0.21.0" pyyaml = "^6.0.1" watchdog = "^4.0.0" -transformers = "^4.38.1" +transformers = "^4.38.2" # LlamaIndex core libs -llama-index-core = "^0.10.13" +llama-index-core = "^0.10.14" llama-index-readers-file = "^0.1.6" # Optional LlamaIndex integration libs llama-index-llms-llama-cpp = {version = "^0.1.3", optional = true} From ab4b3d9b6a8975722b4dae7c5464f31f241a5089 Mon Sep 17 00:00:00 2001 From: imartinez Date: Fri, 1 Mar 2024 16:35:03 +0100 Subject: [PATCH 14/15] Bump to 0.4.0 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index cc668b672..c65afbfd2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "private-gpt" -version = "0.2.0" +version = "0.4.0" description = "Private GPT" authors = ["Zylon "] From e70f17e2a8dbd8775bd71e21197036ca78feeb22 Mon Sep 17 00:00:00 2001 From: imartinez Date: Tue, 5 Mar 2024 15:57:55 +0100 Subject: [PATCH 15/15] Fix vector retriever filters --- .../vector_store/vector_store_component.py | 46 +++++++++---------- 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/private_gpt/components/vector_store/vector_store_component.py b/private_gpt/components/vector_store/vector_store_component.py index 283f6c399..0b677c875 100644 --- a/private_gpt/components/vector_store/vector_store_component.py +++ b/private_gpt/components/vector_store/vector_store_component.py @@ -3,7 +3,12 @@ from injector import inject, singleton from llama_index.core.indices.vector_store import VectorIndexRetriever, VectorStoreIndex -from llama_index.core.vector_stores.types import VectorStore +from llama_index.core.vector_stores.types import ( + FilterCondition, + MetadataFilter, + MetadataFilters, + VectorStore, +) from private_gpt.open_ai.extensions.context_filter import ContextFilter from private_gpt.paths import local_data_path @@ -12,31 +17,26 @@ logger = logging.getLogger(__name__) -@typing.no_type_check -def _chromadb_doc_id_metadata_filter( +def _doc_id_metadata_filter( context_filter: ContextFilter | None, -) -> dict | None: - if context_filter is None or context_filter.docs_ids is None: - return {} # No filter - elif len(context_filter.docs_ids) < 1: - return {"doc_id": "-"} # Effectively filtering out all docs - else: - doc_filter_items = [] - if len(context_filter.docs_ids) > 1: - doc_filter = {"$or": doc_filter_items} - for doc_id in context_filter.docs_ids: - doc_filter_items.append({"doc_id": doc_id}) - else: - doc_filter = {"doc_id": context_filter.docs_ids[0]} - return doc_filter +) -> MetadataFilters: + filters = MetadataFilters(filters=[], condition=FilterCondition.OR) + + if context_filter is not None and context_filter.docs_ids is not None: + for doc_id in context_filter.docs_ids: + filters.filters.append(MetadataFilter(key="doc_id", value=doc_id)) + + return filters @singleton class VectorStoreComponent: + settings: Settings vector_store: VectorStore 
@inject def __init__(self, settings: Settings) -> None: + self.settings = settings match settings.vectorstore.database: case "pgvector": try: @@ -96,7 +96,7 @@ def __init__(self, settings: Settings) -> None: from llama_index.vector_stores.qdrant import ( # type: ignore QdrantVectorStore, ) - from qdrant_client import QdrantClient + from qdrant_client import QdrantClient # type: ignore except ImportError as e: raise ImportError( "Qdrant dependencies not found, install with `poetry install --extras vector-stores-qdrant`" @@ -126,20 +126,20 @@ def __init__(self, settings: Settings) -> None: f"Vectorstore database {settings.vectorstore.database} not supported" ) - @staticmethod def get_retriever( + self, index: VectorStoreIndex, context_filter: ContextFilter | None = None, similarity_top_k: int = 2, ) -> VectorIndexRetriever: - # This way we support qdrant (using doc_ids) and chroma (using where clause) + # This way we support qdrant (using doc_ids) and the rest (using filters) return VectorIndexRetriever( index=index, similarity_top_k=similarity_top_k, doc_ids=context_filter.docs_ids if context_filter else None, - vector_store_kwargs={ - "where": _chromadb_doc_id_metadata_filter(context_filter) - }, + filters=_doc_id_metadata_filter(context_filter) + if self.settings.vectorstore.database != "qdrant" + else None, ) def close(self) -> None:
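To make the retriever change above easier to follow outside the diff, here is a small, self-contained sketch of the `MetadataFilters`-based doc-id filtering it introduces (mirroring `_doc_id_metadata_filter`; the `build_doc_id_filter` name and the example ids are illustrative, while the imported types are the llama-index-core 0.10.x ones used by the patch):

```python
from llama_index.core.vector_stores.types import (
    FilterCondition,
    MetadataFilter,
    MetadataFilters,
)


def build_doc_id_filter(docs_ids: list[str] | None) -> MetadataFilters:
    # OR together one metadata filter per document id, as the patch does.
    # With no ids given, the empty OR filter leaves retrieval unrestricted.
    filters = MetadataFilters(filters=[], condition=FilterCondition.OR)
    if docs_ids:
        for doc_id in docs_ids:
            filters.filters.append(MetadataFilter(key="doc_id", value=doc_id))
    return filters


# Restrict retrieval to two previously ingested documents:
print(build_doc_id_filter(["doc-a", "doc-b"]))
```

Because the filter uses `FilterCondition.OR`, adding more doc ids widens the allowed set rather than narrowing it; Qdrant keeps using its native `doc_ids` path, as the `get_retriever` change above shows.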