diff --git a/.pin/constraints-cuda-torch.txt b/.pin/constraints-cuda-torch.txt index 96876ac75..1886ffa21 100644 --- a/.pin/constraints-cuda-torch.txt +++ b/.pin/constraints-cuda-torch.txt @@ -32,9 +32,9 @@ accelerate==0.34.2 # -r benchmarks/rlhf/requirements.in # diffusers # trl -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # datasets # fsspec @@ -50,7 +50,7 @@ argklass==1.4.4 # -r benchmarks/diffusion/requirements.in # -r benchmarks/llm/requirements.in # -r benchmarks/purejaxrl/requirements.in -astroid==3.2.4 +astroid==3.3.4 # via pylint asttokens==2.4.1 # via giving @@ -58,7 +58,7 @@ async-timeout==4.0.3 # via aiohttp attrs==24.2.0 # via aiohttp -beartype==0.18.5 +beartype==0.19.0 # via -r benchmarks/vjepa/requirements.in black==24.8.0 # via navix @@ -88,7 +88,7 @@ certifi==2024.8.30 # sentry-sdk charset-normalizer==3.3.2 # via requests -chex==0.1.86 +chex==0.1.87 # via # distrax # evosax @@ -117,7 +117,7 @@ cvxopt==1.3.2 # via -r benchmarks/recursiongfn/requirements.in cycler==0.12.1 # via matplotlib -datasets==3.0.0 +datasets==3.0.1 # via # -r benchmarks/diffusion/requirements.in # -r benchmarks/llama/requirements.in @@ -188,7 +188,7 @@ filelock==3.16.1 # torch # transformers # triton -fire==0.6.0 +fire==0.7.0 # via # -r benchmarks/llama/requirements.in # -r benchmarks/llm/requirements.txt @@ -210,7 +210,7 @@ flax==0.9.0 # flashbax # gymnax # navix -fonttools==4.53.1 +fonttools==4.54.1 # via matplotlib frozenlist==1.4.1 # via @@ -245,7 +245,7 @@ gpytorch==1.13 # via # -r benchmarks/recursiongfn/requirements.in # botorch -grpcio==1.66.1 +grpcio==1.66.2 # via # brax # tensorboard @@ -267,7 +267,7 @@ gymnax==0.0.8 # -r benchmarks/purejaxrl/requirements.in hjson==3.1.0 # via argklass -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -r benchmarks/timm/requirements.in # accelerate @@ -420,7 +420,7 @@ mypy-extensions==1.0.0 # via black navix==0.7.0 # via -r benchmarks/purejaxrl/requirements.in -ndindex==1.8 +ndindex==1.9.2 # via blosc2 nest-asyncio==1.6.0 # via orbax-checkpoint @@ -466,7 +466,6 @@ numpy==1.26.4 # navix # numexpr # opencv-python - # opt-einsum # optax # orbax-checkpoint # pandas @@ -501,7 +500,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # via # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via jax-cuda12-plugin nvidia-cuda-nvrtc-cu12==12.1.105 # via torch @@ -534,7 +533,7 @@ nvidia-nccl-cu12==2.20.5 # via # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # jax-cuda12-plugin # nvidia-cusolver-cu12 @@ -549,7 +548,7 @@ omegaconf==2.3.0 # voir opencv-python==4.10.0.84 # via -r benchmarks/vjepa/requirements.in -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # jax # pyro-ppl @@ -584,7 +583,7 @@ packaging==24.1 # tensorboardx # torchmetrics # transformers -pandas==2.2.2 +pandas==2.2.3 # via # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/recursiongfn/requirements.in @@ -637,13 +636,13 @@ pyarrow==17.0.0 # datasets pycodestyle==2.12.1 # via flake8 -pycryptodomex==3.20.0 +pycryptodomex==3.21.0 # via blobfile pyflakes==3.2.0 # via flake8 pygments==2.18.0 # via rich -pylint==3.2.7 +pylint==3.3.1 # via navix pyopengl==3.1.7 # via mujoco @@ -711,7 +710,7 @@ requests==2.32.3 # torch-geometric # transformers # wandb -rich==13.8.1 +rich==13.9.1 # via # flax # tyro @@ -749,7 +748,7 @@ sentencepiece==0.2.0 # via # -r benchmarks/llama/requirements.in # torchtune -sentry-sdk==2.14.0 +sentry-sdk==2.15.0 # via wandb setproctitle==1.3.3 # via 
wandb @@ -761,7 +760,6 @@ six==1.16.0 # via # asttokens # docker-pycreds - # fire # ml-collections # python-dateutil # tensorboard @@ -778,7 +776,7 @@ tables==3.10.1 # via -r benchmarks/recursiongfn/requirements.in tabulate==0.9.0 # via fvcore -tensorboard==2.17.1 +tensorboard==2.18.0 # via # -r benchmarks/recursiongfn/requirements.in # -r benchmarks/torchatari/requirements.in @@ -788,7 +786,7 @@ tensorboardx==2.6.2.2 # via brax tensorflow-probability==0.24.0 # via distrax -tensorstore==0.1.65 +tensorstore==0.1.66 # via # flashbax # flax @@ -805,7 +803,7 @@ timm==1.0.9 # via -r benchmarks/vjepa/requirements.in tokenizers==0.19.1 # via transformers -tomli==2.0.1 +tomli==2.0.2 # via # black # pylint @@ -852,7 +850,7 @@ torch-cluster==1.6.3+pt24cu121 # via # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/recursiongfn/requirements.in -torch-geometric==2.6.0 +torch-geometric==2.6.1 # via # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/recursiongfn/requirements.in @@ -864,8 +862,11 @@ torch-sparse==0.6.18+pt24cu121 # via # -r benchmarks/geo_gnn/requirements.in # -r benchmarks/recursiongfn/requirements.in -torchao==0.5.0+cu121 - # via -r benchmarks/llm/requirements.in +torchao==0.3.1+cu121 + # via + # -c .pin/../constraints/cuda.txt + # -r benchmarks/llm/requirements.in + # torchtune torchcompat==1.1.4 # via # -c .pin/../constraints/cuda.txt @@ -879,8 +880,10 @@ torchmetrics==1.4.2 # -r benchmarks/dinov2/requirements.in # lightning # pytorch-lightning -torchtune==0.3.0+cu121 - # via -r benchmarks/llm/requirements.in +torchtune==0.2.1+cu121 + # via + # -c .pin/../constraints/cuda.txt + # -r benchmarks/llm/requirements.in torchvision==0.19.0+cu121 # via # -r benchmarks/diffusion/requirements.in @@ -910,6 +913,7 @@ tqdm==4.66.5 # transformers transformers==4.44.2 # via + # -c .pin/../constraints/cuda.txt # -r benchmarks/diffusion/requirements.in # -r benchmarks/huggingface/requirements.in # -r benchmarks/llama/requirements.in @@ -923,11 +927,13 @@ trimesh==4.4.9 # mujoco-mjx triton==3.0.0 # via torch -trl==0.11.0 - # via -r benchmarks/rlhf/requirements.in +trl==0.10.1 + # via + # -c .pin/../constraints/cuda.txt + # -r benchmarks/rlhf/requirements.in typeguard==4.3.0 # via jaxtyping -types-protobuf==5.27.0.20240907 +types-protobuf==5.28.0.20240924 # via envpool typing-extensions==4.12.2 # via @@ -952,6 +958,7 @@ typing-extensions==4.12.2 # orbax-checkpoint # pytorch-lightning # reactivex + # rich # submitit # tables # torch @@ -962,7 +969,7 @@ tyro==0.8.11 # -r benchmarks/torchatari/requirements.in # navix # trl -tzdata==2024.1 +tzdata==2024.2 # via pandas urllib3==2.2.3 # via @@ -992,7 +999,7 @@ voir==0.2.19 # -r benchmarks/torchvision/requirements.in # -r benchmarks/torchvision_ddp/requirements.in # -r benchmarks/vjepa/requirements.in -wandb==0.18.1 +wandb==0.18.3 # via # -r benchmarks/recursiongfn/requirements.in # navix @@ -1010,7 +1017,7 @@ xxhash==3.5.0 # via datasets yacs==0.1.8 # via fvcore -yarl==1.11.1 +yarl==1.13.1 # via aiohttp zipp==3.20.2 # via diff --git a/benchmarks/brax/requirements.cuda.txt b/benchmarks/brax/requirements.cuda.txt index 89ebe8840..aae485613 100644 --- a/benchmarks/brax/requirements.cuda.txt +++ b/benchmarks/brax/requirements.cuda.txt @@ -37,7 +37,7 @@ brax==0.10.5 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/brax/requirements.in -chex==0.1.86 +chex==0.1.87 # via # -c .pin/../.pin/constraints-cuda-torch.txt # optax @@ -109,7 +109,7 @@ glfw==2.7.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # mujoco -grpcio==1.66.1 
+grpcio==1.66.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # brax @@ -234,7 +234,6 @@ numpy==1.26.4 # jaxopt # ml-dtypes # mujoco - # opt-einsum # optax # orbax-checkpoint # scipy @@ -254,7 +253,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -301,7 +300,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -315,7 +314,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -377,7 +376,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # flax @@ -403,7 +402,7 @@ tensorboardx==2.6.2.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # brax -tensorstore==0.1.65 +tensorstore==0.1.66 # via # -c .pin/../.pin/constraints-cuda-torch.txt # flax @@ -435,6 +434,7 @@ typing-extensions==4.12.2 # flax # orbax-checkpoint # reactivex + # rich # torch varname==0.13.3 # via diff --git a/benchmarks/diffusion/requirements.cuda.txt b/benchmarks/diffusion/requirements.cuda.txt index 34a92c65d..676489f43 100644 --- a/benchmarks/diffusion/requirements.cuda.txt +++ b/benchmarks/diffusion/requirements.cuda.txt @@ -15,11 +15,11 @@ accelerate==0.34.2 # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/diffusion/requirements.in # diffusers -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -60,7 +60,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==3.0.0 +datasets==3.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/diffusion/requirements.in @@ -106,7 +106,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # argklass -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -190,7 +190,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # pandas # pyarrow # scipy @@ -209,7 +208,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -256,7 +255,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -270,7 +269,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -285,7 +284,7 @@ packaging==24.1 # datasets # huggingface-hub # transformers -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -343,7 +342,7 @@ requests==2.32.3 # diffusers # huggingface-hub # transformers -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ 
-392,6 +391,7 @@ tqdm==4.66.5 transformers==4.44.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/diffusion/requirements.in triton==3.0.0 # via @@ -403,8 +403,9 @@ typing-extensions==4.12.2 # huggingface-hub # multidict # reactivex + # rich # torch -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -429,7 +430,7 @@ xxhash==3.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/dinov2/requirements.cuda.txt b/benchmarks/dinov2/requirements.cuda.txt index 9b3940ff2..bb0535894 100644 --- a/benchmarks/dinov2/requirements.cuda.txt +++ b/benchmarks/dinov2/requirements.cuda.txt @@ -109,7 +109,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # torchmetrics # torchvision @@ -126,7 +125,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -173,7 +172,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -188,7 +187,7 @@ omegaconf==2.3.0 # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/dinov2/requirements.in # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -232,7 +231,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -292,6 +291,7 @@ typing-extensions==4.12.2 # iopath # lightning-utilities # reactivex + # rich # submitit # torch varname==0.13.3 diff --git a/benchmarks/flops/requirements.cuda.txt b/benchmarks/flops/requirements.cuda.txt index e529152e3..fd027a8fb 100644 --- a/benchmarks/flops/requirements.cuda.txt +++ b/benchmarks/flops/requirements.cuda.txt @@ -95,7 +95,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # torchvision # xformers @@ -111,7 +110,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -158,7 +157,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -172,7 +171,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -204,7 +203,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -248,6 +247,7 @@ typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex + # rich # torch varname==0.13.3 # via diff --git a/benchmarks/geo_gnn/requirements-pre.cuda.txt b/benchmarks/geo_gnn/requirements-pre.cuda.txt index 6c76b0c91..f56bb4988 100644 --- a/benchmarks/geo_gnn/requirements-pre.cuda.txt +++ 
b/benchmarks/geo_gnn/requirements-pre.cuda.txt @@ -62,7 +62,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # xformers nvidia-cublas-cu12==12.1.3.1 @@ -77,7 +76,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -120,7 +119,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -130,7 +129,7 @@ nvidia-nvtx-cu12==12.1.105 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax diff --git a/benchmarks/geo_gnn/requirements.cuda.txt b/benchmarks/geo_gnn/requirements.cuda.txt index 37b77babb..c4ffaa639 100644 --- a/benchmarks/geo_gnn/requirements.cuda.txt +++ b/benchmarks/geo_gnn/requirements.cuda.txt @@ -10,11 +10,11 @@ --find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric @@ -149,7 +149,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # pandas # rdkit # scipy @@ -169,7 +168,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt @@ -225,7 +224,7 @@ nvidia-nccl-cu12==2.20.5 # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt @@ -241,7 +240,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt @@ -250,7 +249,7 @@ ovld==0.3.9 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in @@ -299,7 +298,7 @@ requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -330,7 +329,7 @@ torch-cluster==1.6.3+pt24cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in -torch-geometric==2.6.0 +torch-geometric==2.6.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/geo_gnn/requirements.in @@ -357,8 +356,9 @@ typing-extensions==4.12.2 # -r benchmarks/geo_gnn/requirements-pre.cuda.txt # multidict # reactivex + # rich # torch -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -380,7 +380,7 @@ xformers==0.0.27.post2 # -c .pin/../.pin/constraints-cuda-torch.txt # -r .pin/../constraints/extra/torch.cuda.txt # -r benchmarks/geo_gnn/requirements-pre.cuda.txt -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git 
a/benchmarks/huggingface/requirements.cuda.txt b/benchmarks/huggingface/requirements.cuda.txt index d4323b4af..45e68e325 100644 --- a/benchmarks/huggingface/requirements.cuda.txt +++ b/benchmarks/huggingface/requirements.cuda.txt @@ -51,7 +51,7 @@ giving==0.4.3 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tokenizers @@ -111,7 +111,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # transformers # xformers @@ -127,7 +126,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -174,7 +173,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -188,7 +187,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -236,7 +235,7 @@ requests==2.32.3 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # transformers -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -274,6 +273,7 @@ tqdm==4.66.5 transformers==4.44.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/huggingface/requirements.in triton==3.0.0 # via @@ -284,6 +284,7 @@ typing-extensions==4.12.2 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # reactivex + # rich # torch urllib3==2.2.3 # via diff --git a/benchmarks/lightning/requirements.cuda.txt b/benchmarks/lightning/requirements.cuda.txt index db0745882..04b4eb4b3 100644 --- a/benchmarks/lightning/requirements.cuda.txt +++ b/benchmarks/lightning/requirements.cuda.txt @@ -10,11 +10,11 @@ --find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # fsspec @@ -141,7 +141,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # torchmetrics # torchvision @@ -158,7 +157,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -205,7 +204,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -219,7 +218,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -264,7 +263,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -321,6 +320,7 @@ typing-extensions==4.12.2 # multidict # pytorch-lightning # reactivex + # rich # torch varname==0.13.3 # via @@ -335,7 +335,7 @@ xformers==0.0.27.post2 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # -r .pin/../constraints/extra/torch.cuda.txt -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/llama/requirements.cuda.txt b/benchmarks/llama/requirements.cuda.txt index 1f52de100..0b3188482 100644 --- a/benchmarks/llama/requirements.cuda.txt +++ b/benchmarks/llama/requirements.cuda.txt @@ -10,11 +10,11 @@ --find-links https://data.pyg.org/whl/torch-2.4.0+cu121.html --trusted-host pypi.ngc.nvidia.com -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -51,7 +51,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==3.0.0 +datasets==3.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llama/requirements.in @@ -76,7 +76,7 @@ filelock==3.16.1 # torch # transformers # triton -fire==0.6.0 +fire==0.7.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llama/requirements.in @@ -96,7 +96,7 @@ giving==0.4.3 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -169,7 +169,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # pandas # pyarrow # scipy @@ -187,7 +186,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -234,7 +233,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -248,7 +247,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -262,7 +261,7 @@ packaging==24.1 # datasets # huggingface-hub # transformers -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -311,7 +310,7 @@ requests==2.32.3 # datasets # huggingface-hub # transformers -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -332,7 +331,6 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens - # fire # python-dateutil sympy==1.13.3 # via @@ -361,6 +359,7 @@ tqdm==4.66.5 transformers==4.44.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/llama/requirements.in triton==3.0.0 # via @@ -372,8 +371,9 @@ typing-extensions==4.12.2 # huggingface-hub # multidict # reactivex + # rich # torch -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -398,7 +398,7 @@ xxhash==3.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/llava/requirements.cuda.txt b/benchmarks/llava/requirements.cuda.txt index 91e94c4bf..5c6f9f64b 100644 --- a/benchmarks/llava/requirements.cuda.txt +++ b/benchmarks/llava/requirements.cuda.txt @@ -14,11 +14,11 @@ accelerate==0.34.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llava/requirements.in 
-aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -55,7 +55,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==3.0.0 +datasets==3.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llava/requirements.in @@ -92,7 +92,7 @@ giving==0.4.3 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -167,7 +167,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # pandas # pyarrow # scipy @@ -185,7 +184,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -232,7 +231,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -246,7 +245,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -261,7 +260,7 @@ packaging==24.1 # datasets # huggingface-hub # transformers -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -316,7 +315,7 @@ requests==2.32.3 # datasets # huggingface-hub # transformers -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -358,6 +357,7 @@ tqdm==4.66.5 transformers==4.44.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/llava/requirements.in triton==3.0.0 # via @@ -369,8 +369,9 @@ typing-extensions==4.12.2 # huggingface-hub # multidict # reactivex + # rich # torch -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -395,7 +396,7 @@ xxhash==3.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/llm/requirements.cuda.txt b/benchmarks/llm/requirements.cuda.txt index 3abff6b50..db34901fd 100644 --- a/benchmarks/llm/requirements.cuda.txt +++ b/benchmarks/llm/requirements.cuda.txt @@ -14,11 +14,11 @@ accelerate==0.34.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llm/requirements.in -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -64,7 +64,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==3.0.0 +datasets==3.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torchtune @@ -91,7 +91,7 @@ filelock==3.16.1 # torch # transformers # triton -fire==0.6.0 +fire==0.7.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/llm/requirements.txt @@ -115,7 +115,7 @@ hjson==3.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # argklass -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -199,7 +199,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum 
# pandas # pyarrow # scipy @@ -218,7 +217,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -265,7 +264,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -280,7 +279,7 @@ omegaconf==2.3.0 # -c .pin/../.pin/constraints-cuda-torch.txt # torchtune # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -295,7 +294,7 @@ packaging==24.1 # datasets # huggingface-hub # transformers -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -312,7 +311,7 @@ pyarrow==17.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -pycryptodomex==3.20.0 +pycryptodomex==3.21.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # blobfile @@ -353,7 +352,7 @@ requests==2.32.3 # huggingface-hub # tiktoken # transformers -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -376,7 +375,6 @@ six==1.16.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # asttokens - # fire # python-dateutil sympy==1.13.3 # via @@ -402,13 +400,16 @@ torch==2.4.0+cu121 # accelerate # fairscale # xformers -torchao==0.5.0+cu121 +torchao==0.3.1+cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/llm/requirements.in -torchtune==0.3.0+cu121 + # torchtune +torchtune==0.2.1+cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/llm/requirements.in tqdm==4.66.5 # via @@ -420,6 +421,7 @@ tqdm==4.66.5 transformers==4.44.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/llm/requirements.in triton==3.0.0 # via @@ -431,8 +433,9 @@ typing-extensions==4.12.2 # huggingface-hub # multidict # reactivex + # rich # torch -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -458,7 +461,7 @@ xxhash==3.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/llm/requirements.in b/benchmarks/llm/requirements.in index 36832ad67..a3ab63c07 100644 --- a/benchmarks/llm/requirements.in +++ b/benchmarks/llm/requirements.in @@ -1,5 +1,5 @@ voir>=0.2.19,<0.3 -torchtune +torchtune<0.3.0 torch PyYAML argklass diff --git a/benchmarks/purejaxrl/requirements.cuda.txt b/benchmarks/purejaxrl/requirements.cuda.txt index d495163a9..3f09e47f7 100644 --- a/benchmarks/purejaxrl/requirements.cuda.txt +++ b/benchmarks/purejaxrl/requirements.cuda.txt @@ -32,7 +32,7 @@ argklass==1.4.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/purejaxrl/requirements.in -astroid==3.2.4 +astroid==3.3.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pylint @@ -61,7 +61,7 @@ charset-normalizer==3.3.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # requests -chex==0.1.86 +chex==0.1.87 # via # -c .pin/../.pin/constraints-cuda-torch.txt # distrax @@ -188,7 +188,7 @@ flax==0.9.0 # flashbax # gymnax # navix -fonttools==4.53.1 +fonttools==4.54.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # matplotlib @@ -218,7 +218,7 @@ 
glfw==2.7.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # mujoco -grpcio==1.66.1 +grpcio==1.66.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # brax @@ -411,7 +411,6 @@ numpy==1.26.4 # ml-dtypes # mujoco # navix - # opt-einsum # optax # orbax-checkpoint # pandas @@ -435,7 +434,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -482,7 +481,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -496,7 +495,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -523,7 +522,7 @@ packaging==24.1 # pytest # setuptools-scm # tensorboardx -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # seaborn @@ -574,7 +573,7 @@ pygments==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # rich -pylint==3.2.7 +pylint==3.3.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # navix @@ -621,7 +620,7 @@ requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # wandb -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # flax @@ -643,7 +642,7 @@ seaborn==0.13.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # gymnax -sentry-sdk==2.14.0 +sentry-sdk==2.15.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # wandb @@ -683,13 +682,13 @@ tensorflow-probability==0.24.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # distrax -tensorstore==0.1.65 +tensorstore==0.1.66 # via # -c .pin/../.pin/constraints-cuda-torch.txt # flashbax # flax # orbax-checkpoint -tomli==2.0.1 +tomli==2.0.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # black @@ -732,13 +731,14 @@ typing-extensions==4.12.2 # navix # orbax-checkpoint # reactivex + # rich # torch # tyro tyro==0.8.11 # via # -c .pin/../.pin/constraints-cuda-torch.txt # navix -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -756,7 +756,7 @@ voir==0.2.19 # -c .pin/../.pin/constraints-cuda-torch.txt # -c .pin/../constraints/cuda.txt # -r benchmarks/purejaxrl/requirements.in -wandb==0.18.1 +wandb==0.18.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # navix diff --git a/benchmarks/recursiongfn/requirements.cuda.txt b/benchmarks/recursiongfn/requirements.cuda.txt index 2c852b71d..497f573ab 100644 --- a/benchmarks/recursiongfn/requirements.cuda.txt +++ b/benchmarks/recursiongfn/requirements.cuda.txt @@ -14,11 +14,11 @@ absl-py==2.1.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric @@ -113,7 +113,7 @@ gpytorch==1.13 # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in # botorch -grpcio==1.66.1 +grpcio==1.66.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -199,7 +199,7 @@ multipledispatch==1.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # botorch -ndindex==1.8 +ndindex==1.9.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # blosc2 @@ -222,7 
+222,6 @@ numpy==1.26.4 # jaxtyping # ml-dtypes # numexpr - # opt-einsum # pandas # pyarrow # pyro-ppl @@ -245,7 +244,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -292,7 +291,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -307,7 +306,7 @@ omegaconf==2.3.0 # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -321,7 +320,7 @@ packaging==24.1 # -c .pin/../.pin/constraints-cuda-torch.txt # tables # tensorboard -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in @@ -400,7 +399,7 @@ requests==2.32.3 # -c .pin/../.pin/constraints-cuda-torch.txt # torch-geometric # wandb -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -420,7 +419,7 @@ scipy==1.14.1 # scikit-learn # torch-cluster # torch-sparse -sentry-sdk==2.14.0 +sentry-sdk==2.15.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # wandb @@ -447,7 +446,7 @@ tables==3.10.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in -tensorboard==2.17.1 +tensorboard==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in @@ -471,7 +470,7 @@ torch-cluster==1.6.3+pt24cu121 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in -torch-geometric==2.6.0 +torch-geometric==2.6.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in @@ -503,10 +502,11 @@ typing-extensions==4.12.2 # jaxtyping # multidict # reactivex + # rich # tables # torch # typeguard -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -524,7 +524,7 @@ voir==0.2.19 # -c .pin/../.pin/constraints-cuda-torch.txt # -c .pin/../constraints/cuda.txt # -r benchmarks/recursiongfn/requirements.in -wandb==0.18.1 +wandb==0.18.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/recursiongfn/requirements.in @@ -536,7 +536,7 @@ xformers==0.0.27.post2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r .pin/../constraints/extra/torch.cuda.txt -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/rlhf/requirements.cuda.txt b/benchmarks/rlhf/requirements.cuda.txt index acc448aee..dee2ae27c 100644 --- a/benchmarks/rlhf/requirements.cuda.txt +++ b/benchmarks/rlhf/requirements.cuda.txt @@ -15,11 +15,11 @@ accelerate==0.34.2 # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/rlhf/requirements.in # trl -aiohappyeyeballs==2.4.0 +aiohappyeyeballs==2.4.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp -aiohttp==3.10.5 +aiohttp==3.10.8 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -56,7 +56,7 @@ codefind==0.1.7 # via # -c .pin/../.pin/constraints-cuda-torch.txt # ptera -datasets==3.0.0 +datasets==3.0.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/rlhf/requirements.in @@ -98,7 +98,7 @@ giving==0.4.3 # -c 
.pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # accelerate @@ -172,7 +172,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # pandas # pyarrow # scipy @@ -191,7 +190,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -238,7 +237,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -252,7 +251,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -267,7 +266,7 @@ packaging==24.1 # datasets # huggingface-hub # transformers -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets @@ -318,7 +317,7 @@ requests==2.32.3 # datasets # huggingface-hub # transformers -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tyro @@ -366,15 +365,17 @@ tqdm==4.66.5 transformers==4.44.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/rlhf/requirements.in # trl triton==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -trl==0.11.0 +trl==0.10.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt + # -c .pin/../constraints/cuda.txt # -r benchmarks/rlhf/requirements.in typing-extensions==4.12.2 # via @@ -382,13 +383,14 @@ typing-extensions==4.12.2 # huggingface-hub # multidict # reactivex + # rich # torch # tyro tyro==0.8.11 # via # -c .pin/../.pin/constraints-cuda-torch.txt # trl -tzdata==2024.1 +tzdata==2024.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas @@ -413,7 +415,7 @@ xxhash==3.5.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # datasets -yarl==1.11.1 +yarl==1.13.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # aiohttp diff --git a/benchmarks/timm/requirements.cuda.txt b/benchmarks/timm/requirements.cuda.txt index 1ac873600..b55428950 100644 --- a/benchmarks/timm/requirements.cuda.txt +++ b/benchmarks/timm/requirements.cuda.txt @@ -50,7 +50,7 @@ giving==0.4.3 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/timm/requirements.in @@ -109,7 +109,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # torchvision # xformers @@ -125,7 +124,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -172,7 +171,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -186,7 +185,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -228,7 +227,7 @@ requests==2.32.3 # via # -c 
.pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -272,6 +271,7 @@ typing-extensions==4.12.2 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # reactivex + # rich # torch urllib3==2.2.3 # via diff --git a/benchmarks/torchatari/requirements.cuda.txt b/benchmarks/torchatari/requirements.cuda.txt index 0ed7f915c..1be36a969 100644 --- a/benchmarks/torchatari/requirements.cuda.txt +++ b/benchmarks/torchatari/requirements.cuda.txt @@ -78,7 +78,7 @@ giving==0.4.3 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -grpcio==1.66.1 +grpcio==1.66.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tensorboard @@ -161,7 +161,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # tensorboard # xformers @@ -177,7 +176,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -224,7 +223,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -238,7 +237,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -279,7 +278,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # tyro @@ -302,7 +301,7 @@ sympy==1.13.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -tensorboard==2.17.1 +tensorboard==2.18.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/torchatari/requirements.in @@ -324,7 +323,7 @@ triton==3.0.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # torch -types-protobuf==5.27.0.20240907 +types-protobuf==5.28.0.20240924 # via # -c .pin/../.pin/constraints-cuda-torch.txt # envpool @@ -335,6 +334,7 @@ typing-extensions==4.12.2 # gymnasium # optree # reactivex + # rich # torch # tyro tyro==0.8.11 diff --git a/benchmarks/torchvision/requirements.cuda.txt b/benchmarks/torchvision/requirements.cuda.txt index 3b994c798..108cc0e69 100644 --- a/benchmarks/torchvision/requirements.cuda.txt +++ b/benchmarks/torchvision/requirements.cuda.txt @@ -95,7 +95,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # torchvision # xformers @@ -111,7 +110,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -158,7 +157,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -172,7 +171,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -204,7 +203,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -248,6 +247,7 @@ 
typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex + # rich # torch varname==0.13.3 # via diff --git a/benchmarks/torchvision_ddp/requirements.cuda.txt b/benchmarks/torchvision_ddp/requirements.cuda.txt index 4e6a2a2b8..8572482df 100644 --- a/benchmarks/torchvision_ddp/requirements.cuda.txt +++ b/benchmarks/torchvision_ddp/requirements.cuda.txt @@ -95,7 +95,6 @@ numpy==1.26.4 # jax # jaxlib # ml-dtypes - # opt-einsum # scipy # torchvision # xformers @@ -111,7 +110,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -158,7 +157,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -172,7 +171,7 @@ omegaconf==2.3.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -204,7 +203,7 @@ reactivex==4.0.4 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -248,6 +247,7 @@ typing-extensions==4.12.2 # via # -c .pin/../.pin/constraints-cuda-torch.txt # reactivex + # rich # torch varname==0.13.3 # via diff --git a/benchmarks/vjepa/requirements.cuda.txt b/benchmarks/vjepa/requirements.cuda.txt index 2386bbd24..867c50b53 100644 --- a/benchmarks/vjepa/requirements.cuda.txt +++ b/benchmarks/vjepa/requirements.cuda.txt @@ -18,7 +18,7 @@ asttokens==2.4.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # giving -beartype==0.18.5 +beartype==0.19.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/vjepa/requirements.in @@ -71,7 +71,7 @@ giving==0.4.3 # -c .pin/../.pin/constraints-cuda-torch.txt # ptera # voir -huggingface-hub==0.25.0 +huggingface-hub==0.25.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # timm @@ -133,7 +133,6 @@ numpy==1.26.4 # jaxlib # ml-dtypes # opencv-python - # opt-einsum # pandas # scipy # torchvision @@ -151,7 +150,7 @@ nvidia-cuda-cupti-cu12==12.1.105 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-cuda-nvcc-cu12==12.6.68 +nvidia-cuda-nvcc-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -198,7 +197,7 @@ nvidia-nccl-cu12==2.20.5 # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin # torch -nvidia-nvjitlink-cu12==12.6.68 +nvidia-nvjitlink-cu12==12.6.77 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax-cuda12-plugin @@ -216,7 +215,7 @@ opencv-python==4.10.0.84 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/vjepa/requirements.in -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via # -c .pin/../.pin/constraints-cuda-torch.txt # jax @@ -228,7 +227,7 @@ packaging==24.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -pandas==2.2.2 +pandas==2.2.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # -r benchmarks/vjepa/requirements.in @@ -272,7 +271,7 @@ requests==2.32.3 # via # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub -rich==13.8.1 +rich==13.9.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # voir @@ -327,9 +326,10 @@ typing-extensions==4.12.2 # -c .pin/../.pin/constraints-cuda-torch.txt # huggingface-hub # 
reactivex + # rich # submitit # torch -tzdata==2024.1 # via # -c .pin/../.pin/constraints-cuda-torch.txt # pandas diff --git a/config/base.yaml b/config/base.yaml index b06cbea58..d7926799f 100644 --- a/config/base.yaml +++ b/config/base.yaml @@ -27,7 +27,6 @@ _torchvision: --loader: pytorch --data: "{milabench_data}/FakeImageNet" - _torchvision_ddp: inherits: _defaults definition: ../benchmarks/torchvision_ddp @@ -119,7 +118,6 @@ _timm: --dataset: "FakeImageNet" --workers: "auto({n_worker}, 8)" - _accelerate_opt: inherits: _defaults tags: @@ -156,7 +154,6 @@ _accelerate_opt: use_deepspeed: true num_machines: 1 - fp16: inherits: _flops @@ -398,7 +395,6 @@ brax: --num-minibatches: 32 --num-envs: 8192 - _diffusion: inherits: _defaults definition: ../benchmarks/diffusion @@ -551,13 +547,13 @@ _llm: definition: ../benchmarks/llm install_group: torch - llm-lora-single: inherits: _llm tags: - monogpu plan: method: per_gpu + argv: "{milabench_code}/recipes/lora_finetune_single_device.py": true --config: "{milabench_code}/configs/llama3_8B_lora_single_device.yaml" @@ -619,7 +615,6 @@ llm-lora-ddp-nodes: requires_capabilities: - "len(nodes) >= ${num_machines}" - llm-lora-mp-gpus: inherits: _llm tags: @@ -793,7 +788,6 @@ _llava: method: per_gpu tags: - llm - - monogpu argv: --batch_size: 1 --num_workers: "auto({n_worker}, 4)" @@ -801,6 +795,8 @@ _llava: llava-single: inherits: _llava + tags: + - monogpu plan: method: per_gpu argv: @@ -828,7 +824,6 @@ _rlhf: plan: method: per_gpu tags: - - monogpu - rl - rlhf - llm @@ -842,6 +837,8 @@ _rlhf: rlhf-single: inherits: _rlhf + tags: + - monogpu plan: method: per_gpu @@ -884,6 +881,8 @@ cleanrljax: inherits: _defaults install_group: torch definition: ../benchmarks/cleanrl_jax + tags: + - monogpu plan: method: per_gpu diff --git a/constraints/cuda.txt b/constraints/cuda.txt index eb6bbcedf..49675b577 100644 --- a/constraints/cuda.txt +++ b/constraints/cuda.txt @@ -5,3 +5,14 @@ voir >= 0.2.19 torchcompat >= 1.0.0 gymnax >= 0.0.8 +trl<0.11.0 + +# latest torchtune is slower than before and causes failures # the next version of pytorch seems to work better # so pending a new version of pytorch this is what we get +torchtune<0.3.0 + +# transformers added torchao support recently # but only in its most recent version, which we do not support +transformers<4.45.0 +torchao \ No newline at end of file diff --git a/milabench/_version.py b/milabench/_version.py index 4b49d0506..cdd2418dd 100644 --- a/milabench/_version.py +++ b/milabench/_version.py @@ -1,5 +1,5 @@ """This file is generated, do not modify""" -__tag__ = "v0.1.0-113-g9a5dfe3e" -__commit__ = "9a5dfe3ef36e6baab6584faa3fa939e63ba2aed5" -__date__ = "2024-09-16 09:08:28 -0400" +__tag__ = "v1.0.0_RC1-9-g6d1e1140" +__commit__ = "6d1e114000cc4200ea307330032234db6696e40d" +__date__ = "2024-09-30 14:39:43 -0400" diff --git a/milabench/cli/compare.py b/milabench/cli/compare.py index b2992857c..83f0c59ce 100644 --- a/milabench/cli/compare.py +++ b/milabench/cli/compare.py @@ -15,6 +15,7 @@ class Arguments: last : int = None metric : str = "train_rate" stat : str = "median" + filter : str = None # fmt: on @@ -23,13 +24,15 @@ def arguments(): # [positional: ?]
folder: Option = None + filter: Option & str = None + last: Option & int = None metric: Option & str = "train_rate" stat: Option & str = "median" - return Arguments(folder, last, metric, stat) + return Arguments(folder, last, metric, stat, filter) @tooled @@ -66,7 +69,7 @@ def cli_compare(args=None): if base is not None: args.folder = os.path.join(base, "runs") - runs = fetch_runs(args.folder) + runs = fetch_runs(args.folder, args.filter) for run in runs: all_data = _read_reports(run.path) diff --git a/milabench/compare.py b/milabench/compare.py index e3b88b10c..cae068203 100644 --- a/milabench/compare.py +++ b/milabench/compare.py @@ -21,14 +21,22 @@ def retrieve_datetime_from_name(date): pass -def fetch_runs(folder): +def fetch_runs(folder, filter): + import fnmatch + runs = [] + ignored = 0 for run in os.listdir(folder): + if filter is not None and (not fnmatch.fnmatch(run, filter)): + ignored += 1 + continue + pth = os.path.join(folder, run) if not os.path.isdir(pth): continue if "." in run: - name, date = run.split(".", maxsplit=1) + name, fractional_seconds = run.rsplit(".", maxsplit=1) + name, date = name.rsplit(".", maxsplit=1) date = retrieve_datetime_from_name(date) else: name = run @@ -39,6 +47,8 @@ def fetch_runs(folder): out = _Output(pth, name, date) runs.append(out) + if ignored > 0: + print(f"Ignoring {ignored} runs because of filter {filter}") runs.sort(key=lambda out: out.date) return runs diff --git a/milabench/config.py b/milabench/config.py index ebc041060..039a85cc4 100644 --- a/milabench/config.py +++ b/milabench/config.py @@ -11,6 +11,8 @@ config_global = contextvars.ContextVar("config", default=None) execution_count = (0, 0) +_MONITOR_TAGS = {"monogpu", "multigpu", "multinode"} + def set_run_count(total_run, total_bench): global execution_count @@ -80,6 +82,13 @@ def finalize_config(name, bench_config): pack = (XPath(bench_config["config_base"]) / pack).resolve() bench_config["definition"] = str(pack) + if not name.startswith("_") and name != "*": + _tags = set(bench_config["tags"]) + _monitor_tags = _tags & _MONITOR_TAGS + assert len(_monitor_tags) == 1, ( + f"Bench {name} should have exactly one monitor tag. 
Found {_monitor_tags}" ) + bench_config["tag"] = [bench_config["name"]] bench_config = OmegaConf.to_object(OmegaConf.create(bench_config)) diff --git a/milabench/report.py b/milabench/report.py index aebcaf093..c54ed8ddd 100644 --- a/milabench/report.py +++ b/milabench/report.py @@ -342,6 +342,35 @@ def short_meta(out, meta): out.print(Table(stats)) +def to_latex(df): + from dataclasses import dataclass + from .system import option + + default_columns = [ + "ngpu", + "perf", + "sem%", + "std%" + ] + + @dataclass + class LatexTable: + output: str = option("latex.output", str, None) + columns: str = option("latex.columns", str, ",".join(default_columns)) + + options = LatexTable() + + columns = options.columns.split(",") + + df = df[columns] + + if options.output is not None: + with open(options.output, "w") as fp: + txt = df.to_latex(formatters=_formatters, escape=False) + txt = txt.replace("%", "\\%").replace("_", "\\_") + fp.write(txt) + + @error_guard({}) def make_report( summary: dict[str, Summary], @@ -376,7 +405,10 @@ def make_report( out.section("Breakdown") # Reorder columns - out.print(normalize_dataframe(df)) + normalized = normalize_dataframe(df) + out.print(normalized) + + to_latex(normalized) out.section("Scores") diff --git a/scripts/article/run_cuda.sh b/scripts/article/run_cuda.sh index 7328ca54b..ba4c1ae38 100644 --- a/scripts/article/run_cuda.sh +++ b/scripts/article/run_cuda.sh @@ -49,7 +49,7 @@ install_prepare() { # Install milabench's benchmarks in their venv # # pip install torch - # milabench pin --variant cuda --from-scratch $ARGS + milabench pin --variant cuda --from-scratch $ARGS milabench install --system $MILABENCH_WORDIR/system.yaml $ARGS which pip @@ -84,8 +84,19 @@ if [ "$MILABENCH_PREPARE" -eq 0 ]; then . $MILABENCH_WORDIR/env/bin/activate - milabench install --system $MILABENCH_WORDIR/system.yaml - # milabench prepare --system $MILABENCH_WORDIR/system.yaml $ARGS + # pip install torch + # milabench pin --variant cuda --from-scratch + # rm -rf $MILABENCH_WORDIR/results/venv/ + # rm -rf $MILABENCH_WORDIR/results/extra + # milabench install --system $MILABENCH_WORDIR/system.yaml + milabench prepare --system $MILABENCH_WORDIR/system.yaml $ARGS + + ( + . $BENCHMARK_VENV/bin/activate + which pip + # pip uninstall torchao -y + # pip install torchao --no-input + ) # pip install torch # milabench pin --variant cuda --from-scratch
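
A minimal sketch of the new run filter added to fetch_runs in the milabench/compare.py hunk above; the folder names below are hypothetical (the "<name>.<date>.<fractional_seconds>" shape is inferred from the rsplit change), and the matching semantics are plain fnmatch:

import fnmatch

# Hypothetical run folder names of the form "<name>.<date>.<fractional_seconds>".
runs = [
    "cuda-node1.2024-09-30_14-39-43.123456",
    "rocm-node1.2024-09-29_10-00-00.654321",
]

# Same check fetch_runs now applies; `pattern` would come in through
# the new `milabench compare --filter <pattern>` argument.
pattern = "cuda*"
kept = [run for run in runs if fnmatch.fnmatch(run, pattern)]
print(kept)  # ['cuda-node1.2024-09-30_14-39-43.123456']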
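
The milabench/config.py hunk makes monitor tags mandatory: every concrete bench entry (name not starting with "_" and not "*") must carry exactly one of monogpu, multigpu, or multinode, which is why config/base.yaml moves monogpu from the _llava and _rlhf templates down to llava-single and rlhf-single. A standalone sketch of that check, with hypothetical tag sets:

_MONITOR_TAGS = {"monogpu", "multigpu", "multinode"}

def check_monitor_tags(name, tags):
    # Templates ("_foo") and the wildcard entry are exempt, as in finalize_config.
    if name.startswith("_") or name == "*":
        return
    found = set(tags) & _MONITOR_TAGS
    assert len(found) == 1, f"Bench {name} should have exactly one monitor tag. Found {found}"

check_monitor_tags("llava-single", ["llm", "monogpu"])  # passes
check_monitor_tags("_llava", ["llm"])                   # skipped: template entry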