From 1814c47cbf4326493ae03365270e5a766829bc09 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 13:54:07 +0100 Subject: [PATCH 01/36] moco vit --- .../pretrain/imagenet-100/mocov3_vit.sh | 41 +++++++++ solo/methods/mocov3.py | 89 ++++++++++++++----- 2 files changed, 106 insertions(+), 24 deletions(-) create mode 100644 bash_files/pretrain/imagenet-100/mocov3_vit.sh diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit.sh b/bash_files/pretrain/imagenet-100/mocov3_vit.sh new file mode 100644 index 00000000..bd1cde0b --- /dev/null +++ b/bash_files/pretrain/imagenet-100/mocov3_vit.sh @@ -0,0 +1,41 @@ +python3 main_pretrain.py \ + --dataset imagenet100 \ + --backbone vit_small \ + --data_dir /datasets \ + --train_dir imagenet-100/train \ + --val_dir imagenet-100/val \ + --max_epochs 400 \ + --warmup_epochs 40 \ + --devices 0,1 \ + --accelerator gpu \ + --strategy ddp \ + --sync_batchnorm \ + --precision 16 \ + --optimizer adamw \ + --scheduler warmup_cosine \ + --lr 1.5e-4 \ + --classifier_lr 1.5e-4 \ + --weight_decay 0.1 \ + --batch_size 128 \ + --num_workers 4 \ + --dali \ + --brightness 0.4 \ + --contrast 0.4 \ + --saturation 0.2 \ + --hue 0.1 \ + --gaussian_prob 1.0 0.1 \ + --solarization_prob 0.0 0.2 \ + --min_scale 0.08 \ + --num_crops_per_aug 1 1 \ + --name mocov3-vit-400ep-imagenet100 \ + --project solo-learn \ + --entity unitn-mhug \ + --save_checkpoint \ + --wandb \ + --auto_resume \ + --method mocov3 \ + --proj_hidden_dim 4096 \ + --pred_hidden_dim 4096 \ + --temperature 0.2 \ + --base_tau_momentum 0.99 \ + --final_tau_momentum 1.0 diff --git a/solo/methods/mocov3.py b/solo/methods/mocov3.py index c04af4ac..1b2157a1 100644 --- a/solo/methods/mocov3.py +++ b/solo/methods/mocov3.py @@ -49,32 +49,73 @@ def __init__( self.temperature = temperature - # projector - self.projector = nn.Sequential( - nn.Linear(self.features_dim, proj_hidden_dim), - nn.BatchNorm1d(proj_hidden_dim), - nn.ReLU(), - nn.Linear(proj_hidden_dim, proj_output_dim), - nn.BatchNorm1d(proj_output_dim, affine=False), - ) - - # momentum projector - self.momentum_projector = nn.Sequential( - nn.Linear(self.features_dim, proj_hidden_dim), - nn.BatchNorm1d(proj_hidden_dim), - nn.ReLU(), - nn.Linear(proj_hidden_dim, proj_output_dim), - nn.BatchNorm1d(proj_output_dim, affine=False), - ) + if "resnet" in self.backbone_name: + # projector + self.projector = self._build_mlp( + 2, + self.features_dim, + proj_hidden_dim, + proj_output_dim, + ) + # momentum projector + self.momentum_projector = self._build_mlp( + 2, + self.features_dim, + proj_hidden_dim, + proj_output_dim, + ) + + # predictor + self.predictor = self._build_mlp( + 2, + proj_output_dim, + pred_hidden_dim, + proj_output_dim, + last_bn=False, + ) + else: + # specifically for ViT but allow all the other backbones + # projector + self.projector = self._build_mlp( + 3, + self.features_dim, + proj_hidden_dim, + proj_output_dim, + ) + # momentum projector + self.momentum_projector = self._build_mlp( + 3, + self.features_dim, + proj_hidden_dim, + proj_output_dim, + ) + + # predictor + self.predictor = self._build_mlp( + 2, + proj_output_dim, + pred_hidden_dim, + proj_output_dim, + ) + initialize_momentum_params(self.projector, self.momentum_projector) - # predictor - self.predictor = nn.Sequential( - nn.Linear(proj_output_dim, pred_hidden_dim), - nn.BatchNorm1d(pred_hidden_dim), - nn.ReLU(), - nn.Linear(pred_hidden_dim, proj_output_dim), - ) + def _build_mlp(self, num_layers, input_dim, mlp_dim, output_dim, last_bn=True): + mlp = [] + for l in 
range(num_layers): + dim1 = input_dim if l == 0 else mlp_dim + dim2 = output_dim if l == num_layers - 1 else mlp_dim + + mlp.append(nn.Linear(dim1, dim2, bias=False)) + + if l < num_layers - 1: + mlp.append(nn.BatchNorm1d(dim2)) + mlp.append(nn.ReLU(inplace=True)) + elif last_bn: + # follow SimCLR's design + mlp.append(nn.BatchNorm1d(dim2, affine=False)) + + return nn.Sequential(*mlp) @staticmethod def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser: From 7acb1f8368c39aa5fd9ad96da06db33a323cc82f Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 16:25:44 +0100 Subject: [PATCH 02/36] wip --- main_linear.py | 28 +--- main_pretrain.py | 3 + solo/args/dataset.py | 7 + solo/methods/base.py | 2 + solo/utils/__init__.py | 2 + solo/utils/classification_dataloader.py | 23 +++- solo/utils/convert_imgfolder_to_h5.py | 20 +++ .../dataset_subset/imagenet100_classes.txt | 1 + solo/utils/h5_dataset.py | 129 ++++++++++++++++++ solo/utils/misc.py | 24 +++- solo/utils/pretrain_dataloader.py | 19 ++- 11 files changed, 217 insertions(+), 41 deletions(-) create mode 100644 solo/utils/convert_imgfolder_to_h5.py create mode 100644 solo/utils/dataset_subset/imagenet100_classes.txt create mode 100644 solo/utils/h5_dataset.py diff --git a/main_linear.py b/main_linear.py index 73ab387a..689a7ae6 100644 --- a/main_linear.py +++ b/main_linear.py @@ -26,21 +26,9 @@ from pytorch_lightning.callbacks import LearningRateMonitor from pytorch_lightning.loggers import WandbLogger from pytorch_lightning.strategies.ddp import DDPStrategy -from torchvision.models import resnet18, resnet50 - from solo.args.setup import parse_args_linear from solo.methods.base import BaseMethod from solo.utils.auto_resumer import AutoResumer -from solo.utils.backbones import ( - swin_base, - swin_large, - swin_small, - swin_tiny, - vit_base, - vit_large, - vit_small, - vit_tiny, -) from solo.utils.misc import make_contiguous try: @@ -59,18 +47,7 @@ def main(): args = parse_args_linear() assert args.backbone in BaseMethod._BACKBONES - backbone_model = { - "resnet18": resnet18, - "resnet50": resnet50, - "vit_tiny": vit_tiny, - "vit_small": vit_small, - "vit_base": vit_base, - "vit_large": vit_large, - "swin_tiny": swin_tiny, - "swin_small": swin_small, - "swin_base": swin_base, - "swin_large": swin_large, - }[args.backbone] + backbone_model = BaseMethod._BACKBONES[args.backbone] # initialize backbone kwargs = args.backbone_args @@ -117,10 +94,13 @@ def main(): data_dir=args.data_dir, train_dir=args.train_dir, val_dir=args.val_dir, + train_h5_path=args.train_h5_path, + val_h5_path=args.val_h5_path, batch_size=args.batch_size, num_workers=args.num_workers, data_fraction=args.data_fraction, ) + if args.dali: assert ( _dali_avaliable diff --git a/main_pretrain.py b/main_pretrain.py index 7e5eb939..789a831d 100644 --- a/main_pretrain.py +++ b/main_pretrain.py @@ -79,6 +79,8 @@ def main(): data_dir=args.data_dir, train_dir=args.train_dir, val_dir=args.val_dir, + train_h5_path=args.train_h5_path, + val_h5_path=args.val_h5_path, batch_size=args.batch_size, num_workers=args.num_workers, ) @@ -123,6 +125,7 @@ def main(): data_dir=args.data_dir, train_dir=args.train_dir, no_labels=args.no_labels, + train_h5_path=args.train_h5_path, data_fraction=args.data_fraction, ) train_loader = prepare_dataloader( diff --git a/solo/args/dataset.py b/solo/args/dataset.py index bc2a2008..c4f1d8cd 100644 --- a/solo/args/dataset.py +++ b/solo/args/dataset.py @@ -44,12 +44,19 @@ def dataset_args(parser: 
ArgumentParser): parser.add_argument("--train_dir", type=Path, default=None) parser.add_argument("--val_dir", type=Path, default=None) + # h5 files + parser.add_argument("--train_h5_path", type=str, default=None) + parser.add_argument("--val_h5_path", type=str, default=None) + # percentage of data used from training, leave -1.0 to use all data available parser.add_argument("--data_fraction", default=-1.0, type=float) # dali (imagenet-100/imagenet/custom only) parser.add_argument("--dali", action="store_true") + # manually bypass all checks for dataset size by providing the correct size by hand + parser.add_argument("--total_dataset_size", default=None, type=int) + def augmentations_args(parser: ArgumentParser): """Adds augmentation-related arguments to a parser. diff --git a/solo/methods/base.py b/solo/methods/base.py index f29ccd59..54722dd0 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -361,12 +361,14 @@ def num_training_steps(self) -> int: folder = None no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) + h5py_file = self.extra_args.get("train_h5_path", None) dataset_size = compute_dataset_size( dataset=dataset, folder=folder, train=True, no_labels=no_labels, + h5py_file=h5py_file, data_fraction=data_fraction, ) except: diff --git a/solo/utils/__init__.py b/solo/utils/__init__.py index fd92173e..98dd8f00 100644 --- a/solo/utils/__init__.py +++ b/solo/utils/__init__.py @@ -20,6 +20,7 @@ from solo.utils import ( checkpointer, classification_dataloader, + h5_dataset, knn, lars, metrics, @@ -32,6 +33,7 @@ __all__ = [ "classification_dataloader", "pretrain_dataloader", + "h5_dataset", "checkpointer", "knn", "misc", diff --git a/solo/utils/classification_dataloader.py b/solo/utils/classification_dataloader.py index 33d165af..5fffbffa 100644 --- a/solo/utils/classification_dataloader.py +++ b/solo/utils/classification_dataloader.py @@ -22,6 +22,7 @@ from typing import Callable, Optional, Tuple, Union import torchvision +from solo.utils.h5_dataset import H5Dataset from torch import nn from torch.utils.data import DataLoader, Dataset from torchvision import transforms @@ -146,6 +147,8 @@ def prepare_datasets( data_dir: Optional[Union[str, Path]] = None, train_dir: Optional[Union[str, Path]] = None, val_dir: Optional[Union[str, Path]] = None, + train_h5_path: Optional[str] = None, + val_h5_path: Optional[str] = None, download: bool = True, data_fraction: float = -1.0, ) -> Tuple[Dataset, Dataset]: @@ -158,6 +161,8 @@ def prepare_datasets( data_dir Optional[Union[str, Path]]: path where to download/locate the dataset. train_dir Optional[Union[str, Path]]: subpath where the training data is located. val_dir Optional[Union[str, Path]]: subpath where the validation data is located. + train_h5_path Optional[str]: path to the train h5 dataset file, if it exists. + val_h5_path Optional[str]: path to the val h5 dataset file, if it exists. data_fraction (Optional[float]): percentage of data to use. Use all data when set to -1.0. Defaults to -1.0. 
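# --- editor's note (not part of the patch): a minimal usage sketch for the new
# train_h5_path/val_h5_path arguments added above. The /datasets root and the
# h5 file names are assumptions, and the transform is a stand-in for whatever
# torchvision pipeline the caller already uses.
from pathlib import Path

from torchvision import transforms

from solo.utils.classification_dataloader import prepare_datasets

T = transforms.Compose(
    [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor()]
)
train_dataset, val_dataset = prepare_datasets(
    "imagenet100",
    T_train=T,
    T_val=T,
    data_dir=Path("/datasets"),  # the h5 paths below are resolved against this root
    train_h5_path="imagenet-100/train.h5",
    val_h5_path="imagenet-100/val.h5",
)
print(len(train_dataset), len(val_dataset))
# --- end editor's note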
@@ -214,11 +219,19 @@ def prepare_datasets( ) elif dataset in ["imagenet", "imagenet100", "custom"]: - train_dir = data_dir / train_dir - val_dir = data_dir / val_dir - - train_dataset = ImageFolder(train_dir, T_train) - val_dataset = ImageFolder(val_dir, T_val) + if train_h5_path: + train_h5_path = data_dir / train_h5_path + train_dataset = H5Dataset(dataset, train_h5_path, T_train) + else: + train_dir = data_dir / train_dir + train_dataset = ImageFolder(train_dir, T_train) + + if val_h5_path: + val_h5_path = data_dir / val_h5_path + val_dataset = H5Dataset(dataset, val_h5_path, T_val) + else: + val_dir = data_dir / val_dir + val_dataset = ImageFolder(val_dir, T_val) if data_fraction > 0: assert data_fraction < 1, "Only use data_fraction for values smaller than 1." diff --git a/solo/utils/convert_imgfolder_to_h5.py b/solo/utils/convert_imgfolder_to_h5.py new file mode 100644 index 00000000..2f418c1e --- /dev/null +++ b/solo/utils/convert_imgfolder_to_h5.py @@ -0,0 +1,20 @@ +# Copyright 2022 solo-learn development team. + +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in +# the Software without restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the +# Software, and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies +# or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. 
+ +# TODO diff --git a/solo/utils/dataset_subset/imagenet100_classes.txt b/solo/utils/dataset_subset/imagenet100_classes.txt new file mode 100644 index 00000000..0615ff9f --- /dev/null +++ b/solo/utils/dataset_subset/imagenet100_classes.txt @@ -0,0 +1 @@ +n02869837 n01749939 n02488291 n02107142 n13037406 n02091831 n04517823 n04589890 n03062245 n01773797 n01735189 n07831146 n07753275 n03085013 n04485082 n02105505 n01983481 n02788148 n03530642 n04435653 n02086910 n02859443 n13040303 n03594734 n02085620 n02099849 n01558993 n04493381 n02109047 n04111531 n02877765 n04429376 n02009229 n01978455 n02106550 n01820546 n01692333 n07714571 n02974003 n02114855 n03785016 n03764736 n03775546 n02087046 n07836838 n04099969 n04592741 n03891251 n02701002 n03379051 n02259212 n07715103 n03947888 n04026417 n02326432 n03637318 n01980166 n02113799 n02086240 n03903868 n02483362 n04127249 n02089973 n03017168 n02093428 n02804414 n02396427 n04418357 n02172182 n01729322 n02113978 n03787032 n02089867 n02119022 n03777754 n04238763 n02231487 n03032252 n02138441 n02104029 n03837869 n03494278 n04136333 n03794056 n03492542 n02018207 n04067472 n03930630 n03584829 n02123045 n04229816 n02100583 n03642806 n04336792 n03259280 n02116738 n02108089 n03424325 n01855672 n02090622 \ No newline at end of file diff --git a/solo/utils/h5_dataset.py b/solo/utils/h5_dataset.py new file mode 100644 index 00000000..50ffd5a1 --- /dev/null +++ b/solo/utils/h5_dataset.py @@ -0,0 +1,129 @@ +# Copyright 2022 solo-learn development team. + +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in +# the Software without restriction, including without limitation the rights to use, +# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the +# Software, and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: + +# The above copyright notice and this permission notice shall be included in all copies +# or substantial portions of the Software. + +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +# DEALINGS IN THE SOFTWARE. + + +import io +import os +from pathlib import Path +from typing import Callable + +import h5py +from PIL import Image +from pyparsing import Optional +from torch.utils.data import Dataset + + +class H5Dataset(Dataset): + def __init__( + self, + dataset: str, + h5_path: str, + transform: Optional[Callable] = None, + ): + """H5 Dataset. + The dataset assumes that data is organized as: + "class_name" + "img_name" + "img_name" + "img_name" + "class_name" + "img_name" + "img_name" + "img_name" + + Args: + dataset (str): dataset name. + h5_path (str): path of the h5 file. + transform (Optional[Callable]): pipeline of transformations. Defaults to None. + pre_parsed_paths_file Optional[str]: path of the pre-parsed paths files. 
+ This allows the user to specify the file names and their classes in this format: + {class}/{file} CLASS-ID + {class}/{file} CLASS-ID + {class}/{file} CLASS-ID + If this is None, this object will automatically find all the files, + but might take a while if the dataset is large. Defaults to None. + """ + + self.h5_path = h5_path + self.h5_file = None + self.transform = transform + + assert dataset in ["imagenet100", "imagenet"] + + self._load_h5_data_info() + # filter if needed to avoid having a copy of imagenet100 data + if dataset == "imagenet100": + script_folder = Path(os.path.dirname(__file__)) + classes_file = script_folder / "dataset_subset" / "imagenet100_classes.txt" + with open(classes_file, "r") as f: + self.classes = f.readline().strip().split() + self.classes = sorted(self.classes) + self.class_to_idx = {cls_name: i for i, cls_name in enumerate(self.classes)} + + self._data = filter(lambda entry: entry[0] in self.classes, self._data) + + def _load_h5_data_info(self): + self._data = [] + h5_data_info_file = os.path.splitext(self.h5_path + ".txt") + if not os.path.isfile(h5_data_info_file): + temp_h5_file = h5py.File(self.h5_path, "r") + + # collect data from the h5 file directly + self.classes, self.class_to_idx = self._find_classes(self.h5_file) + for class_name in self.classes: + y = self.class_to_idx[class_name] + for img_name in temp_h5_file[class_name].keys(): + self._data.append((class_name, img_name, y)) + + # save the info locally to speed up sequential executions + with open(h5_data_info_file, "w") as f: + for class_name, img_name, y in self._data: + f.write(f"{class_name}/{img_name} {y}\n") + else: + # load data info file that was already generated by previous runs + with open(h5_data_info_file, "r") as f: + for line in f: + class_name_img, y = line.strip().split(" ") + class_name, img_name = class_name_img.split("/") + self._data.append((class_name, img_name, y)) + + def _find_classes(self, h5_file: h5py.File): + classes = sorted(h5_file.keys()) + class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)} + return classes, class_to_idx + + def _load_img(self, class_name: str, img: str): + img = self.h5_file[class_name][img][:] + img = Image.open(io.BytesIO(img)).convert("RGB") + return img + + def __getitem__(self, index: int): + if self.h5_file is None: + self.h5_file = h5py.File(self.h5_path, "r") + + class_name, img, y = self._data[index] + + x = self._load_img(class_name, img) + if self.transform: + x = self.transform(x) + + return x, y + + def __len__(self): + return len(self._data) diff --git a/solo/utils/misc.py b/solo/utils/misc.py index 64daff4f..6b794056 100644 --- a/solo/utils/misc.py +++ b/solo/utils/misc.py @@ -26,6 +26,8 @@ import torch.distributed as dist import torch.nn as nn +from solo.utils.h5_dataset import H5Dataset + def _1d_filter(tensor: torch.Tensor) -> torch.Tensor: return tensor.isfinite() @@ -205,9 +207,10 @@ def gather(X, dim=0): def compute_dataset_size( dataset: Optional[str] = None, - train: Optional[str] = True, + train: Optional[bool] = True, folder: Optional[str] = None, no_labels: Optional[bool] = False, + h5py_file: Optional[str] = None, data_fraction: Optional[float] = -1, ): """Utility function to get the dataset size. If using cifar or stl, @@ -217,16 +220,19 @@ def compute_dataset_size( specify if it has labels or not with the no_labels flag. Args: - folder (Optional[str], optional): path to the ImageFolder. Defaults to None. 
- dataset (Optional[str], optional): dataset size for predefined datasets + folder (Optional[str]): path to the ImageFolder. Defaults to None. + dataset (Optional[str]): dataset size for predefined datasets [cifar10, cifar100, stl10]. Defaults to None. - train (Optional[str], optional): either train dataset or validation. Defaults to True. - no_labels (Optional[bool], optional): if the dataset has no labels. Defaults to False. - data_fraction (Optional[float], optional): amount of data to use. Defaults to -1. + train (Optional[bool]): train dataset flag. Defaults to True. + no_labels (Optional[bool]): if the dataset has no labels. Defaults to False. + h5py_file (Optional[str]): if using an h5py file, create a dummy H5Dataset to count the number of images. + Defaults to None. + data_fraction (Optional[float]): amount of data to use. Defaults to -1. Returns: - _type_: _description_ + int: size of the dataset """ + DATASET_SIZES = { "cifar10": {"train": 50_000, "val": 10_000}, "cifar100": {"train": 50_000, "val": 10_000}, @@ -237,6 +243,9 @@ def compute_dataset_size( if dataset is not None: size = DATASET_SIZES.get(dataset.lower(), {}).get("train" if train else "val", None) + if h5py_file is not None: + size = len(H5Dataset(dataset, h5py_file)) + if size is None: if no_labels: size = len(os.listdir(folder)) @@ -247,6 +256,7 @@ def compute_dataset_size( if data_fraction != -1: size = int(size * data_fraction) + return size diff --git a/solo/utils/pretrain_dataloader.py b/solo/utils/pretrain_dataloader.py index 224a0c0d..c3ec6e22 100644 --- a/solo/utils/pretrain_dataloader.py +++ b/solo/utils/pretrain_dataloader.py @@ -30,6 +30,8 @@ from torchvision import transforms from torchvision.datasets import STL10, ImageFolder +from solo.utils.h5_dataset import H5Dataset + def dataset_with_index(DatasetClass: Type[Dataset]) -> Type[Dataset]: """Factory for datasets that also returns the data index. @@ -498,6 +500,7 @@ def prepare_datasets( data_dir: Optional[Union[str, Path]] = None, train_dir: Optional[Union[str, Path]] = None, no_labels: Optional[Union[str, Path]] = False, + train_h5_path: Optional[str] = None, download: bool = True, data_fraction: float = -1.0, ) -> Dataset: @@ -506,11 +509,12 @@ def prepare_datasets( Args: dataset (str): the name of the dataset. transform (Callable): a transformation. - data_dir (Optional[Union[str, Path]], optional): the directory to load data from. + data_dir (Optional[Union[str, Path]]): the directory to load data from. Defaults to None. - train_dir (Optional[Union[str, Path]], optional): training data directory + train_dir (Optional[Union[str, Path]]): training data directory to be appended to data_dir. Defaults to None. - no_labels (Optional[bool], optional): if the custom dataset has no labels. + no_labels (Optional[bool]): if the custom dataset has no labels. + train_h5_path Optional[str]: path to the train h5 dataset file, if it exists. data_fraction (Optional[float]): percentage of data to use. Use all data when set to -1.0. Defaults to -1.0. 
Returns: @@ -544,8 +548,13 @@ def prepare_datasets( ) elif dataset in ["imagenet", "imagenet100"]: - train_dir = data_dir / train_dir - train_dataset = dataset_with_index(ImageFolder)(train_dir, transform) + if train_h5_path: + train_h5_path = data_dir / train_h5_path + train_dataset = dataset_with_index(H5Dataset)(dataset, train_h5_path, transform) + + else: + train_dir = data_dir / train_dir + train_dataset = dataset_with_index(ImageFolder)(train_dir, transform) elif dataset == "custom": train_dir = data_dir / train_dir From e038f917cada330aec9ff7326c6329ddc2ffe1f4 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 16:26:13 +0100 Subject: [PATCH 03/36] wip --- requirements.txt | 1 + setup.py | 1 + 2 files changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index a0005de6..744b6410 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,3 +9,4 @@ wandb scipy timm scikit-learn +h5py \ No newline at end of file diff --git a/setup.py b/setup.py index 2ad55a3d..278e8e87 100644 --- a/setup.py +++ b/setup.py @@ -55,6 +55,7 @@ def parse_requirements(path): "scipy", "timm", "scikit-learn", + "h5py", ], extras_require=EXTRA_REQUIREMENTS, dependency_links=["https://developer.download.nvidia.com/compute/redist"], From c9efc6c40768fef0f7f90b154ce1832d119accd0 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 16:38:35 +0100 Subject: [PATCH 04/36] script --- .../pretrain/imagenet-100/mocov3_vit.sh | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 bash_files/pretrain/imagenet-100/mocov3_vit.sh diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit.sh b/bash_files/pretrain/imagenet-100/mocov3_vit.sh new file mode 100644 index 00000000..921c89ac --- /dev/null +++ b/bash_files/pretrain/imagenet-100/mocov3_vit.sh @@ -0,0 +1,39 @@ +ython3 main_pretrain.py \ + --dataset imagenet100 \ + --backbone moco_vit_small \ + --data_dir /nfs/datasets/imagenet1k \ + --train_h5_path train.h5 \ + --val_h5_path val.h5 \ + --max_epochs 400 \ + --devices 6,7 \ + --accelerator gpu \ + --strategy ddp \ + --sync_batchnorm \ + --precision 16 \ + --optimizer adamw \ + --eta_lars 0.02 \ + --exclude_bias_n_norm \ + --scheduler warmup_cosine \ + --lr 1.5e-4 \ + --classifier_lr 1.5e-4 \ + --weight_decay 0.1 \ + --batch_size 32 \ + --num_workers 8 \ + --brightness 0.4 \ + --contrast 0.4 \ + --saturation 0.2 \ + --hue 0.1 \ + --gaussian_prob 1.0 0.1 \ + --solarization_prob 0.0 0.2 \ + --num_crops_per_aug 1 1 \ + --name mocov3-400ep-imagenet100 \ + --project solo-learn \ + --wandb \ + --save_checkpoint \ + --auto_resume \ + --method mocov3 \ + --proj_hidden_dim 4096 \ + --pred_hidden_dim 4096 \ + --temperature 0.2 \ + --base_tau_momentum 0.99 \ + --final_tau_momentum 1.0 From 6cc4b1ad252ddef4f45adca4c92228c59d18d128 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 16:52:55 +0100 Subject: [PATCH 05/36] fixes --- bash_files/pretrain/imagenet-100/mocov3_vit.sh | 6 +++--- solo/utils/h5_dataset.py | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit.sh b/bash_files/pretrain/imagenet-100/mocov3_vit.sh index 921c89ac..1e104ec3 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit.sh +++ b/bash_files/pretrain/imagenet-100/mocov3_vit.sh @@ -1,11 +1,11 @@ -ython3 main_pretrain.py \ +python3 main_pretrain.py \ --dataset imagenet100 \ - --backbone moco_vit_small \ + --backbone vit_small \ --data_dir /nfs/datasets/imagenet1k \ --train_h5_path train.h5 \ --val_h5_path val.h5 \ 
--max_epochs 400 \ - --devices 6,7 \ + --devices 0,1 \ --accelerator gpu \ --strategy ddp \ --sync_batchnorm \ diff --git a/solo/utils/h5_dataset.py b/solo/utils/h5_dataset.py index 50ffd5a1..704f03ae 100644 --- a/solo/utils/h5_dataset.py +++ b/solo/utils/h5_dataset.py @@ -21,11 +21,10 @@ import io import os from pathlib import Path -from typing import Callable +from typing import Callable, Optional import h5py from PIL import Image -from pyparsing import Optional from torch.utils.data import Dataset @@ -50,7 +49,7 @@ def __init__( Args: dataset (str): dataset name. h5_path (str): path of the h5 file. - transform (Optional[Callable]): pipeline of transformations. Defaults to None. + transform (Callable): pipeline of transformations. Defaults to None. pre_parsed_paths_file Optional[str]: path of the pre-parsed paths files. This allows the user to specify the file names and their classes in this format: {class}/{file} CLASS-ID @@ -67,6 +66,7 @@ def __init__( assert dataset in ["imagenet100", "imagenet"] self._load_h5_data_info() + # filter if needed to avoid having a copy of imagenet100 data if dataset == "imagenet100": script_folder = Path(os.path.dirname(__file__)) @@ -80,7 +80,7 @@ def __init__( def _load_h5_data_info(self): self._data = [] - h5_data_info_file = os.path.splitext(self.h5_path + ".txt") + h5_data_info_file = os.path.splitext(self.h5_path)[0] + ".txt" if not os.path.isfile(h5_data_info_file): temp_h5_file = h5py.File(self.h5_path, "r") From 80972d1b648597fe110a4c1085a28f75d4dcfc9a Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 16:53:46 +0100 Subject: [PATCH 06/36] fixes --- solo/utils/h5_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solo/utils/h5_dataset.py b/solo/utils/h5_dataset.py index 704f03ae..9b2fec64 100644 --- a/solo/utils/h5_dataset.py +++ b/solo/utils/h5_dataset.py @@ -76,7 +76,7 @@ def __init__( self.classes = sorted(self.classes) self.class_to_idx = {cls_name: i for i, cls_name in enumerate(self.classes)} - self._data = filter(lambda entry: entry[0] in self.classes, self._data) + self._data = list(filter(lambda entry: entry[0] in self.classes, self._data)) def _load_h5_data_info(self): self._data = [] From ece07e7edaee2afa4af4d464611a3cf3b963717f Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 16:56:12 +0100 Subject: [PATCH 07/36] fixes --- solo/methods/base.py | 2 +- solo/methods/linear.py | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/solo/methods/base.py b/solo/methods/base.py index 54722dd0..cd73bf43 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -356,7 +356,7 @@ def num_training_steps(self) -> int: if dataset not in ["cifar10", "cifar100", "stl10"]: data_dir = self.extra_args.get("data_dir", ".") train_dir = self.extra_args.get("train_dir", "train") - folder = os.path.join(data_dir, train_dir) + folder = os.path.join(data_dir, str(train_dir)) else: folder = None no_labels = self.extra_args.get("no_labels", False) diff --git a/solo/methods/linear.py b/solo/methods/linear.py index 459a90cf..824a2f53 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -183,17 +183,19 @@ def num_training_steps(self) -> int: if dataset not in ["cifar10", "cifar100", "stl10"]: data_dir = self.extra_args.get("data_dir", ".") train_dir = self.extra_args.get("train_dir", "train") - folder = os.path.join(data_dir, train_dir) + folder = os.path.join(data_dir, str(train_dir)) else: folder = None no_labels = self.extra_args.get("no_labels", False) 
data_fraction = self.extra_args.get("data_fraction", -1.0) + h5py_file = self.extra_args.get("train_h5_path", None) dataset_size = compute_dataset_size( dataset=dataset, folder=folder, train=True, no_labels=no_labels, + h5py_file=h5py_file, data_fraction=data_fraction, ) except: From 654b33fc83b5f818a7890ac69cae5a15ef59b76e Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 18:02:24 +0100 Subject: [PATCH 08/36] fixes --- main_pretrain.py | 4 +++- solo/methods/base.py | 1 + solo/methods/linear.py | 1 + solo/utils/classification_dataloader.py | 6 ++++++ solo/utils/h5_dataset.py | 18 ++++++++++++------ 5 files changed, 23 insertions(+), 7 deletions(-) diff --git a/main_pretrain.py b/main_pretrain.py index 789a831d..b0f565f6 100644 --- a/main_pretrain.py +++ b/main_pretrain.py @@ -71,7 +71,9 @@ def main(): # validation dataloader for when it is available if args.dataset == "custom" and (args.no_labels or args.val_dir is None): val_loader = None - elif args.dataset in ["imagenet100", "imagenet"] and args.val_dir is None: + elif args.dataset in ["imagenet100", "imagenet"] and ( + args.val_dir is None and args.val_h5_path is None + ): val_loader = None else: _, val_loader = prepare_data_classification( diff --git a/solo/methods/base.py b/solo/methods/base.py index cd73bf43..b6ef2c87 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -362,6 +362,7 @@ def num_training_steps(self) -> int: no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) h5py_file = self.extra_args.get("train_h5_path", None) + h5py_file = os.path.join(data_dir, h5py_file) dataset_size = compute_dataset_size( dataset=dataset, diff --git a/solo/methods/linear.py b/solo/methods/linear.py index 824a2f53..fe8f29c9 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -189,6 +189,7 @@ def num_training_steps(self) -> int: no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) h5py_file = self.extra_args.get("train_h5_path", None) + h5py_file = os.path.join(data_dir, h5py_file) dataset_size = compute_dataset_size( dataset=dataset, diff --git a/solo/utils/classification_dataloader.py b/solo/utils/classification_dataloader.py index 5fffbffa..459dbe95 100644 --- a/solo/utils/classification_dataloader.py +++ b/solo/utils/classification_dataloader.py @@ -286,6 +286,8 @@ def prepare_data( data_dir: Optional[Union[str, Path]] = None, train_dir: Optional[Union[str, Path]] = None, val_dir: Optional[Union[str, Path]] = None, + train_h5_path: Optional[str] = None, + val_h5_path: Optional[str] = None, batch_size: int = 64, num_workers: int = 4, download: bool = True, @@ -301,6 +303,8 @@ def prepare_data( training data is located. Defaults to None. val_dir (Optional[Union[str, Path]], optional): subpath where the validation data is located. Defaults to None. + train_h5_path Optional[str]: path to the train h5 dataset file, if it exists. + val_h5_path Optional[str]: path to the val h5 dataset file, if it exists. batch_size (int, optional): batch size. Defaults to 64. num_workers (int, optional): number of parallel workers. Defaults to 4. data_fraction (Optional[float]): percentage of data to use. Use all data when set to -1.0. 
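# --- editor's note (not part of the patch): using H5Dataset directly, assuming
# an imagenet-100 train split stored at the path below. On the first run the
# class scans the whole h5 file and caches an index file next to it
# (train.txt here), so the first instantiation can take a while; later runs
# just read the cached index.
from solo.utils.h5_dataset import H5Dataset

dataset = H5Dataset("imagenet100", "/datasets/imagenet-100/train.h5")
img, label = dataset[0]  # a PIL RGB image and its int class index (no transform given)
print(len(dataset), img.size, label)
# --- end editor's note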
@@ -320,6 +324,8 @@ def prepare_data( val_dir=val_dir, download=download, data_fraction=data_fraction, + train_h5_path=train_h5_path, + val_h5_path=val_h5_path, ) train_loader, val_loader = prepare_dataloaders( train_dataset, diff --git a/solo/utils/h5_dataset.py b/solo/utils/h5_dataset.py index 9b2fec64..eeb77e09 100644 --- a/solo/utils/h5_dataset.py +++ b/solo/utils/h5_dataset.py @@ -22,7 +22,7 @@ import os from pathlib import Path from typing import Callable, Optional - +from tqdm import tqdm import h5py from PIL import Image from torch.utils.data import Dataset @@ -76,20 +76,26 @@ def __init__( self.classes = sorted(self.classes) self.class_to_idx = {cls_name: i for i, cls_name in enumerate(self.classes)} - self._data = list(filter(lambda entry: entry[0] in self.classes, self._data)) + class_set = set(self.classes) + new_data = [] + for class_name, img_name, _ in self._data: + if class_name in class_set: + new_data.append((class_name, img_name, self.class_to_idx[class_name])) + self._data = new_data def _load_h5_data_info(self): self._data = [] h5_data_info_file = os.path.splitext(self.h5_path)[0] + ".txt" + if not os.path.isfile(h5_data_info_file): temp_h5_file = h5py.File(self.h5_path, "r") # collect data from the h5 file directly - self.classes, self.class_to_idx = self._find_classes(self.h5_file) - for class_name in self.classes: + self.classes, self.class_to_idx = self._find_classes(temp_h5_file) + for class_name in tqdm(self.classes, desc="Collecting information about the h5 file"): y = self.class_to_idx[class_name] for img_name in temp_h5_file[class_name].keys(): - self._data.append((class_name, img_name, y)) + self._data.append((class_name, img_name, int(y))) # save the info locally to speed up sequential executions with open(h5_data_info_file, "w") as f: @@ -101,7 +107,7 @@ def _load_h5_data_info(self): for line in f: class_name_img, y = line.strip().split(" ") class_name, img_name = class_name_img.split("/") - self._data.append((class_name, img_name, y)) + self._data.append((class_name, img_name, int(y))) def _find_classes(self, h5_file: h5py.File): classes = sorted(h5_file.keys()) From 580d21200939c8a8161630db22b06ace0ac24a57 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Fri, 8 Jul 2022 18:17:03 +0100 Subject: [PATCH 09/36] Update dataset.py --- solo/args/dataset.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/solo/args/dataset.py b/solo/args/dataset.py index c4f1d8cd..89ff7bda 100644 --- a/solo/args/dataset.py +++ b/solo/args/dataset.py @@ -54,9 +54,6 @@ def dataset_args(parser: ArgumentParser): # dali (imagenet-100/imagenet/custom only) parser.add_argument("--dali", action="store_true") - # manually bypass all checks for dataset size by providing the correct size by hand - parser.add_argument("--total_dataset_size", default=None, type=int) - def augmentations_args(parser: ArgumentParser): """Adds augmentation-related arguments to a parser. 
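# --- editor's note (not part of the patches): with the plumbing above, the
# number of scheduler steps can now be derived from an h5 file as well. A
# sketch of the underlying call, where the h5 path is an assumption:
from solo.utils.misc import compute_dataset_size

size = compute_dataset_size(
    dataset="imagenet100",  # not one of the hardcoded cifar/stl sizes
    train=True,
    h5py_file="/datasets/imagenet-100/train.h5",  # counted via len(H5Dataset(...))
)
print(size)
# --- end editor's note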
From 87f0d1a77008788bf057c99df2a35461d7bca500 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Mon, 11 Jul 2022 11:30:20 +0100 Subject: [PATCH 10/36] conversion func for h5 --- .../pretrain/imagenet-100/mocov3_vit.sh | 17 ++++---- .../pretrain/imagenet-100/mocov3_vit_h5.sh | 42 +++++++++++++++++++ solo/utils/convert_imgfolder_to_h5.py | 41 +++++++++++++++++- solo/utils/h5_dataset.py | 7 ---- 4 files changed, 91 insertions(+), 16 deletions(-) create mode 100644 bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit.sh b/bash_files/pretrain/imagenet-100/mocov3_vit.sh index de502e7f..71d71f5c 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit.sh +++ b/bash_files/pretrain/imagenet-100/mocov3_vit.sh @@ -1,12 +1,12 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone vit_small \ - --data_dir /nfs/datasets/imagenet1k \ - --train_h5_path train.h5 \ - --val_h5_path val.h5 \ + --data_dir /datasets \ + --train_dir imagenet-100/train \ + --val_dir imagenet-100/val \ --max_epochs 400 \ --warmup_epochs 40 \ - --devices 0,1 \ + --devices 0,1,2,3,4,5,6,7 \ --accelerator gpu \ --strategy ddp \ --sync_batchnorm \ @@ -15,11 +15,12 @@ python3 main_pretrain.py \ --eta_lars 0.02 \ --exclude_bias_n_norm \ --scheduler warmup_cosine \ - --lr 1.5e-4 \ - --classifier_lr 1.5e-4 \ + --lr 2.0e-4 \ + --classifier_lr 3.0e-4 \ --weight_decay 0.1 \ - --batch_size 32 \ - --num_workers 8 \ + --batch_size 64 \ + --num_workers 4 \ + --dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh new file mode 100644 index 00000000..ded0965f --- /dev/null +++ b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh @@ -0,0 +1,42 @@ +python3 main_pretrain.py \ + --dataset imagenet100 \ + --backbone vit_small \ + --data_dir /nfs/datasets/imagenet1k \ + --train_h5_path train.h5 \ + --val_h5_path val.h5 \ + --max_epochs 400 \ + --warmup_epochs 40 \ + --devices 0,1,2,3,4,5,6,7 \ + --accelerator gpu \ + --strategy ddp \ + --sync_batchnorm \ + --precision 16 \ + --optimizer adamw \ + --eta_lars 0.02 \ + --exclude_bias_n_norm \ + --scheduler warmup_cosine \ + --lr 2.0e-4 \ + --classifier_lr 3.0e-4 \ + --weight_decay 0.1 \ + --batch_size 64 \ + --num_workers 8 \ + --brightness 0.4 \ + --contrast 0.4 \ + --saturation 0.2 \ + --hue 0.1 \ + --gaussian_prob 1.0 0.1 \ + --solarization_prob 0.0 0.2 \ + --min_scale 0.08 \ + --num_crops_per_aug 1 1 \ + --name mocov3-vit-400ep-imagenet100 \ + --project solo-learn \ + --entity unitn-mhug \ + --save_checkpoint \ + --wandb \ + --auto_resume \ + --method mocov3 \ + --proj_hidden_dim 4096 \ + --pred_hidden_dim 4096 \ + --temperature 0.2 \ + --base_tau_momentum 0.99 \ + --final_tau_momentum 1.0 diff --git a/solo/utils/convert_imgfolder_to_h5.py b/solo/utils/convert_imgfolder_to_h5.py index 2f418c1e..87e06b7e 100644 --- a/solo/utils/convert_imgfolder_to_h5.py +++ b/solo/utils/convert_imgfolder_to_h5.py @@ -17,4 +17,43 @@ # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. -# TODO +import argparse +import os + +import h5py +import numpy as np +from tqdm import tqdm + + +def convert_imgfolder_to_h5(folder_path: str, h5_path: str): + """Converts image folder to a h5 dataset. + + Args: + folder_path (str): path to the image folder. + h5_path (str): output path of the h5 file. 
+ """ + + with h5py.File(h5_path, "w") as h5: + classes = os.listdir(folder_path) + for class_name in tqdm(classes, desc="Processing classes:"): + cur_folder = os.path.join(folder_path, class_name) + class_group = h5.create_group(class_name) + for i, img_name in enumerate(os.listdir(cur_folder)): + with open(os.path.join(cur_folder, img_name), "rb") as fid_img: + binary_data = fid_img.read() + data = np.frombuffer(binary_data, dtype="uint8") + class_group.create_dataset( + img_name, + data=data, + shape=data.shape, + compression="gzip", + compression_opts=9, + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--folder_path", type=str, required=True) + parser.add_argument("--h5_path", type=str, required=True) + args = parser.parse_args() + convert_imgfolder_to_h5(args.folder_path, args.h5_path) diff --git a/solo/utils/h5_dataset.py b/solo/utils/h5_dataset.py index eeb77e09..f03381e3 100644 --- a/solo/utils/h5_dataset.py +++ b/solo/utils/h5_dataset.py @@ -50,13 +50,6 @@ def __init__( dataset (str): dataset name. h5_path (str): path of the h5 file. transform (Callable): pipeline of transformations. Defaults to None. - pre_parsed_paths_file Optional[str]: path of the pre-parsed paths files. - This allows the user to specify the file names and their classes in this format: - {class}/{file} CLASS-ID - {class}/{file} CLASS-ID - {class}/{file} CLASS-ID - If this is None, this object will automatically find all the files, - but might take a while if the dataset is large. Defaults to None. """ self.h5_path = h5_path From cc5488e2a8edc58840fcb05cb8119ae7fead242b Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Mon, 11 Jul 2022 11:45:13 +0100 Subject: [PATCH 11/36] fixes --- solo/methods/base.py | 6 ++++-- solo/methods/linear.py | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/solo/methods/base.py b/solo/methods/base.py index b6ef2c87..9e846a7c 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -357,12 +357,14 @@ def num_training_steps(self) -> int: data_dir = self.extra_args.get("data_dir", ".") train_dir = self.extra_args.get("train_dir", "train") folder = os.path.join(data_dir, str(train_dir)) + h5py_file = self.extra_args.get("train_h5_path", None) + h5py_file = os.path.join(data_dir, h5py_file) else: folder = None + h5py_file = None + no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) - h5py_file = self.extra_args.get("train_h5_path", None) - h5py_file = os.path.join(data_dir, h5py_file) dataset_size = compute_dataset_size( dataset=dataset, diff --git a/solo/methods/linear.py b/solo/methods/linear.py index fe8f29c9..2ad5967c 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -184,12 +184,14 @@ def num_training_steps(self) -> int: data_dir = self.extra_args.get("data_dir", ".") train_dir = self.extra_args.get("train_dir", "train") folder = os.path.join(data_dir, str(train_dir)) + h5py_file = self.extra_args.get("train_h5_path", None) + h5py_file = os.path.join(data_dir, h5py_file) else: folder = None + h5py_file = None + no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) - h5py_file = self.extra_args.get("train_h5_path", None) - h5py_file = os.path.join(data_dir, h5py_file) dataset_size = compute_dataset_size( dataset=dataset, From 45357a51927ac88a7a940fab3adcae598c92071d Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Mon, 11 Jul 2022 11:53:38 +0100 Subject: [PATCH 12/36] fix 
test --- solo/backbones/vit/vit_mocov3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solo/backbones/vit/vit_mocov3.py b/solo/backbones/vit/vit_mocov3.py index 44742a20..90b43ca3 100644 --- a/solo/backbones/vit/vit_mocov3.py +++ b/solo/backbones/vit/vit_mocov3.py @@ -73,7 +73,7 @@ def build_2d_sincos_position_embedding(self, temperature=10000.0): [torch.sin(out_w), torch.cos(out_w), torch.sin(out_h), torch.cos(out_h)], dim=1 )[None, :, :] - assert self.num_tokens == 1, "Assuming one and only one token, [cls]" + assert self.num_prefix_tokens == 1, "Assuming one and only one token, [cls]" pe_token = torch.zeros([1, 1, self.embed_dim], dtype=torch.float32) self.pos_embed = nn.Parameter(torch.cat([pe_token, pos_emb], dim=1)) self.pos_embed.requires_grad = False From 39aa09b0ab398c734bbf830b5b16dfa39a4d3a96 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Tue, 12 Jul 2022 00:59:21 +0100 Subject: [PATCH 13/36] Update utils.py --- solo/args/utils.py | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/solo/args/utils.py b/solo/args/utils.py index fbe5ddb1..d7217f25 100644 --- a/solo/args/utils.py +++ b/solo/args/utils.py @@ -241,16 +241,12 @@ def additional_setup_pretrain(args: Namespace): # adjust lr according to batch size if args.strategy == "horovod": warnings.warn( - "When using horovod, be aware of how the processes are divided. " - "The learning rate will only be scaled considering the number of " - "devices in each process. " - "If each gpu corresponds to each process, you should pass --num_nodes_horovod " - "N_GPUS to properly scale the lr. " - "You can also manually scale your lr if you are not sure, by checking your logs." + "You should manually pass --num_nodes_horovod for everything to work properly!" ) + # horovod+pl scales the lr by default try: - num_nodes = args.num_nodes_horovod or args.num_nodes or 1 + num_nodes = args.num_nodes or 1 except AttributeError: num_nodes = 1 @@ -317,16 +313,12 @@ def additional_setup_linear(args: Namespace): # adjust lr according to batch size if args.strategy == "horovod": warnings.warn( - "When using horovod, be aware of how the processes are divided. " - "The learning rate will only be scaled considering the number of " - "devices in each process. " - "If each gpu corresponds to each process, you should pass --num_nodes_horovod " - "N_GPUS to properly scale the lr. " - "You can also manually scale your lr if you are not sure, by checking your logs." + "You should manually pass --num_nodes_horovod for everything to work properly!" 
) + # horovod+pl scales the lr by default try: - num_nodes = args.num_nodes_horovod or args.num_nodes or 1 + num_nodes = args.num_nodes or 1 except AttributeError: num_nodes = 1 From 99eee048af81b63209760eaf6a43a7169d5662d4 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Tue, 12 Jul 2022 14:18:05 +0100 Subject: [PATCH 14/36] wip --- README.md | 2 +- bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh | 2 +- main_knn.py | 2 +- main_linear.py | 2 +- main_pretrain.py | 2 +- main_umap.py | 2 +- setup.py | 2 +- solo/args/utils.py | 12 ------------ solo/methods/base.py | 10 +--------- solo/methods/linear.py | 2 +- solo/utils/misc.py | 2 +- 11 files changed, 10 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index c50d01e0..1036f3b2 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ The library is self-contained, but it is possible to use the models outside of s ## News * **[Jun 26 2022]**: :fire: Added [MoCo V3](https://arxiv.org/abs/2104.02057). -* **[Jun 10 2022]**: :bomb: Improved LARS and fixed some issues to support [Horovod](https://horovod.readthedocs.io/en/stable/pytorch.html). +* **[Jun 10 2022]**: :bomb: Improved LARS. * **[Jun 09 2022]**: :lollipop: Added support for [WideResnet](https://arxiv.org/abs/1605.07146), multicrop for SwAV and equalization data augmentation. * **[May 02 2022]**: :diamond_shape_with_a_dot_inside: Wrapped Dali with a DataModule, added auto resume for linear eval and Wandb run resume. * **[Apr 12 2022]**: :rainbow: Improved design of models and added support to train with a fraction of data. diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh index ded0965f..ab1ba6b7 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh +++ b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh @@ -1,7 +1,7 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone vit_small \ - --data_dir /nfs/datasets/imagenet1k \ + --data_dir $1 \ --train_h5_path train.h5 \ --val_h5_path val.h5 \ --max_epochs 400 \ diff --git a/main_knn.py b/main_knn.py index 81453ea3..d0c65c9b 100644 --- a/main_knn.py +++ b/main_knn.py @@ -1,4 +1,4 @@ -# Copyright 2021 solo-learn development team. +# Copyright 2022 solo-learn development team. # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in diff --git a/main_linear.py b/main_linear.py index a4684042..39ae15b0 100644 --- a/main_linear.py +++ b/main_linear.py @@ -1,4 +1,4 @@ -# Copyright 2021 solo-learn development team. +# Copyright 2022 solo-learn development team. # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in diff --git a/main_pretrain.py b/main_pretrain.py index b0f565f6..9c823564 100644 --- a/main_pretrain.py +++ b/main_pretrain.py @@ -1,4 +1,4 @@ -# Copyright 2021 solo-learn development team. +# Copyright 2022 solo-learn development team. # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in diff --git a/main_umap.py b/main_umap.py index 233a6aee..186237df 100644 --- a/main_umap.py +++ b/main_umap.py @@ -1,4 +1,4 @@ -# Copyright 2021 solo-learn development team. +# Copyright 2022 solo-learn development team. 
# Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in diff --git a/setup.py b/setup.py index 278e8e87..52733c4c 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -# Copyright 2021 solo-learn development team. +# Copyright 2022 solo-learn development team. # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in diff --git a/solo/args/utils.py b/solo/args/utils.py index d7217f25..51afc483 100644 --- a/solo/args/utils.py +++ b/solo/args/utils.py @@ -239,12 +239,6 @@ def additional_setup_pretrain(args: Namespace): args.devices = [int(device) for device in args.devices.split(",") if device] # adjust lr according to batch size - if args.strategy == "horovod": - warnings.warn( - "You should manually pass --num_nodes_horovod for everything to work properly!" - ) - - # horovod+pl scales the lr by default try: num_nodes = args.num_nodes or 1 except AttributeError: @@ -311,12 +305,6 @@ def additional_setup_linear(args: Namespace): args.devices = [int(device) for device in args.devices.split(",") if device] # adjust lr according to batch size - if args.strategy == "horovod": - warnings.warn( - "You should manually pass --num_nodes_horovod for everything to work properly!" - ) - - # horovod+pl scales the lr by default try: num_nodes = args.num_nodes or 1 except AttributeError: diff --git a/solo/methods/base.py b/solo/methods/base.py index 9e846a7c..cb8be3c3 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -336,14 +336,6 @@ def add_model_specific_args(parent_parser: ArgumentParser) -> ArgumentParser: # disables channel last optimization parser.add_argument("--no_channel_last", action="store_true") - # When using horovod, be aware of how the processes are divided. - # The learning rate will only be scaled considering the number of - # devices in each process. - # If each gpu corresponds to each process, you should pass --num_nodes_horovod - # N_GPUS to properly scale the lr. - # You can also manually scale your lr if you are not sure, by checking your logs. - parser.add_argument("--num_nodes_horovod", default=None, type=int) - return parent_parser @property @@ -383,7 +375,7 @@ def num_training_steps(self) -> int: dataset_size = self.trainer.limit_train_batches * dataset_size num_devices = self.trainer.num_devices - num_nodes = self.extra_args.get("num_nodes_horovod", 0) or self.trainer.num_nodes or 1 + num_nodes = self.trainer.num_nodes or 1 effective_batch_size = ( self.batch_size * self.trainer.accumulate_grad_batches * num_devices * num_nodes ) diff --git a/solo/methods/linear.py b/solo/methods/linear.py index 2ad5967c..71124f0e 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -210,7 +210,7 @@ def num_training_steps(self) -> int: dataset_size = self.trainer.limit_train_batches * dataset_size num_devices = self.trainer.num_devices - num_nodes = self.extra_args.get("num_nodes_horovod", 0) or self.trainer.num_nodes or 1 + num_nodes = self.trainer.num_nodes or 1 effective_batch_size = ( self.batch_size * self.trainer.accumulate_grad_batches * num_devices * num_nodes ) diff --git a/solo/utils/misc.py b/solo/utils/misc.py index 6b794056..55ebf42e 100644 --- a/solo/utils/misc.py +++ b/solo/utils/misc.py @@ -261,7 +261,7 @@ def compute_dataset_size( def make_contiguous(module): - """Make the model contigous in order to comply with horovod. 
+ """Make the model contigous in order to comply with some distributed strategies. https://github.com/lucidrains/DALLE-pytorch/issues/330 """ From 91df4543ec14becdf87545406733743e8947341f Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Tue, 12 Jul 2022 15:41:14 +0100 Subject: [PATCH 15/36] Update mocov3_vit_h5.sh --- bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh index ab1ba6b7..5490e143 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh +++ b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh @@ -16,7 +16,7 @@ python3 main_pretrain.py \ --exclude_bias_n_norm \ --scheduler warmup_cosine \ --lr 2.0e-4 \ - --classifier_lr 3.0e-4 \ + --classifier_lr 5.0e-4 \ --weight_decay 0.1 \ --batch_size 64 \ --num_workers 8 \ From 14e39ecfc3173cb30e8541138cd3a0ebd5d7a7e3 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Tue, 12 Jul 2022 16:04:24 +0100 Subject: [PATCH 16/36] Update mocov3_vit_h5.sh --- bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh index 5490e143..1c1eeea2 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh +++ b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh @@ -15,8 +15,8 @@ python3 main_pretrain.py \ --eta_lars 0.02 \ --exclude_bias_n_norm \ --scheduler warmup_cosine \ - --lr 2.0e-4 \ - --classifier_lr 5.0e-4 \ + --lr 3.0e-4 \ + --classifier_lr 3.0e-4 \ --weight_decay 0.1 \ --batch_size 64 \ --num_workers 8 \ From 5f8e5e928bdbe0f4042183c4370549940242e3fb Mon Sep 17 00:00:00 2001 From: Enrico Fini Date: Tue, 12 Jul 2022 17:18:08 +0200 Subject: [PATCH 17/36] Update README.md --- README.md | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/README.md b/README.md index c50d01e0..8747b755 100644 --- a/README.md +++ b/README.md @@ -65,7 +65,7 @@ The library is self-contained, but it is possible to use the models outside of s ## Extra flavor -### Multiple backbones +### Backbones * [ResNet](https://arxiv.org/abs/1512.03385) * [WideResNet](https://arxiv.org/abs/1605.07146) * [ViT](https://arxiv.org/abs/2010.11929) @@ -77,22 +77,23 @@ The library is self-contained, but it is possible to use the models outside of s * Increased data processing speed by up to 100% using [Nvidia Dali](https://github.com/NVIDIA/DALI). * Flexible augmentations. -### Evaluation and logging +### Evaluation * Online linear evaluation via stop-gradient for easier debugging and prototyping (optionally available for the momentum backbone as well). +* Standard offline linear evaluation. * Online and offline K-NN evaluation. -* Normal offline linear evaluation. -* All the perks of PyTorch Lightning (mixed precision, gradient accumulation, clipping, automatic logging and much more). -* Easy-to-extend modular code structure. -* Custom model logging with a simpler file organization. * Automatic feature space visualization with UMAP. -* Offline UMAP. -* Common metrics. ### Training tricks +* All the perks of PyTorch Lightning (mixed precision, gradient accumulation, clipping, and much more). +* Channel last conversion * Multi-cropping dataloading following [SwAV](https://arxiv.org/abs/2006.09882): * **Note**: currently, only SimCLR, BYOL and SwAV support this. -* Exclude batchnorm and biases from LARS. 
-* No LR scheduler for the projection head in SimSiam. +* Exclude batchnorm and biases from weight decay and LARS. +* No LR scheduler for the projection head (as in SimSiam). + +### Logging +* Metric logging on the cloud with [WandB](https://wandb.ai/site) +* Custom model checkpointing with a simple file organization. --- ## Requirements From 6884efd39e716a0898c8bd5f203df60b01f64928 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Tue, 12 Jul 2022 17:00:50 +0100 Subject: [PATCH 18/36] Update mocov3_vit_h5.sh --- bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh index 1c1eeea2..14ec34c7 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh +++ b/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh @@ -33,7 +33,6 @@ python3 main_pretrain.py \ --entity unitn-mhug \ --save_checkpoint \ --wandb \ - --auto_resume \ --method mocov3 \ --proj_hidden_dim 4096 \ --pred_hidden_dim 4096 \ From 0346487028b6c762651c6eccc895cc02f8da5ea7 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 11:29:21 +0100 Subject: [PATCH 19/36] wip --- README.md | 2 +- main_linear.py | 20 ++--- main_pretrain.py | 29 +++--- requirements.txt | 3 +- .../knn/imagenet-100/knn.sh | 0 .../linear/imagenet-100/barlow_linear.sh | 7 +- .../linear/imagenet-100/byol_linear.sh | 7 +- .../imagenet-100/deepclusterv2_linear.sh | 7 +- .../linear/imagenet-100/dino_linear.sh | 7 +- .../linear/imagenet-100/general_linear.sh | 7 +- .../linear/imagenet-100/mocov2plus_linear.sh | 7 +- .../linear/imagenet-100/mocov3_linear.sh | 7 +- .../linear/imagenet-100/nnclr_linear.sh | 7 +- .../linear/imagenet-100/ressl_linear.sh | 7 +- .../linear/imagenet-100/simclr_linear.sh | 7 +- .../linear/imagenet-100/simsiam_linear.sh | 7 +- .../linear/imagenet-100/swav_linear.sh | 7 +- .../linear/imagenet-100/vibcreg_linear.sh | 7 +- .../linear/imagenet-100/vicreg_linear.sh | 7 +- .../linear/imagenet/barlow.sh | 7 +- .../linear/imagenet/byol.sh | 7 +- .../linear/imagenet/mocov2plus.sh | 7 +- .../pretrain/cifar/barlow.sh | 3 +- .../pretrain/cifar/byol.sh | 3 +- .../pretrain/cifar/deepclusterv2.sh | 3 +- .../pretrain/cifar/dino.sh | 3 +- .../pretrain/cifar/mocov2plus.sh | 3 +- .../pretrain/cifar/mocov3.sh | 3 +- .../pretrain/cifar/multicrop/swav.sh | 3 +- .../pretrain/cifar/nnbyol.sh | 3 +- .../pretrain/cifar/nnclr.sh | 3 +- .../pretrain/cifar/nnsiam.sh | 3 +- .../pretrain/cifar/ressl.sh | 3 +- .../pretrain/cifar/simclr.sh | 3 +- .../pretrain/cifar/simsiam.sh | 3 +- .../pretrain/cifar/supcon.sh | 3 +- .../pretrain/cifar/swav.sh | 3 +- .../pretrain/cifar/vibcreg.sh | 3 +- .../pretrain/cifar/vicreg.sh | 3 +- .../pretrain/cifar/wmse.sh | 3 +- .../pretrain/custom/byol.sh | 3 +- .../pretrain/imagenet-100/barlow.sh | 7 +- .../pretrain/imagenet-100/byol.sh | 7 +- .../pretrain/imagenet-100/deepclusterv2.sh | 7 +- .../pretrain/imagenet-100/dino.sh | 7 +- .../pretrain/imagenet-100/dino_vit.sh | 5 +- .../pretrain/imagenet-100/mocov2plus.sh | 7 +- .../pretrain/imagenet-100/mocov3.sh | 7 +- .../pretrain/imagenet-100/mocov3_vit.sh | 7 +- .../pretrain/imagenet-100/mocov3_vit_h5.sh | 6 +- .../pretrain/imagenet-100/multicrop/byol.sh | 7 +- .../pretrain/imagenet-100/multicrop/simclr.sh | 7 +- .../pretrain/imagenet-100/multicrop/supcon.sh | 7 +- .../pretrain/imagenet-100/nnclr.sh | 7 +- .../pretrain/imagenet-100/ressl.sh | 7 +- .../pretrain/imagenet-100/simclr.sh | 7 +- .../pretrain/imagenet-100/simsiam.sh | 7 +- 
.../pretrain/imagenet-100/supcon.sh | 7 +- .../pretrain/imagenet-100/swav.sh | 7 +- .../pretrain/imagenet-100/vibcreg.sh | 7 +- .../pretrain/imagenet-100/vicreg.sh | 7 +- .../pretrain/imagenet-100/wmse.sh | 7 +- .../pretrain/imagenet/barlow.sh | 7 +- .../pretrain/imagenet/byol.sh | 7 +- .../pretrain/imagenet/mocov2plus.sh | 7 +- .../umap/imagenet-100/umap.sh | 0 .../utils/convert_imgfolder_to_h5.py | 0 setup.py | 1 + solo/args/dataset.py | 17 ++-- solo/methods/base.py | 2 +- solo/methods/linear.py | 2 +- solo/utils/classification_dataloader.py | 90 +++++++------------ solo/utils/dali_dataloader.py | 30 +++---- solo/utils/pretrain_dataloader.py | 39 +++----- 74 files changed, 254 insertions(+), 322 deletions(-) rename {bash_files => scripts}/knn/imagenet-100/knn.sh (100%) rename {bash_files => scripts}/linear/imagenet-100/barlow_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/byol_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/deepclusterv2_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/dino_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/general_linear.sh (81%) rename {bash_files => scripts}/linear/imagenet-100/mocov2plus_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/mocov3_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/nnclr_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/ressl_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/simclr_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/simsiam_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/swav_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/vibcreg_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet-100/vicreg_linear.sh (80%) rename {bash_files => scripts}/linear/imagenet/barlow.sh (81%) rename {bash_files => scripts}/linear/imagenet/byol.sh (81%) rename {bash_files => scripts}/linear/imagenet/mocov2plus.sh (79%) rename {bash_files => scripts}/pretrain/cifar/barlow.sh (92%) rename {bash_files => scripts}/pretrain/cifar/byol.sh (93%) rename {bash_files => scripts}/pretrain/cifar/deepclusterv2.sh (92%) rename {bash_files => scripts}/pretrain/cifar/dino.sh (93%) rename {bash_files => scripts}/pretrain/cifar/mocov2plus.sh (92%) rename {bash_files => scripts}/pretrain/cifar/mocov3.sh (92%) rename {bash_files => scripts}/pretrain/cifar/multicrop/swav.sh (93%) rename {bash_files => scripts}/pretrain/cifar/nnbyol.sh (93%) rename {bash_files => scripts}/pretrain/cifar/nnclr.sh (92%) rename {bash_files => scripts}/pretrain/cifar/nnsiam.sh (91%) rename {bash_files => scripts}/pretrain/cifar/ressl.sh (92%) rename {bash_files => scripts}/pretrain/cifar/simclr.sh (92%) rename {bash_files => scripts}/pretrain/cifar/simsiam.sh (91%) rename {bash_files => scripts}/pretrain/cifar/supcon.sh (92%) rename {bash_files => scripts}/pretrain/cifar/swav.sh (92%) rename {bash_files => scripts}/pretrain/cifar/vibcreg.sh (93%) rename {bash_files => scripts}/pretrain/cifar/vicreg.sh (93%) rename {bash_files => scripts}/pretrain/cifar/wmse.sh (92%) rename {bash_files => scripts}/pretrain/custom/byol.sh (94%) rename {bash_files => scripts}/pretrain/imagenet-100/barlow.sh (87%) rename {bash_files => scripts}/pretrain/imagenet-100/byol.sh (88%) rename {bash_files => scripts}/pretrain/imagenet-100/deepclusterv2.sh (88%) rename {bash_files => scripts}/pretrain/imagenet-100/dino.sh (88%) rename {bash_files => scripts}/pretrain/imagenet-100/dino_vit.sh (90%) 
rename {bash_files => scripts}/pretrain/imagenet-100/mocov2plus.sh (86%) rename {bash_files => scripts}/pretrain/imagenet-100/mocov3.sh (88%) rename {bash_files => scripts}/pretrain/imagenet-100/mocov3_vit.sh (88%) rename {bash_files => scripts}/pretrain/imagenet-100/mocov3_vit_h5.sh (91%) rename {bash_files => scripts}/pretrain/imagenet-100/multicrop/byol.sh (88%) rename {bash_files => scripts}/pretrain/imagenet-100/multicrop/simclr.sh (86%) rename {bash_files => scripts}/pretrain/imagenet-100/multicrop/supcon.sh (86%) rename {bash_files => scripts}/pretrain/imagenet-100/nnclr.sh (87%) rename {bash_files => scripts}/pretrain/imagenet-100/ressl.sh (87%) rename {bash_files => scripts}/pretrain/imagenet-100/simclr.sh (85%) rename {bash_files => scripts}/pretrain/imagenet-100/simsiam.sh (85%) rename {bash_files => scripts}/pretrain/imagenet-100/supcon.sh (85%) rename {bash_files => scripts}/pretrain/imagenet-100/swav.sh (87%) rename {bash_files => scripts}/pretrain/imagenet-100/vibcreg.sh (87%) rename {bash_files => scripts}/pretrain/imagenet-100/vicreg.sh (87%) rename {bash_files => scripts}/pretrain/imagenet-100/wmse.sh (86%) rename {bash_files => scripts}/pretrain/imagenet/barlow.sh (87%) rename {bash_files => scripts}/pretrain/imagenet/byol.sh (88%) rename {bash_files => scripts}/pretrain/imagenet/mocov2plus.sh (88%) rename {bash_files => scripts}/umap/imagenet-100/umap.sh (100%) rename {solo => scripts}/utils/convert_imgfolder_to_h5.py (100%) diff --git a/README.md b/README.md index dd073b4a..8f266b6b 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ First clone the repo. Then, to install solo-learn with [Dali](https://github.com/NVIDIA/DALI) and/or UMAP support, use: ``` -pip3 install .[dali,umap] --extra-index-url https://developer.download.nvidia.com/compute/redist +pip3 install .[dali,umap,h5] --extra-index-url https://developer.download.nvidia.com/compute/redist ``` If no Dali/UMAP support is needed, the repository can be installed as: diff --git a/main_linear.py b/main_linear.py index 39ae15b0..a0f1f679 100644 --- a/main_linear.py +++ b/main_linear.py @@ -90,28 +90,28 @@ def main(): model = LinearModel(backbone, **args.__dict__) make_contiguous(model) + if args.data_format == "dali": + val_data_format = "image_folder" + else: + val_data_format = args.data_format train_loader, val_loader = prepare_data( args.dataset, - data_dir=args.data_dir, - train_dir=args.train_dir, - val_dir=args.val_dir, - train_h5_path=args.train_h5_path, - val_h5_path=args.val_h5_path, + train_data_path=args.train_data_path, + val_data_path=args.val_data_path, + data_format=val_data_format, batch_size=args.batch_size, num_workers=args.num_workers, - data_fraction=args.data_fraction, ) - if args.dali: + if args.data_format == "dali": assert ( _dali_avaliable ), "Dali is not currently avaiable, please install it first with [dali]." 
dali_datamodule = ClassificationDALIDataModule( dataset=args.dataset, - data_dir=args.data_dir, - train_dir=args.train_dir, - val_dir=args.val_dir, + train_data_path=args.train_data_path, + val_data_path=args.val_data_path, num_workers=args.num_workers, batch_size=args.batch_size, data_fraction=args.data_fraction, diff --git a/main_pretrain.py b/main_pretrain.py index 9c823564..d8a1ea0c 100644 --- a/main_pretrain.py +++ b/main_pretrain.py @@ -69,32 +69,32 @@ def main(): make_contiguous(model) # validation dataloader for when it is available - if args.dataset == "custom" and (args.no_labels or args.val_dir is None): + if args.dataset == "custom" and (args.no_labels or args.val_data_path is None): val_loader = None - elif args.dataset in ["imagenet100", "imagenet"] and ( - args.val_dir is None and args.val_h5_path is None - ): + elif args.dataset in ["imagenet100", "imagenet"] and (args.val_data_path is None): val_loader = None else: + if args.data_format == "dali": + val_data_format = "image_folder" + else: + val_data_format = args.data_format + _, val_loader = prepare_data_classification( args.dataset, - data_dir=args.data_dir, - train_dir=args.train_dir, - val_dir=args.val_dir, - train_h5_path=args.train_h5_path, - val_h5_path=args.val_h5_path, + train_data_path=args.train_data_path, + val_data_path=args.val_data_path, + data_format=val_data_format, batch_size=args.batch_size, num_workers=args.num_workers, ) # pretrain dataloader - if args.dali: + if args.data_format == "dali": assert _dali_avaliable, "Dali is not avaiable, please install it first with [dali]." dali_datamodule = PretrainDALIDataModule( dataset=args.dataset, - data_dir=args.data_dir, - train_dir=args.train_dir, + train_data_path=args.train_data_path, unique_augs=args.unique_augs, transform_kwargs=args.transform_kwargs, num_crops_per_aug=args.num_crops_per_aug, @@ -124,10 +124,9 @@ def main(): train_dataset = prepare_datasets( args.dataset, transform, - data_dir=args.data_dir, - train_dir=args.train_dir, + train_data_path=args.train_data_path, + data_format=args.data_format, no_labels=args.no_labels, - train_h5_path=args.train_h5_path, data_fraction=args.data_fraction, ) train_loader = prepare_dataloader( diff --git a/requirements.txt b/requirements.txt index 744b6410..bff7dbbb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,5 +8,4 @@ tqdm wandb scipy timm -scikit-learn -h5py \ No newline at end of file +scikit-learn \ No newline at end of file diff --git a/bash_files/knn/imagenet-100/knn.sh b/scripts/knn/imagenet-100/knn.sh similarity index 100% rename from bash_files/knn/imagenet-100/knn.sh rename to scripts/knn/imagenet-100/knn.sh diff --git a/bash_files/linear/imagenet-100/barlow_linear.sh b/scripts/linear/imagenet-100/barlow_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/barlow_linear.sh rename to scripts/linear/imagenet-100/barlow_linear.sh index 5f4ffd8c..633a58dd 100644 --- a/bash_files/linear/imagenet-100/barlow_linear.sh +++ b/scripts/linear/imagenet-100/barlow_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 4 \ - --dali \ + --data_format dali \ --name barlow-imagenet100-linear-eval \ 
--pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/byol_linear.sh b/scripts/linear/imagenet-100/byol_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/byol_linear.sh rename to scripts/linear/imagenet-100/byol_linear.sh index c088f655..d692c350 100644 --- a/bash_files/linear/imagenet-100/byol_linear.sh +++ b/scripts/linear/imagenet-100/byol_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 4 \ - --dali \ + --data_format dali \ --name byol-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/deepclusterv2_linear.sh b/scripts/linear/imagenet-100/deepclusterv2_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/deepclusterv2_linear.sh rename to scripts/linear/imagenet-100/deepclusterv2_linear.sh index 0b4b54b3..2c80142a 100644 --- a/bash_files/linear/imagenet-100/deepclusterv2_linear.sh +++ b/scripts/linear/imagenet-100/deepclusterv2_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /data/datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 5 \ - --dali \ + --data_format dali \ --name deepclusterv2-imagenet100-linear-eval \ --pretrained_feature_extractor PATH --project solo-learn \ --entity unitn-mhug \ diff --git a/bash_files/linear/imagenet-100/dino_linear.sh b/scripts/linear/imagenet-100/dino_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/dino_linear.sh rename to scripts/linear/imagenet-100/dino_linear.sh index bc32e112..ccc4fdd1 100644 --- a/bash_files/linear/imagenet-100/dino_linear.sh +++ b/scripts/linear/imagenet-100/dino_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 4 \ - --dali \ + --data_format dali \ --name dino-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/general_linear.sh b/scripts/linear/imagenet-100/general_linear.sh similarity index 81% rename from bash_files/linear/imagenet-100/general_linear.sh rename to scripts/linear/imagenet-100/general_linear.sh index c62a87e1..3b064b4b 100644 --- a/bash_files/linear/imagenet-100/general_linear.sh +++ b/scripts/linear/imagenet-100/general_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ 
+ --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0,1 \ --accelerator gpu \ @@ -17,7 +16,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 128 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name method-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/mocov2plus_linear.sh b/scripts/linear/imagenet-100/mocov2plus_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/mocov2plus_linear.sh rename to scripts/linear/imagenet-100/mocov2plus_linear.sh index 70bc0727..4193fa7e 100644 --- a/bash_files/linear/imagenet-100/mocov2plus_linear.sh +++ b/scripts/linear/imagenet-100/mocov2plus_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name mocov2plus-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/mocov3_linear.sh b/scripts/linear/imagenet-100/mocov3_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/mocov3_linear.sh rename to scripts/linear/imagenet-100/mocov3_linear.sh index c81de080..e6bfbda7 100644 --- a/bash_files/linear/imagenet-100/mocov3_linear.sh +++ b/scripts/linear/imagenet-100/mocov3_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0,1 \ --accelerator gpu \ @@ -17,7 +16,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 128 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name mocov3-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/nnclr_linear.sh b/scripts/linear/imagenet-100/nnclr_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/nnclr_linear.sh rename to scripts/linear/imagenet-100/nnclr_linear.sh index 02b26a58..b75f1f45 100644 --- a/bash_files/linear/imagenet-100/nnclr_linear.sh +++ b/scripts/linear/imagenet-100/nnclr_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name nnclr-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/ressl_linear.sh b/scripts/linear/imagenet-100/ressl_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/ressl_linear.sh rename to scripts/linear/imagenet-100/ressl_linear.sh index 8dcecdf9..816ce5dc 100644 --- a/bash_files/linear/imagenet-100/ressl_linear.sh +++ b/scripts/linear/imagenet-100/ressl_linear.sh @@ -1,9 
+1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/test \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name ressl-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/simclr_linear.sh b/scripts/linear/imagenet-100/simclr_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/simclr_linear.sh rename to scripts/linear/imagenet-100/simclr_linear.sh index 7c330b52..3ddb6b1a 100644 --- a/bash_files/linear/imagenet-100/simclr_linear.sh +++ b/scripts/linear/imagenet-100/simclr_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /data/datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name simclr-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/simsiam_linear.sh b/scripts/linear/imagenet-100/simsiam_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/simsiam_linear.sh rename to scripts/linear/imagenet-100/simsiam_linear.sh index b2881646..7ff92820 100644 --- a/bash_files/linear/imagenet-100/simsiam_linear.sh +++ b/scripts/linear/imagenet-100/simsiam_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --name simsiam-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/swav_linear.sh b/scripts/linear/imagenet-100/swav_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/swav_linear.sh rename to scripts/linear/imagenet-100/swav_linear.sh index 25787130..e6aba1a2 100644 --- a/bash_files/linear/imagenet-100/swav_linear.sh +++ b/scripts/linear/imagenet-100/swav_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /data/datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 5 \ - --dali \ + --data_format dali \ --name swav-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/vibcreg_linear.sh b/scripts/linear/imagenet-100/vibcreg_linear.sh similarity index 80% rename from 
bash_files/linear/imagenet-100/vibcreg_linear.sh rename to scripts/linear/imagenet-100/vibcreg_linear.sh index 573694b5..7816e505 100644 --- a/bash_files/linear/imagenet-100/vibcreg_linear.sh +++ b/scripts/linear/imagenet-100/vibcreg_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 512 \ --num_workers 5 \ - --dali \ + --data_format dali \ --name vibcreg-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet-100/vicreg_linear.sh b/scripts/linear/imagenet-100/vicreg_linear.sh similarity index 80% rename from bash_files/linear/imagenet-100/vicreg_linear.sh rename to scripts/linear/imagenet-100/vicreg_linear.sh index 81fbdff1..c2ab6dbb 100644 --- a/bash_files/linear/imagenet-100/vicreg_linear.sh +++ b/scripts/linear/imagenet-100/vicreg_linear.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 512 \ --num_workers 5 \ - --dali \ + --data_format dali \ --name vicreg-imagenet100-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/linear/imagenet/barlow.sh b/scripts/linear/imagenet/barlow.sh similarity index 81% rename from bash_files/linear/imagenet/barlow.sh rename to scripts/linear/imagenet/barlow.sh index 3bfec0d0..234ad3c1 100644 --- a/bash_files/linear/imagenet/barlow.sh +++ b/scripts/linear/imagenet/barlow.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet \ --backbone resnet50 \ - --data_dir /data/datasets \ - --train_dir imagenet/train \ - --val_dir imagenet/val \ + --train_data_path /data/datasets/imagenet/train \ + --val_data_path /data/datasets/imagenet/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -17,7 +16,7 @@ python3 main_linear.py \ --weight_decay 1e-5 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --pretrained_feature_extractor PATH \ --name barlow-resnet50-imagenet-linear-eval \ --entity unitn-mhug \ diff --git a/bash_files/linear/imagenet/byol.sh b/scripts/linear/imagenet/byol.sh similarity index 81% rename from bash_files/linear/imagenet/byol.sh rename to scripts/linear/imagenet/byol.sh index 25c5dbf6..1e121357 100644 --- a/bash_files/linear/imagenet/byol.sh +++ b/scripts/linear/imagenet/byol.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet \ --backbone resnet50 \ - --data_dir /data/datasets \ - --train_dir imagenet/train \ - --val_dir imagenet/val \ + --train_data_path /data/datasets/imagenet/train \ + --val_data_path /data/datasets/imagenet/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -17,7 +16,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 10 \ - --dali \ + --data_format dali \ --pretrained_feature_extractor PATH \ --name byol-resnet50-imagenet-linear-eval \ --entity unitn-mhug \ diff --git a/bash_files/linear/imagenet/mocov2plus.sh
b/scripts/linear/imagenet/mocov2plus.sh similarity index 79% rename from bash_files/linear/imagenet/mocov2plus.sh rename to scripts/linear/imagenet/mocov2plus.sh index 0a0443ab..6c17334c 100644 --- a/bash_files/linear/imagenet/mocov2plus.sh +++ b/scripts/linear/imagenet/mocov2plus.sh @@ -1,9 +1,8 @@ python3 main_linear.py \ --dataset imagenet \ --backbone resnet50 \ - --data_dir /data/datasets \ - --train_dir imagenet/train \ - --val_dir imagenet/val \ + --train_data_path /data/datasets/imagenet/train \ + --val_data_path /data/datasets/imagenet/val \ --max_epochs 100 \ --devices 0 \ --accelerator gpu \ @@ -15,7 +14,7 @@ python3 main_linear.py \ --weight_decay 0 \ --batch_size 256 \ --num_workers 5 \ - --dali \ + --data_format dali \ --name mocov2plus-imagenet-linear-eval \ --pretrained_feature_extractor PATH \ --project solo-learn \ diff --git a/bash_files/pretrain/cifar/barlow.sh b/scripts/pretrain/cifar/barlow.sh similarity index 92% rename from bash_files/pretrain/cifar/barlow.sh rename to scripts/pretrain/cifar/barlow.sh index 8988f8d2..ff25f812 100644 --- a/bash_files/pretrain/cifar/barlow.sh +++ b/scripts/pretrain/cifar/barlow.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/byol.sh b/scripts/pretrain/cifar/byol.sh similarity index 93% rename from bash_files/pretrain/cifar/byol.sh rename to scripts/pretrain/cifar/byol.sh index 9687e2d7..8a7a6025 100644 --- a/bash_files/pretrain/cifar/byol.sh +++ b/scripts/pretrain/cifar/byol.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/deepclusterv2.sh b/scripts/pretrain/cifar/deepclusterv2.sh similarity index 92% rename from bash_files/pretrain/cifar/deepclusterv2.sh rename to scripts/pretrain/cifar/deepclusterv2.sh index fa35f28a..913061a2 100644 --- a/bash_files/pretrain/cifar/deepclusterv2.sh +++ b/scripts/pretrain/cifar/deepclusterv2.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/dino.sh b/scripts/pretrain/cifar/dino.sh similarity index 93% rename from bash_files/pretrain/cifar/dino.sh rename to scripts/pretrain/cifar/dino.sh index 210c6ec4..e77dc492 100644 --- a/bash_files/pretrain/cifar/dino.sh +++ b/scripts/pretrain/cifar/dino.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/mocov2plus.sh b/scripts/pretrain/cifar/mocov2plus.sh similarity index 92% rename from bash_files/pretrain/cifar/mocov2plus.sh rename to scripts/pretrain/cifar/mocov2plus.sh index 34ae44d8..313a47c1 100644 --- a/bash_files/pretrain/cifar/mocov2plus.sh +++ b/scripts/pretrain/cifar/mocov2plus.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \
diff --git a/bash_files/pretrain/cifar/mocov3.sh b/scripts/pretrain/cifar/mocov3.sh similarity index 92% rename from bash_files/pretrain/cifar/mocov3.sh rename to scripts/pretrain/cifar/mocov3.sh index 63c9dc32..5d370cda 100644 --- a/bash_files/pretrain/cifar/mocov3.sh +++ b/scripts/pretrain/cifar/mocov3.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/multicrop/swav.sh b/scripts/pretrain/cifar/multicrop/swav.sh similarity index 93% rename from bash_files/pretrain/cifar/multicrop/swav.sh rename to scripts/pretrain/cifar/multicrop/swav.sh index 66051fb7..baaf14ed 100644 --- a/bash_files/pretrain/cifar/multicrop/swav.sh +++ b/scripts/pretrain/cifar/multicrop/swav.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone wide_resnet28w8 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/nnbyol.sh b/scripts/pretrain/cifar/nnbyol.sh similarity index 93% rename from bash_files/pretrain/cifar/nnbyol.sh rename to scripts/pretrain/cifar/nnbyol.sh index 574fe38f..3f4571e6 100644 --- a/bash_files/pretrain/cifar/nnbyol.sh +++ b/scripts/pretrain/cifar/nnbyol.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/nnclr.sh b/scripts/pretrain/cifar/nnclr.sh similarity index 92% rename from bash_files/pretrain/cifar/nnclr.sh rename to scripts/pretrain/cifar/nnclr.sh index 8b297b5f..ea07af14 100644 --- a/bash_files/pretrain/cifar/nnclr.sh +++ b/scripts/pretrain/cifar/nnclr.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/nnsiam.sh b/scripts/pretrain/cifar/nnsiam.sh similarity index 91% rename from bash_files/pretrain/cifar/nnsiam.sh rename to scripts/pretrain/cifar/nnsiam.sh index 172a4275..b7e85e50 100644 --- a/bash_files/pretrain/cifar/nnsiam.sh +++ b/scripts/pretrain/cifar/nnsiam.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/ressl.sh b/scripts/pretrain/cifar/ressl.sh similarity index 92% rename from bash_files/pretrain/cifar/ressl.sh rename to scripts/pretrain/cifar/ressl.sh index 10e0e31d..67ce16fe 100644 --- a/bash_files/pretrain/cifar/ressl.sh +++ b/scripts/pretrain/cifar/ressl.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/simclr.sh b/scripts/pretrain/cifar/simclr.sh similarity index 92% rename from bash_files/pretrain/cifar/simclr.sh rename to scripts/pretrain/cifar/simclr.sh index fc1b78cf..a8cc96b0 100644 --- a/bash_files/pretrain/cifar/simclr.sh +++ b/scripts/pretrain/cifar/simclr.sh @@ 
-1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/simsiam.sh b/scripts/pretrain/cifar/simsiam.sh similarity index 91% rename from bash_files/pretrain/cifar/simsiam.sh rename to scripts/pretrain/cifar/simsiam.sh index 9216730c..1697e1ad 100644 --- a/bash_files/pretrain/cifar/simsiam.sh +++ b/scripts/pretrain/cifar/simsiam.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/supcon.sh b/scripts/pretrain/cifar/supcon.sh similarity index 92% rename from bash_files/pretrain/cifar/supcon.sh rename to scripts/pretrain/cifar/supcon.sh index d3073429..08cd5baa 100644 --- a/bash_files/pretrain/cifar/supcon.sh +++ b/scripts/pretrain/cifar/supcon.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/swav.sh b/scripts/pretrain/cifar/swav.sh similarity index 92% rename from bash_files/pretrain/cifar/swav.sh rename to scripts/pretrain/cifar/swav.sh index dbeacdc4..a7f9f54d 100644 --- a/bash_files/pretrain/cifar/swav.sh +++ b/scripts/pretrain/cifar/swav.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/vibcreg.sh b/scripts/pretrain/cifar/vibcreg.sh similarity index 93% rename from bash_files/pretrain/cifar/vibcreg.sh rename to scripts/pretrain/cifar/vibcreg.sh index 059a5353..3fff41d8 100644 --- a/bash_files/pretrain/cifar/vibcreg.sh +++ b/scripts/pretrain/cifar/vibcreg.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/vicreg.sh b/scripts/pretrain/cifar/vicreg.sh similarity index 93% rename from bash_files/pretrain/cifar/vicreg.sh rename to scripts/pretrain/cifar/vicreg.sh index 159c2331..291f93d2 100644 --- a/bash_files/pretrain/cifar/vicreg.sh +++ b/scripts/pretrain/cifar/vicreg.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/cifar/wmse.sh b/scripts/pretrain/cifar/wmse.sh similarity index 92% rename from bash_files/pretrain/cifar/wmse.sh rename to scripts/pretrain/cifar/wmse.sh index 43c9e4be..f8568998 100644 --- a/bash_files/pretrain/cifar/wmse.sh +++ b/scripts/pretrain/cifar/wmse.sh @@ -1,7 +1,8 @@ python3 main_pretrain.py \ --dataset $1 \ --backbone resnet18 \ - --data_dir ./datasets \ + --train_data_path ./datasets \ + --val_data_path ./datasets \ --max_epochs 1000 \ --devices 0 \ --accelerator gpu \ diff --git a/bash_files/pretrain/custom/byol.sh b/scripts/pretrain/custom/byol.sh similarity index 94% rename from 
bash_files/pretrain/custom/byol.sh rename to scripts/pretrain/custom/byol.sh index 0e9aeafa..dfd843da 100644 --- a/bash_files/pretrain/custom/byol.sh +++ b/scripts/pretrain/custom/byol.sh @@ -5,8 +5,7 @@ python3 main_pretrain.py \ --dataset custom \ --backbone resnet18 \ - --data_dir PATH_TO_DIR \ - --train_dir PATH_TO_TRAIN_DIR \ + --train_data_path PATH_TO_TRAIN_DIR \ --no_labels \ --max_epochs 400 \ --devices 0,1 \ diff --git a/bash_files/pretrain/imagenet-100/barlow.sh b/scripts/pretrain/imagenet-100/barlow.sh similarity index 87% rename from bash_files/pretrain/imagenet-100/barlow.sh rename to scripts/pretrain/imagenet-100/barlow.sh index b709506f..88accfa3 100644 --- a/bash_files/pretrain/imagenet-100/barlow.sh +++ b/scripts/pretrain/imagenet-100/barlow.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -19,7 +18,7 @@ python3 main_pretrain.py \ --lr 0.3 \ --weight_decay 1e-4 \ --batch_size 128 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/byol.sh b/scripts/pretrain/imagenet-100/byol.sh similarity index 88% rename from bash_files/pretrain/imagenet-100/byol.sh rename to scripts/pretrain/imagenet-100/byol.sh index 38c7149a..d4f1ea5b 100644 --- a/bash_files/pretrain/imagenet-100/byol.sh +++ b/scripts/pretrain/imagenet-100/byol.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -20,7 +19,7 @@ python3 main_pretrain.py \ --weight_decay 1e-5 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/deepclusterv2.sh b/scripts/pretrain/imagenet-100/deepclusterv2.sh similarity index 88% rename from bash_files/pretrain/imagenet-100/deepclusterv2.sh rename to scripts/pretrain/imagenet-100/deepclusterv2.sh index 7e707c01..75295005 100644 --- a/bash_files/pretrain/imagenet-100/deepclusterv2.sh +++ b/scripts/pretrain/imagenet-100/deepclusterv2.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -23,7 +22,7 @@ python3 main_pretrain.py \ --weight_decay 1e-6 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --encode_indexes_into_labels \ --brightness 0.8 \ --contrast 0.8 \ diff --git a/bash_files/pretrain/imagenet-100/dino.sh b/scripts/pretrain/imagenet-100/dino.sh similarity index 88% rename from bash_files/pretrain/imagenet-100/dino.sh rename to scripts/pretrain/imagenet-100/dino.sh index 633e37ca..2c34ce07 100644 --- a/bash_files/pretrain/imagenet-100/dino.sh +++ b/scripts/pretrain/imagenet-100/dino.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir
imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -20,7 +19,7 @@ python3 main_pretrain.py \ --weight_decay 1e-6 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/dino_vit.sh b/scripts/pretrain/imagenet-100/dino_vit.sh similarity index 90% rename from bash_files/pretrain/imagenet-100/dino_vit.sh rename to scripts/pretrain/imagenet-100/dino_vit.sh index 58501312..08598714 100644 --- a/bash_files/pretrain/imagenet-100/dino_vit.sh +++ b/scripts/pretrain/imagenet-100/dino_vit.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone vit_tiny \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ diff --git a/bash_files/pretrain/imagenet-100/mocov2plus.sh b/scripts/pretrain/imagenet-100/mocov2plus.sh similarity index 86% rename from bash_files/pretrain/imagenet-100/mocov2plus.sh rename to scripts/pretrain/imagenet-100/mocov2plus.sh index 3c283159..8ffd4556 100644 --- a/bash_files/pretrain/imagenet-100/mocov2plus.sh +++ b/scripts/pretrain/imagenet-100/mocov2plus.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -17,7 +16,7 @@ python3 main_pretrain.py \ --weight_decay 1e-4 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.4 \ diff --git a/bash_files/pretrain/imagenet-100/mocov3.sh b/scripts/pretrain/imagenet-100/mocov3.sh similarity index 88% rename from bash_files/pretrain/imagenet-100/mocov3.sh rename to scripts/pretrain/imagenet-100/mocov3.sh index 28cd9c4c..067174d5 100644 --- a/bash_files/pretrain/imagenet-100/mocov3.sh +++ b/scripts/pretrain/imagenet-100/mocov3.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -19,7 +18,7 @@ python3 main_pretrain.py \ --weight_decay 1e-6 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit.sh b/scripts/pretrain/imagenet-100/mocov3_vit.sh similarity index 88% rename from bash_files/pretrain/imagenet-100/mocov3_vit.sh rename to scripts/pretrain/imagenet-100/mocov3_vit.sh index 71d71f5c..6166855b 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit.sh +++ b/scripts/pretrain/imagenet-100/mocov3_vit.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone vit_small \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ 
--warmup_epochs 40 \ --devices 0,1,2,3,4,5,6,7 \ @@ -20,7 +19,7 @@ python3 main_pretrain.py \ --weight_decay 0.1 \ --batch_size 64 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh b/scripts/pretrain/imagenet-100/mocov3_vit_h5.sh similarity index 91% rename from bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh rename to scripts/pretrain/imagenet-100/mocov3_vit_h5.sh index ab1ba6b7..12ef4113 100644 --- a/bash_files/pretrain/imagenet-100/mocov3_vit_h5.sh +++ b/scripts/pretrain/imagenet-100/mocov3_vit_h5.sh @@ -1,9 +1,9 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone vit_small \ - --data_dir $1 \ - --train_h5_path train.h5 \ - --val_h5_path val.h5 \ + --train_data_path $1/train.h5 \ + --val_data_path $1/val.h5 \ + --data_format h5 \ --max_epochs 400 \ --warmup_epochs 40 \ --devices 0,1,2,3,4,5,6,7 \ diff --git a/bash_files/pretrain/imagenet-100/multicrop/byol.sh b/scripts/pretrain/imagenet-100/multicrop/byol.sh similarity index 88% rename from bash_files/pretrain/imagenet-100/multicrop/byol.sh rename to scripts/pretrain/imagenet-100/multicrop/byol.sh index 93fec0c0..883ba14c 100644 --- a/bash_files/pretrain/imagenet-100/multicrop/byol.sh +++ b/scripts/pretrain/imagenet-100/multicrop/byol.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -20,7 +19,7 @@ python3 main_pretrain.py \ --weight_decay 1e-5 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet-100/multicrop/simclr.sh b/scripts/pretrain/imagenet-100/multicrop/simclr.sh similarity index 86% rename from bash_files/pretrain/imagenet-100/multicrop/simclr.sh rename to scripts/pretrain/imagenet-100/multicrop/simclr.sh index 76483f70..f3ed0882 100644 --- a/bash_files/pretrain/imagenet-100/multicrop/simclr.sh +++ b/scripts/pretrain/imagenet-100/multicrop/simclr.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -26,7 +25,7 @@ python3 main_pretrain.py \ --crop_size 224 96 \ --num_crops_per_aug 2 6 \ --name multicrop-simclr-400ep-imagenet100 \ - --dali \ + --data_format dali \ --project solo-learn \ --entity unitn-mhug \ --wandb \ diff --git a/bash_files/pretrain/imagenet-100/multicrop/supcon.sh b/scripts/pretrain/imagenet-100/multicrop/supcon.sh similarity index 86% rename from bash_files/pretrain/imagenet-100/multicrop/supcon.sh rename to scripts/pretrain/imagenet-100/multicrop/supcon.sh index 76ca0d30..2969ffca 100644 --- a/bash_files/pretrain/imagenet-100/multicrop/supcon.sh +++ b/scripts/pretrain/imagenet-100/multicrop/supcon.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ 
--devices 0,1 \ --accelerator gpu \ @@ -27,7 +26,7 @@ python3 main_pretrain.py \ --crop_size 224 96 \ --num_crops_per_aug 2 6 \ --name supcon-multicrop-400ep-imagenet100 \ - --dali \ + --data_format dali \ --project solo-learn \ --entity unitn-mhug \ --wandb \ diff --git a/bash_files/pretrain/imagenet-100/nnclr.sh b/scripts/pretrain/imagenet-100/nnclr.sh similarity index 87% rename from bash_files/pretrain/imagenet-100/nnclr.sh rename to scripts/pretrain/imagenet-100/nnclr.sh index 31ac6312..1af1f72d 100644 --- a/bash_files/pretrain/imagenet-100/nnclr.sh +++ b/scripts/pretrain/imagenet-100/nnclr.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -26,7 +25,7 @@ python3 main_pretrain.py \ --solarization_prob 0.0 0.2 \ --num_crops_per_aug 1 1 \ --num_workers 4 \ - --dali \ + --data_format dali \ --wandb \ --name nnclr-gather-p-400ep-imagenet100 \ --entity unitn-mhug \ diff --git a/bash_files/pretrain/imagenet-100/ressl.sh b/scripts/pretrain/imagenet-100/ressl.sh similarity index 87% rename from bash_files/pretrain/imagenet-100/ressl.sh rename to scripts/pretrain/imagenet-100/ressl.sh index 5081d901..36539463 100644 --- a/bash_files/pretrain/imagenet-100/ressl.sh +++ b/scripts/pretrain/imagenet-100/ressl.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -16,7 +15,7 @@ python3 main_pretrain.py \ --weight_decay 1e-4 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 0.0 \ --contrast 0.4 0.0 \ --saturation 0.4 0.0 \ diff --git a/bash_files/pretrain/imagenet-100/simclr.sh b/scripts/pretrain/imagenet-100/simclr.sh similarity index 85% rename from bash_files/pretrain/imagenet-100/simclr.sh rename to scripts/pretrain/imagenet-100/simclr.sh index 518ef423..776d66b4 100644 --- a/bash_files/pretrain/imagenet-100/simclr.sh +++ b/scripts/pretrain/imagenet-100/simclr.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -25,7 +24,7 @@ python3 main_pretrain.py \ --hue 0.2 \ --num_crops_per_aug 2 \ --name simclr-400ep-imagenet100 \ - --dali \ + --data_format dali \ --project solo-learn \ --entity unitn-mhug \ --wandb \ diff --git a/bash_files/pretrain/imagenet-100/simsiam.sh b/scripts/pretrain/imagenet-100/simsiam.sh similarity index 85% rename from bash_files/pretrain/imagenet-100/simsiam.sh rename to scripts/pretrain/imagenet-100/simsiam.sh index 148de2ec..21d2caf2 100644 --- a/bash_files/pretrain/imagenet-100/simsiam.sh +++ b/scripts/pretrain/imagenet-100/simsiam.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path 
/datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -24,7 +23,7 @@ python3 main_pretrain.py \ --num_crops_per_aug 2 \ --zero_init_residual \ --name simsiam-400ep-imagenet100 \ - --dali \ + --data_format dali \ --entity unitn-mhug \ --project solo-learn \ --wandb \ diff --git a/bash_files/pretrain/imagenet-100/supcon.sh b/scripts/pretrain/imagenet-100/supcon.sh similarity index 85% rename from bash_files/pretrain/imagenet-100/supcon.sh rename to scripts/pretrain/imagenet-100/supcon.sh index 6179af0f..968724b4 100644 --- a/bash_files/pretrain/imagenet-100/supcon.sh +++ b/scripts/pretrain/imagenet-100/supcon.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -25,7 +24,7 @@ python3 main_pretrain.py \ --hue 0.2 \ --num_crops_per_aug 2 \ --name supcon-400ep-imagenet100 \ - --dali \ + --data_format dali \ --project solo-learn \ --entity unitn-mhug \ --wandb \ diff --git a/bash_files/pretrain/imagenet-100/swav.sh b/scripts/pretrain/imagenet-100/swav.sh similarity index 87% rename from bash_files/pretrain/imagenet-100/swav.sh rename to scripts/pretrain/imagenet-100/swav.sh index 78d5918b..49c811c5 100644 --- a/bash_files/pretrain/imagenet-100/swav.sh +++ b/scripts/pretrain/imagenet-100/swav.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -21,7 +20,7 @@ python3 main_pretrain.py \ --weight_decay 1e-6 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.8 \ --contrast 0.8 \ --saturation 0.8 \ diff --git a/bash_files/pretrain/imagenet-100/vibcreg.sh b/scripts/pretrain/imagenet-100/vibcreg.sh similarity index 87% rename from bash_files/pretrain/imagenet-100/vibcreg.sh rename to scripts/pretrain/imagenet-100/vibcreg.sh index 8b0137a4..6334f176 100644 --- a/bash_files/pretrain/imagenet-100/vibcreg.sh +++ b/scripts/pretrain/imagenet-100/vibcreg.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -19,7 +18,7 @@ python3 main_pretrain.py \ --weight_decay 1e-4 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --min_scale 0.2 \ --brightness 0.4 \ --contrast 0.4 \ diff --git a/bash_files/pretrain/imagenet-100/vicreg.sh b/scripts/pretrain/imagenet-100/vicreg.sh similarity index 87% rename from bash_files/pretrain/imagenet-100/vicreg.sh rename to scripts/pretrain/imagenet-100/vicreg.sh index ce373cd6..57d5e3f8 100644 --- a/bash_files/pretrain/imagenet-100/vicreg.sh +++ b/scripts/pretrain/imagenet-100/vicreg.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path 
/datasets/imagenet-100/val \ --max_epochs 400 \ --devices 0,1 \ --accelerator gpu \ @@ -19,7 +18,7 @@ python3 main_pretrain.py \ --weight_decay 1e-4 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --min_scale 0.2 \ --brightness 0.4 \ --contrast 0.4 \ diff --git a/bash_files/pretrain/imagenet-100/wmse.sh b/scripts/pretrain/imagenet-100/wmse.sh similarity index 86% rename from bash_files/pretrain/imagenet-100/wmse.sh rename to scripts/pretrain/imagenet-100/wmse.sh index 4ea3cede..75c712d7 100644 --- a/bash_files/pretrain/imagenet-100/wmse.sh +++ b/scripts/pretrain/imagenet-100/wmse.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet100 \ --backbone resnet18 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --max_epochs 400 \ --precision 16 \ --devices 0,1 \ @@ -26,7 +25,7 @@ python3 main_pretrain.py \ --min_scale 0.08 \ --crop_size 224 96 \ --num_crops_per_aug 2 6 \ - --dali \ + --data_format dali \ --save_checkpoint \ --name wmse-imagenet100 \ --project solo-learn \ diff --git a/bash_files/pretrain/imagenet/barlow.sh b/scripts/pretrain/imagenet/barlow.sh similarity index 87% rename from bash_files/pretrain/imagenet/barlow.sh rename to scripts/pretrain/imagenet/barlow.sh index 1989c869..cb5b6065 100644 --- a/bash_files/pretrain/imagenet/barlow.sh +++ b/scripts/pretrain/imagenet/barlow.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet \ --backbone resnet50 \ - --data_dir /data/datasets \ - --train_dir imagenet/train \ - --val_dir imagenet/val \ + --train_data_path /datasets/ILSVRC2012/train \ + --val_data_path /datasets/ILSVRC2012/val \ --max_epochs 100 \ --devices 0,1,2,3 \ --accelerator gpu \ @@ -18,7 +17,7 @@ python3 main_pretrain.py \ --lr 0.8 \ --weight_decay 1.5e-6 \ --batch_size 64 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet/byol.sh b/scripts/pretrain/imagenet/byol.sh similarity index 88% rename from bash_files/pretrain/imagenet/byol.sh rename to scripts/pretrain/imagenet/byol.sh index 96e26b45..1eeaaaae 100644 --- a/bash_files/pretrain/imagenet/byol.sh +++ b/scripts/pretrain/imagenet/byol.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet \ --backbone resnet50 \ - --data_dir /datasets \ - --train_dir ILSVRC2012/train \ - --val_dir ILSVRC2012/val \ + --train_data_path /datasets/ILSVRC2012/train \ + --val_data_path /datasets/ILSVRC2012/val \ --max_epochs 100 \ --devices 0,1 \ --accelerator gpu \ @@ -20,7 +19,7 @@ python3 main_pretrain.py \ --weight_decay 1e-6 \ --batch_size 128 \ --num_workers 4 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/pretrain/imagenet/mocov2plus.sh b/scripts/pretrain/imagenet/mocov2plus.sh similarity index 88% rename from bash_files/pretrain/imagenet/mocov2plus.sh rename to scripts/pretrain/imagenet/mocov2plus.sh index 414c8eee..a9eaa27d 100644 --- a/bash_files/pretrain/imagenet/mocov2plus.sh +++ b/scripts/pretrain/imagenet/mocov2plus.sh @@ -1,9 +1,8 @@ python3 main_pretrain.py \ --dataset imagenet \ --backbone resnet50 \ - --data_dir /data/datasets \ - --train_dir imagenet/train \ - --val_dir imagenet/val \ + --train_data_path /datasets/ILSVRC2012/train \ + --val_data_path /datasets/ILSVRC2012/val \ --max_epochs 100 \ --devices 0,1 \ --accelerator gpu \ @@ -17,7 +16,7 @@ python3 main_pretrain.py \ 
--weight_decay 3e-5 \ --batch_size 128 \ --num_workers 5 \ - --dali \ + --data_format dali \ --brightness 0.4 \ --contrast 0.4 \ --saturation 0.2 \ diff --git a/bash_files/umap/imagenet-100/umap.sh b/scripts/umap/imagenet-100/umap.sh similarity index 100% rename from bash_files/umap/imagenet-100/umap.sh rename to scripts/umap/imagenet-100/umap.sh diff --git a/solo/utils/convert_imgfolder_to_h5.py b/scripts/utils/convert_imgfolder_to_h5.py similarity index 100% rename from solo/utils/convert_imgfolder_to_h5.py rename to scripts/utils/convert_imgfolder_to_h5.py diff --git a/setup.py b/setup.py index 52733c4c..eb2fba8c 100644 --- a/setup.py +++ b/setup.py @@ -25,6 +25,7 @@ EXTRA_REQUIREMENTS = { "dali": ["nvidia-dali-cuda110"], "umap": ["matplotlib", "seaborn", "pandas", "umap-learn"], + "h5": ["h5py"], } diff --git a/solo/args/dataset.py b/solo/args/dataset.py index 89ff7bda..0c50a5cb 100644 --- a/solo/args/dataset.py +++ b/solo/args/dataset.py @@ -40,20 +40,15 @@ def dataset_args(parser: ArgumentParser): parser.add_argument("--dataset", choices=SUPPORTED_DATASETS, type=str, required=True) # dataset path - parser.add_argument("--data_dir", type=Path, required=True) - parser.add_argument("--train_dir", type=Path, default=None) - parser.add_argument("--val_dir", type=Path, default=None) - - # h5 files - parser.add_argument("--train_h5_path", type=str, default=None) - parser.add_argument("--val_h5_path", type=str, default=None) + parser.add_argument("--train_data_path", type=Path, required=True) + parser.add_argument("--val_data_path", type=Path, default=None) + parser.add_argument( + "--data_format", default="image_folder", choices=["image_folder", "dali", "h5"] + ) # percentage of data used from training, leave -1.0 to use all data available parser.add_argument("--data_fraction", default=-1.0, type=float) - # dali (imagenet-100/imagenet/custom only) - parser.add_argument("--dali", action="store_true") def augmentations_args(parser: ArgumentParser): """Adds augmentation-related arguments to a parser.
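For reference, a minimal runnable sketch of the `--data_format` flag introduced in `dataset_args` above. Note that argparse restricts a flag to a fixed set of values via its `choices` keyword (it has no `options` keyword, which is why the hunk above uses `choices`); the path in the sketch is only illustrative.

```
# Sketch of the new dataset arguments; `choices` makes argparse reject
# any --data_format value other than the three supported formats.
from argparse import ArgumentParser
from pathlib import Path

parser = ArgumentParser()
parser.add_argument("--train_data_path", type=Path, required=True)
parser.add_argument("--val_data_path", type=Path, default=None)
parser.add_argument(
    "--data_format", default="image_folder", choices=["image_folder", "dali", "h5"]
)

# Example invocation mirroring the updated bash scripts.
args = parser.parse_args(
    ["--train_data_path", "/datasets/imagenet-100/train", "--data_format", "dali"]
)
print(args.data_format)  # -> dali
```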
diff --git a/solo/methods/base.py b/solo/methods/base.py index cb8be3c3..15c4fdd2 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -375,7 +375,7 @@ def num_training_steps(self) -> int: dataset_size = self.trainer.limit_train_batches * dataset_size num_devices = self.trainer.num_devices - num_nodes = self.trainer.num_nodes or 1 + num_nodes = self.trainer.num_nodes effective_batch_size = ( self.batch_size * self.trainer.accumulate_grad_batches * num_devices * num_nodes ) diff --git a/solo/methods/linear.py b/solo/methods/linear.py index 71124f0e..ebfe212e 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -210,7 +210,7 @@ def num_training_steps(self) -> int: dataset_size = self.trainer.limit_train_batches * dataset_size num_devices = self.trainer.num_devices - num_nodes = self.trainer.num_nodes or 1 + num_nodes = self.trainer.num_nodes effective_batch_size = ( self.batch_size * self.trainer.accumulate_grad_batches * num_devices * num_nodes ) diff --git a/solo/utils/classification_dataloader.py b/solo/utils/classification_dataloader.py index 459dbe95..1e78224e 100644 --- a/solo/utils/classification_dataloader.py +++ b/solo/utils/classification_dataloader.py @@ -144,11 +144,9 @@ def prepare_datasets( dataset: str, T_train: Callable, T_val: Callable, - data_dir: Optional[Union[str, Path]] = None, - train_dir: Optional[Union[str, Path]] = None, - val_dir: Optional[Union[str, Path]] = None, - train_h5_path: Optional[str] = None, - val_h5_path: Optional[str] = None, + train_data_path: Optional[Union[str, Path]] = None, + val_data_path: Optional[Union[str, Path]] = None, + data_format: Optional[str] = "image_folder", download: bool = True, data_fraction: float = -1.0, ) -> Tuple[Dataset, Dataset]: @@ -158,11 +156,12 @@ def prepare_datasets( dataset (str): dataset name. T_train (Callable): pipeline of transformations for training dataset. T_val (Callable): pipeline of transformations for validation dataset. - data_dir Optional[Union[str, Path]]: path where to download/locate the dataset. - train_dir Optional[Union[str, Path]]: subpath where the training data is located. - val_dir Optional[Union[str, Path]]: subpath where the validation data is located. - train_h5_path Optional[str]: path to the train h5 dataset file, if it exists. - val_h5_path Optional[str]: path to the val h5 dataset file, if it exists. + train_data_path (Optional[Union[str, Path]], optional): path where the + training data is located. Defaults to None. + val_data_path (Optional[Union[str, Path]], optional): path where the + validation data is located. Defaults to None. + data_format (Optional[str]): format of the data. Defaults to "image_folder". + Possible values are "image_folder" and "h5". data_fraction (Optional[float]): percentage of data to use. Use all data when set to -1.0. Defaults to -1.0. @@ -170,35 +169,27 @@ def prepare_datasets( Tuple[Dataset, Dataset]: training dataset and validation dataset. 
""" - if data_dir is None: - sandbox_dir = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) - data_dir = sandbox_dir / "datasets" - else: - data_dir = Path(data_dir) - - if train_dir is None: - train_dir = Path(f"{dataset}/train") - else: - train_dir = Path(train_dir) + if train_data_path is None: + sandbox_folder = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + train_data_path = sandbox_folder / "datasets" - if val_dir is None: - val_dir = Path(f"{dataset}/val") - else: - val_dir = Path(val_dir) + if val_data_path is None: + sandbox_folder = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) + val_data_path = sandbox_folder / "datasets" assert dataset in ["cifar10", "cifar100", "stl10", "imagenet", "imagenet100", "custom"] if dataset in ["cifar10", "cifar100"]: DatasetClass = vars(torchvision.datasets)[dataset.upper()] train_dataset = DatasetClass( - data_dir / train_dir, + train_data_path, train=True, download=download, transform=T_train, ) val_dataset = DatasetClass( - data_dir / val_dir, + val_data_path, train=False, download=download, transform=T_val, @@ -206,32 +197,25 @@ def prepare_datasets( elif dataset == "stl10": train_dataset = STL10( - data_dir / train_dir, + train_data_path, split="train", download=True, transform=T_train, ) val_dataset = STL10( - data_dir / val_dir, + val_data_path, split="test", download=download, transform=T_val, ) elif dataset in ["imagenet", "imagenet100", "custom"]: - if train_h5_path: - train_h5_path = data_dir / train_h5_path - train_dataset = H5Dataset(dataset, train_h5_path, T_train) - else: - train_dir = data_dir / train_dir - train_dataset = ImageFolder(train_dir, T_train) - - if val_h5_path: - val_h5_path = data_dir / val_h5_path - val_dataset = H5Dataset(dataset, val_h5_path, T_val) + if data_format == "h5": + train_dataset = H5Dataset(dataset, train_data_path, T_train) + val_dataset = H5Dataset(dataset, val_data_path, T_val) else: - val_dir = data_dir / val_dir - val_dataset = ImageFolder(val_dir, T_val) + train_dataset = ImageFolder(train_data_path, T_train) + val_dataset = ImageFolder(val_data_path, T_val) if data_fraction > 0: assert data_fraction < 1, "Only use data_fraction for values smaller than 1." @@ -283,11 +267,9 @@ def prepare_dataloaders( def prepare_data( dataset: str, - data_dir: Optional[Union[str, Path]] = None, - train_dir: Optional[Union[str, Path]] = None, - val_dir: Optional[Union[str, Path]] = None, - train_h5_path: Optional[str] = None, - val_h5_path: Optional[str] = None, + train_data_path: Optional[Union[str, Path]] = None, + val_data_path: Optional[Union[str, Path]] = None, + data_format: Optional[str] = "image_folder", batch_size: int = 64, num_workers: int = 4, download: bool = True, @@ -297,14 +279,12 @@ def prepare_data( Args: dataset (str): dataset name. - data_dir (Optional[Union[str, Path]], optional): path where to download/locate the dataset. - Defaults to None. - train_dir (Optional[Union[str, Path]], optional): subpath where the + train_data_path (Optional[Union[str, Path]], optional): path where the training data is located. Defaults to None. - val_dir (Optional[Union[str, Path]], optional): subpath where the + val_data_path (Optional[Union[str, Path]], optional): path where the validation data is located. Defaults to None. - train_h5_path Optional[str]: path to the train h5 dataset file, if it exists. - val_h5_path Optional[str]: path to the val h5 dataset file, if it exists. + data_format (Optional[str]): format of the data. 
Defaults to "image_folder". + Possible values are "image_folder" and "h5". batch_size (int, optional): batch size. Defaults to 64. num_workers (int, optional): number of parallel workers. Defaults to 4. data_fraction (Optional[float]): percentage of data to use. Use all data when set to -1.0. @@ -319,13 +299,11 @@ def prepare_data( dataset, T_train, T_val, - data_dir=data_dir, - train_dir=train_dir, - val_dir=val_dir, + train_data_path=train_data_path, + val_data_path=val_data_path, + data_format=data_format, download=download, data_fraction=data_fraction, - train_h5_path=train_h5_path, - val_h5_path=val_h5_path, ) train_loader, val_loader = prepare_dataloaders( train_dataset, diff --git a/solo/utils/dali_dataloader.py b/solo/utils/dali_dataloader.py index 0e4d2873..2bc08783 100644 --- a/solo/utils/dali_dataloader.py +++ b/solo/utils/dali_dataloader.py @@ -779,8 +779,7 @@ class PretrainDALIDataModule(pl.LightningDataModule): def __init__( self, dataset: str, - data_dir: Union[str, Path], - train_dir: Union[str, Path], + train_data_path: Union[str, Path], unique_augs: int, transform_kwargs: Dict[str, Any], num_crops_per_aug: List[int], @@ -798,8 +797,7 @@ def __init__( Args: dataset (str): dataset name. - data_dir (Union[str, Path]): path where to download/locate the dataset. - train_dir (Union[str, Path]): subpath where the training data is located. + train_data_path (Union[str, Path]): path where the training data is located. unique_augs (int): number of unique augmentation pielines transform_kwargs (Dict[str, Any]): kwargs for the transformations. num_crops_per_aug (List[int]): number of crops per pipeline. @@ -822,8 +820,7 @@ def __init__( self.dataset = dataset # paths - self.data_dir = Path(data_dir) - self.train_dir = Path(train_dir) + self.train_data_path = Path(train_data_path) # augmentation-related self.unique_augs = unique_augs @@ -899,7 +896,7 @@ def setup(self, stage: Optional[str] = None): def train_dataloader(self): train_pipeline_builder = PretrainPipelineBuilder( - self.data_dir / self.train_dir, + self.train_data_path, batch_size=self.batch_size, transforms=self.transforms, num_crops_per_aug=self.num_crops_per_aug, @@ -951,9 +948,8 @@ class ClassificationDALIDataModule(pl.LightningDataModule): def __init__( self, dataset: str, - data_dir: Union[str, Path], - train_dir: Union[str, Path], - val_dir: Union[str, Path], + train_data_path: Union[str, Path], + val_data_path: Union[str, Path], batch_size: int, num_workers: int = 4, data_fraction: float = -1.0, @@ -963,9 +959,8 @@ def __init__( Args: dataset (str): dataset name. - data_dir (Union[str, Path]): path where to download/locate the dataset. - train_dir (Union[str, Path]): subpath where the training data is located. - val_dir (Union[str, Path]): subpath where the validation data is located. + train_data_path (Union[str, Path]): path where the training data is located. + val_data_path (Union[str, Path]): path where the validation data is located. batch_size (int): batch size.. num_workers (int, optional): number of parallel workers. Defaults to 4. data_fraction (float, optional): percentage of data to use. 
@@ -979,9 +974,8 @@ def __init__( self.dataset = dataset # paths - self.data_dir = Path(data_dir) - self.train_dir = Path(train_dir) - self.val_dir = Path(val_dir) + self.train_data_path = Path(train_data_path) + self.val_data_path = Path(val_data_path) self.num_workers = num_workers @@ -1022,7 +1016,7 @@ def setup(self, stage: Optional[str] = None): def train_dataloader(self): train_pipeline_builder = self.pipeline_class( - self.data_dir / self.train_dir, + self.train_data_path, validation=False, batch_size=self.batch_size, device=self.dali_device, @@ -1054,7 +1048,7 @@ def train_dataloader(self): def val_dataloader(self) -> DALIGenericIterator: val_pipeline_builder = self.pipeline_class( - self.data_dir / self.val_dir, + self.val_data_path, validation=True, batch_size=self.batch_size, device=self.dali_device, diff --git a/solo/utils/pretrain_dataloader.py b/solo/utils/pretrain_dataloader.py index c3ec6e22..14bed624 100644 --- a/solo/utils/pretrain_dataloader.py +++ b/solo/utils/pretrain_dataloader.py @@ -497,10 +497,9 @@ def prepare_n_crop_transform( def prepare_datasets( dataset: str, transform: Callable, - data_dir: Optional[Union[str, Path]] = None, - train_dir: Optional[Union[str, Path]] = None, + train_data_path: Optional[Union[str, Path]] = None, + data_format: Optional[str] = "image_folder", no_labels: Optional[Union[str, Path]] = False, - train_h5_path: Optional[str] = None, download: bool = True, data_fraction: float = -1.0, ) -> Dataset: @@ -509,31 +508,24 @@ Args: dataset (str): the name of the dataset. transform (Callable): a transformation. - data_dir (Optional[Union[str, Path]]): the directory to load data from. - Defaults to None. - train_dir (Optional[Union[str, Path]]): training data directory - to be appended to data_dir. Defaults to None. + train_data_path (Optional[Union[str, Path]]): training data path. Defaults to None. + data_format (Optional[str]): format of the data. Defaults to "image_folder". + Possible values are "image_folder" and "h5". no_labels (Optional[bool]): if the custom dataset has no labels. - train_h5_path Optional[str]: path to the train h5 dataset file, if it exists. data_fraction (Optional[float]): percentage of data to use. Use all data when set to -1.0. Defaults to -1.0. Returns: Dataset: the desired dataset with transformations.
""" - if data_dir is None: + if train_data_path is None: sandbox_folder = Path(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) - data_dir = sandbox_folder / "datasets" - - if train_dir is None: - train_dir = Path(f"{dataset}/train") - else: - train_dir = Path(train_dir) + train_data_path = sandbox_folder / "datasets" if dataset in ["cifar10", "cifar100"]: DatasetClass = vars(torchvision.datasets)[dataset.upper()] train_dataset = dataset_with_index(DatasetClass)( - data_dir / train_dir, + train_data_path, train=True, download=download, transform=transform, @@ -541,30 +533,25 @@ def prepare_datasets( elif dataset == "stl10": train_dataset = dataset_with_index(STL10)( - data_dir / train_dir, + train_data_path, split="train+unlabeled", download=download, transform=transform, ) elif dataset in ["imagenet", "imagenet100"]: - if train_h5_path: - train_h5_path = data_dir / train_h5_path - train_dataset = dataset_with_index(H5Dataset)(dataset, train_h5_path, transform) - + if data_format == "h5": + train_dataset = dataset_with_index(H5Dataset)(dataset, train_data_path, transform) else: - train_dir = data_dir / train_dir - train_dataset = dataset_with_index(ImageFolder)(train_dir, transform) + train_dataset = dataset_with_index(ImageFolder)(train_data_path, transform) elif dataset == "custom": - train_dir = data_dir / train_dir - if no_labels: dataset_class = CustomDatasetWithoutLabels else: dataset_class = ImageFolder - train_dataset = dataset_with_index(dataset_class)(train_dir, transform) + train_dataset = dataset_with_index(dataset_class)(train_data_path, transform) if data_fraction > 0: assert data_fraction < 1, "Only use data_fraction for values smaller than 1." From 3ec5565ba72518a5a63319fcfb63f6641425ac3b Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 11:31:08 +0100 Subject: [PATCH 20/36] readme --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 8f266b6b..61fd732f 100644 --- a/README.md +++ b/README.md @@ -126,7 +126,7 @@ Then, to install solo-learn with [Dali](https://github.com/NVIDIA/DALI) and/or U pip3 install .[dali,umap,h5] --extra-index-url https://developer.download.nvidia.com/compute/redist ``` -If no Dali/UMAP support is needed, the repository can be installed as: +If no Dali/UMAP/H5 support is needed, the repository can be installed as: ``` pip3 install . ``` From 8a1ad24c6fbf66fee085ab99a6bfde35f8a2ac21 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 11:35:44 +0100 Subject: [PATCH 21/36] fix tests? 
--- tests/args/test_datasets.py | 5 ++--- tests/args/test_setup.py | 20 +++++++++++--------- tests/dali/test_dali_dataloader.py | 16 ++++++---------- tests/methods/utils.py | 5 ++--- 4 files changed, 21 insertions(+), 25 deletions(-) diff --git a/tests/args/test_datasets.py b/tests/args/test_datasets.py index 574ba957..ea7430e4 100644 --- a/tests/args/test_datasets.py +++ b/tests/args/test_datasets.py @@ -27,9 +27,8 @@ def test_argparse_dataset(): actions = [vars(action)["dest"] for action in vars(parser)["_actions"]] assert "dataset" in actions - assert "data_dir" in actions - assert "train_dir" in actions - assert "val_dir" in actions + assert "train_data_path" in actions + assert "val_data_path" in actions assert "dali" in actions diff --git a/tests/args/test_setup.py b/tests/args/test_setup.py index 623c18cf..9da183ac 100644 --- a/tests/args/test_setup.py +++ b/tests/args/test_setup.py @@ -40,7 +40,9 @@ def test_setup_pretrain(): "cifar10", "--backbone", "resnet18", - "--data_dir", + "--train_data_path", + "./datasets", + "--val_data_path", "./datasets", "--max_epochs", "1000", @@ -135,8 +137,10 @@ def test_setup_linear(): "imagenet100", "--backbone", "resnet18", - "--data_dir", - "/datasets", + "--train_data_path", + "./datasets", + "--val_data_path", + "./datasets", "--train_dir", "imagenet-100/train", "--val_dir", @@ -354,9 +358,8 @@ def test_additional_setup_pretrain(): args = { "backbone": "vit_small", "dataset": "custom", - "data_dir": Path("."), - "train_dir": "dummy_train", - "val_dir": "dummy_val", + "train_data_path": "./dummy_train", + "val_data_path": "./dummy_val", "mean": [0.485, 0.456, 0.406], "std": [0.228, 0.224, 0.225], "brightness": [0.4], @@ -443,9 +446,8 @@ def test_additional_setup_linear(): args = { "backbone": "vit_small", "dataset": "custom", - "data_dir": Path("."), - "train_dir": "dummy_train", - "val_dir": "dummy_val", + "train_data_path": "./dummy_train", + "val_data_path": "./dummy_val", "mean": [0.485, 0.456, 0.406], "std": [0.228, 0.224, 0.225], "crop_size": [224], diff --git a/tests/dali/test_dali_dataloader.py b/tests/dali/test_dali_dataloader.py index 89eaa731..9c29502d 100644 --- a/tests/dali/test_dali_dataloader.py +++ b/tests/dali/test_dali_dataloader.py @@ -134,8 +134,7 @@ def test_dali_pretrain(): kwargs = {**BASE_KWARGS, **DATA_KWARGS_WRAPPED, **method_kwargs} kwargs["dali_device"] = "cpu" - kwargs["train_dir"] = "dummy_train" - kwargs["data_dir"] = "." + kwargs["train_data_path"] = "./dummy_train" kwargs["dataset"] = "custom" kwargs["transform_kwargs"] = dict( @@ -161,8 +160,7 @@ def test_dali_pretrain(): ) dali_datamodule = PretrainDALIDataModule( dataset=args.dataset, - data_dir=args.data_dir, - train_dir=args.train_dir, + train_data_path=args.train_data_path, unique_augs=args.unique_augs, transform_kwargs=args.transform_kwargs, num_crops_per_aug=args.num_crops_per_aug, @@ -187,9 +185,8 @@ def test_dali_linear(): backbone.fc = nn.Identity() kwargs["dali_device"] = "cpu" - kwargs["data_dir"] = "." 
- kwargs["train_dir"] = "dummy_train" - kwargs["val_dir"] = "dummy_val" + kwargs["train_data_path"] = "./dummy_train" + kwargs["val_data_path"] = "./dummy_val" kwargs["dataset"] = "custom" del kwargs["backbone"] @@ -199,9 +196,8 @@ def test_dali_linear(): args = argparse.Namespace(**kwargs) dali_datamodule = ClassificationDALIDataModule( dataset=args.dataset, - data_dir=args.data_dir, - train_dir=args.train_dir, - val_dir=args.val_dir, + train_data_path=args.train_data_path, + val_data_path=args.val_data_path, num_workers=args.num_workers, batch_size=args.batch_size, data_fraction=args.data_fraction, diff --git a/tests/methods/utils.py b/tests/methods/utils.py index 3658eaf9..497088e3 100644 --- a/tests/methods/utils.py +++ b/tests/methods/utils.py @@ -74,9 +74,8 @@ def gen_base_kwargs( "dali_device": "gpu", "batch_size": batch_size, "num_workers": 4, - "data_dir": "/data/datasets", - "train_dir": "cifar10/train", - "val_dir": "cifar10/val", + "train_data_path": "./cifar10/train", + "val_data_path": "./cifar10/val", "dataset": "cifar10", } if momentum: From ff317aa915a6ecd8ef65ea2fd194d78df20d091f Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 11:38:23 +0100 Subject: [PATCH 22/36] minor stuff --- solo/args/dataset.py | 6 +++--- solo/args/utils.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/solo/args/dataset.py b/solo/args/dataset.py index 0c50a5cb..35644279 100644 --- a/solo/args/dataset.py +++ b/solo/args/dataset.py @@ -20,8 +20,6 @@ from argparse import ArgumentParser from pathlib import Path -from requests import options - def dataset_args(parser: ArgumentParser): """Adds dataset-related arguments to a parser. @@ -45,7 +43,9 @@ def dataset_args(parser: ArgumentParser): parser.add_argument("--train_data_path", type=Path, required=True) parser.add_argument("--val_data_path", type=Path, default=None) parser.add_argument( - "--data_format", default="image_folder", options=["image_folder", "dali", "h5"] + "--data_format", + default="image_folder", + choices=["image_folder", "dali", "h5"], ) # percentage of data used from training, leave -1.0 to use all data available diff --git a/solo/args/utils.py b/solo/args/utils.py index 51afc483..ad6da48e 100644 --- a/solo/args/utils.py +++ b/solo/args/utils.py @@ -214,7 +214,7 @@ def additional_setup_pretrain(args: Namespace): with suppress(AttributeError): del args.patch_size - if args.dali: + if args.data_format == "dali": assert args.dataset in ["imagenet100", "imagenet", "custom"] args.extra_optimizer_args = {} @@ -286,7 +286,7 @@ def additional_setup_linear(args: Namespace): with suppress(AttributeError): del args.patch_size - if args.dali: + if args.data_format == "dali": assert args.dataset in ["imagenet100", "imagenet", "custom"] args.extra_optimizer_args = {} From cdfc1cab9fcf3be7a678cf952d17a5252dd51b6a Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 11:40:52 +0100 Subject: [PATCH 23/36] other minor --- main_linear.py | 2 +- main_pretrain.py | 2 +- solo/args/setup.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/main_linear.py b/main_linear.py index a0f1f679..fc5727fd 100644 --- a/main_linear.py +++ b/main_linear.py @@ -195,7 +195,7 @@ def prefetch_batches(self) -> int: except: pass - if args.dali: + if args.data_format == "dali": trainer.fit(model, ckpt_path=ckpt_path, datamodule=dali_datamodule) else: trainer.fit(model, train_loader, val_loader, ckpt_path=ckpt_path) diff --git a/main_pretrain.py b/main_pretrain.py index d8a1ea0c..7f72e7f9 100644 
--- a/main_pretrain.py +++ b/main_pretrain.py @@ -218,7 +218,7 @@ def prefetch_batches(self) -> int: except: pass - if args.dali: + if args.data_format == "dali": trainer.fit(model, ckpt_path=ckpt_path, datamodule=dali_datamodule) else: trainer.fit(model, train_loader, val_loader, ckpt_path=ckpt_path) diff --git a/solo/args/setup.py b/solo/args/setup.py index 3bebf864..48580eef 100644 --- a/solo/args/setup.py +++ b/solo/args/setup.py @@ -92,7 +92,7 @@ def parse_args_pretrain() -> argparse.Namespace: if temp_args.auto_resume: parser = AutoResumer.add_autoresumer_args(parser) - if _dali_available and temp_args.dali: + if _dali_available and temp_args.data_format == "dali": parser = PretrainDALIDataModule.add_dali_args(parser) # parse args @@ -142,7 +142,7 @@ def parse_args_linear() -> argparse.Namespace: if temp_args.auto_resume: parser = AutoResumer.add_autoresumer_args(parser) - if _dali_available and temp_args.dali: + if _dali_available and temp_args.data_format == "dali": parser = ClassificationDALIDataModule.add_dali_args(parser) # parse args From 7053ec3d637694bb47d05aa5b28861a222e78d8d Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 16:32:42 +0100 Subject: [PATCH 24/36] done? --- solo/methods/base.py | 17 +++++------------ solo/methods/linear.py | 15 ++++----------- solo/utils/misc.py | 18 +++++++++--------- tests/utils/test_pretrain_dataloader.py | 2 +- 4 files changed, 19 insertions(+), 33 deletions(-) diff --git a/solo/methods/base.py b/solo/methods/base.py index 15c4fdd2..3f5895f9 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -346,30 +346,23 @@ def num_training_steps(self) -> int: try: dataset = self.extra_args.get("dataset", None) if dataset not in ["cifar10", "cifar100", "stl10"]: - data_dir = self.extra_args.get("data_dir", ".") - train_dir = self.extra_args.get("train_dir", "train") - folder = os.path.join(data_dir, str(train_dir)) - h5py_file = self.extra_args.get("train_h5_path", None) - h5py_file = os.path.join(data_dir, h5py_file) + data_path = self.extra_args.get("train_data_path", "./train") else: - folder = None - h5py_file = None - + data_path = None + no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) dataset_size = compute_dataset_size( dataset=dataset, - folder=folder, + data_path=data_path, train=True, no_labels=no_labels, - h5py_file=h5py_file, data_fraction=data_fraction, ) except: raise RuntimeError( - "Please pass 'dataset' or 'data_dir '" - "and 'train_dir' as parameters to the model." + "Please pass 'dataset' or 'train_data_path' as parameters to the model." 
) dataset_size = self.trainer.limit_train_batches * dataset_size diff --git a/solo/methods/linear.py b/solo/methods/linear.py index ebfe212e..7087ddf0 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -181,30 +181,23 @@ def num_training_steps(self) -> int: try: dataset = self.extra_args.get("dataset", None) if dataset not in ["cifar10", "cifar100", "stl10"]: - data_dir = self.extra_args.get("data_dir", ".") - train_dir = self.extra_args.get("train_dir", "train") - folder = os.path.join(data_dir, str(train_dir)) - h5py_file = self.extra_args.get("train_h5_path", None) - h5py_file = os.path.join(data_dir, h5py_file) + data_path = self.extra_args.get("train_data_path", "./train") else: - folder = None - h5py_file = None + data_path = None no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) dataset_size = compute_dataset_size( dataset=dataset, - folder=folder, + data_path=data_path, train=True, no_labels=no_labels, - h5py_file=h5py_file, data_fraction=data_fraction, ) except: raise RuntimeError( - "Please pass 'dataset' or 'data_dir '" - "and 'train_dir' as parameters to the model." + "Please pass 'dataset' or 'train_data_path' as parameters to the model." ) dataset_size = self.trainer.limit_train_batches * dataset_size diff --git a/solo/utils/misc.py b/solo/utils/misc.py index 55ebf42e..bd37a5ab 100644 --- a/solo/utils/misc.py +++ b/solo/utils/misc.py @@ -208,9 +208,9 @@ def gather(X, dim=0): def compute_dataset_size( dataset: Optional[str] = None, train: Optional[bool] = True, - folder: Optional[str] = None, + data_path: Optional[str] = None, + data_format: Optional[str] = "image_folder", no_labels: Optional[bool] = False, - h5py_file: Optional[str] = None, data_fraction: Optional[float] = -1, ): """Utility function to get the dataset size. If using cifar or stl, @@ -220,13 +220,13 @@ def compute_dataset_size( specify if it has labels or not with the no_labels flag. Args: - folder (Optional[str]): path to the ImageFolder. Defaults to None. dataset (Optional[str]): dataset size for predefined datasets [cifar10, cifar100, stl10]. Defaults to None. train (Optional[bool]): train dataset flag. Defaults to True. + data_path (Optional[str]): path to the folder. Defaults to None. + data_format (Optional[str]): format of the data, either "image_folder" or "h5". + Defaults to "image_folder". no_labels (Optional[bool]): if the dataset has no labels. Defaults to False. - h5py_file (Optional[str]): if using an h5py file, create a dummy H5Dataset to count the number of images. - Defaults to None. data_fraction (Optional[float]): amount of data to use. Defaults to -1. 
Returns: @@ -243,15 +243,15 @@ def compute_dataset_size( if dataset is not None: size = DATASET_SIZES.get(dataset.lower(), {}).get("train" if train else "val", None) - if h5py_file is not None: - size = len(H5Dataset(dataset, h5py_file)) + if data_format == "h5": + size = len(H5Dataset(dataset, data_path)) if size is None: if no_labels: - size = len(os.listdir(folder)) + size = len(os.listdir(data_path)) else: size = sum( - len(os.listdir(os.path.join(folder, class_))) for class_ in os.listdir(folder) + len(os.listdir(os.path.join(data_path, class_))) for class_ in os.listdir(data_path) ) if data_fraction != -1: diff --git a/tests/utils/test_pretrain_dataloader.py b/tests/utils/test_pretrain_dataloader.py index ed7ea282..70b87e05 100644 --- a/tests/utils/test_pretrain_dataloader.py +++ b/tests/utils/test_pretrain_dataloader.py @@ -91,7 +91,7 @@ def test_data(): T = [prepare_transform("cifar10", **kwargs)] T = prepare_n_crop_transform(T, num_crops_per_aug=[2]) - train_dataset = prepare_datasets("cifar10", T, data_dir=None) + train_dataset = prepare_datasets("cifar10", T, train_data_path=None) assert isinstance(train_dataset, CIFAR10) assert len(train_dataset[0]) == 3 From f511cc0c4aab8103aa928c043613df578fcd4f82 Mon Sep 17 00:00:00 2001 From: autoblack Date: Wed, 13 Jul 2022 15:33:39 +0000 Subject: [PATCH 25/36] fixup: format solo with Black --- solo/methods/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/solo/methods/base.py b/solo/methods/base.py index 3f5895f9..4e339317 100644 --- a/solo/methods/base.py +++ b/solo/methods/base.py @@ -349,7 +349,7 @@ def num_training_steps(self) -> int: data_path = self.extra_args.get("train_data_path", "./train") else: data_path = None - + no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) From c197ea60266558d4a50dc65c54da3e8d0d40e46d Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 17:26:54 +0100 Subject: [PATCH 26/36] fix tests --- tests/args/test_setup.py | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/tests/args/test_setup.py b/tests/args/test_setup.py index 9da183ac..0352e2a3 100644 --- a/tests/args/test_setup.py +++ b/tests/args/test_setup.py @@ -169,8 +169,8 @@ def test_setup_linear(): "--batch_size", "128", "--num_workers", - "10", - "--dali", + "10" "--data_format", + "dali", "--name", "test", "--pretrained_feature_extractor", @@ -219,7 +219,7 @@ def test_additional_setup_pretrain(): "max_scale": [1.0], "crop_size": [224], "num_crops_per_aug": [1, 1], - "dali": True, + "data_format": "dali", "optimizer": "sgd", "devices": "0,1", "lr": 0.1, @@ -228,7 +228,6 @@ def test_additional_setup_pretrain(): "zero_init_residual": False, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -257,7 +256,7 @@ def test_additional_setup_pretrain(): "max_scale": [1.0], "crop_size": [224], "num_crops_per_aug": [2], - "dali": True, + "data_format": "dali", "optimizer": "sgd", "devices": "0,1", "lr": 0.1, @@ -266,7 +265,6 @@ def test_additional_setup_pretrain(): "zero_init_residual": False, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -295,7 +293,7 @@ def test_additional_setup_pretrain(): "max_scale": [1.0], "crop_size": [224, 96], "num_crops_per_aug": [2, 4], - "dali": False, + "data_format": "image_folder", "optimizer": "sgd", "devices": "0,1", "lr": 0.1, @@ -304,7 +302,6 @@ def 
test_additional_setup_pretrain(): "zero_init_residual": False, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -333,7 +330,7 @@ def test_additional_setup_pretrain(): "max_scale": [1.0], "crop_size": [224], "num_crops_per_aug": [1, 1], - "dali": True, + "data_format": "dali", "optimizer": "sgd", "devices": "0,", "lr": 0.1, @@ -342,7 +339,6 @@ def test_additional_setup_pretrain(): "zero_init_residual": False, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -376,7 +372,7 @@ def test_additional_setup_pretrain(): "min_scale": [0.08], "max_scale": [1.0], "crop_size": [224], - "dali": True, + "data_format": "dali", "optimizer": "sgd", "devices": "0,", "lr": 0.1, @@ -385,7 +381,6 @@ def test_additional_setup_pretrain(): "patch_size": 16, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -401,7 +396,7 @@ def test_additional_setup_linear(): args = { "backbone": "resnet18", "dataset": "imagenet100", - "dali": True, + "data_format": "dali", "optimizer": "sgd", "devices": "0,1", "lr": 0.1, @@ -409,7 +404,6 @@ def test_additional_setup_linear(): "zero_init_residual": False, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -423,7 +417,7 @@ def test_additional_setup_linear(): args = { "backbone": "resnet18", "dataset": "imagenet100", - "dali": True, + "data_format": "dali", "optimizer": "sgd", "devices": "0,", "lr": 0.1, @@ -431,7 +425,6 @@ def test_additional_setup_linear(): "zero_init_residual": False, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) @@ -451,7 +444,7 @@ def test_additional_setup_linear(): "mean": [0.485, 0.456, 0.406], "std": [0.228, 0.224, 0.225], "crop_size": [224], - "dali": False, + "data_format": "image_folder", "num_crops_per_aug": [2], "optimizer": "sgd", "devices": "0,", @@ -461,7 +454,6 @@ "patch_size": 16, "strategy": None, "num_nodes": 1, - "num_nodes_horovod": None, } args = argparse.Namespace(**args) From 039181a8a476e091d37ed53259ea1a5df55dc0f2 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 17:45:01 +0100 Subject: [PATCH 27/36] more fixes --- solo/args/utils.py | 6 ++---- tests/args/test_datasets.py | 2 +- tests/args/test_setup.py | 3 ++- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/solo/args/utils.py b/solo/args/utils.py index ad6da48e..46b7242d 100644 --- a/solo/args/utils.py +++ b/solo/args/utils.py @@ -57,10 +57,9 @@ def additional_setup_pretrain(args: Namespace): else: # hack to maintain the current pipeline # even if the custom dataset doesn't have any labels - dir_path = args.data_dir / args.train_dir args.num_classes = max( 1, - len([entry.name for entry in os.scandir(dir_path) if entry.is_dir]), + len([entry.name for entry in os.scandir(args.train_data_path) if entry.is_dir()]), ) unique_augs = max( @@ -268,10 +267,9 @@ def additional_setup_linear(args: Namespace): else: # hack to maintain the current pipeline # even if the custom dataset doesn't have any labels - dir_path = args.data_dir / args.train_dir args.num_classes = max( 1, - len([entry.name for entry in os.scandir(dir_path) if entry.is_dir]), + len([entry.name for entry in os.scandir(args.train_data_path) if entry.is_dir()]), ) # create backbone-specific arguments diff --git a/tests/args/test_datasets.py b/tests/args/test_datasets.py index ea7430e4..be70e414 100644 ---
a/tests/args/test_datasets.py +++ b/tests/args/test_datasets.py @@ -29,7 +29,7 @@ def test_argparse_dataset(): assert "dataset" in actions assert "train_data_path" in actions assert "val_data_path" in actions - assert "dali" in actions + assert "data_format" in actions def test_argparse_augmentations(): diff --git a/tests/args/test_setup.py b/tests/args/test_setup.py index 0352e2a3..fa86a546 100644 --- a/tests/args/test_setup.py +++ b/tests/args/test_setup.py @@ -169,7 +169,8 @@ def test_setup_linear(): "--batch_size", "128", "--num_workers", - "10" "--data_format", + "10", + "--data_format", "dali", "--name", "test", From c72f189b8746938e89870c19c8fdd724633aaf5b Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 17:54:36 +0100 Subject: [PATCH 28/36] wip --- tests/args/test_setup.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/tests/args/test_setup.py b/tests/args/test_setup.py index fa86a546..50db862c 100644 --- a/tests/args/test_setup.py +++ b/tests/args/test_setup.py @@ -141,10 +141,6 @@ def test_setup_linear(): "./datasets", "--val_data_path", "./datasets", - "--train_dir", - "imagenet-100/train", - "--val_dir", - "imagenet-100/val", "--max_epochs", "100", "--devices", From d410faff340b3b3e7636589c5482cd9944d27d83 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 20:27:06 +0000 Subject: [PATCH 29/36] small tweaks --- solo/utils/classification_dataloader.py | 9 ++++++++- solo/utils/pretrain_dataloader.py | 8 +++++++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/solo/utils/classification_dataloader.py b/solo/utils/classification_dataloader.py index 1e78224e..cf9582e1 100644 --- a/solo/utils/classification_dataloader.py +++ b/solo/utils/classification_dataloader.py @@ -22,12 +22,18 @@ from typing import Callable, Optional, Tuple, Union import torchvision -from solo.utils.h5_dataset import H5Dataset from torch import nn from torch.utils.data import DataLoader, Dataset from torchvision import transforms from torchvision.datasets import STL10, ImageFolder +try: + from solo.utils.h5_dataset import H5Dataset +except ImportError: + _h5_available = False +else: + _h5_available = True + def build_custom_pipeline(): """Builds augmentation pipelines for custom data. 
@@ -211,6 +217,7 @@ def prepare_datasets( elif dataset in ["imagenet", "imagenet100", "custom"]: if data_format == "h5": + assert _h5_available train_dataset = H5Dataset(dataset, train_data_path, T_train) val_dataset = H5Dataset(dataset, val_data_path, T_val) else: diff --git a/solo/utils/pretrain_dataloader.py b/solo/utils/pretrain_dataloader.py index 14bed624..e2d844da 100644 --- a/solo/utils/pretrain_dataloader.py +++ b/solo/utils/pretrain_dataloader.py @@ -30,7 +30,12 @@ from torchvision import transforms from torchvision.datasets import STL10, ImageFolder -from solo.utils.h5_dataset import H5Dataset +try: + from solo.utils.h5_dataset import H5Dataset +except ImportError: + _h5_available = False +else: + _h5_available = True def dataset_with_index(DatasetClass: Type[Dataset]) -> Type[Dataset]: @@ -541,6 +546,7 @@ def prepare_datasets( elif dataset in ["imagenet", "imagenet100"]: if data_format == "h5": + assert _h5_available train_dataset = dataset_with_index(H5Dataset)(dataset, train_data_path, transform) else: train_dataset = dataset_with_index(ImageFolder)(train_data_path, transform) From 9375474409e4290d9cc8a75f4ba05f44cac4c002 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 21:15:29 +0000 Subject: [PATCH 30/36] fixed linear --- main_linear.py | 2 +- scripts/linear/imagenet-100/barlow_linear.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/main_linear.py b/main_linear.py index fc5727fd..1458dd81 100644 --- a/main_linear.py +++ b/main_linear.py @@ -57,7 +57,7 @@ def main(): if "swin" in args.backbone and cifar: kwargs["window_size"] = 4 - backbone = backbone_model(**kwargs) + backbone = backbone_model(method=None, **kwargs) if args.backbone.startswith("resnet"): # remove fc layer backbone.fc = nn.Identity() diff --git a/scripts/linear/imagenet-100/barlow_linear.sh b/scripts/linear/imagenet-100/barlow_linear.sh index 633a58dd..92c09031 100644 --- a/scripts/linear/imagenet-100/barlow_linear.sh +++ b/scripts/linear/imagenet-100/barlow_linear.sh @@ -21,4 +21,4 @@ python3 main_linear.py \ --entity unitn-mhug \ --wandb \ --save_checkpoint \ - --auto_resume \ No newline at end of file + --auto_resume From a2220ac9797f4f3f6094ef5e4e6df64b39326035 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 21:19:26 +0000 Subject: [PATCH 31/36] typo --- scripts/utils/convert_imgfolder_to_h5.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/utils/convert_imgfolder_to_h5.py b/scripts/utils/convert_imgfolder_to_h5.py index 87e06b7e..0b0325c2 100644 --- a/scripts/utils/convert_imgfolder_to_h5.py +++ b/scripts/utils/convert_imgfolder_to_h5.py @@ -35,7 +35,7 @@ def convert_imgfolder_to_h5(folder_path: str, h5_path: str): with h5py.File(h5_path, "w") as h5: classes = os.listdir(folder_path) - for class_name in tqdm(classes, desc="Processing classes:"): + for class_name in tqdm(classes, desc="Processing classes"): cur_folder = os.path.join(folder_path, class_name) class_group = h5.create_group(class_name) for i, img_name in enumerate(os.listdir(cur_folder)): From 75cc370aebbdbf61bab30e195d15ff92f0cddae4 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 21:59:13 +0000 Subject: [PATCH 32/36] fixed h5 --- solo/methods/base.py | 4 ++-- solo/methods/linear.py | 4 ++-- solo/utils/h5_dataset.py | 15 ++++++++++++--- solo/utils/misc.py | 1 - 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/solo/methods/base.py b/solo/methods/base.py index 4e339317..5ff5842f 100644 --- a/solo/methods/base.py +++ 
b/solo/methods/base.py @@ -17,7 +17,6 @@ # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. -import os import warnings from argparse import ArgumentParser from functools import partial @@ -352,10 +351,11 @@ def num_training_steps(self) -> int: no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) - + data_format = self.extra_args.get("data_format", "image_folder") dataset_size = compute_dataset_size( dataset=dataset, data_path=data_path, + data_format=data_format, train=True, no_labels=no_labels, data_fraction=data_fraction, diff --git a/solo/methods/linear.py b/solo/methods/linear.py index 7087ddf0..dcc7a3b0 100644 --- a/solo/methods/linear.py +++ b/solo/methods/linear.py @@ -17,7 +17,6 @@ # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. -import os from argparse import ArgumentParser from typing import Any, Dict, List, Optional, Sequence, Tuple @@ -187,10 +186,11 @@ def num_training_steps(self) -> int: no_labels = self.extra_args.get("no_labels", False) data_fraction = self.extra_args.get("data_fraction", -1.0) - + data_format = self.extra_args.get("data_format", "image_folder") dataset_size = compute_dataset_size( dataset=dataset, data_path=data_path, + data_format=data_format, train=True, no_labels=no_labels, data_fraction=data_fraction, diff --git a/solo/utils/h5_dataset.py b/solo/utils/h5_dataset.py index f03381e3..8459c8a1 100644 --- a/solo/utils/h5_dataset.py +++ b/solo/utils/h5_dataset.py @@ -20,12 +20,14 @@ import io import os +import warnings from pathlib import Path from typing import Callable, Optional -from tqdm import tqdm + import h5py from PIL import Image from torch.utils.data import Dataset +from tqdm import tqdm class H5Dataset(Dataset): @@ -74,12 +76,19 @@ def __init__( for class_name, img_name, _ in self._data: if class_name in class_set: new_data.append((class_name, img_name, self.class_to_idx[class_name])) - self._data = new_data + if not new_data: + warnings.warn( + "Skipped filtering. Tried to filter classes for imagenet100, " + "but wasn't able to do so. Either make sure that you do not " + "rely on the filtering, i.e. your h5 file is already filtered " + "or make sure the class names are the default ones." 
+ ) + else: + self._data = new_data def _load_h5_data_info(self): self._data = [] h5_data_info_file = os.path.splitext(self.h5_path)[0] + ".txt" - if not os.path.isfile(h5_data_info_file): temp_h5_file = h5py.File(self.h5_path, "r") diff --git a/solo/utils/misc.py b/solo/utils/misc.py index bd37a5ab..6b6d6560 100644 --- a/solo/utils/misc.py +++ b/solo/utils/misc.py @@ -25,7 +25,6 @@ import torch import torch.distributed as dist import torch.nn as nn - from solo.utils.h5_dataset import H5Dataset From 75a34b03b98070b31f8600d7974b96e2062562f9 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 22:01:07 +0000 Subject: [PATCH 33/36] fix knn and umap scripts --- scripts/knn/imagenet-100/knn.sh | 5 ++--- scripts/umap/imagenet-100/umap.sh | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/scripts/knn/imagenet-100/knn.sh b/scripts/knn/imagenet-100/knn.sh index 3ec0251c..2cb33cf9 100644 --- a/scripts/knn/imagenet-100/knn.sh +++ b/scripts/knn/imagenet-100/knn.sh @@ -1,8 +1,7 @@ python3 main_knn.py \ --dataset imagenet100 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --batch_size 16 \ --num_workers 10 \ --pretrained_checkpoint_dir PATH \ diff --git a/scripts/umap/imagenet-100/umap.sh b/scripts/umap/imagenet-100/umap.sh index 5f58f8d9..03f4c7b5 100644 --- a/scripts/umap/imagenet-100/umap.sh +++ b/scripts/umap/imagenet-100/umap.sh @@ -1,8 +1,7 @@ python3 main_umap.py \ --dataset imagenet100 \ - --data_dir /datasets \ - --train_dir imagenet-100/train \ - --val_dir imagenet-100/val \ + --train_data_path /datasets/imagenet-100/train \ + --val_data_path /datasets/imagenet-100/val \ --batch_size 16 \ --num_workers 10 \ --pretrained_checkpoint_dir PATH From 5c31587c4d93140cc7e8acc2210cd03d260bfd84 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 22:06:14 +0000 Subject: [PATCH 34/36] small tweaks --- README.md | 1 + scripts/pretrain/custom/byol.sh | 6 +++--- setup.py | 1 - 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 61fd732f..daf8980d 100644 --- a/README.md +++ b/README.md @@ -15,6 +15,7 @@ The library is self-contained, but it is possible to use the models outside of s --- ## News +* **[Jul 13 2022]**: :sparkling_heart: Added support for [H5](https://docs.h5py.org/en/stable/index.html) data, improved scripts and data handling. * **[Jun 26 2022]**: :fire: Added [MoCo V3](https://arxiv.org/abs/2104.02057). * **[Jun 10 2022]**: :bomb: Improved LARS. * **[Jun 09 2022]**: :lollipop: Added support for [WideResnet](https://arxiv.org/abs/1605.07146), multicrop for SwAV and equalization data augmentation. diff --git a/scripts/pretrain/custom/byol.sh b/scripts/pretrain/custom/byol.sh index dfd843da..cd79b868 100644 --- a/scripts/pretrain/custom/byol.sh +++ b/scripts/pretrain/custom/byol.sh @@ -1,11 +1,11 @@ # Train without labels. 
# To train with labels, simply remove --no_labels -# --val_dir is optional and will expect a directory with subfolder (classes) -# --dali flag is also supported +# --val_data_path is optional and will expect a directory with subfolder (classes) +# --data_format supports "image_folder" and "dali" python3 main_pretrain.py \ --dataset custom \ --backbone resnet18 \ - --train_data_dir PATH_TO_TRAIN_DIR \ + --train_data_path PATH_TO_TRAIN_DIR \ --no_labels \ --max_epochs 400 \ --devices 0,1 \ diff --git a/setup.py b/setup.py index eb2fba8c..e30680dd 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,6 @@ def parse_requirements(path): "scipy", "timm", "scikit-learn", - "h5py", ], extras_require=EXTRA_REQUIREMENTS, dependency_links=["https://developer.download.nvidia.com/compute/redist"], From 6de7de36f142ad4bb9157329bd35a5d31e5072f5 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 23:32:04 +0100 Subject: [PATCH 35/36] Update tests.yml --- .github/workflows/tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index de7cd439..2a6038df 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -34,7 +34,7 @@ jobs: - name: Install Python dependencies run: | python -m pip install --upgrade pip - pip install -e .[umap] codecov mypy pytest-cov black + pip install -e .[umap,h5] codecov mypy pytest-cov black - name: Cache datasets uses: actions/cache@v2 From bbc2b46a5a6f5b1e45de4dfb103aa983759925c9 Mon Sep 17 00:00:00 2001 From: Victor Turrisi Date: Wed, 13 Jul 2022 23:33:31 +0100 Subject: [PATCH 36/36] Update dali_tests.yml --- .github/workflows/dali_tests.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/dali_tests.yml b/.github/workflows/dali_tests.yml index be31249f..f65f99b2 100644 --- a/.github/workflows/dali_tests.yml +++ b/.github/workflows/dali_tests.yml @@ -34,7 +34,8 @@ jobs: - name: Install Python dependencies run: | python -m pip install --upgrade pip - pip install -e .[umap] codecov mypy pytest-cov black + pip install .[dali,umap,h5] --extra-index-url https://developer.download.nvidia.com/compute/redist codecov + pip install mypy pytest-cov black - name: Cache datasets uses: actions/cache@v2 @@ -61,4 +62,4 @@ jobs: file: coverage.xml flags: dali name: DALI-coverage - fail_ci_if_error: false \ No newline at end of file + fail_ci_if_error: false
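A minimal end-to-end sketch of the H5 workflow these patches introduce, for readers wiring it up themselves. It assumes the `h5` extra is installed (`pip3 install .[h5]`), that the repository root is on `PYTHONPATH` so the relocated conversion helper is importable as a module (otherwise run the script file directly), and that the dataset paths below are placeholders to adapt:

```bash
# One-time conversion: pack each ImageFolder split into a single .h5 file.
# convert_imgfolder_to_h5(folder_path, h5_path) is the function defined in
# scripts/utils/convert_imgfolder_to_h5.py.
python3 -c "
from scripts.utils.convert_imgfolder_to_h5 import convert_imgfolder_to_h5
convert_imgfolder_to_h5('/datasets/imagenet-100/train', '/datasets/imagenet-100/train.h5')
convert_imgfolder_to_h5('/datasets/imagenet-100/val', '/datasets/imagenet-100/val.h5')
"

# Training then points the unified path flags at the .h5 files; this is
# trimmed to the data-related flags, so the usual method/optimizer/augmentation
# flags from the scripts/pretrain examples above still need to be appended.
python3 main_pretrain.py \
    --dataset imagenet100 \
    --backbone resnet18 \
    --data_format h5 \
    --train_data_path /datasets/imagenet-100/train.h5 \
    --val_data_path /datasets/imagenet-100/val.h5 \
    --method byol  # ...remaining flags as in the pretrain scripts
```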