Update script headers #4163

Merged · 6 commits · Jul 26, 2021
data/scripts/download_weights.sh (9 changes: 7 additions & 2 deletions)
@@ -1,7 +1,12 @@
#!/bin/bash
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
# Download latest models from https://github.com/ultralytics/yolov5/releases
-# Usage:
-# $ bash path/to/download_weights.sh
+# YOLOv5 πŸš€ example usage: bash path/to/download_weights.sh
+# parent
+# └── yolov5
+#     β”œβ”€β”€ yolov5s.pt ← downloads here
+#     β”œβ”€β”€ yolov5m.pt
+#     └── ...

python - <<EOF
from utils.google_utils import attempt_download
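
The rest of the python heredoc is not shown in this hunk. As an illustrative sketch only (the model list below is an assumption, not the hidden lines), a short loop over attempt_download is enough to produce the layout described in the new header:

from utils.google_utils import attempt_download  # same helper imported in the visible line above

for size in ['s', 'm', 'l', 'x']:  # assumed model suffixes
    attempt_download(f'yolov5{size}.pt')  # fetch the weights from the latest release if missing
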
data/scripts/get_coco.sh (14 changes: 7 additions & 7 deletions)
@@ -1,11 +1,11 @@
#!/bin/bash
-# COCO 2017 dataset http://cocodataset.org
-# Download command: bash data/scripts/get_coco.sh
-# Train command: python train.py --data coco.yaml
-# Default dataset location is next to YOLOv5:
-#   /parent_folder
-#     /coco
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO 2017 dataset http://cocodataset.org
+# YOLOv5 πŸš€ example usage: bash data/scripts/get_coco.sh
+# parent
+# β”œβ”€β”€ yolov5
+# └── datasets
+#     └── coco ← downloads here

# Download/unzip labels
d='../datasets' # unzip directory
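
The new header documents the revised convention: datasets now live in a datasets/ folder beside the yolov5/ repo rather than loose in the parent folder. A minimal check of that layout (a sketch, assuming it is run from inside the yolov5 repo root):

from pathlib import Path

root = Path.cwd().parent  # 'parent' in the header tree, one level above yolov5/
print((root / 'yolov5').is_dir())  # True when run from the repo root
print((root / 'datasets' / 'coco').is_dir())  # True once get_coco.sh has finished downloading
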
data/scripts/get_coco128.sh (16 changes: 8 additions & 8 deletions)
@@ -1,14 +1,14 @@
#!/bin/bash
-# COCO128 dataset https://www.kaggle.com/ultralytics/coco128
-# Download command: bash data/scripts/get_coco128.sh
-# Train command: python train.py --data coco128.yaml
-# Default dataset location is next to /yolov5:
-#   /parent_folder
-#     /coco128
-#     /yolov5
+# Copyright Ultralytics https://ultralytics.com, licensed under GNU GPL v3.0
+# Download COCO128 dataset https://www.kaggle.com/ultralytics/coco128 (first 128 images from COCO train2017)
+# YOLOv5 πŸš€ example usage: bash data/scripts/get_coco128.sh
+# parent
+# β”œβ”€β”€ yolov5
+# └── datasets
+#     └── coco128 ← downloads here

# Download/unzip images and labels
-d='../' # unzip directory
+d='../datasets' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
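
Besides the header, the unzip directory moves from '../' to '../datasets', so coco128 lands in the shared datasets/ folder next to the repo. A rough Python-stdlib equivalent of the download-and-unzip step (a sketch of the effect, not the script's own shell commands):

import urllib.request
import zipfile
from pathlib import Path

d = Path('../datasets')  # unzip directory, matching d='../datasets'
d.mkdir(parents=True, exist_ok=True)
url = 'https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip'
zip_path = d / 'coco128.zip'
urllib.request.urlretrieve(url, zip_path)  # download the archive
with zipfile.ZipFile(zip_path) as z:
    z.extractall(d)  # extracts to ../datasets/coco128
zip_path.unlink()  # remove the archive once extracted
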
train.py (5 changes: 3 additions & 2 deletions)
@@ -78,8 +78,9 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary
plots = not evolve # create plots
cuda = device.type != 'cpu'
init_seeds(1 + RANK)
-with open(data) as f:
-data_dict = yaml.safe_load(f) # data dict
+with open(data, encoding='ascii', errors='ignore') as f:
+data_dict = yaml.safe_load(f)
+
nc = 1 if single_cls else int(data_dict['nc']) # number of classes
names = ['item'] if single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names
assert len(names) == nc, f'{len(names)} names found for nc={nc} dataset in {data}' # check
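
The data yaml is now opened with encoding='ascii', errors='ignore', so non-ASCII bytes (for example the πŸš€ added to the script headers, or emoji in user-supplied yamls) are silently dropped instead of tripping the platform's default codec. A minimal sketch of the same read pattern (the yaml path is assumed):

import yaml

with open('data/coco128.yaml', encoding='ascii', errors='ignore') as f:
    data_dict = yaml.safe_load(f)
print(data_dict['nc'], data_dict['names'][:3])  # the keys train.py uses: class count and class names
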
utils/autoanchor.py (10 changes: 4 additions & 6 deletions)
@@ -60,11 +60,11 @@ def metric(k): # compute metric
print('') # newline


-def kmean_anchors(path='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
+def kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000, verbose=True):
""" Creates kmeans-evolved anchors from training dataset

Arguments:
-path: path to dataset *.yaml, or a loaded dataset
+dataset: path to data.yaml, or a loaded dataset
n: number of anchors
img_size: image size used for training
thr: anchor-label wh ratio threshold hyperparameter hyp['anchor_t'] used for training, default=4.0
@@ -103,13 +103,11 @@ def print_results(k):
print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n') # use in *.cfg
return k

-if isinstance(path, str): # *.yaml file
-with open(path) as f:
+if isinstance(dataset, str): # *.yaml file
+with open(dataset, encoding='ascii', errors='ignore') as f:
data_dict = yaml.safe_load(f) # model dict
from utils.datasets import LoadImagesAndLabels
dataset = LoadImagesAndLabels(data_dict['train'], augment=True, rect=True)
-else:
-dataset = path # dataset

# Get label wh
shapes = img_size * dataset.shapes / dataset.shapes.max(1, keepdims=True)
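
With the argument renamed from path to dataset and the else branch removed, kmean_anchors accepts either a data.yaml path or an already-built dataset object and uses the latter as-is. A usage sketch under that reading (the image path in the second call is illustrative):

from utils.autoanchor import kmean_anchors
from utils.datasets import LoadImagesAndLabels

# 1) pass a data.yaml path: the function loads the training set itself
anchors = kmean_anchors(dataset='./data/coco128.yaml', n=9, img_size=640, thr=4.0, gen=1000)

# 2) pass a pre-built dataset: used directly now that `else: dataset = path` is gone
train_set = LoadImagesAndLabels('../datasets/coco128/images/train2017', augment=True, rect=True)
anchors = kmean_anchors(dataset=train_set, n=9, img_size=640, thr=4.0, gen=1000)
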
utils/datasets.py (2 changes: 1 addition & 1 deletion)
@@ -909,7 +909,7 @@ def unzip(path):
return False, None, path

zipped, data_dir, yaml_path = unzip(Path(path))
-with open(check_file(yaml_path)) as f:
+with open(check_file(yaml_path), encoding='ascii', errors='ignore') as f:
data = yaml.safe_load(f) # data dict
if zipped:
data['path'] = data_dir # TODO: should this be dir.resolve()?
utils/loggers/wandb/log_dataset.py (4 changes: 2 additions & 2 deletions)
@@ -8,9 +8,9 @@


def create_dataset_artifact(opt):
-with open(opt.data) as f:
+with open(opt.data, encoding='ascii', errors='ignore') as f:
data = yaml.safe_load(f) # data dict
-logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation')
+logger = WandbLogger(opt, '', None, data, job_type='Dataset Creation') # TODO: return value unused


if __name__ == '__main__':
utils/loggers/wandb/wandb_utils.py (6 changes: 3 additions & 3 deletions)
@@ -62,7 +62,7 @@ def check_wandb_resume(opt):


def process_wandb_config_ddp_mode(opt):
-with open(check_file(opt.data)) as f:
+with open(check_file(opt.data), encoding='ascii', errors='ignore') as f:
data_dict = yaml.safe_load(f) # data dict
train_dir, val_dir = None, None
if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX):
@@ -150,7 +150,7 @@ def check_and_upload_dataset(self, opt):
opt.single_cls,
'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem)
print("Created dataset config file ", config_path)
-with open(config_path) as f:
+with open(config_path, encoding='ascii', errors='ignore') as f:
wandb_data_dict = yaml.safe_load(f)
return wandb_data_dict

@@ -226,7 +226,7 @@ def log_model(self, path, opt, epoch, fitness_score, best_model=False):
print("Saving model artifact on epoch ", epoch + 1)

def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
-with open(data_file) as f:
+with open(data_file, encoding='ascii', errors='ignore') as f:
data = yaml.safe_load(f) # data dict
check_dataset(data)
nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names'])
val.py (2 changes: 1 addition & 1 deletion)
@@ -123,7 +123,7 @@ def run(data,
# model = nn.DataParallel(model)

# Data
-with open(data) as f:
+with open(data, encoding='ascii', errors='ignore') as f:
data = yaml.safe_load(f)
check_dataset(data) # check
