-
Notifications
You must be signed in to change notification settings - Fork 150
/
aligned_dataset.py
70 lines (56 loc) · 2.73 KB
/
aligned_dataset.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import os.path
from PIL import Image
from data.base_dataset import BaseDataset, get_params, get_transform
from data.image_folder import make_dataset
import argparse
class AlignedDataset(BaseDataset):
    """A dataset class for paired image dataset.

    It assumes that the directory '/path/to/data/train' contains image pairs
    stored side-by-side in a single file in the form of {A,B}.
    During test time, you need to prepare a directory '/path/to/data/test'.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train):
        """Add dataset-specific options and return the modified parser.

        Parameters:
            parser   -- an argparse.ArgumentParser, pre-populated by BaseDataset
            is_train -- whether this is the training phase (forwarded to BaseDataset)
        """
        parser = BaseDataset.modify_commandline_options(parser, is_train)
        assert isinstance(parser, argparse.ArgumentParser)
        parser.add_argument('--meta_path', type=str, default=None,
                            help='the path to the meta file')
        return parser

    def __init__(self, opt):
        """Initialize this dataset class.

        Parameters:
            opt (Option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseDataset.__init__(self, opt)
        # The meta file is only consulted for the training split.
        meta_path = opt.meta_path if opt.phase == 'train' else None
        self.dir_AB = os.path.join(opt.dataroot, opt.phase)  # get the image directory
        self.AB_paths = sorted(make_dataset(self.dir_AB, opt.max_dataset_size, meta_path=meta_path))  # get image paths
        assert (self.opt.load_size >= self.opt.crop_size)  # crop_size should be smaller than the size of loaded image
        # Swap input/output channel counts when running in the B->A direction.
        self.input_nc = self.opt.output_nc if self.opt.direction == 'BtoA' else self.opt.input_nc
        self.output_nc = self.opt.input_nc if self.opt.direction == 'BtoA' else self.opt.output_nc
        self.cache = {}  # index -> decoded PIL image; used only when opt.load_in_memory is set

    def __getitem__(self, index):
        """Return a data point and its metadata information.

        Parameters:
            index (int) -- an integer for data indexing

        Returns a dictionary that contains:
            A (tensor)    -- an image in the input domain (left half of the AB file)
            B (tensor)    -- its paired image in the target domain (right half)
            A_paths (str) -- path of the combined AB image
            B_paths (str) -- same path as A_paths (A and B live in one file)
        """
        AB_path = self.AB_paths[index]
        if not self.opt.load_in_memory or self.cache.get(index) is None:
            AB = Image.open(AB_path).convert('RGB')
            if self.opt.load_in_memory:
                self.cache[index] = AB  # keep the decoded image to avoid re-reading from disk
        else:
            AB = self.cache[index]
        # split AB image into A (left half) and B (right half)
        w, h = AB.size
        w2 = int(w / 2)
        A = AB.crop((0, 0, w2, h))
        B = AB.crop((w2, 0, w, h))
        # apply the same transform (identical random crop/flip parameters) to both A and B
        transform_params = get_params(self.opt, A.size)
        A_transform = get_transform(self.opt, transform_params, grayscale=(self.input_nc == 1))
        B_transform = get_transform(self.opt, transform_params, grayscale=(self.output_nc == 1))
        A = A_transform(A)
        B = B_transform(B)
        return {'A': A, 'B': B, 'A_paths': AB_path, 'B_paths': AB_path}

    def __len__(self):
        """Return the total number of images in the dataset.

        Fix: the previous version returned opt.max_dataset_size verbatim,
        which over-reports the length whenever the cap exceeds the number of
        images actually found on disk — __getitem__ would then raise
        IndexError on self.AB_paths for the out-of-range indices.
        """
        if self.opt.max_dataset_size == -1:
            return len(self.AB_paths)
        # Clamp so we never report more items than were actually collected.
        return min(len(self.AB_paths), self.opt.max_dataset_size)