-
Notifications
You must be signed in to change notification settings - Fork 17
/
util.py
183 lines (138 loc) · 5.8 KB
/
util.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
import numpy as np
import os
import random
import imageio
import matplotlib.image as mpimg
from config import cfg
from colorama import init
from termcolor import colored
class DataProcess():
    """Iterate a dataset in (optionally shuffled) minibatches and load the
    TSDF / voxel / surface volumes for each batch of indices.

    data_paths: sequence of (scene_id, model_id) pairs; model_id is the
    filename appended to the cfg.DIR.*_PATH prefixes when loading .npy files.
    batch_size: number of samples per minibatch.
    repeat: when True, reshuffle and restart whenever an epoch is exhausted.
    """

    def __init__(self, data_paths, batch_size, repeat=True):
        self.data_paths = data_paths
        self.num_data = len(data_paths)
        self.repeat = repeat
        self.batch_size = batch_size
        self.shuffle_db_inds()
        self.n_vox = cfg.CONST.N_VOX
        # self.n_dep = cfg.CONST.N_DEP

    def shuffle_db_inds(self):
        """Reset the epoch: random permutation in repeat mode, identity otherwise."""
        if self.repeat:
            self.perm = np.random.permutation(np.arange(self.num_data))
        else:
            self.perm = np.arange(self.num_data)
        self.cur = 0

    def get_next_minibatch(self):
        """Return (db_inds, flag) for the next minibatch.

        flag is False exactly when the epoch boundary was hit and the
        permutation was reshuffled (repeat mode only), so callers can detect
        the restart.  In non-repeat mode the final short batch is still
        returned (slice clamped to num_data).
        """
        flag = True
        if (self.cur + self.batch_size) >= self.num_data and self.repeat:
            self.shuffle_db_inds()
            flag = False
        db_inds = self.perm[self.cur:min(self.cur +
                                         self.batch_size, self.num_data)]
        self.cur += self.batch_size
        return db_inds, flag

    def _load_batch(self, db_inds, path_prefix):
        # Shared loader for the three volume types: one (n_vox[0], n_vox[1],
        # n_vox[2]) float32 array per batch slot, read from
        # path_prefix + model_id (a .npy file).
        batch = np.zeros(
            (self.batch_size, self.n_vox[0], self.n_vox[1], self.n_vox[2]),
            dtype=np.float32)
        for batch_id, db_ind in enumerate(db_inds):
            _scene_id, model_id = self.data_paths[db_ind]
            batch[batch_id, :, :, :] = np.load(path_prefix + model_id)
        return batch

    def get_tsdf(self, db_inds):
        """Load the TSDF volumes for the given dataset indices."""
        return self._load_batch(db_inds, cfg.DIR.TSDF_PATH)

    def get_voxel(self, db_inds):
        """Load the ground-truth voxel volumes for the given dataset indices."""
        return self._load_batch(db_inds, cfg.DIR.VOXEL_PATH)

    def get_surf(self, db_inds):
        """Load the surface volumes for the given dataset indices."""
        return self._load_batch(db_inds, cfg.DIR.SURF_PATH)
def id_models_train(dataset_portion=None, data_list='./train_3rscan.list'):
    '''
    Load (model_path, model_id) pairs for training from a list file.

    dataset_portion: optional sequence whose first element is a start
        fraction; entries before int(num_models * dataset_portion[0]) are
        skipped.  None or empty keeps every model.  (The previous mutable
        default `[]` raised IndexError whenever the default was used.)
    data_list: text file with one model id per line.

    Returns a list of (cfg.DIR.TSDF_PATH, model_id) tuples.
    '''
    model_path = cfg.DIR.TSDF_PATH
    with open(data_list) as list_file:
        models = list_file.read().splitlines()
    scene_name_pair = [(model_path, model_id) for model_id in models]
    num_models = len(scene_name_pair)
    # Start index defaults to 0 when no portion is supplied.
    start = int(num_models * dataset_portion[0]) if dataset_portion else 0
    return scene_name_pair[start:]
def id_models_test(dataset_portion=None,
                   data_list='./lists_infer/test_3rscan.list'):
    '''
    Load every test sample's TSDF, surface and voxel volumes into memory.

    dataset_portion: optional sequence whose first element is a start
        fraction; entries before int(num_models * dataset_portion[0]) are
        skipped.  None or empty keeps every model.  (The previous mutable
        default `[]` raised IndexError whenever the default was used.)
    data_list: text file with one model id per line.

    Returns (batch_voxel, batch_surf, batch_tsdf, num_models, data_paths)
    where each batch_* array has shape (num_models, *cfg.CONST.N_VOX).
    Side effect: any matching real-depth PNG found on disk is re-saved as
    uint8 into results_depth/.
    '''
    scene_name_pair = []  # full path of the objs files
    model_path = cfg.DIR.TSDF_PATH
    with open(data_list) as list_file:
        models = list_file.read().splitlines()
    scene_name_pair.extend([(model_path, model_id) for model_id in models])
    num_models = len(scene_name_pair)
    start = int(num_models * dataset_portion[0]) if dataset_portion else 0
    data_paths = scene_name_pair[start:]
    # Fixed seed so the test-sample order is reproducible across runs.
    random.seed(1)
    random.shuffle(data_paths)
    # data_paths = data_paths[:50]  # optionally cap the number of test samples
    num_models = len(data_paths)
    print('The amount of test data: %d' % num_models)
    n_vox = cfg.CONST.N_VOX
    batch_shape = (num_models, n_vox[0], n_vox[1], n_vox[2])
    batch_tsdf = np.zeros(batch_shape, dtype=np.float32)
    batch_surf = np.zeros(batch_shape, dtype=np.float32)
    batch_voxel = np.zeros(batch_shape, dtype=np.float32)
    for i, (scene_id, model_id) in enumerate(data_paths):
        # Save the matching real depth image (when it exists) as a uint8 PNG.
        depth_fn = scene_id.replace("depth_tsdf_camera_npy",
                                    "depth_real_png") + "/" + model_id.replace(
                                        ".npy", ".png")
        if os.path.isfile(depth_fn):
            img = mpimg.imread(depth_fn)
            imageio.imwrite('results_depth/' + model_id[:-4] + '.png',
                            img.astype(np.uint8))
        batch_tsdf[i, :, :, :] = np.load(cfg.DIR.TSDF_PATH + model_id)
        batch_surf[i, :, :, :] = np.load(cfg.DIR.SURF_PATH + model_id)
        batch_voxel[i, :, :, :] = np.load(cfg.DIR.VOXEL_PATH + model_id)
    return batch_voxel, batch_surf, batch_tsdf, num_models, data_paths[:
                                                                       num_models]
def onehot(voxel, class_num):
    """One-hot encode an integer label volume.

    voxel: array of class labels expected in [0, class_num); any shape.
    class_num: number of classes (size of the appended trailing axis).

    Returns a float array of shape voxel.shape + (class_num,) with a 1 at
    each label's position along the last axis.
    """
    # Row-indexing an identity matrix is a fully vectorized one-hot lookup;
    # it replaces the old per-class loop that was left behind as unreachable
    # dead code after the return statement.
    return np.eye(class_num)[voxel.astype(int)]