From 9eb9618778cf20a50e95c21bcc9e8b73505376f8 Mon Sep 17 00:00:00 2001 From: Aman Kishore Date: Fri, 14 Oct 2022 00:02:44 +0000 Subject: [PATCH 1/6] Parallel processing --- docker/Dockerfile | 11 ++- docker/make_image.sh | 0 render_shapenet_data/render_all.py | 6 ++ render_shapenet_data/render_parallel.py | 116 ++++++++++++++++++++++++ render_shapenet_data/render_shapenet.py | 8 +- 5 files changed, 134 insertions(+), 7 deletions(-) mode change 100644 => 100755 docker/make_image.sh create mode 100644 render_shapenet_data/render_parallel.py diff --git a/docker/Dockerfile b/docker/Dockerfile index ef8084a..c577d39 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,10 +1,10 @@ -# Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# Copyright (c) 2020-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # # NVIDIA CORPORATION, its affiliates and licensors retain all intellectual # property and proprietary rights in and to this material, related -# documentation and any modifications thereto. Any use, reproduction, -# disclosure or distribution of this material and related documentation -# without an express license agreement from NVIDIA CORPORATION or +# documentation and any modifications thereto. Any use, reproduction, +# disclosure or distribution of this material and related documentation +# without an express license agreement from NVIDIA CORPORATION or # its affiliates is strictly prohibited. 
ARG BASE_IMAGE=nvcr.io/nvidia/pytorch:21.08-py3 @@ -49,3 +49,6 @@ RUN pip install meshzoo ipdb imageio gputil h5py point-cloud-utils imageio image # HDR image support RUN imageio_download_bin freeimage + +# Blender Dependencies +RUN apt-get install -y libxi6 libgconf-2-4 libfontconfig1 libxrender1 \ No newline at end of file diff --git a/docker/make_image.sh b/docker/make_image.sh old mode 100644 new mode 100755 diff --git a/render_shapenet_data/render_all.py b/render_shapenet_data/render_all.py index 5d23be7..5f8fb8b 100644 --- a/render_shapenet_data/render_all.py +++ b/render_shapenet_data/render_all.py @@ -8,6 +8,7 @@ import os import argparse +import time parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.') parser.add_argument( @@ -35,10 +36,15 @@ 0.7, 0.9 ] + for synset, obj_scale in zip(synset_list, scale_list): file_list = sorted(os.listdir(os.path.join(dataset_folder, synset))) for idx, file in enumerate(file_list): + start_time = time.time() render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 >> tmp.out' % ( blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale ) os.system(render_cmd) + + end_time = time.time() + print('Time for rendering %d models: %f' % (1, end_time - start_time)) diff --git a/render_shapenet_data/render_parallel.py b/render_shapenet_data/render_parallel.py new file mode 100644 index 0000000..51e4980 --- /dev/null +++ b/render_shapenet_data/render_parallel.py @@ -0,0 +1,116 @@ +# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# NVIDIA CORPORATION & AFFILIATES and its licensors retain all intellectual property +# and proprietary rights in and to this software, related documentation +# and any modifications thereto. 
Any use, reproduction, disclosure or +# distribution of this software and related documentation without an express +# license agreement from NVIDIA CORPORATION & AFFILIATES is strictly prohibited. + +import os +import argparse +import subprocess +import time + +parser = argparse.ArgumentParser(description='Renders given obj file by rotation a camera around it.') +parser.add_argument( + '--save_folder', type=str, default='./tmp', + help='path for saving rendered image') +parser.add_argument( + '--dataset_folder', type=str, default='./tmp', + help='path for downloaded 3d dataset folder') +parser.add_argument( + '--blender_root', type=str, default='./tmp', + help='path for blender') +args = parser.parse_args() + +save_folder = args.save_folder +dataset_folder = args.dataset_folder +blender_root = args.blender_root + +synset_list = [ + # '02958343', # Car + '03001627', # Chair + '03790512' # Motorbike +] +scale_list = [ + # 0.9, + 0.7, + 0.9 +] + +for synset, obj_scale in zip(synset_list, scale_list): + file_list = sorted(os.listdir(os.path.join(dataset_folder, synset))) + idx = 0 + start_time = time.time() + while idx < len(file_list): + print("Done with %d/%d" % (idx, len(file_list))) + + stdout = open('stdout.txt', 'w') + stderr = open('stderr.txt', 'w') + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 0' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p0 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 1' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p1 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P 
render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 2' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p2 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 3' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p3 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 4' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p4 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 5' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p5 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 6' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p6 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + file = file_list[idx] + render_cmd = '%s -b -P render_shapenet.py -- --output %s %s --scale %f --views 24 --resolution 1024 --gpu 7' % ( + blender_root, save_folder, os.path.join(dataset_folder, synset, file, 'model.obj'), obj_scale + ) + p7 = subprocess.Popen(render_cmd, shell=True, stdout=stdout, stderr=stderr) + idx += 1 + + p0.wait() + p1.wait() + p2.wait() + p3.wait() + p4.wait() + 
p5.wait() + p6.wait() + p7.wait() + + end_time = time.time() + print('Time for rendering %d models: %f' % (len(file_list), end_time - start_time)) diff --git a/render_shapenet_data/render_shapenet.py b/render_shapenet_data/render_shapenet.py index fe9f274..47d0406 100644 --- a/render_shapenet_data/render_shapenet.py +++ b/render_shapenet_data/render_shapenet.py @@ -31,6 +31,9 @@ parser.add_argument( '--resolution', type=int, default=512, help='Resolution of the images.') +parser.add_argument( + '--gpu', type=int, default=0, + help='gpu.') parser.add_argument( '--engine', type=str, default='CYCLES', help='Blender internal engine for rendering. E.g. CYCLES, BLENDER_EEVEE, ...') @@ -83,13 +86,12 @@ def enable_cuda_devices(): # If we have CUDA/OPENCL devices, enable only them, otherwise enable # all devices (assumed to be CPU) print(cprefs.devices) - for device in cprefs.devices: - device.use = not accelerated or device.type in acceleratedTypes + for idx, device in enumerate(cprefs.devices): + device.use = (not accelerated or device.type in acceleratedTypes) and idx == args.gpu print('Device enabled ({type}) = {enabled}'.format(type=device.type, enabled=device.use)) return accelerated - enable_cuda_devices() context.active_object.select_set(True) bpy.ops.object.delete() From 0362d5d607be48e23b7791edff0d6eb740b4bfb7 Mon Sep 17 00:00:00 2001 From: Aman Kishore Date: Sun, 16 Oct 2022 21:49:52 +0000 Subject: [PATCH 2/6] Better paths --- train_3d.py | 2 +- training/inference_3d.py | 14 +++++++------- training/inference_utils.py | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/train_3d.py b/train_3d.py index f2b87f8..135d988 100644 --- a/train_3d.py +++ b/train_3d.py @@ -62,7 +62,7 @@ def launch_training(c, desc, outdir, dry_run): prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None] cur_run_id = max(prev_run_ids, default=-1) + 1 if c.inference_vis: - c.run_dir = os.path.join(outdir, 'inference') + c.run_dir = os.path.join(outdir) 
else: c.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{desc}') assert not os.path.exists(c.run_dir) diff --git a/training/inference_3d.py b/training/inference_3d.py index befba57..028bd29 100644 --- a/training/inference_3d.py +++ b/training/inference_3d.py @@ -77,23 +77,23 @@ def inference( G.load_state_dict(model_state_dict['G'], strict=True) G_ema.load_state_dict(model_state_dict['G_ema'], strict=True) # D.load_state_dict(model_state_dict['D'], strict=True) - grid_size = (5, 5) + grid_size = (1, 1) n_shape = grid_size[0] * grid_size[1] grid_z = torch.randn([n_shape, G.z_dim], device=device).split(1) # random code for geometry grid_tex_z = torch.randn([n_shape, G.z_dim], device=device).split(1) # random code for texture grid_c = torch.ones(n_shape, device=device).split(1) print('==> generate ') - save_visualization( - G_ema, grid_z, grid_c, run_dir, 0, grid_size, 0, - save_all=False, - grid_tex_z=grid_tex_z - ) + # save_visualization( + # G_ema, grid_z, grid_c, run_dir, 0, grid_size, 0, + # save_all=False, + # grid_tex_z=grid_tex_z + # ) if inference_to_generate_textured_mesh: print('==> generate inference 3d shapes with texture') save_textured_mesh_for_inference( - G_ema, grid_z, grid_c, run_dir, save_mesh_dir='texture_mesh_for_inference', + G_ema, grid_z, grid_c, run_dir, save_mesh_dir="", c_to_compute_w_avg=None, grid_tex_z=grid_tex_z) if inference_save_interpolation: diff --git a/training/inference_utils.py b/training/inference_utils.py index af13804..12594d9 100644 --- a/training/inference_utils.py +++ b/training/inference_utils.py @@ -247,7 +247,7 @@ def save_textured_mesh_for_inference( all_uvs.data.cpu().numpy(), mesh_f.data.cpu().numpy(), all_mesh_tex_idx.data.cpu().numpy(), - os.path.join(mesh_dir, '%07d.obj' % (save_mesh_idx)) + os.path.join(mesh_dir, 'mesh.obj') ) lo, hi = (-1, 1) img = np.asarray(tex_map.permute(1, 2, 0).data.cpu().numpy(), dtype=np.float32) @@ -260,7 +260,7 @@ def save_textured_mesh_for_inference( img = img * (1 - mask) + 
dilate_img * mask img = img.clip(0, 255).astype(np.uint8) PIL.Image.fromarray(np.ascontiguousarray(img[::-1, :, :]), 'RGB').save( - os.path.join(mesh_dir, '%07d.png' % (save_mesh_idx))) + os.path.join(mesh_dir, 'albedo.png')) save_mesh_idx += 1 From 8bcf0474a8f0b16cbff4ca369bbc1caba52d4e1d Mon Sep 17 00:00:00 2001 From: Aman Kishore Date: Mon, 17 Oct 2022 18:56:38 +0000 Subject: [PATCH 3/6] Correct mtl --- training/utils/utils_3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/training/utils/utils_3d.py b/training/utils/utils_3d.py index c1bf09c..28ac035 100644 --- a/training/utils/utils_3d.py +++ b/training/utils/utils_3d.py @@ -37,7 +37,7 @@ def savemeshtes2(pointnp_px3, tcoords_px2, facenp_fx3, facetex_fx3, fname): fid.write('Ks 0.4 0.4 0.4\n') fid.write('Ns 10\n') fid.write('illum 2\n') - fid.write('map_Kd %s.png\n' % na) + fid.write('map_Kd albedo.png\n' % na) fid.close() #### From baed739c98e0a2d3ccab08495eff249b82586804 Mon Sep 17 00:00:00 2001 From: Aman Kishore Date: Tue, 18 Oct 2022 21:03:19 -0700 Subject: [PATCH 4/6] Remove duplicated vertices and clean up mesh --- training/networks_get3d.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/training/networks_get3d.py b/training/networks_get3d.py index 67175ce..0296e0a 100644 --- a/training/networks_get3d.py +++ b/training/networks_get3d.py @@ -375,6 +375,25 @@ def extract_3d_shape( else: mesh_v, mesh_f, sdf, deformation, v_deformed, sdf_reg_loss = self.get_geometry_prediction(ws_geo) + # Step 1.5: fix the mesh + import pymesh + mesh_v_processed = [] + mesh_f_processed = [] + + for v, f in zip(mesh_v, mesh_f): + mesh = pymesh.form_mesh(vertices=v.cpu().numpy(), faces=f.cpu().numpy()) + mesh, info = pymesh.remove_isolated_vertices(mesh) + mesh, info = pymesh.remove_duplicated_vertices(mesh, 1e-3) + print(info) + + tensor_vertices = torch.from_numpy(mesh.vertices).float().to(self.device) + tensor_faces = torch.from_numpy(mesh.faces).long().to(self.device) + 
mesh_v_processed.append(tensor_vertices) + mesh_f_processed.append(tensor_faces) + + mesh_v = mesh_v_processed + mesh_f = mesh_f_processed + # Step 2: use x-atlas to get uv mapping for the mesh from training.extract_texture_map import xatlas_uvmap all_uvs = [] From 0a18f077989ce014a346c992e1396a42280f0ff6 Mon Sep 17 00:00:00 2001 From: Aman Kishore Date: Tue, 18 Oct 2022 22:09:55 -0700 Subject: [PATCH 5/6] Bug fix --- training/utils/utils_3d.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/training/utils/utils_3d.py b/training/utils/utils_3d.py index 28ac035..9c1cea7 100644 --- a/training/utils/utils_3d.py +++ b/training/utils/utils_3d.py @@ -37,7 +37,7 @@ def savemeshtes2(pointnp_px3, tcoords_px2, facenp_fx3, facetex_fx3, fname): fid.write('Ks 0.4 0.4 0.4\n') fid.write('Ns 10\n') fid.write('illum 2\n') - fid.write('map_Kd albedo.png\n' % na) + fid.write('map_Kd albedo.png\n') fid.close() #### From ad585333fc365fe2ee57d605b59b2b698a8fdd10 Mon Sep 17 00:00:00 2001 From: Aman Kishore Date: Wed, 19 Oct 2022 14:27:02 -0700 Subject: [PATCH 6/6] Random seed fixed --- training/inference_3d.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/training/inference_3d.py b/training/inference_3d.py index 028bd29..3469331 100644 --- a/training/inference_3d.py +++ b/training/inference_3d.py @@ -51,9 +51,13 @@ def inference( bias_act._init() filtered_lrelu._init() + import random device = torch.device('cuda', rank) np.random.seed(random_seed * num_gpus + rank) torch.manual_seed(random_seed * num_gpus + rank) + random.seed(random_seed * num_gpus + rank) + torch.cuda.manual_seed(random_seed * num_gpus + rank) + torch.cuda.manual_seed_all(random_seed * num_gpus + rank) torch.backends.cudnn.enabled = True torch.backends.cudnn.benchmark = True # Improves training speed. torch.backends.cuda.matmul.allow_tf32 = True # Improves numerical accuracy.