From 6c159b6bc8d9291886a2c4e577141f58da0b72a7 Mon Sep 17 00:00:00 2001
From: Jan Blumenkamp
Date: Sun, 8 Sep 2019 16:35:46 +0200
Subject: [PATCH] remove unused files

---
 fake_submission_gen.py | 34 ----------------
 keras_example.py       | 94 ------------------------------------------
 keras_utils.py         | 85 ---------------------------------------
 3 files changed, 213 deletions(-)
 delete mode 100644 fake_submission_gen.py
 delete mode 100644 keras_example.py
 delete mode 100644 keras_utils.py

diff --git a/fake_submission_gen.py b/fake_submission_gen.py
deleted file mode 100644
index 5eaf904..0000000
--- a/fake_submission_gen.py
+++ /dev/null
@@ -1,34 +0,0 @@
-from submission import SubmissionWriter
-import json
-import os
-
-""" Valid submission generation example. """
-
-# load test image list
-dataset_root = '/datasets/speed'
-with open(os.path.join(dataset_root, 'test.json'), 'r') as f:
-    test_images = json.load(f)
-with open(os.path.join(dataset_root, 'real_test.json'), 'r') as f:
-    real_test_images = json.load(f)
-
-submission = SubmissionWriter()
-
-# iterating over all test and real test images, appending submission
-for image in test_images[::-1]:
-
-    filename = image['filename']
-
-    # arbitrary prediction, just to store something.
-    q = [1.0, 0.0, 0.0, 0.0]
-    r = [10.0, 0.0, 0.0]
-
-    submission.append_test(filename, q, r)
-
-for real_image in real_test_images:
-    filename = real_image['filename']
-    q = [.71, .71, 0.0, 0.0]
-    r = [9.0, .1, .1]
-    submission.append_real_test(filename, q, r)
-
-submission.export(suffix='debug')
-print('Submission exported.')
diff --git a/keras_example.py b/keras_example.py
deleted file mode 100644
index df80218..0000000
--- a/keras_example.py
+++ /dev/null
@@ -1,94 +0,0 @@
-import json
-import tensorflow
-from utils import KerasDataGenerator
-from tensorflow.keras.applications.resnet50 import preprocess_input
-from tensorflow.keras.preprocessing import image
-import numpy as np
-from submission import SubmissionWriter
-import os
-
-"""
-    Example script demonstrating training on the SPEED dataset using Keras.
-    Usage example: python keras_example.py --dataset [path to speed] --epochs [num epochs] --batch [batch size]
-"""
-
-
-def evaluate(model, dataset, append_submission, dataset_root):
-
-    """ Running evaluation on test set, appending results to a submission. """
-
-    with open(os.path.join(dataset_root, dataset + '.json'), 'r') as f:
-        image_list = json.load(f)
-
-    print('Running evaluation on {} set...'.format(dataset))
-
-    for img in image_list:
-        img_path = os.path.join(dataset_root, 'images', dataset, img['filename'])
-        pil_img = image.load_img(img_path, target_size=(224, 224))
-        x = image.img_to_array(pil_img)
-        x = preprocess_input(x)
-        x = np.expand_dims(x, 0)
-        output = model.predict(x)
-        append_submission(img['filename'], output[0, :4], output[0, 4:])
-
-
-def main(speed_root, epochs, batch_size):
-
-    """ Setting up data generators and model, training, and evaluating model on test and real_test sets.
""" - - # Setting up parameters - params = {'dim': (224, 224), - 'batch_size': batch_size, - 'n_channels': 3, - 'shuffle': True} - - # Loading and splitting dataset - with open(os.path.join(speed_root, 'train' + '.json'), 'r') as f: - label_list = json.load(f) - train_labels = label_list[:int(len(label_list)*.8)] - validation_labels = label_list[int(len(label_list)*.8):] - - # Data generators for training and validation - training_generator = KerasDataGenerator(preprocess_input, train_labels, speed_root, **params) - validation_generator = KerasDataGenerator(preprocess_input, validation_labels, speed_root, **params) - - # Loading and freezing pre-trained model - tensorflow.keras.backend.set_learning_phase(0) - pretrained_model = tensorflow.keras.applications.ResNet50(weights="imagenet", include_top=False, - input_shape=(224, 224, 3)) - - # Adding new trainable hidden and output layers to the model - tensorflow.keras.backend.set_learning_phase(1) - x = pretrained_model.output - x = tensorflow.keras.layers.Flatten()(x) - x = tensorflow.keras.layers.Dense(1024, activation="relu")(x) - predictions = tensorflow.keras.layers.Dense(7, activation="linear")(x) - model_final = tensorflow.keras.models.Model(inputs=pretrained_model.input, outputs=predictions) - model_final.compile(loss="mean_squared_error", optimizer='adam') - - # Training the model (transfer learning) - history = model_final.fit_generator( - training_generator, - epochs=epochs, - validation_data=validation_generator, - callbacks=[tensorflow.keras.callbacks.ProgbarLogger(count_mode='steps')]) - - print('Training losses: ', history.history['loss']) - print('Validation losses: ', history.history['val_loss']) - - # Generating submission - submission = SubmissionWriter() - evaluate(model_final, 'test', submission.append_test, speed_root) - evaluate(model_final, 'real_test', submission.append_real_test, speed_root) - submission.export(suffix='keras_example') - - -if __name__ == "__main__": - import argparse - parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) - parser.add_argument('--dataset', help='Path to the downloaded speed dataset.', default='') - parser.add_argument('--epochs', help='Number of epochs for training.', default=20) - parser.add_argument('--batch', help='number of samples in a batch.', default=32) - args = parser.parse_args() - - main(args.dataset, int(args.epochs), int(args.batch)) diff --git a/keras_utils.py b/keras_utils.py deleted file mode 100644 index d2066a4..0000000 --- a/keras_utils.py +++ /dev/null @@ -1,85 +0,0 @@ -from tensorflow.keras.utils import Sequence -from tensorflow.keras.preprocessing import image as keras_image -import os -import numpy as np -from utils import Camera, projectModel - -class KerasDataGenerator(Sequence): - - """ DataGenerator for Keras to be used with fit_generator (https://keras.io/models/sequential/#fit_generator)""" - - def __init__(self, label_list, speed_root, label_size, batch_size=32, dim=(224, 224), shuffle=True): - - # loading dataset - self.image_root = os.path.join(speed_root, 'images', 'train') - - # Initialization - self.dim = dim - self.batch_size = batch_size - self.labels = self.labels = {label['filename']: {'q': label['q_vbs2tango'], 'r': label['r_Vo2To_vbs_true']} - for label in label_list} - self.list_IDs = [label['filename'] for label in label_list] - self.shuffle = shuffle - self.label_size = label_size - self.indexes = None - self.on_epoch_end() - - def __len__(self): - - """ Denotes the number of batches per epoch. 
""" - - return int(np.floor(len(self.list_IDs) / self.batch_size)) - - def __getitem__(self, index): - - """ Generate one batch of data """ - - # Generate indexes of the batch - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] - - # Find list of IDs - list_IDs_temp = [self.list_IDs[k] for k in indexes] - - # Generate data - return self.__data_generation(list_IDs_temp) - - def on_epoch_end(self): - - """ Updates indexes after each epoch """ - - self.indexes = np.arange(len(self.list_IDs)) - if self.shuffle: - np.random.shuffle(self.indexes) - - def drawBlob(self, img, pos, sigma=3): - # https://github.com/NVlabs/Deep_Object_Pose/blob/master/src/training/train.py#L851 - w = int(sigma*3) - if pos[0]-w>=0 and pos[0]+w=0 and pos[1]+w= 0.0 and y >= 0.0 and x <= 1.0 and y <= 1.0: - x_s, y_s = int(x * self.dim[1]), int(y * self.dim[0]) - self.drawBlob(masks[i][...,j], (x_s, y_s), self.label_size) - - return imgs, masks