refactor(NeuralNetwork): remove unused workers parameter
- Removed the workers parameter from the NeuralNetwork class.
- Updated the related documentation and files to reflect this change.
rizoudal committed Aug 12, 2024
1 parent bd4cff3 commit 8e5c66e
Showing 7 changed files with 1,386 additions and 1,401 deletions.
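
To illustrate the caller-facing effect of this commit, here is a minimal usage sketch. The import path and the architecture string are assumptions based on typical AUCMEDI usage; only the parameter names visible in the model.py diff below are taken from this commit.

# Hypothetical sketch (not part of the commit): creating a NeuralNetwork
# after the removal of the workers parameter.
from aucmedi import NeuralNetwork           # import path assumed

model = NeuralNetwork(
    n_labels=4,                             # number of target classes (placeholder)
    channels=3,                             # RGB input (placeholder)
    architecture="2D.ResNet50",             # architecture identifier assumed
    batch_queue_size=10,                    # previously prepared batches kept in cache
    multiprocessing=False,                  # multi-processing vs. threading for loading
)

# Passing workers=... here would now raise a TypeError, since the parameter
# no longer exists in NeuralNetwork.__init__ (see the model.py diff below).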
1 change: 0 additions & 1 deletion aucmedi/automl/block_pred.py
@@ -74,7 +74,6 @@ def block_predict(config):
# Define neural network parameters
nn_paras = {"n_labels": 1, # placeholder
"channels": 1, # placeholder
"workers": config["workers"],
"batch_queue_size": 4,
"multiprocessing": False,
}
1 change: 0 additions & 1 deletion aucmedi/automl/block_train.py
@@ -136,7 +136,6 @@ def block_train(config):
# Define neural network parameters
nn_paras = {"n_labels": class_n,
"channels": 3,
"workers": config["workers"],
"batch_queue_size": 4,
"loss": loss,
"metrics": [AUC(100), F1Score(average="macro")],
2 changes: 0 additions & 2 deletions aucmedi/ensemble/bagging.py
@@ -187,7 +187,6 @@ def train(self, training_generator, epochs=20, iterations=None,
"meta_variables": self.model_template.meta_variables,
"learning_rate": self.model_template.learning_rate,
"batch_queue_size": self.model_template.batch_queue_size,
"workers": self.model_template.workers,
"multiprocessing": self.model_template.multiprocessing,
}

@@ -325,7 +324,6 @@ def predict(self, prediction_generator, aggregate="mean",
"meta_variables": self.model_template.meta_variables,
"learning_rate": self.model_template.learning_rate,
"batch_queue_size": self.model_template.batch_queue_size,
"workers": self.model_template.workers,
"multiprocessing": self.model_template.multiprocessing,
}

3 changes: 0 additions & 3 deletions aucmedi/ensemble/composite.py
@@ -244,7 +244,6 @@ def train(self, training_generator, epochs=20, iterations=None,
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"workers": self.model_list[i].workers,
"multiprocessing": self.model_list[i].multiprocessing,
}

@@ -355,7 +354,6 @@ def train_metalearner(self, training_generator):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"workers": self.model_list[i].workers,
"multiprocessing": self.model_list[i].multiprocessing,
}

@@ -469,7 +467,6 @@ def predict(self, prediction_generator, return_ensemble=False):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"workers": self.model_list[i].workers,
"multiprocessing": self.model_list[i].multiprocessing,
}

3 changes: 0 additions & 3 deletions aucmedi/ensemble/stacking.py
@@ -232,7 +232,6 @@ def train(self, training_generator, epochs=20, iterations=None,
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"workers": self.model_list[i].workers,
"multiprocessing": self.model_list[i].multiprocessing,
}

@@ -343,7 +342,6 @@ def train_metalearner(self, training_generator):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"workers": self.model_list[i].workers,
"multiprocessing": self.model_list[i].multiprocessing,
}

@@ -456,7 +454,6 @@ def predict(self, prediction_generator, return_ensemble=False):
"meta_variables": self.model_list[i].meta_variables,
"learning_rate": self.model_list[i].learning_rate,
"batch_queue_size": self.model_list[i].batch_queue_size,
"workers": self.model_list[i].workers,
"multiprocessing": self.model_list[i].multiprocessing,
}

9 changes: 2 additions & 7 deletions aucmedi/neural_network/model.py
@@ -138,7 +138,7 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
pretrained_weights=False, loss="categorical_crossentropy",
metrics=["categorical_accuracy"], activation_output="softmax",
fcl_dropout=True, meta_variables=None, learning_rate=0.0001,
-   batch_queue_size=10, workers=1, multiprocessing=False,
+   batch_queue_size=10, multiprocessing=False,
verbose=1):
""" Initialization function for creating a Neural Network (model) object.
@@ -169,7 +169,6 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
([Classifier][aucmedi.neural_network.architectures.classifier]).
learning_rate (float): Learning rate in which weights of the neural network will be updated.
batch_queue_size (int): The batch queue size is the number of previously prepared batches in the cache during runtime.
-   workers (int): Number of workers/threads which preprocess batches during runtime.
multiprocessing (bool): Option whether to utilize multi-processing for workers instead of threading .
verbose (int): Option (0/1) how much information should be written to stdout.
@@ -192,7 +191,6 @@ def __init__(self, n_labels, channels, input_shape=None, architecture=None,
self.metrics = metrics
self.learning_rate = learning_rate
self.batch_queue_size = batch_queue_size
-   self.workers = workers
self.multiprocessing = multiprocessing
self.pretrained_weights = pretrained_weights
self.activation_output = activation_output
@@ -298,7 +296,6 @@ def train(self, training_generator, validation_generator=None, epochs=20,
callbacks=callbacks, epochs=epochs,
steps_per_epoch=iterations,
class_weight=class_weights,
-   workers=self.workers,
use_multiprocessing=self.multiprocessing,
max_queue_size=self.batch_queue_size,
verbose=self.verbose)
@@ -321,7 +318,6 @@ def train(self, training_generator, validation_generator=None, epochs=20,
epochs=self.tf_epochs,
steps_per_epoch=iterations,
class_weight=class_weights,
-   workers=self.workers,
use_multiprocessing=self.multiprocessing,
max_queue_size=self.batch_queue_size,
verbose=self.verbose)
@@ -338,7 +334,6 @@ def train(self, training_generator, validation_generator=None, epochs=20,
initial_epoch=self.tf_epochs,
steps_per_epoch=iterations,
class_weight=class_weights,
-   workers=self.workers,
use_multiprocessing=self.multiprocessing,
max_queue_size=self.batch_queue_size,
verbose=self.verbose)
@@ -368,7 +363,7 @@ def predict(self, prediction_generator):
preds (numpy.ndarray): A NumPy array of predictions formatted with shape (n_samples, n_labels).
"""
# Run inference process with the Keras predict function
-   preds = self.model.predict(prediction_generator, workers=self.workers,
+   preds = self.model.predict(prediction_generator,
max_queue_size=self.batch_queue_size,
use_multiprocessing=self.multiprocessing,
verbose=self.verbose)
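
For completeness, a hedged caller-side sketch of inference after this change. The DataGenerator import and its arguments are assumptions not shown in this diff; only model.predict(prediction_generator) and the returned shape (n_samples, n_labels) come from the code above.

# Illustrative inference sketch; assumptions are marked in the comments.
from aucmedi import NeuralNetwork, DataGenerator    # import paths assumed

model = NeuralNetwork(n_labels=2, channels=3)        # placeholder configuration
pred_gen = DataGenerator(samples=["img_0.png"],      # DataGenerator signature assumed
                         path_imagedir="images/",
                         labels=None)
preds = model.predict(pred_gen)                      # no workers argument anywhere
print(preds.shape)                                   # -> (n_samples, n_labels)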