Refactor: commented out ViT-related mentions in files #223

Merged: 5 commits, Jul 30, 2024
Changes from all commits
README.md: 2 changes (1 addition, 1 deletion)
@@ -14,7 +14,7 @@ The open-source software AUCMEDI allows fast setup of medical image classification
- Wide range of 2D/3D data entry options with interfaces to the most common medical image formats such as DICOM, MetaImage, NifTI, PNG or TIF already supplied.
- Selection of pre-processing methods for preparing images, such as augmentation processes, color conversions, windowing, filtering, resizing and normalization.
- Use of deep neural networks for binary, multi-class as well as multi-label classification and efficient methods against class imbalances using modern loss functions such as focal loss.
- - Library from modern architectures, like ResNet up to EfficientNet and Vision-Transformers (ViT)⁠.
+ - Library from modern architectures, like ResNet up to ConvNeXt. <!-- and Vision-Transformers (ViT)⁠.-->
- Complex ensemble learning techniques (combination of predictions) using test-time augmentation, bagging via cross-validation or stacking via logistic regressions.
- Explainable AI to explain opaque decision-making processes of the models using activation maps such as Grad-CAM or backpropagation.
- Automated Machine Learning (AutoML) mentality to ensure easy deployment, integration and maintenance of complex medical image classification pipelines (Docker).
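For context on the updated README bullet: a hedged sketch of how one of the remaining architectures (ConvNeXtBase, which the bullet now names instead of ViT) is typically wired up. The call pattern mirrors the commented-out ViT tests further down in this PR; the NeuralNetwork constructor arguments and any import path not visible in this diff are assumptions.

# Hedged sketch, not part of this PR; see the note above for assumptions.
from aucmedi import NeuralNetwork
from aucmedi.neural_network.architectures import Classifier
from aucmedi.neural_network.architectures.image.convnext_base import ConvNeXtBase

# Same construction pattern as the deactivated ViT tests, applied to ConvNeXtBase
arch = ConvNeXtBase(Classifier(n_labels=4), channels=3,
                    input_shape=(224, 224))
model = NeuralNetwork(n_labels=4, channels=3, architecture=arch,
                      batch_queue_size=1, input_shape=(224, 224))
model.model.summary()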
aucmedi/automl/block_train.py: 4 changes (1 addition, 3 deletions)
@@ -24,7 +24,6 @@
import numpy as np
import json
from tensorflow.keras.metrics import AUC
- from tensorflow_addons.metrics import F1Score
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger, \
ReduceLROnPlateau, EarlyStopping
# Internal libraries
@@ -140,8 +139,7 @@ def block_train(config):
"workers": config["workers"],
"batch_queue_size": 4,
"loss": loss,
"metrics": [AUC(100), F1Score(num_classes=class_n,
average="macro")],
"metrics": [AUC(100)],
"pretrained_weights": True,
"multiprocessing": False,
}
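The hunk above drops the tensorflow_addons F1Score from the AutoML training metrics, leaving only AUC. A hedged sketch of how a macro F1 metric could be restored without the tensorflow-addons dependency, assuming the pinned TensorFlow is at least 2.13 (where Keras ships its own F1Score); this is one option, not something this PR does.

# Hedged sketch, not part of this PR: macro F1 without tensorflow-addons.
# Requires TensorFlow/Keras >= 2.13; unlike tfa.metrics.F1Score, no
# num_classes argument is needed (the class count is inferred from the data).
from tensorflow.keras.metrics import AUC, F1Score

metrics = [AUC(100), F1Score(average="macro")]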
aucmedi/neural_network/architectures/image/__init__.py: 24 changes (12 additions, 12 deletions)
@@ -61,10 +61,10 @@
# Xception
from aucmedi.neural_network.architectures.image.xception import Xception
# Vision Transformer (ViT)
- from aucmedi.neural_network.architectures.image.vit_b16 import ViT_B16
- from aucmedi.neural_network.architectures.image.vit_b32 import ViT_B32
- from aucmedi.neural_network.architectures.image.vit_l16 import ViT_L16
- from aucmedi.neural_network.architectures.image.vit_l32 import ViT_L32
+ # from aucmedi.neural_network.architectures.image.vit_b16 import ViT_B16
+ # from aucmedi.neural_network.architectures.image.vit_b32 import ViT_B32
+ # from aucmedi.neural_network.architectures.image.vit_l16 import ViT_L16
+ # from aucmedi.neural_network.architectures.image.vit_l32 import ViT_L32
# ConvNeXt
from aucmedi.neural_network.architectures.image.convnext_base import ConvNeXtBase
from aucmedi.neural_network.architectures.image.convnext_tiny import ConvNeXtTiny
@@ -103,10 +103,10 @@
"VGG16": VGG16,
"VGG19": VGG19,
"Xception": Xception,
"ViT_B16": ViT_B16,
"ViT_B32": ViT_B32,
"ViT_L16": ViT_L16,
"ViT_L32": ViT_L32,
# "ViT_B16": ViT_B16,
# "ViT_B32": ViT_B32,
# "ViT_L16": ViT_L16,
# "ViT_L32": ViT_L32,
"ConvNeXtBase": ConvNeXtBase,
"ConvNeXtTiny": ConvNeXtTiny,
"ConvNeXtSmall": ConvNeXtSmall,
@@ -190,10 +190,10 @@
"VGG16": "caffe",
"VGG19": "caffe",
"Xception": "tf",
"ViT_B16": "tf",
"ViT_B32": "tf",
"ViT_L16": "tf",
"ViT_L32": "tf",
# "ViT_B16": "tf",
# "ViT_B32": "tf",
# "ViT_L16": "tf",
# "ViT_L32": "tf",
"ConvNeXtBase": None,
"ConvNeXtTiny": None,
"ConvNeXtSmall": None,
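A short sketch of how the two registry dicts edited above are typically consumed, assuming they are module-level objects named architecture_dict and supported_standardize_mode (names inferred from the test file below, not visible in this hunk). It shows the practical effect of commenting out the ViT entries.

# Hedged sketch, not part of this PR; dict names are assumptions (see note above).
from aucmedi.neural_network.architectures.image import (architecture_dict,
                                                        supported_standardize_mode)

arch_cls = architecture_dict["ConvNeXtBase"]           # still registered after this PR
sf_mode = supported_standardize_mode["ConvNeXtBase"]   # None: no standardization preset
assert "ViT_B16" not in architecture_dict              # ViT entries are now commented out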
requirements.txt: 3 changes (1 addition, 2 deletions)
@@ -8,8 +8,7 @@ scikit-image==0.21.0
lime==0.2.0.1
pooch==1.6.0
classification-models-3D==1.0.10
- vit-keras==0.1.2
- tensorflow-addons==0.21.0
+ # vit-keras==0.1.2
Keras-Applications==1.0.8
SimpleITK==2.2.0
batchgenerators==0.25
setup.py: 3 changes (1 addition, 2 deletions)
@@ -35,8 +35,7 @@
'lime>=0.2.0.1',
'pooch>=1.6.0',
'classification-models-3D>=1.0.10',
- 'vit-keras>=0.1.2',
- 'tensorflow-addons>=0.21.0',
+ # 'vit-keras>=0.1.2',
'Keras-Applications==1.0.8',
'SimpleITK>=2.2.0',
'batchgenerators>=0.25',
tests/test_architectures_image.py: 32 changes (16 additions, 16 deletions)
@@ -665,7 +665,7 @@ def test_Xception(self):
# Architecture: ViT B16 #
#-------------------------------------------------#
# Functionality and Interoperability testing deactived due to too intensive RAM requirements
- def test_ViT_B16(self):
+ # def test_ViT_B16(self):
# self.datagen_RGB.sf_resize = Resize(shape=(224, 224))
# arch = ViT_B16(Classifier(n_labels=4), channels=3,
# input_shape=(224, 224))
@@ -676,16 +676,16 @@
# batch_queue_size=1, input_shape=(224, 224))
# try : model.model.summary()
# except : raise Exception()
- self.assertTrue(supported_standardize_mode["ViT_B16"] == "tf")
- self.assertTrue(sdm_global["2D.ViT_B16"] == "tf")
- self.assertTrue("2D.ViT_B16" in architecture_dict)
+ # self.assertTrue(supported_standardize_mode["ViT_B16"] == "tf")
+ # self.assertTrue(sdm_global["2D.ViT_B16"] == "tf")
+ # self.assertTrue("2D.ViT_B16" in architecture_dict)
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))

#-------------------------------------------------#
# Architecture: ViT B32 #
#-------------------------------------------------#
# Functionality and Interoperability testing deactived due to too intensive RAM requirements
- def test_ViT_B32(self):
+ # def test_ViT_B32(self):
# self.datagen_RGB.sf_resize = Resize(shape=(224, 224))
# arch = ViT_B32(Classifier(n_labels=4), channels=3,
# input_shape=(224, 224))
@@ -696,16 +696,16 @@
# batch_queue_size=1, input_shape=(224, 224))
# try : model.model.summary()
# except : raise Exception()
- self.assertTrue(supported_standardize_mode["ViT_B32"] == "tf")
- self.assertTrue(sdm_global["2D.ViT_B32"] == "tf")
- self.assertTrue("2D.ViT_B32" in architecture_dict)
+ # self.assertTrue(supported_standardize_mode["ViT_B32"] == "tf")
+ # self.assertTrue(sdm_global["2D.ViT_B32"] == "tf")
+ # self.assertTrue("2D.ViT_B32" in architecture_dict)
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))

#-------------------------------------------------#
# Architecture: ViT L16 #
#-------------------------------------------------#
# Functionality and Interoperability testing deactived due to too intensive RAM requirements
- def test_ViT_L16(self):
+ # def test_ViT_L16(self):
# self.datagen_RGB.sf_resize = Resize(shape=(384, 384))
# arch = ViT_L16(Classifier(n_labels=4), channels=3,
# input_shape=(384, 384))
@@ -716,16 +716,16 @@
# batch_queue_size=1, input_shape=(384, 384))
# try : model.model.summary()
# except : raise Exception()
- self.assertTrue(supported_standardize_mode["ViT_L16"] == "tf")
- self.assertTrue(sdm_global["2D.ViT_L16"] == "tf")
- self.assertTrue("2D.ViT_L16" in architecture_dict)
+ # self.assertTrue(supported_standardize_mode["ViT_L16"] == "tf")
+ # self.assertTrue(sdm_global["2D.ViT_L16"] == "tf")
+ # self.assertTrue("2D.ViT_L16" in architecture_dict)
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))

#-------------------------------------------------#
# Architecture: ViT L32 #
#-------------------------------------------------#
# Functionality and Interoperability testing deactived due to too intensive RAM requirements
- def test_ViT_L32(self):
+ # def test_ViT_L32(self):
# self.datagen_RGB.sf_resize = Resize(shape=(384, 384))
# arch = ViT_L32(Classifier(n_labels=4), channels=3,
# input_shape=(384, 384))
@@ -736,9 +736,9 @@
# batch_queue_size=1, input_shape=(384, 384))
# try : model.model.summary()
# except : raise Exception()
- self.assertTrue(supported_standardize_mode["ViT_L32"] == "tf")
- self.assertTrue(sdm_global["2D.ViT_L32"] == "tf")
- self.assertTrue("2D.ViT_L32" in architecture_dict)
+ # self.assertTrue(supported_standardize_mode["ViT_L32"] == "tf")
+ # self.assertTrue(sdm_global["2D.ViT_L32"] == "tf")
+ # self.assertTrue("2D.ViT_L32" in architecture_dict)
# self.datagen_RGB.sf_resize = Resize(shape=(32, 32))

#-------------------------------------------------#
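For comparison, a hedged sketch of what the analogous registry assertions look like for an architecture that stays active (ConvNeXtBase), mirroring the now-commented ViT checks. The import locations of architecture_dict, supported_standardize_mode and sdm_global are assumptions inferred from how the existing tests use those names; they are not shown in this diff.

# Hedged sketch, not part of this PR; import paths are assumptions (see note above).
import unittest

from aucmedi.neural_network.architectures import architecture_dict
from aucmedi.neural_network.architectures import supported_standardize_mode as sdm_global
from aucmedi.neural_network.architectures.image import supported_standardize_mode


class ArchitectureRegistryTest(unittest.TestCase):
    def test_ConvNeXtBase_registry(self):
        # ConvNeXt uses no standardization preset (None), unlike the "tf" mode of ViT
        self.assertTrue(supported_standardize_mode["ConvNeXtBase"] is None)
        self.assertTrue(sdm_global["2D.ConvNeXtBase"] is None)
        self.assertTrue("2D.ConvNeXtBase" in architecture_dict)


if __name__ == "__main__":
    unittest.main()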