diff --git a/.gitignore b/.gitignore
index 27aeea2..b540cfb 100644
--- a/.gitignore
+++ b/.gitignore
@@ -101,3 +101,13 @@ ENV/
# mypy
.mypy_cache/
+
+# Log directory
+logs/*
+!logs/.gitkeep
+
+# Resource directory
+resources/*
+!resources/.gitkeep
+resources/weights/*
+!resources/weights/.gitkeep
diff --git a/.travis.yml b/.travis.yml
index e921600..4cd323b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,3 +1,4 @@
+
sudo: required
language: python
diff --git a/gui/__init__.py b/gui/__init__.py
index 9d83a46..6dbc27d 100644
--- a/gui/__init__.py
+++ b/gui/__init__.py
@@ -1 +1 @@
-from .dialogs import ThermoGUI
+from .dialogs import ThermoGUI, CreateDatasetGUI
\ No newline at end of file
diff --git a/gui/about/about_rich_text.html b/gui/about/about_rich_text.html
index 30b3d68..1ffc6bd 100644
--- a/gui/about/about_rich_text.html
+++ b/gui/about/about_rich_text.html
@@ -1,24 +1,24 @@
-
-
- About ThermoGUI
- This application has been developed as a feasibility study
- for automatic detection of damaged/not functional solar
- panel modules at SUPSI - ISAAC in Canobbio, Lugano (CH).
-
- Autor
- The project was implemented by the following authors
- listed in temporal order:
-
- - Carlo Del Don (carlo.deldon@gmail.com)
-
-
+
+
+About ThermoGUI
+This application has been developed as a feasibility study
+ for automatic detection of damaged/not functional solar
+ panel modules at SUPSI - ISAAC in Canobbio, Lugano (CH).
+
+Author
+The project was implemented by the following authors
+ listed in temporal order:
+
+ - Carlo Del Don (carlo.deldon@gmail.com)
+
+
-
+
-
- Logo created on logomakr.
-
+
+ Logo created on logomakr.
+
diff --git a/gui/design/__init__.py b/gui/design/__init__.py
index fb2db1f..ae5bd4b 100644
--- a/gui/design/__init__.py
+++ b/gui/design/__init__.py
@@ -1,2 +1,4 @@
from .thermo_gui_design import Ui_ThermoGUI_main_window
-from .webcam_dialog_design import Ui_WebCam
\ No newline at end of file
+from .create_dataset_gui import Ui_CreateDataset_main_window
+from .webcam_dialog_design import Ui_WebCam
+from .image_saving_gui import Ui_Save_images_dialog
\ No newline at end of file
diff --git a/gui/design/create_dataset_gui.py b/gui/design/create_dataset_gui.py
new file mode 100644
index 0000000..a6e3ea3
--- /dev/null
+++ b/gui/design/create_dataset_gui.py
@@ -0,0 +1,859 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'create_dataset_gui.ui'
+#
+# Created by: PyQt5 UI code generator 5.6
+#
+# WARNING! All changes made in this file will be lost!
+
+from PyQt5 import QtCore, QtGui, QtWidgets
+
+class Ui_CreateDataset_main_window(object):
+ def setupUi(self, CreateDataset_main_window):
+ CreateDataset_main_window.setObjectName("CreateDataset_main_window")
+ CreateDataset_main_window.resize(961, 611)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(CreateDataset_main_window.sizePolicy().hasHeightForWidth())
+ CreateDataset_main_window.setSizePolicy(sizePolicy)
+ icon = QtGui.QIcon()
+ icon.addPixmap(QtGui.QPixmap("img/logo.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
+ CreateDataset_main_window.setWindowIcon(icon)
+ self.centralwidget = QtWidgets.QWidget(CreateDataset_main_window)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
+ self.centralwidget.setSizePolicy(sizePolicy)
+ self.centralwidget.setLocale(QtCore.QLocale(QtCore.QLocale.English, QtCore.QLocale.UnitedStates))
+ self.centralwidget.setObjectName("centralwidget")
+ self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
+ self.horizontalLayout_2.setObjectName("horizontalLayout_2")
+ self.widget_3 = QtWidgets.QWidget(self.centralwidget)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy.setHorizontalStretch(2)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.widget_3.sizePolicy().hasHeightForWidth())
+ self.widget_3.setSizePolicy(sizePolicy)
+ self.widget_3.setObjectName("widget_3")
+ self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.widget_3)
+ self.verticalLayout_11.setContentsMargins(0, 0, 0, 0)
+ self.verticalLayout_11.setObjectName("verticalLayout_11")
+ self.rectangle_image_view = QtWidgets.QLabel(self.widget_3)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.rectangle_image_view.sizePolicy().hasHeightForWidth())
+ self.rectangle_image_view.setSizePolicy(sizePolicy)
+ self.rectangle_image_view.setMinimumSize(QtCore.QSize(300, 300))
+ font = QtGui.QFont()
+ font.setPointSize(26)
+ self.rectangle_image_view.setFont(font)
+ self.rectangle_image_view.setAutoFillBackground(True)
+ self.rectangle_image_view.setFrameShape(QtWidgets.QFrame.Box)
+ self.rectangle_image_view.setTextFormat(QtCore.Qt.RichText)
+ self.rectangle_image_view.setScaledContents(False)
+ self.rectangle_image_view.setObjectName("rectangle_image_view")
+ self.verticalLayout_11.addWidget(self.rectangle_image_view)
+ self.global_progress_bar = QtWidgets.QProgressBar(self.widget_3)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.global_progress_bar.sizePolicy().hasHeightForWidth())
+ self.global_progress_bar.setSizePolicy(sizePolicy)
+ self.global_progress_bar.setMinimumSize(QtCore.QSize(0, 10))
+ self.global_progress_bar.setMaximumSize(QtCore.QSize(16777215, 10))
+ self.global_progress_bar.setProperty("value", 0)
+ self.global_progress_bar.setTextVisible(False)
+ self.global_progress_bar.setInvertedAppearance(False)
+ self.global_progress_bar.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
+ self.global_progress_bar.setObjectName("global_progress_bar")
+ self.verticalLayout_11.addWidget(self.global_progress_bar)
+ self.widget_2 = QtWidgets.QWidget(self.widget_3)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.widget_2.sizePolicy().hasHeightForWidth())
+ self.widget_2.setSizePolicy(sizePolicy)
+ self.widget_2.setMinimumSize(QtCore.QSize(400, 0))
+ self.widget_2.setObjectName("widget_2")
+ self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.widget_2)
+ self.horizontalLayout_4.setContentsMargins(0, 0, 0, 0)
+ self.horizontalLayout_4.setSpacing(6)
+ self.horizontalLayout_4.setObjectName("horizontalLayout_4")
+ self.verticalLayout_3 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_3.setContentsMargins(0, 0, 0, -1)
+ self.verticalLayout_3.setObjectName("verticalLayout_3")
+ self.label = QtWidgets.QLabel(self.widget_2)
+ self.label.setTextFormat(QtCore.Qt.RichText)
+ self.label.setObjectName("label")
+ self.verticalLayout_3.addWidget(self.label)
+ self.video_index_panel = QtWidgets.QGridLayout()
+ self.video_index_panel.setContentsMargins(-1, 2, -1, 2)
+ self.video_index_panel.setObjectName("video_index_panel")
+ self.video_from_index = QtWidgets.QSpinBox(self.widget_2)
+ self.video_from_index.setFocusPolicy(QtCore.Qt.WheelFocus)
+ self.video_from_index.setMaximum(100000)
+ self.video_from_index.setObjectName("video_from_index")
+ self.video_index_panel.addWidget(self.video_from_index, 1, 0, 1, 1)
+ self.from_video_index_label = QtWidgets.QLabel(self.widget_2)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.from_video_index_label.sizePolicy().hasHeightForWidth())
+ self.from_video_index_label.setSizePolicy(sizePolicy)
+ self.from_video_index_label.setObjectName("from_video_index_label")
+ self.video_index_panel.addWidget(self.from_video_index_label, 0, 0, 1, 1)
+ self.to_video_index_label = QtWidgets.QLabel(self.widget_2)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.to_video_index_label.sizePolicy().hasHeightForWidth())
+ self.to_video_index_label.setSizePolicy(sizePolicy)
+ self.to_video_index_label.setObjectName("to_video_index_label")
+ self.video_index_panel.addWidget(self.to_video_index_label, 0, 1, 1, 1)
+ self.video_to_index = QtWidgets.QSpinBox(self.widget_2)
+ self.video_to_index.setLayoutDirection(QtCore.Qt.LeftToRight)
+ self.video_to_index.setAutoFillBackground(False)
+ self.video_to_index.setMinimum(0)
+ self.video_to_index.setMaximum(100000)
+ self.video_to_index.setProperty("value", 500)
+ self.video_to_index.setObjectName("video_to_index")
+ self.video_index_panel.addWidget(self.video_to_index, 1, 1, 1, 1)
+ self.verticalLayout_3.addLayout(self.video_index_panel)
+ self.load_video_button = QtWidgets.QPushButton(self.widget_2)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.load_video_button.sizePolicy().hasHeightForWidth())
+ self.load_video_button.setSizePolicy(sizePolicy)
+ self.load_video_button.setCheckable(False)
+ self.load_video_button.setDefault(False)
+ self.load_video_button.setFlat(False)
+ self.load_video_button.setObjectName("load_video_button")
+ self.verticalLayout_3.addWidget(self.load_video_button)
+ self.line_5 = QtWidgets.QFrame(self.widget_2)
+ self.line_5.setFrameShape(QtWidgets.QFrame.HLine)
+ self.line_5.setFrameShadow(QtWidgets.QFrame.Sunken)
+ self.line_5.setObjectName("line_5")
+ self.verticalLayout_3.addWidget(self.line_5)
+ self.horizontalLayout_6 = QtWidgets.QHBoxLayout()
+ self.horizontalLayout_6.setObjectName("horizontalLayout_6")
+ self.verticalLayout_4 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_4.setContentsMargins(-1, 0, -1, -1)
+ self.verticalLayout_4.setObjectName("verticalLayout_4")
+ self.image_scaling_label = QtWidgets.QLabel(self.widget_2)
+ self.image_scaling_label.setObjectName("image_scaling_label")
+ self.verticalLayout_4.addWidget(self.image_scaling_label)
+ self.image_scaling_slider = QtWidgets.QSlider(self.widget_2)
+ self.image_scaling_slider.setMinimum(1)
+ self.image_scaling_slider.setMaximum(19)
+ self.image_scaling_slider.setSingleStep(0)
+ self.image_scaling_slider.setPageStep(1)
+ self.image_scaling_slider.setProperty("value", 10)
+ self.image_scaling_slider.setSliderPosition(10)
+ self.image_scaling_slider.setTracking(True)
+ self.image_scaling_slider.setOrientation(QtCore.Qt.Horizontal)
+ self.image_scaling_slider.setTickPosition(QtWidgets.QSlider.TicksAbove)
+ self.image_scaling_slider.setTickInterval(2)
+ self.image_scaling_slider.setObjectName("image_scaling_slider")
+ self.verticalLayout_4.addWidget(self.image_scaling_slider)
+ self.horizontalLayout_6.addLayout(self.verticalLayout_4)
+ self.verticalLayout_14 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_14.setObjectName("verticalLayout_14")
+ self.play_stop_buttons_panel = QtWidgets.QHBoxLayout()
+ self.play_stop_buttons_panel.setContentsMargins(-1, 0, -1, 0)
+ self.play_stop_buttons_panel.setObjectName("play_stop_buttons_panel")
+ self.play_video_button = QtWidgets.QPushButton(self.widget_2)
+ self.play_video_button.setEnabled(False)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.play_video_button.sizePolicy().hasHeightForWidth())
+ self.play_video_button.setSizePolicy(sizePolicy)
+ self.play_video_button.setMinimumSize(QtCore.QSize(20, 0))
+ self.play_video_button.setCheckable(False)
+ self.play_video_button.setObjectName("play_video_button")
+ self.play_stop_buttons_panel.addWidget(self.play_video_button)
+ self.stop_video_button = QtWidgets.QPushButton(self.widget_2)
+ self.stop_video_button.setEnabled(False)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.stop_video_button.sizePolicy().hasHeightForWidth())
+ self.stop_video_button.setSizePolicy(sizePolicy)
+ self.stop_video_button.setMinimumSize(QtCore.QSize(20, 0))
+ self.stop_video_button.setCheckable(False)
+ self.stop_video_button.setChecked(False)
+ self.stop_video_button.setObjectName("stop_video_button")
+ self.play_stop_buttons_panel.addWidget(self.stop_video_button)
+ self.verticalLayout_14.addLayout(self.play_stop_buttons_panel)
+ self.quick_save_button = QtWidgets.QPushButton(self.widget_2)
+ self.quick_save_button.setEnabled(False)
+ self.quick_save_button.setObjectName("quick_save_button")
+ self.verticalLayout_14.addWidget(self.quick_save_button)
+ self.horizontalLayout_6.addLayout(self.verticalLayout_14)
+ self.verticalLayout_3.addLayout(self.horizontalLayout_6)
+ self.horizontalLayout_4.addLayout(self.verticalLayout_3)
+ self.verticalLayout_11.addWidget(self.widget_2)
+ spacerItem = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout_11.addItem(spacerItem)
+ self.horizontalLayout_2.addWidget(self.widget_3)
+ self.widget = QtWidgets.QWidget(self.centralwidget)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy.setHorizontalStretch(1)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
+ self.widget.setSizePolicy(sizePolicy)
+ self.widget.setObjectName("widget")
+ self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.widget)
+ self.verticalLayout_10.setContentsMargins(0, 0, 0, 0)
+ self.verticalLayout_10.setObjectName("verticalLayout_10")
+ self.current_module_view = QtWidgets.QLabel(self.widget)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.current_module_view.sizePolicy().hasHeightForWidth())
+ self.current_module_view.setSizePolicy(sizePolicy)
+ self.current_module_view.setMinimumSize(QtCore.QSize(250, 150))
+ font = QtGui.QFont()
+ font.setPointSize(14)
+ self.current_module_view.setFont(font)
+ self.current_module_view.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))
+ self.current_module_view.setAutoFillBackground(True)
+ self.current_module_view.setFrameShape(QtWidgets.QFrame.Box)
+ self.current_module_view.setTextFormat(QtCore.Qt.RichText)
+ self.current_module_view.setObjectName("current_module_view")
+ self.verticalLayout_10.addWidget(self.current_module_view)
+ self.horizontalLayout = QtWidgets.QHBoxLayout()
+ self.horizontalLayout.setObjectName("horizontalLayout")
+ self.verticalLayout = QtWidgets.QVBoxLayout()
+ self.verticalLayout.setObjectName("verticalLayout")
+ self.label_2 = QtWidgets.QLabel(self.widget)
+ self.label_2.setObjectName("label_2")
+ self.verticalLayout.addWidget(self.label_2)
+ self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
+ self.horizontalLayout_3.setObjectName("horizontalLayout_3")
+ self.module_working_button = QtWidgets.QPushButton(self.widget)
+ self.module_working_button.setEnabled(False)
+ self.module_working_button.setObjectName("module_working_button")
+ self.horizontalLayout_3.addWidget(self.module_working_button)
+ self.module_broken_button = QtWidgets.QPushButton(self.widget)
+ self.module_broken_button.setEnabled(False)
+ self.module_broken_button.setObjectName("module_broken_button")
+ self.horizontalLayout_3.addWidget(self.module_broken_button)
+ self.verticalLayout.addLayout(self.horizontalLayout_3)
+ self.horizontalLayout.addLayout(self.verticalLayout)
+ self.verticalLayout_6 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_6.setContentsMargins(10, -1, -1, -1)
+ self.verticalLayout_6.setObjectName("verticalLayout_6")
+ self.label_3 = QtWidgets.QLabel(self.widget)
+ self.label_3.setObjectName("label_3")
+ self.verticalLayout_6.addWidget(self.label_3)
+ self.misdetection_button = QtWidgets.QPushButton(self.widget)
+ self.misdetection_button.setEnabled(False)
+ self.misdetection_button.setObjectName("misdetection_button")
+ self.verticalLayout_6.addWidget(self.misdetection_button)
+ self.horizontalLayout.addLayout(self.verticalLayout_6)
+ self.verticalLayout_10.addLayout(self.horizontalLayout)
+ self.line = QtWidgets.QFrame(self.widget)
+ self.line.setFrameShape(QtWidgets.QFrame.HLine)
+ self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
+ self.line.setObjectName("line")
+ self.verticalLayout_10.addWidget(self.line)
+ self.gridLayout_2 = QtWidgets.QGridLayout()
+ self.gridLayout_2.setContentsMargins(-1, 10, -1, -1)
+ self.gridLayout_2.setObjectName("gridLayout_2")
+ self.label_7 = QtWidgets.QLabel(self.widget)
+ self.label_7.setObjectName("label_7")
+ self.gridLayout_2.addWidget(self.label_7, 4, 0, 1, 1)
+ self.label_4 = QtWidgets.QLabel(self.widget)
+ self.label_4.setObjectName("label_4")
+ self.gridLayout_2.addWidget(self.label_4, 1, 0, 1, 1)
+ self.label_5 = QtWidgets.QLabel(self.widget)
+ self.label_5.setObjectName("label_5")
+ self.gridLayout_2.addWidget(self.label_5, 2, 0, 1, 1)
+ self.label_6 = QtWidgets.QLabel(self.widget)
+ self.label_6.setObjectName("label_6")
+ self.gridLayout_2.addWidget(self.label_6, 3, 0, 1, 1)
+ self.label_9 = QtWidgets.QLabel(self.widget)
+ self.label_9.setObjectName("label_9")
+ self.gridLayout_2.addWidget(self.label_9, 0, 2, 1, 1)
+ self.label_8 = QtWidgets.QLabel(self.widget)
+ self.label_8.setObjectName("label_8")
+ self.gridLayout_2.addWidget(self.label_8, 0, 1, 1, 1)
+ self.total_manual_classified_label = QtWidgets.QLabel(self.widget)
+ self.total_manual_classified_label.setObjectName("total_manual_classified_label")
+ self.gridLayout_2.addWidget(self.total_manual_classified_label, 1, 1, 1, 1)
+ self.total_automatic_classified_label = QtWidgets.QLabel(self.widget)
+ self.total_automatic_classified_label.setObjectName("total_automatic_classified_label")
+ self.gridLayout_2.addWidget(self.total_automatic_classified_label, 1, 2, 1, 1)
+ self.working_manual_classified_label = QtWidgets.QLabel(self.widget)
+ self.working_manual_classified_label.setObjectName("working_manual_classified_label")
+ self.gridLayout_2.addWidget(self.working_manual_classified_label, 2, 1, 1, 1)
+ self.working_automatic_classified_label = QtWidgets.QLabel(self.widget)
+ self.working_automatic_classified_label.setObjectName("working_automatic_classified_label")
+ self.gridLayout_2.addWidget(self.working_automatic_classified_label, 2, 2, 1, 1)
+ self.broken_manual_classified_label = QtWidgets.QLabel(self.widget)
+ self.broken_manual_classified_label.setObjectName("broken_manual_classified_label")
+ self.gridLayout_2.addWidget(self.broken_manual_classified_label, 3, 1, 1, 1)
+ self.broken_automatic_classified_label = QtWidgets.QLabel(self.widget)
+ self.broken_automatic_classified_label.setObjectName("broken_automatic_classified_label")
+ self.gridLayout_2.addWidget(self.broken_automatic_classified_label, 3, 2, 1, 1)
+ self.other_manual_classified_label = QtWidgets.QLabel(self.widget)
+ self.other_manual_classified_label.setObjectName("other_manual_classified_label")
+ self.gridLayout_2.addWidget(self.other_manual_classified_label, 4, 1, 1, 1)
+ self.other_automatic_classified_label = QtWidgets.QLabel(self.widget)
+ self.other_automatic_classified_label.setObjectName("other_automatic_classified_label")
+ self.gridLayout_2.addWidget(self.other_automatic_classified_label, 4, 2, 1, 1)
+ self.verticalLayout_10.addLayout(self.gridLayout_2)
+ spacerItem1 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout_10.addItem(spacerItem1)
+ self.horizontalLayout_2.addWidget(self.widget)
+ self.control_panel = QtWidgets.QFrame(self.centralwidget)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.control_panel.sizePolicy().hasHeightForWidth())
+ self.control_panel.setSizePolicy(sizePolicy)
+ self.control_panel.setFrameShape(QtWidgets.QFrame.StyledPanel)
+ self.control_panel.setFrameShadow(QtWidgets.QFrame.Raised)
+ self.control_panel.setObjectName("control_panel")
+ self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.control_panel)
+ self.verticalLayout_2.setObjectName("verticalLayout_2")
+ self.tab_widget = QtWidgets.QTabWidget(self.control_panel)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.tab_widget.sizePolicy().hasHeightForWidth())
+ self.tab_widget.setSizePolicy(sizePolicy)
+ self.tab_widget.setAcceptDrops(False)
+ self.tab_widget.setTabPosition(QtWidgets.QTabWidget.North)
+ self.tab_widget.setTabShape(QtWidgets.QTabWidget.Rounded)
+ self.tab_widget.setIconSize(QtCore.QSize(16, 16))
+ self.tab_widget.setElideMode(QtCore.Qt.ElideNone)
+ self.tab_widget.setUsesScrollButtons(True)
+ self.tab_widget.setDocumentMode(False)
+ self.tab_widget.setTabsClosable(False)
+ self.tab_widget.setMovable(False)
+ self.tab_widget.setTabBarAutoHide(False)
+ self.tab_widget.setObjectName("tab_widget")
+ self.preprocessing_tab = QtWidgets.QWidget()
+ self.preprocessing_tab.setObjectName("preprocessing_tab")
+ self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.preprocessing_tab)
+ self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
+ self.verticalLayout_5.setObjectName("verticalLayout_5")
+ self.verticalLayout_9 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_9.setContentsMargins(10, 10, 10, -1)
+ self.verticalLayout_9.setObjectName("verticalLayout_9")
+ self.preprocessing_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.preprocessing_label.setTextFormat(QtCore.Qt.RichText)
+ self.preprocessing_label.setObjectName("preprocessing_label")
+ self.verticalLayout_9.addWidget(self.preprocessing_label)
+ self.preprocessing_layout = QtWidgets.QVBoxLayout()
+ self.preprocessing_layout.setContentsMargins(-1, 0, -1, -1)
+ self.preprocessing_layout.setObjectName("preprocessing_layout")
+ self.undistort_image_box = QtWidgets.QCheckBox(self.preprocessing_tab)
+ self.undistort_image_box.setChecked(True)
+ self.undistort_image_box.setTristate(False)
+ self.undistort_image_box.setObjectName("undistort_image_box")
+ self.preprocessing_layout.addWidget(self.undistort_image_box)
+ self.preprocessing_grid_layout = QtWidgets.QGridLayout()
+ self.preprocessing_grid_layout.setContentsMargins(-1, 10, -1, -1)
+ self.preprocessing_grid_layout.setObjectName("preprocessing_grid_layout")
+ self.angle_label = QtWidgets.QLabel(self.preprocessing_tab)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.angle_label.sizePolicy().hasHeightForWidth())
+ self.angle_label.setSizePolicy(sizePolicy)
+ self.angle_label.setObjectName("angle_label")
+ self.preprocessing_grid_layout.addWidget(self.angle_label, 0, 0, 1, 1)
+ self.blur_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.blur_value.setMaximum(15)
+ self.blur_value.setProperty("value", 3)
+ self.blur_value.setObjectName("blur_value")
+ self.preprocessing_grid_layout.addWidget(self.blur_value, 1, 1, 1, 1)
+ self.blur_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.blur_label.setObjectName("blur_label")
+ self.preprocessing_grid_layout.addWidget(self.blur_label, 1, 0, 1, 1)
+ self.angle_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.angle_value.setMinimum(0)
+ self.angle_value.setMaximum(360)
+ self.angle_value.setSingleStep(10)
+ self.angle_value.setObjectName("angle_value")
+ self.preprocessing_grid_layout.addWidget(self.angle_value, 0, 1, 1, 1)
+ self.temperature_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.temperature_label.setObjectName("temperature_label")
+ self.preprocessing_grid_layout.addWidget(self.temperature_label, 2, 0, 1, 1)
+ self.temperature_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.temperature_value.setMaximum(255)
+ self.temperature_value.setSingleStep(10)
+ self.temperature_value.setProperty("value", 200)
+ self.temperature_value.setObjectName("temperature_value")
+ self.preprocessing_grid_layout.addWidget(self.temperature_value, 2, 1, 1, 1)
+ self.preprocessing_layout.addLayout(self.preprocessing_grid_layout)
+ self.verticalLayout_9.addLayout(self.preprocessing_layout)
+ self.line_2 = QtWidgets.QFrame(self.preprocessing_tab)
+ self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
+ self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
+ self.line_2.setObjectName("line_2")
+ self.verticalLayout_9.addWidget(self.line_2)
+ self.canny_parameters_label = QtWidgets.QLabel(self.preprocessing_tab)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.canny_parameters_label.sizePolicy().hasHeightForWidth())
+ self.canny_parameters_label.setSizePolicy(sizePolicy)
+ self.canny_parameters_label.setTextFormat(QtCore.Qt.RichText)
+ self.canny_parameters_label.setObjectName("canny_parameters_label")
+ self.verticalLayout_9.addWidget(self.canny_parameters_label)
+ self.histeresis_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.histeresis_label.setTextFormat(QtCore.Qt.RichText)
+ self.histeresis_label.setObjectName("histeresis_label")
+ self.verticalLayout_9.addWidget(self.histeresis_label)
+ self.histeresis_grid_layout = QtWidgets.QGridLayout()
+ self.histeresis_grid_layout.setContentsMargins(-1, 0, -1, -1)
+ self.histeresis_grid_layout.setObjectName("histeresis_grid_layout")
+ self.min_histeresis_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.min_histeresis_value.setWrapping(False)
+ self.min_histeresis_value.setFrame(True)
+ self.min_histeresis_value.setButtonSymbols(QtWidgets.QAbstractSpinBox.UpDownArrows)
+ self.min_histeresis_value.setMaximum(500)
+ self.min_histeresis_value.setSingleStep(10)
+ self.min_histeresis_value.setProperty("value", 30)
+ self.min_histeresis_value.setObjectName("min_histeresis_value")
+ self.histeresis_grid_layout.addWidget(self.min_histeresis_value, 1, 0, 1, 1)
+ self.label_max_histeresis = QtWidgets.QLabel(self.preprocessing_tab)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.label_max_histeresis.sizePolicy().hasHeightForWidth())
+ self.label_max_histeresis.setSizePolicy(sizePolicy)
+ self.label_max_histeresis.setMaximumSize(QtCore.QSize(16777215, 16))
+ self.label_max_histeresis.setObjectName("label_max_histeresis")
+ self.histeresis_grid_layout.addWidget(self.label_max_histeresis, 0, 1, 1, 1)
+ self.max_histeresis_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.max_histeresis_value.setMaximum(500)
+ self.max_histeresis_value.setSingleStep(10)
+ self.max_histeresis_value.setProperty("value", 140)
+ self.max_histeresis_value.setObjectName("max_histeresis_value")
+ self.histeresis_grid_layout.addWidget(self.max_histeresis_value, 1, 1, 1, 1)
+ self.label_min_histeresis = QtWidgets.QLabel(self.preprocessing_tab)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.label_min_histeresis.sizePolicy().hasHeightForWidth())
+ self.label_min_histeresis.setSizePolicy(sizePolicy)
+ self.label_min_histeresis.setMaximumSize(QtCore.QSize(16777215, 16))
+ self.label_min_histeresis.setObjectName("label_min_histeresis")
+ self.histeresis_grid_layout.addWidget(self.label_min_histeresis, 0, 0, 1, 1)
+ self.verticalLayout_9.addLayout(self.histeresis_grid_layout)
+ self.dilation_layout = QtWidgets.QHBoxLayout()
+ self.dilation_layout.setContentsMargins(-1, 0, -1, -1)
+ self.dilation_layout.setObjectName("dilation_layout")
+ self.dilation_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.dilation_label.setObjectName("dilation_label")
+ self.dilation_layout.addWidget(self.dilation_label)
+ self.dilation_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.dilation_value.setMaximum(10)
+ self.dilation_value.setProperty("value", 3)
+ self.dilation_value.setObjectName("dilation_value")
+ self.dilation_layout.addWidget(self.dilation_value)
+ self.verticalLayout_9.addLayout(self.dilation_layout)
+ self.verticalLayout_5.addLayout(self.verticalLayout_9)
+ spacerItem2 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout_5.addItem(spacerItem2)
+ self.tab_widget.addTab(self.preprocessing_tab, "")
+ self.segment_tab = QtWidgets.QWidget()
+ self.segment_tab.setObjectName("segment_tab")
+ self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.segment_tab)
+ self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
+ self.verticalLayout_7.setObjectName("verticalLayout_7")
+ self.verticalLayout_12 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_12.setContentsMargins(10, 10, 10, -1)
+ self.verticalLayout_12.setObjectName("verticalLayout_12")
+ self.segment_label = QtWidgets.QLabel(self.segment_tab)
+ self.segment_label.setTextFormat(QtCore.Qt.RichText)
+ self.segment_label.setObjectName("segment_label")
+ self.verticalLayout_12.addWidget(self.segment_label)
+ self.detection_layout = QtWidgets.QGridLayout()
+ self.detection_layout.setContentsMargins(-1, 0, -1, -1)
+ self.detection_layout.setObjectName("detection_layout")
+ self.max_gap_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.max_gap_value.setMaximum(500)
+ self.max_gap_value.setSingleStep(10)
+ self.max_gap_value.setProperty("value", 150)
+ self.max_gap_value.setObjectName("max_gap_value")
+ self.detection_layout.addWidget(self.max_gap_value, 4, 1, 1, 1)
+ self.max_gap_label = QtWidgets.QLabel(self.segment_tab)
+ self.max_gap_label.setObjectName("max_gap_label")
+ self.detection_layout.addWidget(self.max_gap_label, 4, 0, 1, 1)
+ self.min_length_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.min_length_value.setMinimum(0)
+ self.min_length_value.setMaximum(500)
+ self.min_length_value.setSingleStep(10)
+ self.min_length_value.setProperty("value", 50)
+ self.min_length_value.setObjectName("min_length_value")
+ self.detection_layout.addWidget(self.min_length_value, 3, 1, 1, 1)
+ self.delta_rho_label = QtWidgets.QLabel(self.segment_tab)
+ self.delta_rho_label.setObjectName("delta_rho_label")
+ self.detection_layout.addWidget(self.delta_rho_label, 0, 0, 1, 1)
+ self.min_votes_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.min_votes_value.setMinimum(0)
+ self.min_votes_value.setMaximum(500)
+ self.min_votes_value.setSingleStep(10)
+ self.min_votes_value.setProperty("value", 60)
+ self.min_votes_value.setObjectName("min_votes_value")
+ self.detection_layout.addWidget(self.min_votes_value, 2, 1, 1, 1)
+ self.delta_theta_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.delta_theta_value.setMinimum(1)
+ self.delta_theta_value.setMaximum(180)
+ self.delta_theta_value.setProperty("value", 1)
+ self.delta_theta_value.setObjectName("delta_theta_value")
+ self.detection_layout.addWidget(self.delta_theta_value, 1, 1, 1, 1)
+ self.min_votes_label = QtWidgets.QLabel(self.segment_tab)
+ self.min_votes_label.setObjectName("min_votes_label")
+ self.detection_layout.addWidget(self.min_votes_label, 2, 0, 1, 1)
+ self.delta_rho_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.delta_rho_value.setMinimum(1)
+ self.delta_rho_value.setMaximum(100)
+ self.delta_rho_value.setProperty("value", 1)
+ self.delta_rho_value.setObjectName("delta_rho_value")
+ self.detection_layout.addWidget(self.delta_rho_value, 0, 1, 1, 1)
+ self.min_length_label = QtWidgets.QLabel(self.segment_tab)
+ self.min_length_label.setObjectName("min_length_label")
+ self.detection_layout.addWidget(self.min_length_label, 3, 0, 1, 1)
+ self.delta_theta_label = QtWidgets.QLabel(self.segment_tab)
+ self.delta_theta_label.setObjectName("delta_theta_label")
+ self.detection_layout.addWidget(self.delta_theta_label, 1, 0, 1, 1)
+ self.extend_segments_label = QtWidgets.QLabel(self.segment_tab)
+ self.extend_segments_label.setObjectName("extend_segments_label")
+ self.detection_layout.addWidget(self.extend_segments_label, 5, 0, 1, 1)
+ self.extend_segments_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.extend_segments_value.setMaximum(500)
+ self.extend_segments_value.setProperty("value", 10)
+ self.extend_segments_value.setObjectName("extend_segments_value")
+ self.detection_layout.addWidget(self.extend_segments_value, 5, 1, 1, 1)
+ self.verticalLayout_12.addLayout(self.detection_layout)
+ self.line_3 = QtWidgets.QFrame(self.segment_tab)
+ self.line_3.setFrameShape(QtWidgets.QFrame.HLine)
+ self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken)
+ self.line_3.setObjectName("line_3")
+ self.verticalLayout_12.addWidget(self.line_3)
+ self.clustering_label = QtWidgets.QLabel(self.segment_tab)
+ self.clustering_label.setTextFormat(QtCore.Qt.RichText)
+ self.clustering_label.setObjectName("clustering_label")
+ self.verticalLayout_12.addWidget(self.clustering_label)
+ self.clustering_layout = QtWidgets.QGridLayout()
+ self.clustering_layout.setContentsMargins(-1, 0, -1, -1)
+ self.clustering_layout.setObjectName("clustering_layout")
+ self.num_init_label = QtWidgets.QLabel(self.segment_tab)
+ self.num_init_label.setObjectName("num_init_label")
+ self.clustering_layout.addWidget(self.num_init_label, 2, 0, 1, 1)
+ self.cluster_type_label = QtWidgets.QLabel(self.segment_tab)
+ self.cluster_type_label.setObjectName("cluster_type_label")
+ self.clustering_layout.addWidget(self.cluster_type_label, 0, 0, 1, 1)
+ self.num_clusters_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.num_clusters_value.setMinimum(2)
+ self.num_clusters_value.setObjectName("num_clusters_value")
+ self.clustering_layout.addWidget(self.num_clusters_value, 1, 1, 1, 1)
+ self.num_init_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.num_init_value.setEnabled(False)
+ self.num_init_value.setMinimum(1)
+ self.num_init_value.setProperty("value", 5)
+ self.num_init_value.setObjectName("num_init_value")
+ self.clustering_layout.addWidget(self.num_init_value, 2, 1, 1, 1)
+ self.num_clusters_label = QtWidgets.QLabel(self.segment_tab)
+ self.num_clusters_label.setObjectName("num_clusters_label")
+ self.clustering_layout.addWidget(self.num_clusters_label, 1, 0, 1, 1)
+ self.cluster_type_layout = QtWidgets.QHBoxLayout()
+ self.cluster_type_layout.setObjectName("cluster_type_layout")
+ self.gmm_value = QtWidgets.QRadioButton(self.segment_tab)
+ self.gmm_value.setChecked(True)
+ self.gmm_value.setObjectName("gmm_value")
+ self.cluster_type_layout.addWidget(self.gmm_value)
+ self.knn_value = QtWidgets.QRadioButton(self.segment_tab)
+ self.knn_value.setObjectName("knn_value")
+ self.cluster_type_layout.addWidget(self.knn_value)
+ self.clustering_layout.addLayout(self.cluster_type_layout, 0, 1, 1, 1)
+ self.verticalLayout_12.addLayout(self.clustering_layout)
+ self.features_label = QtWidgets.QLabel(self.segment_tab)
+ self.features_label.setObjectName("features_label")
+ self.verticalLayout_12.addWidget(self.features_label)
+ self.features_layout = QtWidgets.QHBoxLayout()
+ self.features_layout.setContentsMargins(-1, 0, -1, -1)
+ self.features_layout.setObjectName("features_layout")
+ self.swipe_clusters_value = QtWidgets.QCheckBox(self.segment_tab)
+ self.swipe_clusters_value.setObjectName("swipe_clusters_value")
+ self.features_layout.addWidget(self.swipe_clusters_value)
+ self.use_angle_value = QtWidgets.QCheckBox(self.segment_tab)
+ self.use_angle_value.setChecked(True)
+ self.use_angle_value.setObjectName("use_angle_value")
+ self.features_layout.addWidget(self.use_angle_value)
+ self.use_centers_value = QtWidgets.QCheckBox(self.segment_tab)
+ self.use_centers_value.setObjectName("use_centers_value")
+ self.features_layout.addWidget(self.use_centers_value)
+ self.verticalLayout_12.addLayout(self.features_layout)
+ self.line_4 = QtWidgets.QFrame(self.segment_tab)
+ self.line_4.setFrameShape(QtWidgets.QFrame.HLine)
+ self.line_4.setFrameShadow(QtWidgets.QFrame.Sunken)
+ self.line_4.setObjectName("line_4")
+ self.verticalLayout_12.addWidget(self.line_4)
+ self.cleaning_label = QtWidgets.QLabel(self.segment_tab)
+ self.cleaning_label.setTextFormat(QtCore.Qt.RichText)
+ self.cleaning_label.setObjectName("cleaning_label")
+ self.verticalLayout_12.addWidget(self.cleaning_label)
+ self.gridLayout = QtWidgets.QGridLayout()
+ self.gridLayout.setContentsMargins(-1, 0, -1, -1)
+ self.gridLayout.setObjectName("gridLayout")
+ self.max_merging_distance_label = QtWidgets.QLabel(self.segment_tab)
+ self.max_merging_distance_label.setObjectName("max_merging_distance_label")
+ self.gridLayout.addWidget(self.max_merging_distance_label, 2, 0, 1, 1)
+ self.max_angle_variation_mean_label = QtWidgets.QLabel(self.segment_tab)
+ self.max_angle_variation_mean_label.setObjectName("max_angle_variation_mean_label")
+ self.gridLayout.addWidget(self.max_angle_variation_mean_label, 0, 0, 1, 1)
+ self.max_merging_angle_label = QtWidgets.QLabel(self.segment_tab)
+ self.max_merging_angle_label.setObjectName("max_merging_angle_label")
+ self.gridLayout.addWidget(self.max_merging_angle_label, 1, 0, 1, 1)
+ self.max_angle_variation_mean_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.max_angle_variation_mean_value.setMaximum(90)
+ self.max_angle_variation_mean_value.setSingleStep(5)
+ self.max_angle_variation_mean_value.setProperty("value", 20)
+ self.max_angle_variation_mean_value.setObjectName("max_angle_variation_mean_value")
+ self.gridLayout.addWidget(self.max_angle_variation_mean_value, 0, 1, 1, 1)
+ self.max_merging_angle_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.max_merging_angle_value.setMaximum(90)
+ self.max_merging_angle_value.setSingleStep(5)
+ self.max_merging_angle_value.setProperty("value", 10)
+ self.max_merging_angle_value.setObjectName("max_merging_angle_value")
+ self.gridLayout.addWidget(self.max_merging_angle_value, 1, 1, 1, 1)
+ self.max_merging_distance_value = QtWidgets.QSpinBox(self.segment_tab)
+ self.max_merging_distance_value.setMaximum(500)
+ self.max_merging_distance_value.setSingleStep(10)
+ self.max_merging_distance_value.setProperty("value", 10)
+ self.max_merging_distance_value.setObjectName("max_merging_distance_value")
+ self.gridLayout.addWidget(self.max_merging_distance_value, 2, 1, 1, 1)
+ self.verticalLayout_12.addLayout(self.gridLayout)
+ self.verticalLayout_7.addLayout(self.verticalLayout_12)
+ spacerItem3 = QtWidgets.QSpacerItem(20, 41, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout_7.addItem(spacerItem3)
+ self.tab_widget.addTab(self.segment_tab, "")
+ self.module_tab = QtWidgets.QWidget()
+ self.module_tab.setObjectName("module_tab")
+ self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.module_tab)
+ self.verticalLayout_8.setContentsMargins(0, 0, 0, 0)
+ self.verticalLayout_8.setObjectName("verticalLayout_8")
+ self.verticalLayout_13 = QtWidgets.QVBoxLayout()
+ self.verticalLayout_13.setContentsMargins(10, 10, 10, -1)
+ self.verticalLayout_13.setObjectName("verticalLayout_13")
+ self.filter_label = QtWidgets.QLabel(self.module_tab)
+ self.filter_label.setTextFormat(QtCore.Qt.RichText)
+ self.filter_label.setObjectName("filter_label")
+ self.verticalLayout_13.addWidget(self.filter_label)
+ self.module_filter_layout = QtWidgets.QGridLayout()
+ self.module_filter_layout.setContentsMargins(-1, 0, -1, -1)
+ self.module_filter_layout.setObjectName("module_filter_layout")
+ self.ratio_max_deviation_value = QtWidgets.QDoubleSpinBox(self.module_tab)
+ self.ratio_max_deviation_value.setMaximum(10.0)
+ self.ratio_max_deviation_value.setSingleStep(0.1)
+ self.ratio_max_deviation_value.setProperty("value", 0.3)
+ self.ratio_max_deviation_value.setObjectName("ratio_max_deviation_value")
+ self.module_filter_layout.addWidget(self.ratio_max_deviation_value, 1, 1, 1, 1)
+ self.ratio_max_deviation_label = QtWidgets.QLabel(self.module_tab)
+ self.ratio_max_deviation_label.setObjectName("ratio_max_deviation_label")
+ self.module_filter_layout.addWidget(self.ratio_max_deviation_label, 1, 0, 1, 1)
+ self.min_area_value = QtWidgets.QSpinBox(self.module_tab)
+ self.min_area_value.setMaximum(1000000)
+ self.min_area_value.setSingleStep(100)
+ self.min_area_value.setProperty("value", 800)
+ self.min_area_value.setObjectName("min_area_value")
+ self.module_filter_layout.addWidget(self.min_area_value, 2, 1, 1, 1)
+ self.min_area_label = QtWidgets.QLabel(self.module_tab)
+ self.min_area_label.setObjectName("min_area_label")
+ self.module_filter_layout.addWidget(self.min_area_label, 2, 0, 1, 1)
+ self.expected_ratio_label = QtWidgets.QLabel(self.module_tab)
+ self.expected_ratio_label.setObjectName("expected_ratio_label")
+ self.module_filter_layout.addWidget(self.expected_ratio_label, 0, 0, 1, 1)
+ self.expected_ratio_value = QtWidgets.QDoubleSpinBox(self.module_tab)
+ self.expected_ratio_value.setMinimum(0.1)
+ self.expected_ratio_value.setMaximum(5.0)
+ self.expected_ratio_value.setSingleStep(0.1)
+ self.expected_ratio_value.setProperty("value", 1.5)
+ self.expected_ratio_value.setObjectName("expected_ratio_value")
+ self.module_filter_layout.addWidget(self.expected_ratio_value, 0, 1, 1, 1)
+ self.verticalLayout_13.addLayout(self.module_filter_layout)
+ self.verticalLayout_8.addLayout(self.verticalLayout_13)
+ spacerItem4 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout_8.addItem(spacerItem4)
+ self.tab_widget.addTab(self.module_tab, "")
+ self.verticalLayout_2.addWidget(self.tab_widget)
+ spacerItem5 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout_2.addItem(spacerItem5)
+ self.horizontalLayout_2.addWidget(self.control_panel, 0, QtCore.Qt.AlignTop)
+ CreateDataset_main_window.setCentralWidget(self.centralwidget)
+ self.menuBar = QtWidgets.QMenuBar(CreateDataset_main_window)
+ self.menuBar.setGeometry(QtCore.QRect(0, 0, 961, 21))
+ self.menuBar.setObjectName("menuBar")
+ self.FileMenu = QtWidgets.QMenu(self.menuBar)
+ self.FileMenu.setObjectName("FileMenu")
+ CreateDataset_main_window.setMenuBar(self.menuBar)
+ self.actionLoad = QtWidgets.QAction(CreateDataset_main_window)
+ self.actionLoad.setObjectName("actionLoad")
+ self.file_about = QtWidgets.QAction(CreateDataset_main_window)
+ self.file_about.setObjectName("file_about")
+ self.file_exit = QtWidgets.QAction(CreateDataset_main_window)
+ self.file_exit.setObjectName("file_exit")
+ self.FileMenu.addAction(self.file_about)
+ self.FileMenu.addSeparator()
+ self.FileMenu.addAction(self.file_exit)
+ self.menuBar.addAction(self.FileMenu.menuAction())
+
+ self.retranslateUi(CreateDataset_main_window)
+ self.tab_widget.setCurrentIndex(0)
+ QtCore.QMetaObject.connectSlotsByName(CreateDataset_main_window)
+
+ def retranslateUi(self, CreateDataset_main_window):
+ _translate = QtCore.QCoreApplication.translate
+ CreateDataset_main_window.setWindowTitle(_translate("CreateDataset_main_window", "ThermoGUI"))
+ CreateDataset_main_window.setWhatsThis(_translate("CreateDataset_main_window", "Tool Bar
"))
+ self.rectangle_image_view.setText(_translate("CreateDataset_main_window", "Module\n"
+" Map
\n"
+" "))
+ self.global_progress_bar.setFormat(_translate("CreateDataset_main_window", "%p%"))
+ self.label.setText(_translate("CreateDataset_main_window", "Input
\n"
+" "))
+ self.video_from_index.setToolTip(_translate("CreateDataset_main_window", "Initial\n"
+" frame of the video to be loaded.
\n"
+" "))
+ self.from_video_index_label.setText(_translate("CreateDataset_main_window", "From:"))
+ self.to_video_index_label.setText(_translate("CreateDataset_main_window", "To:"))
+ self.video_to_index.setToolTip(_translate("CreateDataset_main_window", "Final\n"
+" frame of the video to be loaded.
\n"
+" "))
+ self.load_video_button.setToolTip(_translate("CreateDataset_main_window", "Loads\n"
+" the frames from the selected video file.
\n"
+" "))
+ self.load_video_button.setText(_translate("CreateDataset_main_window", "Choose Video"))
+ self.image_scaling_label.setText(_translate("CreateDataset_main_window", "Input image scaling : 1.00"))
+ self.play_video_button.setToolTip(_translate("CreateDataset_main_window", "Play\n"
+" the current video/Webcam.
\n"
+" "))
+ self.play_video_button.setText(_translate("CreateDataset_main_window", "Play"))
+ self.stop_video_button.setToolTip(_translate("CreateDataset_main_window", "Resets\n"
+" the current video to the start.
\n"
+" "))
+ self.stop_video_button.setText(_translate("CreateDataset_main_window", "Stop and Save"))
+ self.quick_save_button.setText(_translate("CreateDataset_main_window", "Quick Save"))
+ self.current_module_view.setText(_translate("CreateDataset_main_window", "Current\n"
+" Module
\n"
+" "))
+ self.label_2.setText(_translate("CreateDataset_main_window", "Module Detected:"))
+ self.module_working_button.setToolTip(_translate("CreateDataset_main_window", "Select current module as \'Working\' (shortcut:\n"
+" \'0\')\n"
+" "))
+ self.module_working_button.setText(_translate("CreateDataset_main_window", "Working (0)"))
+ self.module_working_button.setShortcut(_translate("CreateDataset_main_window", "0"))
+ self.module_broken_button.setToolTip(_translate("CreateDataset_main_window", "Select current module as \'Broken\' (shortcut:\n"
+" \'1\')\n"
+" "))
+ self.module_broken_button.setText(_translate("CreateDataset_main_window", "Broken (1)"))
+ self.module_broken_button.setShortcut(_translate("CreateDataset_main_window", "1"))
+ self.label_3.setText(_translate("CreateDataset_main_window", "Misdetection:"))
+ self.misdetection_button.setToolTip(_translate("CreateDataset_main_window", "Select current module as \'Misdetected\' (shortcut: \'2\')\n"
+" "))
+ self.misdetection_button.setText(_translate("CreateDataset_main_window", "Other(2)"))
+ self.misdetection_button.setShortcut(_translate("CreateDataset_main_window", "2"))
+ self.label_7.setText(_translate("CreateDataset_main_window", "Other:"))
+ self.label_4.setText(_translate("CreateDataset_main_window", "Total classified modules:
\n"
+" "))
+ self.label_5.setText(_translate("CreateDataset_main_window", "Working:"))
+ self.label_6.setText(_translate("CreateDataset_main_window", "Broken:"))
+ self.label_9.setText(_translate("CreateDataset_main_window", "Automatic:"))
+ self.label_8.setText(_translate("CreateDataset_main_window", "Manual:"))
+ self.total_manual_classified_label.setText(_translate("CreateDataset_main_window", "0
\n"
+" "))
+ self.total_automatic_classified_label.setText(_translate("CreateDataset_main_window", "0
\n"
+" "))
+ self.working_manual_classified_label.setText(_translate("CreateDataset_main_window", "0"))
+ self.working_automatic_classified_label.setText(_translate("CreateDataset_main_window", "0"))
+ self.broken_manual_classified_label.setText(_translate("CreateDataset_main_window", "0"))
+ self.broken_automatic_classified_label.setText(_translate("CreateDataset_main_window", "0"))
+ self.other_manual_classified_label.setText(_translate("CreateDataset_main_window", "0"))
+ self.other_automatic_classified_label.setText(_translate("CreateDataset_main_window", "0"))
+ self.preprocessing_label.setText(_translate("CreateDataset_main_window", "Preprocessing
\n"
+" "))
+ self.undistort_image_box.setText(_translate("CreateDataset_main_window", "undistort image"))
+ self.angle_label.setText(_translate("CreateDataset_main_window", "angle:"))
+ self.blur_label.setText(_translate("CreateDataset_main_window", "blur:"))
+ self.temperature_label.setText(_translate("CreateDataset_main_window", "temperature:"))
+ self.canny_parameters_label.setText(_translate("CreateDataset_main_window", "Canny
\n"
+" "))
+ self.histeresis_label.setText(_translate("CreateDataset_main_window", "Histeresis:
"))
+ self.label_max_histeresis.setText(_translate("CreateDataset_main_window", "max"))
+ self.label_min_histeresis.setText(_translate("CreateDataset_main_window", "min"))
+ self.dilation_label.setText(_translate("CreateDataset_main_window", "dilation steps:"))
+ self.tab_widget.setTabText(self.tab_widget.indexOf(self.preprocessing_tab), _translate("CreateDataset_main_window", "Preprocessing"))
+ self.segment_label.setText(_translate("CreateDataset_main_window", "Detection
\n"
+" "))
+ self.max_gap_label.setText(_translate("CreateDataset_main_window", "max gap"))
+ self.delta_rho_label.setText(_translate("CreateDataset_main_window", "delta rho"))
+ self.min_votes_label.setText(_translate("CreateDataset_main_window", "min votes"))
+ self.min_length_label.setText(_translate("CreateDataset_main_window", "min length"))
+ self.delta_theta_label.setText(_translate("CreateDataset_main_window", "delta theta"))
+ self.extend_segments_label.setText(_translate("CreateDataset_main_window", "extend segments"))
+ self.clustering_label.setText(_translate("CreateDataset_main_window", "Clustering
\n"
+" "))
+ self.num_init_label.setText(_translate("CreateDataset_main_window", "num init"))
+ self.cluster_type_label.setText(_translate("CreateDataset_main_window", "type"))
+ self.num_clusters_label.setText(_translate("CreateDataset_main_window", "num clusters"))
+ self.gmm_value.setText(_translate("CreateDataset_main_window", "GMM"))
+ self.knn_value.setText(_translate("CreateDataset_main_window", "KNN"))
+ self.features_label.setText(_translate("CreateDataset_main_window", "Features:"))
+ self.swipe_clusters_value.setText(_translate("CreateDataset_main_window", "swipe"))
+ self.use_angle_value.setText(_translate("CreateDataset_main_window", "angles"))
+ self.use_centers_value.setText(_translate("CreateDataset_main_window", "centers"))
+ self.cleaning_label.setText(_translate("CreateDataset_main_window", "Cleaning
\n"
+" "))
+ self.max_merging_distance_label.setText(_translate("CreateDataset_main_window", "max merging distance"))
+ self.max_angle_variation_mean_label.setText(_translate("CreateDataset_main_window", "max angle variation"))
+ self.max_merging_angle_label.setText(_translate("CreateDataset_main_window", "max merging angle"))
+ self.tab_widget.setTabText(self.tab_widget.indexOf(self.segment_tab), _translate("CreateDataset_main_window", "Segments"))
+ self.filter_label.setText(_translate("CreateDataset_main_window", "Filter
\n"
+" "))
+ self.ratio_max_deviation_label.setText(_translate("CreateDataset_main_window", "ratio max deviation"))
+ self.min_area_label.setText(_translate("CreateDataset_main_window", "min area"))
+ self.expected_ratio_label.setText(_translate("CreateDataset_main_window", "expected ratio"))
+ self.tab_widget.setTabText(self.tab_widget.indexOf(self.module_tab), _translate("CreateDataset_main_window", "Modules"))
+ self.FileMenu.setTitle(_translate("CreateDataset_main_window", "File"))
+ self.actionLoad.setText(_translate("CreateDataset_main_window", "Load"))
+ self.file_about.setText(_translate("CreateDataset_main_window", "About"))
+ self.file_exit.setText(_translate("CreateDataset_main_window", "Exit"))
+
+
+if __name__ == "__main__":
+ import sys
+ app = QtWidgets.QApplication(sys.argv)
+ CreateDataset_main_window = QtWidgets.QMainWindow()
+ ui = Ui_CreateDataset_main_window()
+ ui.setupUi(CreateDataset_main_window)
+ CreateDataset_main_window.show()
+ sys.exit(app.exec_())
+
diff --git a/gui/design/create_dataset_gui.ui b/gui/design/create_dataset_gui.ui
new file mode 100644
index 0000000..9b023e8
--- /dev/null
+++ b/gui/design/create_dataset_gui.ui
@@ -0,0 +1,1611 @@
+
+
+ CreateDataset_main_window
+
+
+
+ 0
+ 0
+ 961
+ 611
+
+
+
+
+ 0
+ 0
+
+
+
+ ThermoGUI
+
+
+
+ img/logo.pngimg/logo.png
+
+
+ <html><head/><body><p>Tool Bar</p><p><br/></p></body></html>
+
+
+
+
+ 0
+ 0
+
+
+
+
+
+
+ -
+
+
+
+ 2
+ 0
+
+
+
+
-
+
+
+
+ 0
+ 0
+
+
+
+
+ 300
+ 300
+
+
+
+
+ 26
+
+
+
+ true
+
+
+ QFrame::Box
+
+
+ <html><head/><body><p align="center">Module
+ Map</p></body></html>
+
+
+
+ Qt::RichText
+
+
+ false
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 10
+
+
+
+
+ 16777215
+ 10
+
+
+
+ 0
+
+
+ false
+
+
+ false
+
+
+ QProgressBar::TopToBottom
+
+
+ %p%
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 400
+ 0
+
+
+
+
+ 6
+
+
+ 0
+
+
+ 0
+
+
+ 0
+
+
+ 0
+
+
-
+
+
+ 0
+
+
+ 0
+
+
+ 0
+
+
-
+
+
+ <html><head/><body><p align="center"><span
+ style=" font-weight:600;">Input</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 2
+
+
+ 2
+
+
-
+
+
+ Qt::WheelFocus
+
+
+ <html><head/><body><p>Initial
+ frame of the video to be loaded.</p></body></html>
+
+
+
+ 100000
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+ From:
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+ To:
+
+
+
+ -
+
+
+ <html><head/><body><p>Final
+ frame of the video to be loaded.</p></body></html>
+
+
+
+ Qt::LeftToRight
+
+
+ false
+
+
+ 0
+
+
+ 100000
+
+
+ 500
+
+
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+ <html><head/><body><p>Loads
+ the frames from the selected video file.</p></body></html>
+
+
+
+ Choose Video
+
+
+ false
+
+
+ false
+
+
+ false
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+
+ -
+
+
-
+
+
+ 0
+
+
-
+
+
+ Input image scaling : 1.00
+
+
+
+ -
+
+
+ 1
+
+
+ 19
+
+
+ 0
+
+
+ 1
+
+
+ 10
+
+
+ 10
+
+
+ true
+
+
+ Qt::Horizontal
+
+
+ QSlider::TicksAbove
+
+
+ 2
+
+
+
+
+
+ -
+
+
-
+
+
+ 0
+
+
+ 0
+
+
-
+
+
+ false
+
+
+
+ 0
+ 0
+
+
+
+
+ 20
+ 0
+
+
+
+ <html><head/><body><p>Play
+ the current video/Webcam.</p></body></html>
+
+
+
+ Play
+
+
+ false
+
+
+
+ -
+
+
+ false
+
+
+
+ 0
+ 0
+
+
+
+
+ 20
+ 0
+
+
+
+ <html><head/><body><p>Resets
+ the current video to the start.</p></body></html>
+
+
+
+ Stop and Save
+
+
+ false
+
+
+ false
+
+
+
+
+
+ -
+
+
+ false
+
+
+ Quick Save
+
+
+
+
+
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 0
+
+
+
+
+
+
+
+ -
+
+
+
+ 1
+ 0
+
+
+
+
-
+
+
+
+ 0
+ 0
+
+
+
+
+ 250
+ 150
+
+
+
+
+ 14
+
+
+
+ CrossCursor
+
+
+ true
+
+
+ QFrame::Box
+
+
+ <html><head/><body><p align="center">Current
+ Module</p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
-
+
+
-
+
+
+ Module Detected:
+
+
+
+ -
+
+
-
+
+
+ false
+
+
+ Select current module as 'Working' (shortcut:
+ '0')
+
+
+
+ Working (0)
+
+
+ 0
+
+
+
+ -
+
+
+ false
+
+
+ Select current module as 'Broken' (shortcut:
+ '1')
+
+
+
+ Broken (1)
+
+
+ 1
+
+
+
+
+
+
+
+ -
+
+
+ 10
+
+
-
+
+
+ Misdetection:
+
+
+
+ -
+
+
+ false
+
+
+ Select current module as 'Misdetected' (shortcut: '2')
+
+
+
+ Other(2)
+
+
+ 2
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+
+ -
+
+
+ 10
+
+
-
+
+
+ Other:
+
+
+
+ -
+
+
+ <html><head/><body><p><span style="
+ font-weight:600;">Total classified modules:</span></p></body></html>
+
+
+
+
+ -
+
+
+ Working:
+
+
+
+ -
+
+
+ Broken:
+
+
+
+ -
+
+
+ Automatic:
+
+
+
+ -
+
+
+ Manual:
+
+
+
+ -
+
+
+ <html><head/><body><p><span style="
+ font-weight:600;">0</span></p></body></html>
+
+
+
+
+ -
+
+
+ <html><head/><body><p><span style="
+ font-weight:600;">0</span></p></body></html>
+
+
+
+
+ -
+
+
+ 0
+
+
+
+ -
+
+
+ 0
+
+
+
+ -
+
+
+ 0
+
+
+
+ -
+
+
+ 0
+
+
+
+ -
+
+
+ 0
+
+
+
+ -
+
+
+ 0
+
+
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 0
+
+
+
+
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+ QFrame::StyledPanel
+
+
+ QFrame::Raised
+
+
+
-
+
+
+
+ 0
+ 0
+
+
+
+ false
+
+
+ QTabWidget::North
+
+
+ QTabWidget::Rounded
+
+
+ 0
+
+
+
+ 16
+ 16
+
+
+
+ Qt::ElideNone
+
+
+ true
+
+
+ false
+
+
+ false
+
+
+ false
+
+
+ false
+
+
+
+ Preprocessing
+
+
+
-
+
+
+ 10
+
+
+ 10
+
+
+ 10
+
+
-
+
+
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Preprocessing</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ undistort image
+
+
+ true
+
+
+ false
+
+
+
+ -
+
+
+ 10
+
+
-
+
+
+
+ 0
+ 0
+
+
+
+ angle:
+
+
+
+ -
+
+
+ 15
+
+
+ 3
+
+
+
+ -
+
+
+ blur:
+
+
+
+ -
+
+
+ 0
+
+
+ 360
+
+
+ 10
+
+
+
+ -
+
+
+ temperature:
+
+
+
+ -
+
+
+ 255
+
+
+ 10
+
+
+ 200
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Canny</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ <html><head/><body><p>Histeresis:</p></body></html>
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ false
+
+
+ true
+
+
+ QAbstractSpinBox::UpDownArrows
+
+
+ 500
+
+
+ 10
+
+
+ 30
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 16777215
+ 16
+
+
+
+ max
+
+
+
+ -
+
+
+ 500
+
+
+ 10
+
+
+ 140
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 16777215
+ 16
+
+
+
+ min
+
+
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ dilation steps:
+
+
+
+ -
+
+
+ 10
+
+
+ 3
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 40
+
+
+
+
+
+
+
+
+ Segments
+
+
+ -
+
+
+ 10
+
+
+ 10
+
+
+ 10
+
+
-
+
+
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Detection</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ 500
+
+
+ 10
+
+
+ 150
+
+
+
+ -
+
+
+ max gap
+
+
+
+ -
+
+
+ 0
+
+
+ 500
+
+
+ 10
+
+
+ 50
+
+
+
+ -
+
+
+ delta rho
+
+
+
+ -
+
+
+ 0
+
+
+ 500
+
+
+ 10
+
+
+ 60
+
+
+
+ -
+
+
+ 1
+
+
+ 180
+
+
+ 1
+
+
+
+ -
+
+
+ min votes
+
+
+
+ -
+
+
+ 1
+
+
+ 100
+
+
+ 1
+
+
+
+ -
+
+
+ min length
+
+
+
+ -
+
+
+ delta theta
+
+
+
+ -
+
+
+ extend segments
+
+
+
+ -
+
+
+ 500
+
+
+ 10
+
+
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+
+ -
+
+
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Clustering</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ num init
+
+
+
+ -
+
+
+ type
+
+
+
+ -
+
+
+ 2
+
+
+
+ -
+
+
+ false
+
+
+ 1
+
+
+ 5
+
+
+
+ -
+
+
+ num clusters
+
+
+
+ -
+
+
-
+
+
+ GMM
+
+
+ true
+
+
+
+ -
+
+
+ KNN
+
+
+
+
+
+
+
+ -
+
+
+ Features:
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ swipe
+
+
+
+ -
+
+
+ angles
+
+
+ true
+
+
+
+ -
+
+
+ centers
+
+
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+
+ -
+
+
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Cleaning</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ max merging distance
+
+
+
+ -
+
+
+ max angle variation
+
+
+
+ -
+
+
+ max merging angle
+
+
+
+ -
+
+
+ 90
+
+
+ 5
+
+
+ 20
+
+
+
+ -
+
+
+ 90
+
+
+ 5
+
+
+ 10
+
+
+
+ -
+
+
+ 500
+
+
+ 10
+
+
+ 10
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 41
+
+
+
+
+
+
+
+
+ Modules
+
+
+ -
+
+
+ 10
+
+
+ 10
+
+
+ 10
+
+
-
+
+
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Filter</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+
+ -
+
+
+ 0
+
+
-
+
+
+ 10.000000000000000
+
+
+ 0.100000000000000
+
+
+ 0.300000000000000
+
+
+
+ -
+
+
+ ratio max deviation
+
+
+
+ -
+
+
+ 1000000
+
+
+ 100
+
+
+ 800
+
+
+
+ -
+
+
+ min area
+
+
+
+ -
+
+
+ expected ratio
+
+
+
+ -
+
+
+ 0.100000000000000
+
+
+ 5.000000000000000
+
+
+ 0.100000000000000
+
+
+ 1.500000000000000
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 40
+
+
+
+
+
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 0
+
+
+
+
+
+
+
+
+
+
+
+
+ Load
+
+
+
+
+ About
+
+
+
+
+ Exit
+
+
+
+
+
+
diff --git a/gui/design/image_saving_gui.py b/gui/design/image_saving_gui.py
new file mode 100644
index 0000000..4fde56f
--- /dev/null
+++ b/gui/design/image_saving_gui.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'image_saving_gui.ui'
+#
+# Created by: PyQt5 UI code generator 5.6
+#
+# WARNING! All changes made in this file will be lost!
+
+from PyQt5 import QtCore, QtWidgets
+
+
+class Ui_Save_images_dialog(object):
+ def setupUi(self, Save_images_dialog):
+ Save_images_dialog.setObjectName("Save_images_dialog")
+ Save_images_dialog.resize(380, 190)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(Save_images_dialog.sizePolicy().hasHeightForWidth())
+ Save_images_dialog.setSizePolicy(sizePolicy)
+ Save_images_dialog.setMinimumSize(QtCore.QSize(380, 190))
+ Save_images_dialog.setMaximumSize(QtCore.QSize(380, 190))
+ self.horizontalLayout = QtWidgets.QHBoxLayout(Save_images_dialog)
+ self.horizontalLayout.setObjectName("horizontalLayout")
+ self.verticalLayout = QtWidgets.QVBoxLayout()
+ self.verticalLayout.setObjectName("verticalLayout")
+ self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
+ self.horizontalLayout_2.setObjectName("horizontalLayout_2")
+ self.choose_directory_button = QtWidgets.QPushButton(Save_images_dialog)
+ self.choose_directory_button.setObjectName("choose_directory_button")
+ self.horizontalLayout_2.addWidget(self.choose_directory_button)
+ self.verticalLayout.addLayout(self.horizontalLayout_2)
+ self.save_directory_label = QtWidgets.QLabel(Save_images_dialog)
+ self.save_directory_label.setObjectName("save_directory_label")
+ self.verticalLayout.addWidget(self.save_directory_label)
+ spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
+ self.verticalLayout.addItem(spacerItem)
+ self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
+ self.horizontalLayout_3.setObjectName("horizontalLayout_3")
+ spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
+ self.horizontalLayout_3.addItem(spacerItem1)
+ self.save_button = QtWidgets.QPushButton(Save_images_dialog)
+ self.save_button.setEnabled(False)
+ self.save_button.setObjectName("save_button")
+ self.horizontalLayout_3.addWidget(self.save_button)
+ spacerItem2 = QtWidgets.QSpacerItem(35, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
+ self.horizontalLayout_3.addItem(spacerItem2)
+ self.verticalLayout.addLayout(self.horizontalLayout_3)
+ self.progress_bar_all_frames = QtWidgets.QProgressBar(Save_images_dialog)
+ self.progress_bar_all_frames.setEnabled(False)
+ self.progress_bar_all_frames.setMaximumSize(QtCore.QSize(16777215, 15))
+ self.progress_bar_all_frames.setProperty("value", 0)
+ self.progress_bar_all_frames.setTextVisible(True)
+ self.progress_bar_all_frames.setObjectName("progress_bar_all_frames")
+ self.verticalLayout.addWidget(self.progress_bar_all_frames)
+ self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
+ self.horizontalLayout_4.setObjectName("horizontalLayout_4")
+ self.progress_bar_intra_frame = QtWidgets.QProgressBar(Save_images_dialog)
+ self.progress_bar_intra_frame.setEnabled(False)
+ self.progress_bar_intra_frame.setMaximumSize(QtCore.QSize(16777215, 8))
+ self.progress_bar_intra_frame.setProperty("value", 0)
+ self.progress_bar_intra_frame.setTextVisible(False)
+ self.progress_bar_intra_frame.setObjectName("progress_bar_intra_frame")
+ self.horizontalLayout_4.addWidget(self.progress_bar_intra_frame)
+ spacerItem3 = QtWidgets.QSpacerItem(35, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
+ self.horizontalLayout_4.addItem(spacerItem3)
+ self.verticalLayout.addLayout(self.horizontalLayout_4)
+ self.horizontalLayout.addLayout(self.verticalLayout)
+
+ self.retranslateUi(Save_images_dialog)
+ QtCore.QMetaObject.connectSlotsByName(Save_images_dialog)
+
+ def retranslateUi(self, Save_images_dialog):
+ _translate = QtCore.QCoreApplication.translate
+ Save_images_dialog.setWindowTitle(_translate("Save_images_dialog", "ThermoGUI - Save Images"))
+ self.choose_directory_button.setText(_translate("Save_images_dialog", "Choose Output Directory"))
+ self.save_directory_label.setText(_translate("Save_images_dialog", "Saving to directory: \" \""))
+ self.save_button.setText(_translate("Save_images_dialog", "Save!"))
diff --git a/gui/design/image_saving_gui.ui b/gui/design/image_saving_gui.ui
new file mode 100644
index 0000000..59ddbe5
--- /dev/null
+++ b/gui/design/image_saving_gui.ui
@@ -0,0 +1,175 @@
+
+
+ Save_images_dialog
+
+
+
+ 0
+ 0
+ 380
+ 190
+
+
+
+
+ 0
+ 0
+
+
+
+
+ 380
+ 190
+
+
+
+
+ 380
+ 190
+
+
+
+ ThermoGUI - Save Images
+
+
+ -
+
+
-
+
+
-
+
+
+ Choose Output Directory
+
+
+
+
+
+ -
+
+
+ Saving to directory: " "
+
+
+
+ -
+
+
+ Qt::Vertical
+
+
+
+ 20
+ 40
+
+
+
+
+ -
+
+
-
+
+
+ Qt::Horizontal
+
+
+
+ 40
+ 20
+
+
+
+
+ -
+
+
+ false
+
+
+ Save!
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+ QSizePolicy::Fixed
+
+
+
+ 35
+ 20
+
+
+
+
+
+
+ -
+
+
+ false
+
+
+
+ 16777215
+ 15
+
+
+
+ 0
+
+
+ true
+
+
+
+ -
+
+
-
+
+
+ false
+
+
+
+ 16777215
+ 8
+
+
+
+ 0
+
+
+ false
+
+
+
+ -
+
+
+ Qt::Horizontal
+
+
+ QSizePolicy::Fixed
+
+
+
+ 35
+ 20
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/gui/design/thermo_gui_design.py b/gui/design/thermo_gui_design.py
index d23f045..3b6c7ee 100644
--- a/gui/design/thermo_gui_design.py
+++ b/gui/design/thermo_gui_design.py
@@ -11,7 +11,7 @@
class Ui_ThermoGUI_main_window(object):
def setupUi(self, ThermoGUI_main_window):
ThermoGUI_main_window.setObjectName("ThermoGUI_main_window")
- ThermoGUI_main_window.resize(909, 598)
+ ThermoGUI_main_window.resize(920, 598)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
@@ -30,46 +30,61 @@ def setupUi(self, ThermoGUI_main_window):
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
- self.widget_3 = QtWidgets.QWidget(self.centralwidget)
+ self.left_panel = QtWidgets.QWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(2)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.widget_3.sizePolicy().hasHeightForWidth())
- self.widget_3.setSizePolicy(sizePolicy)
- self.widget_3.setObjectName("widget_3")
- self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.widget_3)
+ sizePolicy.setHeightForWidth(self.left_panel.sizePolicy().hasHeightForWidth())
+ self.left_panel.setSizePolicy(sizePolicy)
+ self.left_panel.setObjectName("left_panel")
+ self.verticalLayout_11 = QtWidgets.QVBoxLayout(self.left_panel)
self.verticalLayout_11.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_11.setObjectName("verticalLayout_11")
- self.module_image_view = QtWidgets.QLabel(self.widget_3)
+ self.class_image_view = QtWidgets.QLabel(self.left_panel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.module_image_view.sizePolicy().hasHeightForWidth())
- self.module_image_view.setSizePolicy(sizePolicy)
- self.module_image_view.setMinimumSize(QtCore.QSize(300, 300))
+ sizePolicy.setHeightForWidth(self.class_image_view.sizePolicy().hasHeightForWidth())
+ self.class_image_view.setSizePolicy(sizePolicy)
+ self.class_image_view.setMinimumSize(QtCore.QSize(300, 300))
font = QtGui.QFont()
font.setPointSize(26)
- self.module_image_view.setFont(font)
- self.module_image_view.setAutoFillBackground(True)
- self.module_image_view.setFrameShape(QtWidgets.QFrame.Box)
- self.module_image_view.setTextFormat(QtCore.Qt.RichText)
- self.module_image_view.setScaledContents(False)
- self.module_image_view.setObjectName("module_image_view")
- self.verticalLayout_11.addWidget(self.module_image_view)
+ self.class_image_view.setFont(font)
+ self.class_image_view.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))
+ self.class_image_view.setMouseTracking(True)
+ self.class_image_view.setAutoFillBackground(True)
+ self.class_image_view.setFrameShape(QtWidgets.QFrame.Box)
+ self.class_image_view.setTextFormat(QtCore.Qt.RichText)
+ self.class_image_view.setObjectName("class_image_view")
+ self.verticalLayout_11.addWidget(self.class_image_view)
+ self.global_progress_bar = QtWidgets.QProgressBar(self.left_panel)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.global_progress_bar.sizePolicy().hasHeightForWidth())
+ self.global_progress_bar.setSizePolicy(sizePolicy)
+ self.global_progress_bar.setMinimumSize(QtCore.QSize(0, 10))
+ self.global_progress_bar.setMaximumSize(QtCore.QSize(16777215, 10))
+ self.global_progress_bar.setProperty("value", 0)
+ self.global_progress_bar.setTextVisible(False)
+ self.global_progress_bar.setInvertedAppearance(False)
+ self.global_progress_bar.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
+ self.global_progress_bar.setObjectName("global_progress_bar")
+ self.verticalLayout_11.addWidget(self.global_progress_bar)
spacerItem = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_11.addItem(spacerItem)
- self.horizontalLayout_2.addWidget(self.widget_3)
- self.widget = QtWidgets.QWidget(self.centralwidget)
+ self.horizontalLayout_2.addWidget(self.left_panel)
+ self.central_panel = QtWidgets.QWidget(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(1)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
- self.widget.setSizePolicy(sizePolicy)
- self.widget.setObjectName("widget")
- self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.widget)
+ sizePolicy.setHeightForWidth(self.central_panel.sizePolicy().hasHeightForWidth())
+ self.central_panel.setSizePolicy(sizePolicy)
+ self.central_panel.setObjectName("central_panel")
+ self.verticalLayout_10 = QtWidgets.QVBoxLayout(self.central_panel)
self.verticalLayout_10.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_10.setObjectName("verticalLayout_10")
- self.widget_2 = QtWidgets.QWidget(self.widget)
+ self.widget_2 = QtWidgets.QWidget(self.central_panel)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
@@ -164,7 +179,7 @@ def setupUi(self, ThermoGUI_main_window):
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.video_view.sizePolicy().hasHeightForWidth())
self.video_view.setSizePolicy(sizePolicy)
- self.video_view.setMinimumSize(QtCore.QSize(250, 0))
+ self.video_view.setMinimumSize(QtCore.QSize(250, 150))
self.video_view.setMaximumSize(QtCore.QSize(250, 16777215))
font = QtGui.QFont()
font.setPointSize(26)
@@ -181,6 +196,34 @@ def setupUi(self, ThermoGUI_main_window):
self.video_view.setObjectName("video_view")
self.horizontalLayout.addWidget(self.video_view)
self.tabWidget.addTab(self.input_image_tab, "")
+ self.attention_image_tab = QtWidgets.QWidget()
+ self.attention_image_tab.setObjectName("attention_image_tab")
+ self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.attention_image_tab)
+ self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
+ self.horizontalLayout_6.setObjectName("horizontalLayout_6")
+ self.attention_view = QtWidgets.QLabel(self.attention_image_tab)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
+ sizePolicy.setHorizontalStretch(0)
+ sizePolicy.setVerticalStretch(0)
+ sizePolicy.setHeightForWidth(self.attention_view.sizePolicy().hasHeightForWidth())
+ self.attention_view.setSizePolicy(sizePolicy)
+ self.attention_view.setMinimumSize(QtCore.QSize(250, 150))
+ self.attention_view.setMaximumSize(QtCore.QSize(250, 16777215))
+ font = QtGui.QFont()
+ font.setPointSize(26)
+ self.attention_view.setFont(font)
+ self.attention_view.setCursor(QtGui.QCursor(QtCore.Qt.CrossCursor))
+ self.attention_view.setMouseTracking(True)
+ self.attention_view.setAutoFillBackground(True)
+ self.attention_view.setFrameShape(QtWidgets.QFrame.Box)
+ self.attention_view.setFrameShadow(QtWidgets.QFrame.Plain)
+ self.attention_view.setTextFormat(QtCore.Qt.RichText)
+ self.attention_view.setScaledContents(False)
+ self.attention_view.setAlignment(QtCore.Qt.AlignCenter)
+ self.attention_view.setWordWrap(False)
+ self.attention_view.setObjectName("attention_view")
+ self.horizontalLayout_6.addWidget(self.attention_view)
+ self.tabWidget.addTab(self.attention_image_tab, "")
self.canny_edges_tab = QtWidgets.QWidget()
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
@@ -268,24 +311,39 @@ def setupUi(self, ThermoGUI_main_window):
self.horizontalLayout_4.addWidget(self.rectangle_image_view)
self.tabWidget.addTab(self.rectangle_image_tab, "")
self.verticalLayout_6.addWidget(self.tabWidget)
- self.global_progress_bar = QtWidgets.QProgressBar(self.widget_2)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.Fixed)
+ self.line_6 = QtWidgets.QFrame(self.widget_2)
+ self.line_6.setFrameShape(QtWidgets.QFrame.HLine)
+ self.line_6.setFrameShadow(QtWidgets.QFrame.Sunken)
+ self.line_6.setObjectName("line_6")
+ self.verticalLayout_6.addWidget(self.line_6)
+ self.widget_4 = QtWidgets.QWidget(self.widget_2)
+ self.widget_4.setObjectName("widget_4")
+ self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.widget_4)
+ self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)
+ self.horizontalLayout_7.setObjectName("horizontalLayout_7")
+ self.module_image_view = QtWidgets.QLabel(self.widget_4)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
- sizePolicy.setHeightForWidth(self.global_progress_bar.sizePolicy().hasHeightForWidth())
- self.global_progress_bar.setSizePolicy(sizePolicy)
- self.global_progress_bar.setMinimumSize(QtCore.QSize(0, 10))
- self.global_progress_bar.setMaximumSize(QtCore.QSize(16777215, 10))
- self.global_progress_bar.setProperty("value", 0)
- self.global_progress_bar.setTextVisible(False)
- self.global_progress_bar.setInvertedAppearance(False)
- self.global_progress_bar.setTextDirection(QtWidgets.QProgressBar.TopToBottom)
- self.global_progress_bar.setObjectName("global_progress_bar")
- self.verticalLayout_6.addWidget(self.global_progress_bar)
+ sizePolicy.setHeightForWidth(self.module_image_view.sizePolicy().hasHeightForWidth())
+ self.module_image_view.setSizePolicy(sizePolicy)
+ self.module_image_view.setMinimumSize(QtCore.QSize(250, 150))
+ self.module_image_view.setMaximumSize(QtCore.QSize(250, 16777215))
+ font = QtGui.QFont()
+ font.setPointSize(26)
+ self.module_image_view.setFont(font)
+ self.module_image_view.setCursor(QtGui.QCursor(QtCore.Qt.ArrowCursor))
+ self.module_image_view.setAutoFillBackground(True)
+ self.module_image_view.setFrameShape(QtWidgets.QFrame.Box)
+ self.module_image_view.setTextFormat(QtCore.Qt.RichText)
+ self.module_image_view.setScaledContents(False)
+ self.module_image_view.setObjectName("module_image_view")
+ self.horizontalLayout_7.addWidget(self.module_image_view)
+ self.verticalLayout_6.addWidget(self.widget_4)
self.verticalLayout_10.addWidget(self.widget_2)
spacerItem1 = QtWidgets.QSpacerItem(20, 0, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.verticalLayout_10.addItem(spacerItem1)
- self.horizontalLayout_2.addWidget(self.widget)
+ self.horizontalLayout_2.addWidget(self.central_panel)
self.control_panel = QtWidgets.QFrame(self.centralwidget)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
@@ -434,6 +492,11 @@ def setupUi(self, ThermoGUI_main_window):
self.preprocessing_grid_layout = QtWidgets.QGridLayout()
self.preprocessing_grid_layout.setContentsMargins(-1, 10, -1, -1)
self.preprocessing_grid_layout.setObjectName("preprocessing_grid_layout")
+ self.blur_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.blur_value.setMaximum(15)
+ self.blur_value.setProperty("value", 3)
+ self.blur_value.setObjectName("blur_value")
+ self.preprocessing_grid_layout.addWidget(self.blur_value, 1, 1, 1, 1)
self.angle_label = QtWidgets.QLabel(self.preprocessing_tab)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
@@ -442,20 +505,25 @@ def setupUi(self, ThermoGUI_main_window):
self.angle_label.setSizePolicy(sizePolicy)
self.angle_label.setObjectName("angle_label")
self.preprocessing_grid_layout.addWidget(self.angle_label, 0, 0, 1, 1)
+ self.blur_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.blur_label.setObjectName("blur_label")
+ self.preprocessing_grid_layout.addWidget(self.blur_label, 1, 0, 1, 1)
self.angle_value = QtWidgets.QSpinBox(self.preprocessing_tab)
self.angle_value.setMinimum(0)
self.angle_value.setMaximum(360)
self.angle_value.setSingleStep(10)
self.angle_value.setObjectName("angle_value")
self.preprocessing_grid_layout.addWidget(self.angle_value, 0, 1, 1, 1)
- self.blur_label = QtWidgets.QLabel(self.preprocessing_tab)
- self.blur_label.setObjectName("blur_label")
- self.preprocessing_grid_layout.addWidget(self.blur_label, 1, 0, 1, 1)
- self.blur_value = QtWidgets.QSpinBox(self.preprocessing_tab)
- self.blur_value.setMaximum(15)
- self.blur_value.setProperty("value", 3)
- self.blur_value.setObjectName("blur_value")
- self.preprocessing_grid_layout.addWidget(self.blur_value, 1, 1, 1, 1)
+ self.temperature_label = QtWidgets.QLabel(self.preprocessing_tab)
+ self.temperature_label.setObjectName("temperature_label")
+ self.preprocessing_grid_layout.addWidget(self.temperature_label, 2, 0, 1, 1)
+ self.temperature_value = QtWidgets.QSpinBox(self.preprocessing_tab)
+ self.temperature_value.setMaximum(255)
+ self.temperature_value.setSingleStep(10)
+ self.temperature_value.setProperty("value", 200)
+ self.temperature_value.setDisplayIntegerBase(10)
+ self.temperature_value.setObjectName("temperature_value")
+ self.preprocessing_grid_layout.addWidget(self.temperature_value, 2, 1, 1, 1)
self.preprocessing_layout.addLayout(self.preprocessing_grid_layout)
self.verticalLayout_9.addLayout(self.preprocessing_layout)
self.line_2 = QtWidgets.QFrame(self.preprocessing_tab)
@@ -757,7 +825,7 @@ def setupUi(self, ThermoGUI_main_window):
self.horizontalLayout_2.addWidget(self.control_panel, 0, QtCore.Qt.AlignTop)
ThermoGUI_main_window.setCentralWidget(self.centralwidget)
self.menuBar = QtWidgets.QMenuBar(ThermoGUI_main_window)
- self.menuBar.setGeometry(QtCore.QRect(0, 0, 909, 21))
+ self.menuBar.setGeometry(QtCore.QRect(0, 0, 920, 21))
self.menuBar.setObjectName("menuBar")
self.FileMenu = QtWidgets.QMenu(self.menuBar)
self.FileMenu.setObjectName("FileMenu")
@@ -782,54 +850,103 @@ def retranslateUi(self, ThermoGUI_main_window):
_translate = QtCore.QCoreApplication.translate
ThermoGUI_main_window.setWindowTitle(_translate("ThermoGUI_main_window", "ThermoGUI"))
ThermoGUI_main_window.setWhatsThis(_translate("ThermoGUI_main_window", "Tool Bar
"))
- self.module_image_view.setText(_translate("ThermoGUI_main_window", "Module Map
"))
- self.play_video_button.setToolTip(_translate("ThermoGUI_main_window", "Play the current video/Webcam.
"))
+ self.class_image_view.setText(_translate("ThermoGUI_main_window", "Classes Image
"))
+ self.global_progress_bar.setFormat(_translate("ThermoGUI_main_window", "%p%"))
+ self.play_video_button.setToolTip(_translate("ThermoGUI_main_window", "Play\n"
+" the current video/Webcam.
\n"
+" "))
self.play_video_button.setText(_translate("ThermoGUI_main_window", "Play"))
- self.pause_video_button.setToolTip(_translate("ThermoGUI_main_window", "Pause the current video/Webcam.
"))
+ self.pause_video_button.setToolTip(_translate("ThermoGUI_main_window", "Pause\n"
+" the current video/Webcam.
\n"
+" "))
self.pause_video_button.setText(_translate("ThermoGUI_main_window", "Pause"))
- self.stop_video_button.setToolTip(_translate("ThermoGUI_main_window", "Resets the current video to the start.
"))
+ self.stop_video_button.setToolTip(_translate("ThermoGUI_main_window", "Resets\n"
+" the current video to the start.
\n"
+" "))
self.stop_video_button.setText(_translate("ThermoGUI_main_window", "Stop"))
- self.reset_button.setToolTip(_translate("ThermoGUI_main_window", "Resets the state of the ThermoGUI.
"))
+ self.reset_button.setToolTip(_translate("ThermoGUI_main_window", "Resets the state\n"
+" of the ThermoGUI.
\n"
+" "))
self.reset_button.setText(_translate("ThermoGUI_main_window", "Reset"))
- self.video_view.setText(_translate("ThermoGUI_main_window", "Input Image
"))
+ self.video_view.setText(_translate("ThermoGUI_main_window", "Input Image
\n"
+" "))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.input_image_tab), _translate("ThermoGUI_main_window", "Input image"))
- self.canny_edges_view.setText(_translate("ThermoGUI_main_window", "Edges Image
"))
+ self.attention_view.setText(_translate("ThermoGUI_main_window", "Attention Image
"))
+ self.tabWidget.setTabText(self.tabWidget.indexOf(self.attention_image_tab), _translate("ThermoGUI_main_window", "Attention image"))
+ self.canny_edges_view.setText(_translate("ThermoGUI_main_window", "Edges Image
\n"
+" "))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.canny_edges_tab), _translate("ThermoGUI_main_window", "Edges image"))
- self.segment_image_view.setText(_translate("ThermoGUI_main_window", "Segment Image
"))
+ self.segment_image_view.setText(_translate("ThermoGUI_main_window", "Segment Image
\n"
+" "))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.segment_image_tab), _translate("ThermoGUI_main_window", "Segment image"))
- self.rectangle_image_view.setText(_translate("ThermoGUI_main_window", "Rectangle Image
"))
+ self.rectangle_image_view.setText(_translate("ThermoGUI_main_window", "Rectangle Image
\n"
+" "))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.rectangle_image_tab), _translate("ThermoGUI_main_window", "Rectangle image"))
- self.global_progress_bar.setFormat(_translate("ThermoGUI_main_window", "%p%"))
- self.label.setText(_translate("ThermoGUI_main_window", "File
"))
- self.video_from_index.setToolTip(_translate("ThermoGUI_main_window", "Initial frame of the video to be loaded.
"))
+ self.module_image_view.setText(_translate("ThermoGUI_main_window", "Module\n"
+" Map
\n"
+" "))
+ self.label.setText(_translate("ThermoGUI_main_window", "File
\n"
+" "))
+ self.video_from_index.setToolTip(_translate("ThermoGUI_main_window", "Initial\n"
+" frame of the video to be loaded.
\n"
+" "))
self.from_video_index_label.setText(_translate("ThermoGUI_main_window", "From:"))
self.to_video_index_label.setText(_translate("ThermoGUI_main_window", "To:"))
- self.video_to_index.setToolTip(_translate("ThermoGUI_main_window", "Final frame of the video to be loaded.
"))
- self.load_video_button.setToolTip(_translate("ThermoGUI_main_window", "Loads the frames from the selected video file.
"))
+ self.video_to_index.setToolTip(_translate("ThermoGUI_main_window", "Final\n"
+" frame of the video to be loaded.
\n"
+" "))
+ self.load_video_button.setToolTip(_translate("ThermoGUI_main_window", "Loads\n"
+" the frames from the selected video file.
\n"
+" "))
self.load_video_button.setText(_translate("ThermoGUI_main_window", "Choose Video"))
- self.label_2.setText(_translate("ThermoGUI_main_window", "WebCam
"))
- self.detect_webcam_button.setToolTip(_translate("ThermoGUI_main_window", "Detects the correct port for the Webcam
"))
+ self.label_2.setText(_translate("ThermoGUI_main_window", "WebCam
\n"
+" "))
+ self.detect_webcam_button.setToolTip(_translate("ThermoGUI_main_window", "Detects\n"
+" the correct port for the Webcam
\n"
+" "))
self.detect_webcam_button.setText(_translate("ThermoGUI_main_window", "Detect"))
self.image_scaling_label.setText(_translate("ThermoGUI_main_window", "Input image scaling : 1.00"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.video_tab), _translate("ThermoGUI_main_window", "Video Loader"))
- self.preprocessing_label.setText(_translate("ThermoGUI_main_window", "Preprocessing
"))
+ self.preprocessing_label.setText(_translate("ThermoGUI_main_window", "Preprocessing
\n"
+" "))
self.undistort_image_box.setText(_translate("ThermoGUI_main_window", "undistort image"))
self.angle_label.setText(_translate("ThermoGUI_main_window", "angle:"))
self.blur_label.setText(_translate("ThermoGUI_main_window", "blur:"))
- self.canny_parameters_label.setText(_translate("ThermoGUI_main_window", "Canny
"))
+ self.temperature_label.setText(_translate("ThermoGUI_main_window", "temperature:"))
+ self.canny_parameters_label.setText(_translate("ThermoGUI_main_window", "Canny
\n"
+" "))
self.histeresis_label.setText(_translate("ThermoGUI_main_window", "Histeresis:
"))
self.label_max_histeresis.setText(_translate("ThermoGUI_main_window", "max"))
self.label_min_histeresis.setText(_translate("ThermoGUI_main_window", "min"))
- self.dilation_label.setText(_translate("ThermoGUI_main_window", "dilation steps: "))
+ self.dilation_label.setText(_translate("ThermoGUI_main_window", "dilation steps:"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.preprocessing_tab), _translate("ThermoGUI_main_window", "Preprocessing"))
- self.segment_label.setText(_translate("ThermoGUI_main_window", "Detection
"))
+ self.segment_label.setText(_translate("ThermoGUI_main_window", "Detection
\n"
+" "))
self.max_gap_label.setText(_translate("ThermoGUI_main_window", "max gap"))
self.delta_rho_label.setText(_translate("ThermoGUI_main_window", "delta rho"))
self.min_votes_label.setText(_translate("ThermoGUI_main_window", "min votes"))
self.min_length_label.setText(_translate("ThermoGUI_main_window", "min length"))
self.delta_theta_label.setText(_translate("ThermoGUI_main_window", "delta theta"))
self.extend_segments_label.setText(_translate("ThermoGUI_main_window", "extend segments"))
- self.clustering_label.setText(_translate("ThermoGUI_main_window", "Clustering
"))
+ self.clustering_label.setText(_translate("ThermoGUI_main_window", "Clustering
\n"
+" "))
self.num_init_label.setText(_translate("ThermoGUI_main_window", "num init"))
self.cluster_type_label.setText(_translate("ThermoGUI_main_window", "type"))
self.num_clusters_label.setText(_translate("ThermoGUI_main_window", "num clusters"))
@@ -839,12 +956,18 @@ def retranslateUi(self, ThermoGUI_main_window):
self.swipe_clusters_value.setText(_translate("ThermoGUI_main_window", "swipe"))
self.use_angle_value.setText(_translate("ThermoGUI_main_window", "angles"))
self.use_centers_value.setText(_translate("ThermoGUI_main_window", "centers"))
- self.cleaning_label.setText(_translate("ThermoGUI_main_window", "Cleaning
"))
+ self.cleaning_label.setText(_translate("ThermoGUI_main_window", "Cleaning
\n"
+" "))
self.max_merging_distance_label.setText(_translate("ThermoGUI_main_window", "max merging distance"))
self.max_angle_variation_mean_label.setText(_translate("ThermoGUI_main_window", "max angle variation"))
self.max_merging_angle_label.setText(_translate("ThermoGUI_main_window", "max merging angle"))
self.tab_widget.setTabText(self.tab_widget.indexOf(self.segment_tab), _translate("ThermoGUI_main_window", "Segments"))
- self.filter_label.setText(_translate("ThermoGUI_main_window", "Filter
"))
+ self.filter_label.setText(_translate("ThermoGUI_main_window", "Filter
\n"
+" "))
self.ratio_max_deviation_label.setText(_translate("ThermoGUI_main_window", "ratio max deviation"))
self.min_area_label.setText(_translate("ThermoGUI_main_window", "min area"))
self.expected_ratio_label.setText(_translate("ThermoGUI_main_window", "expected ratio"))
diff --git a/gui/design/thermography_gui.ui b/gui/design/thermography_gui.ui
index 5c12c3f..55b0d4d 100644
--- a/gui/design/thermography_gui.ui
+++ b/gui/design/thermography_gui.ui
@@ -6,7 +6,7 @@
0
0
- 909
+ 920
598
@@ -38,7 +38,7 @@
-
-
+
2
@@ -47,7 +47,7 @@
-
-
+
0
@@ -65,6 +65,12 @@
26
+
+ CrossCursor
+
+
+ true
+
true
@@ -72,14 +78,48 @@
QFrame::Box
- <html><head/><body><p align="center">Module Map</p></body></html>
+ <html><head/><body><p align="center">Classes Image</p></body></html>
Qt::RichText
-
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 0
+ 10
+
+
+
+
+ 16777215
+ 10
+
+
+
+ 0
+
+
+ false
+
+
false
+
+ QProgressBar::TopToBottom
+
+
+ %p%
+
-
@@ -99,7 +139,7 @@
-
-
+
1
@@ -156,7 +196,9 @@
- <html><head/><body><p>Play the current video/Webcam.</p></body></html>
+ <html><head/><body><p>Play
+ the current video/Webcam.</p></body></html>
+
Play
@@ -184,7 +226,9 @@
- <html><head/><body><p>Pause the current video/Webcam.</p></body></html>
+ <html><head/><body><p>Pause
+ the current video/Webcam.</p></body></html>
+
Pause
@@ -212,7 +256,9 @@
- <html><head/><body><p>Resets the current video to the start.</p></body></html>
+ <html><head/><body><p>Resets
+ the current video to the start.</p></body></html>
+
Stop
@@ -243,7 +289,10 @@
- <html><head/><body><p><span style=" font-weight:400;">Resets the state of the ThermoGUI.</span></p></body></html>
+ <html><head/><body><p><span
+ style=" font-weight:400;">Resets the state
+ of the ThermoGUI.</span></p></body></html>
+
false
@@ -309,7 +358,76 @@
250
- 0
+ 150
+
+
+
+
+ 250
+ 16777215
+
+
+
+
+ 26
+
+
+
+ CrossCursor
+
+
+ true
+
+
+ true
+
+
+ QFrame::Box
+
+
+ QFrame::Plain
+
+
+ <html><head/><body><p
+ align="center">Input Image</p></body></html>
+
+
+
+ Qt::RichText
+
+
+ false
+
+
+ Qt::AlignCenter
+
+
+ false
+
+
+ 0
+
+
+
+
+
+
+
+ Attention image
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 250
+ 150
@@ -339,7 +457,7 @@
QFrame::Plain
- <html><head/><body><p align="center">Input Image</p></body></html>
+ <html><head/><body><p align="center">Attention Image </p></body></html>
Qt::RichText
@@ -400,7 +518,9 @@
QFrame::Box
- <html><head/><body><p align="center">Edges Image</p></body></html>
+ <html><head/><body><p
+ align="center">Edges Image</p></body></html>
+
Qt::RichText
@@ -455,7 +575,9 @@
QFrame::Box
- <html><head/><body><p align="center">Segment Image</p></body></html>
+ <html><head/><body><p
+ align="center">Segment Image</p></body></html>
+
Qt::RichText
@@ -510,7 +632,9 @@
QFrame::Box
- <html><head/><body><p align="center">Rectangle Image</p></body></html>
+ <html><head/><body><p
+ align="center">Rectangle Image</p></body></html>
+
Qt::RichText
@@ -525,42 +649,65 @@
-
-
-
-
- 0
- 0
-
-
-
-
- 0
- 10
-
-
-
-
- 16777215
- 10
-
-
-
- 0
-
-
- false
-
-
- false
-
-
- QProgressBar::TopToBottom
-
-
- %p%
+
+
+ Qt::Horizontal
+ -
+
+
+
-
+
+
+
+ 0
+ 0
+
+
+
+
+ 250
+ 150
+
+
+
+
+ 250
+ 16777215
+
+
+
+
+ 26
+
+
+
+ ArrowCursor
+
+
+ true
+
+
+ QFrame::Box
+
+
+ <html><head/><body><p align="center">Module
+ Map</p></body></html>
+
+
+
+ Qt::RichText
+
+
+ false
+
+
+
+
+
+
@@ -658,7 +805,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">File</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">File</span></p></body></html>
+
Qt::RichText
@@ -679,7 +829,9 @@
Qt::WheelFocus
- <html><head/><body><p>Initial frame of the video to be loaded.</p></body></html>
+ <html><head/><body><p>Initial
+ frame of the video to be loaded.</p></body></html>
+
100000
@@ -715,7 +867,9 @@
-
- <html><head/><body><p>Final frame of the video to be loaded.</p></body></html>
+ <html><head/><body><p>Final
+ frame of the video to be loaded.</p></body></html>
+
Qt::LeftToRight
@@ -745,7 +899,9 @@
- <html><head/><body><p>Loads the frames from the selected video file.</p></body></html>
+ <html><head/><body><p>Loads
+ the frames from the selected video file.</p></body></html>
+
Choose Video
@@ -771,7 +927,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">WebCam</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">WebCam</span></p></body></html>
+
Qt::RichText
@@ -781,7 +940,9 @@
-
- <html><head/><body><p>Detects the correct port for the Webcam</p></body></html>
+ <html><head/><body><p>Detects
+ the correct port for the Webcam</p></body></html>
+
Detect
@@ -879,7 +1040,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">Preprocessing</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Preprocessing</span></p></body></html>
+
Qt::RichText
@@ -909,6 +1073,16 @@
10
+
-
+
+
+ 15
+
+
+ 3
+
+
+
-
@@ -922,6 +1096,13 @@
+ -
+
+
+ blur:
+
+
+
-
@@ -935,20 +1116,26 @@
- -
-
+
-
+
- blur:
+ temperature:
- -
-
+
-
+
- 15
+ 255
+
+
+ 10
- 3
+ 200
+
+
+ 10
@@ -972,7 +1159,10 @@
- <html><head/><body><p align="center"><span style=" font-weight:600;">Canny</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Canny</span></p></body></html>
+
Qt::RichText
@@ -1077,7 +1267,7 @@
-
- dilation steps:
+ dilation steps:
@@ -1129,7 +1319,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">Detection</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Detection</span></p></body></html>
+
Qt::RichText
@@ -1276,7 +1469,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">Clustering</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Clustering</span></p></body></html>
+
Qt::RichText
@@ -1400,7 +1596,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">Cleaning</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Cleaning</span></p></body></html>
+
Qt::RichText
@@ -1510,7 +1709,10 @@
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">Filter</span></p></body></html>
+ <html><head/><body><p
+ align="center"><span style="
+ font-weight:600;">Filter</span></p></body></html>
+
Qt::RichText
@@ -1629,7 +1831,7 @@
0
0
- 909
+ 920
21
diff --git a/gui/design/webcam_dialog_design.py b/gui/design/webcam_dialog_design.py
index bc89aa9..a209748 100644
--- a/gui/design/webcam_dialog_design.py
+++ b/gui/design/webcam_dialog_design.py
@@ -8,6 +8,7 @@
from PyQt5 import QtCore, QtGui, QtWidgets
+
class Ui_WebCam(object):
def setupUi(self, WebCam):
WebCam.setObjectName("WebCam")
@@ -26,7 +27,8 @@ def setupUi(self, WebCam):
self.verticalLayout = QtWidgets.QVBoxLayout(self.centralwidget)
self.verticalLayout.setObjectName("verticalLayout")
self.webcam_view = QtWidgets.QLabel(self.centralwidget)
- sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
+ sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding,
+ QtWidgets.QSizePolicy.MinimumExpanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.webcam_view.sizePolicy().hasHeightForWidth())
@@ -58,8 +60,8 @@ def setupUi(self, WebCam):
def retranslateUi(self, WebCam):
_translate = QtCore.QCoreApplication.translate
WebCam.setWindowTitle(_translate("WebCam", "ThermoGUI - Webcam"))
- self.webcam_view.setText(_translate("WebCam", "
WebCam
"))
+ self.webcam_view.setText(_translate("WebCam",
+ " WebCam
"))
self.previous_button.setText(_translate("WebCam", "Previous"))
self.ok_button.setText(_translate("WebCam", "Use port 0!"))
self.next_button.setText(_translate("WebCam", "Next"))
-
diff --git a/gui/design/webcam_view.ui b/gui/design/webcam_view.ui
index f07253d..d5d8771 100644
--- a/gui/design/webcam_view.ui
+++ b/gui/design/webcam_view.ui
@@ -1,95 +1,98 @@
- WebCam
-
-
-
- 0
- 0
- 310
- 265
-
-
-
-
- 0
- 0
-
-
-
-
- 200
- 200
-
-
-
- ThermoGUI - Webcam
-
-
-
- img/logo-webcam.pngimg/logo-webcam.png
-
-
-
- -
-
-
-
- 0
- 0
-
-
-
-
- 150
- 150
-
-
-
- true
-
-
- <html><head/><body><p align="center"><span style=" font-weight:600;">WebCam</span></p></body></html>
-
-
- Qt::RichText
-
-
- true
-
-
-
- -
-
-
-
-
-
- false
+ WebCam
+
+
+
+ 0
+ 0
+ 310
+ 265
+
-
- Previous
+
+
+ 0
+ 0
+
-
-
- -
-
-
- Use port 0!
+
+
+ 200
+ 200
+
-
-
- -
-
-
- Next
+
+ ThermoGUI - Webcam
-
-
-
-
-
-
-
-
-
+
+
+ img/logo-webcam.pngimg/logo-webcam.png
+
+
+
+
+ -
+
+
+
+ 0
+ 0
+
+
+
+
+ 150
+ 150
+
+
+
+ true
+
+
+ <html><head/><body><p align="center"><span
+ style=" font-weight:600;">WebCam</span></p></body></html>
+
+
+
+ Qt::RichText
+
+
+ true
+
+
+
+ -
+
+
-
+
+
+ false
+
+
+ Previous
+
+
+
+ -
+
+
+ Use port 0!
+
+
+
+ -
+
+
+ Next
+
+
+
+
+
+
+
+
+
+
diff --git a/gui/dialogs/__init__.py b/gui/dialogs/__init__.py
index 151fee8..9347cb5 100644
--- a/gui/dialogs/__init__.py
+++ b/gui/dialogs/__init__.py
@@ -1 +1,6 @@
-from .thermo_gui import ThermoGUI
\ No newline at end of file
+from .about_dialog import AboutDialog
+from .webcam_dialog import WebcamDialog
+from .image_saving_dialog import SaveImageDialog
+
+from .create_dataset_dialog import CreateDatasetGUI
+from .thermo_gui_dialog import ThermoGUI
\ No newline at end of file
diff --git a/gui/dialogs/about_dialog.py b/gui/dialogs/about_dialog.py
index a8717a0..1565fe1 100644
--- a/gui/dialogs/about_dialog.py
+++ b/gui/dialogs/about_dialog.py
@@ -1,11 +1,15 @@
-from PyQt5 import QtGui, QtCore, QtWidgets
import os
+
+from PyQt5 import QtGui, QtCore, QtWidgets
+from simple_logger import Logger
+
import thermography as tg
class AboutDialog(QtWidgets.QMessageBox):
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
+ Logger.debug("Opened About dialog")
self.setWindowTitle("Thermography - About")
self.setTextFormat(QtCore.Qt.RichText)
gui_directory = os.path.join(os.path.join(tg.settings.get_thermography_root_dir(), os.pardir), "gui")
diff --git a/gui/dialogs/create_dataset_dialog.py b/gui/dialogs/create_dataset_dialog.py
new file mode 100644
index 0000000..99131c9
--- /dev/null
+++ b/gui/dialogs/create_dataset_dialog.py
@@ -0,0 +1,404 @@
+import os
+
+import cv2
+import numpy as np
+from PyQt5 import QtGui, QtCore, QtWidgets
+from PyQt5.QtGui import QImage, QPainter
+from simple_logger import Logger
+
+import thermography as tg
+from gui.design import Ui_CreateDataset_main_window
+from gui.dialogs import AboutDialog, SaveImageDialog
+from gui.threads import ThermoDatasetCreationThread
+
+
+class VideoLoaderThread(QtCore.QThread):
+ finish_signal = QtCore.pyqtSignal(list)
+
+ def __init__(self, video_path: str, from_index: int, to_index: int, parent=None):
+ super(self.__class__, self).__init__(parent=parent)
+ self.video_path = video_path
+ self.from_index = from_index
+ self.to_index = to_index
+
+ def run(self):
+ video_loader = tg.io.VideoLoader(self.video_path, self.from_index, self.to_index)
+ self.finish_signal.emit(video_loader.frames)
+
+
+class CreateDatasetGUI(QtWidgets.QMainWindow, Ui_CreateDataset_main_window):
+ """
+ Dataset creation GUI.
+ """
+
+ def __init__(self):
+ super(self.__class__, self).__init__()
+ Logger.info("Creating dataset creation GUI")
+ self.setupUi(self)
+ self.set_logo_icon()
+
+ self.last_folder_opened = None
+ self.frames = []
+ self.last_frame_image = None
+ self.current_frame_id = 0
+ self.current_module_id_in_frame = 0
+ self.current_frame_modules = []
+ self.discarded_modules = {}
+ self.accepted_modules = {}
+ self.misdetected_modules = {}
+
+ self.module_counter = {"automatic": {"accepted": 0, "discarded": 0, "misdetected": 0},
+ "manual": {"accepted": 0, "discarded": 0, "misdetected": 0}}
+
+ self.thermo_thread = None
+
+ self.connect_widgets()
+
+ def set_logo_icon(self):
+ gui_path = os.path.join(os.path.join(tg.settings.get_thermography_root_dir(), os.pardir), "gui")
+ logo_path = os.path.join(gui_path, "img/logo.png")
+ Logger.debug("Setting logo {}".format(logo_path))
+ icon = QtGui.QIcon()
+ icon.addPixmap(QtGui.QPixmap(logo_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
+ self.setWindowIcon(icon)
+
+ def connect_widgets(self):
+ Logger.debug("Connecting all widgets")
+ # File buttons
+ self.file_about.triggered.connect(self.open_about_window)
+ self.file_exit.triggered.connect(self.deleteLater)
+
+ # Main buttons.
+ self.load_video_button.clicked.connect(self.load_video_from_file)
+
+ self.play_video_button.clicked.connect(self.start_playing_frames)
+ self.stop_video_button.clicked.connect(self.save_and_close)
+ self.quick_save_button.clicked.connect(self.save_module_dataset)
+
+ # Working and Broken module buttons.
+ self.module_working_button.clicked.connect(self.current_module_is_working)
+ self.module_broken_button.clicked.connect(self.current_module_is_broken)
+ self.misdetection_button.clicked.connect(self.current_module_misdetection)
+
+ # Preprocessing
+ self.undistort_image_box.stateChanged.connect(self.update_image_distortion)
+
+ self.image_scaling_slider.valueChanged.connect(self.update_preprocessing_params)
+ self.angle_value.valueChanged.connect(self.update_preprocessing_params)
+ self.blur_value.valueChanged.connect(self.update_preprocessing_params)
+ self.temperature_value.valueChanged.connect(self.update_preprocessing_params)
+
+ # Edge extraction.
+ self.max_histeresis_value.valueChanged.connect(self.update_histeresis_params)
+ self.min_histeresis_value.valueChanged.connect(self.update_histeresis_params)
+ self.dilation_value.valueChanged.connect(self.update_dilation_steps)
+
+ # Segment detection.
+ self.delta_rho_value.valueChanged.connect(self.update_edge_params)
+ self.delta_theta_value.valueChanged.connect(self.update_edge_params)
+ self.min_votes_value.valueChanged.connect(self.update_edge_params)
+ self.min_length_value.valueChanged.connect(self.update_edge_params)
+ self.max_gap_value.valueChanged.connect(self.update_edge_params)
+ self.extend_segments_value.valueChanged.connect(self.update_edge_params)
+
+ # Segment clustering.
+ self.gmm_value.clicked.connect(self.update_clustering_params)
+ self.knn_value.clicked.connect(self.update_clustering_params)
+ self.num_clusters_value.valueChanged.connect(self.update_clustering_params)
+ self.num_init_value.valueChanged.connect(self.update_clustering_params)
+ self.use_angle_value.stateChanged.connect(self.update_clustering_params)
+ self.use_centers_value.stateChanged.connect(self.update_clustering_params)
+ self.swipe_clusters_value.stateChanged.connect(self.update_clustering_params)
+
+ # Segment cleaning
+ self.max_angle_variation_mean_value.valueChanged.connect(self.update_cluster_cleaning_params)
+ self.max_merging_angle_value.valueChanged.connect(self.update_cluster_cleaning_params)
+ self.max_merging_distance_value.valueChanged.connect(self.update_cluster_cleaning_params)
+
+ # Rectangle detection.
+ self.expected_ratio_value.valueChanged.connect(self.update_rectangle_detection_params)
+ self.ratio_max_deviation_value.valueChanged.connect(self.update_rectangle_detection_params)
+ self.min_area_value.valueChanged.connect(self.update_rectangle_detection_params)
+ Logger.debug("Widgets connected")
+
+ def connect_thermo_thread(self):
+ Logger.debug("Connecting thermo thread")
+ self.thermo_thread.last_frame_signal.connect(lambda x: self.store_last_frame_image(x))
+ self.thermo_thread.module_list_signal.connect(lambda x: self.display_all_modules(x))
+ Logger.debug("Thermo thread connected")
+
+ def store_last_frame_image(self, img: np.ndarray):
+ self.last_frame_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+ def open_about_window(self):
+ about = AboutDialog(parent=self)
+ about.show()
+
+ def load_video_from_file(self):
+ open_directory = ""
+ if self.last_folder_opened is not None:
+ open_directory = self.last_folder_opened
+ video_file_name, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Select a video",
+ filter="Videos (*.mov *.mp4 *.avi)",
+ directory=open_directory)
+ Logger.debug("Selected video path: <{}>".format(video_file_name))
+
+ if video_file_name == "":
+ return
+
+ self.last_folder_opened = os.path.dirname(video_file_name)
+ self.setWindowTitle("Thermography: {}".format(video_file_name))
+
+ start_frame = self.video_from_index.value()
+ end_frame = self.video_to_index.value()
+ if end_frame == -1:
+ end_frame = None
+
+ Logger.debug("Start frame: {}, end frame: {}".format(start_frame, end_frame))
+
+ video_loader_thread = VideoLoaderThread(video_path=video_file_name, from_index=start_frame, to_index=end_frame,
+ parent=self)
+ video_loader_thread.start()
+ video_loader_thread.finish_signal.connect(self.video_loader_finished)
+
+ def video_loader_finished(self, frame_list: list):
+ Logger.debug("Video loader finished")
+ self.frames = frame_list.copy()
+ self.global_progress_bar.setMinimum(0)
+ self.global_progress_bar.setMaximum(len(self.frames) - 1)
+ Logger.debug("Loaded {} frames".format(len(self.frames)))
+
+ self.play_video_button.setEnabled(True)
+ self.module_working_button.setEnabled(True)
+ self.module_broken_button.setEnabled(True)
+ self.misdetection_button.setEnabled(True)
+
+ def save_module_dataset(self):
+ self.save_dialog = SaveImageDialog(working_modules=self.accepted_modules, broken_modules=self.discarded_modules,
+ misdetected_modules=self.misdetected_modules, parent=self)
+ self.save_dialog.exec_()
+
+ def save_and_close(self):
+ self.save_module_dataset()
+ self.close()
+
+ def start_playing_frames(self):
+ self.thermo_thread = ThermoDatasetCreationThread()
+ self.connect_thermo_thread()
+ self.image_scaling_slider.setEnabled(False)
+ self.update_image_scaling()
+
+ self.play_video_button.setEnabled(False)
+ self.stop_video_button.setEnabled(True)
+ self.quick_save_button.setEnabled(True)
+
+ self.current_frame_id = 0
+ self.current_module_id_in_frame = 0
+
+ self.thermo_thread.processing_frame_id = self.current_frame_id
+ self.thermo_thread.processing_frame = self.frames[self.current_frame_id]
+
+ self.thermo_thread.start()
+
+ def current_module_is_working(self):
+ Logger.debug("Current module is working")
+ self.update_module_counter("manual", 0)
+ self.register_module(self.accepted_modules)
+ self.display_next_module()
+
+ def current_module_is_broken(self):
+ Logger.debug("Current module is broken")
+ self.update_module_counter("manual", 1)
+ self.register_module(self.discarded_modules)
+ self.display_next_module()
+
+ def current_module_misdetection(self):
+ Logger.debug("Current module was misdetected")
+ self.update_module_counter("manual", 2)
+ self.register_module(self.misdetected_modules)
+ self.display_next_module()
+
+ def register_module(self, m: dict):
+ current_module = self.current_frame_modules[self.current_module_id_in_frame]
+ image = cv2.cvtColor(current_module["image"], cv2.COLOR_BGR2RGB)
+ coords = current_module["coordinates"]
+ module_id = current_module["id"]
+ if module_id not in m:
+ m[module_id] = []
+ m[module_id].append({"image": image, "coordinates": coords, "frame_id": self.current_frame_id})
+
+ def update_global_progress_bar(self, frame_index: int):
+ self.global_progress_bar.setValue(frame_index)
+
+ def update_image_scaling(self):
+ image_scaling = self.image_scaling_slider.value() * 0.1
+ if self.thermo_thread is not None:
+ self.thermo_thread.app.preprocessing_parameters.image_scaling = image_scaling
+ self.image_scaling_label.setText("Input image scaling: {:0.2f}".format(image_scaling))
+
+ def update_histeresis_params(self):
+ min_value = self.min_histeresis_value.value()
+ max_value = self.max_histeresis_value.value()
+ if max_value <= min_value:
+ max_value = min_value + 1
+ self.max_histeresis_value.setValue(max_value)
+ self.thermo_thread.app.edge_detection_parameters.hysteresis_max_thresh = max_value
+ self.thermo_thread.app.edge_detection_parameters.hysteresis_min_thresh = min_value
+
+ def update_dilation_steps(self):
+ self.thermo_thread.app.edge_detection_parameters.dilation_steps = self.dilation_value.value()
+
+ def update_image_distortion(self):
+ self.thermo_thread.app.should_undistort_image = self.undistort_image_box.isChecked()
+
+ def update_image_angle(self):
+ self.thermo_thread.app.preprocessing_parameters.image_rotation = self.angle_value.value() * np.pi / 180
+ if self.angle_value.value() == 360:
+ self.angle_value.setValue(0)
+
+ def update_blur_value(self):
+ self.thermo_thread.app.preprocessing_parameters.gaussian_blur = self.blur_value.value()
+
+ def update_temperature_value(self):
+ self.thermo_thread.app.preprocessing_parameters.red_threshold = self.temperature_value.value()
+
+ def update_preprocessing_params(self):
+ self.update_image_scaling()
+ self.update_image_angle()
+ self.update_blur_value()
+ self.update_temperature_value()
+
+ def update_edge_params(self):
+ self.thermo_thread.app.segment_detection_parameters.d_rho = self.delta_rho_value.value()
+ self.thermo_thread.app.segment_detection_parameters.d_theta = np.pi / 180 * self.delta_theta_value.value()
+ self.thermo_thread.app.segment_detection_parameters.min_num_votes = self.min_votes_value.value()
+ self.thermo_thread.app.segment_detection_parameters.min_line_length = self.min_length_value.value()
+ self.thermo_thread.app.segment_detection_parameters.max_line_gap = self.max_gap_value.value()
+ self.thermo_thread.app.segment_detection_parameters.extension_pixels = self.extend_segments_value.value()
+
+ def update_clustering_params(self):
+ self.thermo_thread.app.segment_clustering_parameters.num_init = self.num_init_value.value()
+ self.thermo_thread.app.segment_clustering_parameters.swipe_clusters = self.swipe_clusters_value.isChecked()
+ self.thermo_thread.app.segment_clustering_parameters.num_clusters = self.num_clusters_value.value()
+ self.thermo_thread.app.segment_clustering_parameters.use_centers = self.use_centers_value.isChecked()
+ self.thermo_thread.app.segment_clustering_parameters.use_angles = self.use_angle_value.isChecked()
+ if self.knn_value.isChecked():
+ self.thermo_thread.app.segment_clustering_parameters.cluster_type = "knn"
+ self.swipe_clusters_value.setEnabled(False)
+ self.num_init_value.setEnabled(True)
+ elif self.gmm_value.isChecked():
+ self.thermo_thread.app.segment_clustering_parameters.cluster_type = "gmm"
+ self.swipe_clusters_value.setEnabled(True)
+ self.num_init_value.setEnabled(False)
+
+ def update_cluster_cleaning_params(self):
+ self.thermo_thread.app.cluster_cleaning_parameters.max_angle_variation_mean = np.pi / 180 * self.max_angle_variation_mean_value.value()
+ self.thermo_thread.app.cluster_cleaning_parameters.max_merging_angle = np.pi / 180 * self.max_merging_angle_value.value()
+ self.thermo_thread.app.cluster_cleaning_parameters.max_endpoint_distance = np.pi / 180 * self.max_merging_distance_value.value()
+
+ def update_rectangle_detection_params(self):
+ self.thermo_thread.app.rectangle_detection_parameters.aspect_ratio = self.expected_ratio_value.value()
+ self.thermo_thread.app.rectangle_detection_parameters.aspect_ratio_relative_deviation = self.ratio_max_deviation_value.value()
+ self.thermo_thread.app.rectangle_detection_parameters.min_area = self.min_area_value.value()
+
+ def display_all_modules(self, module_list: list):
+ self.current_frame_modules = module_list.copy()
+ self.current_module_id_in_frame = -1
+ if len(self.current_frame_modules) == 0:
+ # Since there are no modules in this frame, display the input image with a label saying no module has been detected.
+ image = QImage(self.last_frame_image.data, self.last_frame_image.shape[1], self.last_frame_image.shape[0],
+ self.last_frame_image.strides[0], QImage.Format_RGB888)
+ image = image.scaled(self.rectangle_image_view.size(), QtCore.Qt.KeepAspectRatio,
+ QtCore.Qt.SmoothTransformation)
+ pixmap = QtGui.QPixmap.fromImage(image)
+ painter = QPainter()
+ painter.begin(pixmap)
+ rect = QtCore.QRect(0, 0, pixmap.width(), pixmap.height())
+ font = QtGui.QFont()
+ font.setPointSize(26)
+ painter.setFont(font)
+ painter.drawText(rect, QtCore.Qt.AlignCenter, "No Module detected")
+ painter.end()
+ self.rectangle_image_view.setPixmap(pixmap)
+ self.frame_finished()
+ else:
+ self.display_next_module()
+
+ def update_module_counter(self, automatic_manual_str, module_class_id):
+ label_text = {0: "accepted", 1: "discarded", 2: "misdetected"}[module_class_id]
+ self.module_counter[automatic_manual_str][label_text] += 1
+ self.working_manual_classified_label.setText(str(self.module_counter["manual"]["accepted"]))
+ self.broken_manual_classified_label.setText(str(self.module_counter["manual"]["discarded"]))
+ self.other_manual_classified_label.setText(str(self.module_counter["manual"]["misdetected"]))
+ self.working_automatic_classified_label.setText(str(self.module_counter["automatic"]["accepted"]))
+ self.broken_automatic_classified_label.setText(str(self.module_counter["automatic"]["discarded"]))
+ self.other_automatic_classified_label.setText(str(self.module_counter["automatic"]["misdetected"]))
+ self.total_manual_classified_label.setText(str(sum(self.module_counter["manual"].values())))
+ self.total_automatic_classified_label.setText(str(sum(self.module_counter["automatic"].values())))
+
+ def display_next_module(self):
+ self.current_module_id_in_frame += 1
+ if len(self.current_frame_modules) == self.current_module_id_in_frame:
+ self.frame_finished()
+ return
+
+ d = self.current_frame_modules[self.current_module_id_in_frame]
+ module_ID = d["id"]
+ coordinates = d["coordinates"]
+ module_image = cv2.cvtColor(d["image"], cv2.COLOR_BGR2RGB)
+ # If module_ID has already been classified, then there is no need to display it as we can directly classify it
+ # using the existing manual label.
+ was_already_classified = False
+ for module_class_id, module_class in enumerate(
+ [self.accepted_modules, self.discarded_modules, self.misdetected_modules]):
+ if not was_already_classified and module_ID in module_class:
+ module_class[module_ID].append(
+ {"image": module_image, "coordinates": coordinates, "frame_id": self.current_frame_id})
+ was_already_classified = True
+ # Update counting labels:
+ self.update_module_counter("automatic", module_class_id)
+
+ mask = np.zeros_like(self.last_frame_image)
+ tmp_image = self.last_frame_image.copy()
+ module_color = (0, 0, 255)
+ if was_already_classified:
+ module_color = (255, 0, 0)
+ cv2.polylines(tmp_image, np.int32([coordinates]), True, module_color, 2, cv2.LINE_AA)
+ cv2.fillConvexPoly(mask, np.int32([coordinates]), module_color, cv2.LINE_4)
+ cv2.addWeighted(tmp_image, 1.0, mask, 0.0, 0, tmp_image)
+ image = QImage(tmp_image.data, tmp_image.shape[1], tmp_image.shape[0], tmp_image.strides[0],
+ QImage.Format_RGB888)
+ image = image.scaled(self.rectangle_image_view.size(), QtCore.Qt.KeepAspectRatio,
+ QtCore.Qt.SmoothTransformation)
+ pixmap = QtGui.QPixmap.fromImage(image)
+ self.rectangle_image_view.setPixmap(pixmap)
+ self.resize_video_view(module_image.shape, self.current_module_view)
+ image = QImage(module_image.data, module_image.shape[1], module_image.shape[0], module_image.strides[0],
+ QImage.Format_RGB888)
+ pixmap = QtGui.QPixmap.fromImage(image)
+ self.current_module_view.setPixmap(pixmap)
+ self.current_module_view.repaint()
+
+ if was_already_classified:
+ self.display_next_module()
+
+ @staticmethod
+ def resize_video_view(size, view):
+ view.setFixedSize(size[1], size[0])
+
+ def frame_finished(self):
+ self.current_frame_id += 1
+ self.current_module_id_in_frame = 0
+
+ self.global_progress_bar.setValue(self.current_frame_id)
+
+ if self.current_frame_id == len(self.frames):
+ _ = QtWidgets.QMessageBox.information(self, "Finished", "Analyzed all frames", QtWidgets.QMessageBox.Ok)
+ self.save_module_dataset()
+ return
+
+ self.thermo_thread.processing_frame = self.frames[self.current_frame_id]
+ self.thermo_thread.processing_frame_id = self.current_frame_id
+
+ self.thermo_thread.terminate()
+ self.thermo_thread.start()
diff --git a/gui/dialogs/image_saving_dialog.py b/gui/dialogs/image_saving_dialog.py
new file mode 100644
index 0000000..ecf925e
--- /dev/null
+++ b/gui/dialogs/image_saving_dialog.py
@@ -0,0 +1,108 @@
+import os
+
+import cv2
+from PyQt5 import QtWidgets, QtGui
+from simple_logger import Logger
+
+import thermography as tg
+from gui.design import Ui_Save_images_dialog
+
+
+class SaveImageDialog(QtWidgets.QDialog, Ui_Save_images_dialog):
+ def __init__(self, working_modules: dict, broken_modules: dict, misdetected_modules: dict, parent=None):
+ super(self.__class__, self).__init__(parent=parent)
+
+ Logger.debug("Opened 'Save Images' dialog")
+
+ self.setupUi(self)
+ self.set_logo_icon()
+
+ self.working_modules = working_modules
+ self.broken_modules = broken_modules
+ self.misdetected_modules = misdetected_modules
+
+ self.output_directory = " "
+
+ self.choose_directory_button.clicked.connect(self.open_directory_dialog)
+ self.save_button.clicked.connect(self.save_module_dataset)
+ self.progress_bar_all_frames.setMinimum(0)
+ self.progress_bar_all_frames.setMaximum(
+ len(self.working_modules.keys()) + len(self.broken_modules.keys()) + len(
+ self.misdetected_modules.keys()) - 1)
+
+ def set_logo_icon(self):
+ gui_path = os.path.join(os.path.join(tg.settings.get_thermography_root_dir(), os.pardir), "gui")
+ logo_path = os.path.join(gui_path, "img/logo.png")
+ icon = QtGui.QIcon()
+ icon.addPixmap(QtGui.QPixmap(logo_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
+ self.setWindowIcon(icon)
+
+ def open_directory_dialog(self):
+ output_directory = QtWidgets.QFileDialog.getExistingDirectory(caption="Select dataset output directory")
+ Logger.debug("Selected <{}> directory to store all images".format(output_directory))
+ if output_directory == "":
+ return
+
+ self.output_directory = output_directory
+
+ if len(os.listdir(self.output_directory)) > 0:
+ Logger.warning("Directory {} is not empty!".format(self.output_directory))
+ QtWidgets.QMessageBox.warning(self, "Non empty directory",
+ "Directory {} not empty! Select an empty directory!".format(
+ self.output_directory), QtWidgets.QMessageBox.Ok,
+ QtWidgets.QMessageBox.Ok)
+ self.open_directory_dialog()
+ else:
+ self.save_directory_label.setText('Saving to directory: "{}"'.format(self.output_directory))
+ self.save_button.setEnabled(True)
+
+ def save_module_dataset(self):
+ self.progress_bar_all_frames.setEnabled(True)
+ self.progress_bar_intra_frame.setEnabled(True)
+ button_reply = QtWidgets.QMessageBox.question(self, 'Save dataset',
+ "Want to save dataset to {}?".format(self.output_directory),
+ QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No,
+ QtWidgets.QMessageBox.No)
+ if button_reply == QtWidgets.QMessageBox.No:
+ Logger.warning("Rejected directory <{}> for saving all images".format(self.output_directory))
+ self.output_directory = None
+ self.save_module_dataset()
+ else:
+ Logger.info("Saving all images to <{}>".format(self.output_directory))
+ Logger.warning("If dialog freezes, check log file, but DON'T close the window!")
+ working_modules_output_dir = os.path.join(self.output_directory, "working")
+ broken_modules_output_dir = os.path.join(self.output_directory, "broken")
+ misdetected_modules_output_dir = os.path.join(self.output_directory, "misdetected")
+
+ overall_iter = 0
+
+ def save_modules_into_directory(module_dict: dict, directory: str):
+ nonlocal overall_iter
+
+ os.mkdir(os.path.abspath(directory))
+ for module_number, (module_id, registered_modules) in enumerate(module_dict.items()):
+ Logger.debug("Saving all views of module ID {}: view {}/{}".format(module_id, module_number,
+ len(module_dict.keys()) - 1))
+ self.progress_bar_all_frames.setValue(self.progress_bar_all_frames.value() + 1)
+ self.progress_bar_intra_frame.setValue(0)
+ self.progress_bar_intra_frame.setMaximum(len(registered_modules))
+ for m_index, m in enumerate(registered_modules):
+ name = "id_{0:05d}_frame_{1:05d}.jpg".format(module_id, m["frame_id"])
+ path = os.path.join(directory, name)
+ img = cv2.cvtColor(m["image"], cv2.COLOR_RGB2BGR)
+ cv2.imwrite(path, img)
+ self.progress_bar_intra_frame.setValue(m_index + 1)
+
+ Logger.info("Saving working modules to <{}>".format(working_modules_output_dir))
+ save_modules_into_directory(self.working_modules, working_modules_output_dir)
+ Logger.info("Saved all working modules to <{}>".format(working_modules_output_dir))
+ Logger.info("Saving broken modules to <{}>".format(broken_modules_output_dir))
+ save_modules_into_directory(self.broken_modules, broken_modules_output_dir)
+ Logger.info("Saved all broken modules to <{}>".format(broken_modules_output_dir))
+ Logger.info("Saving misdetected modules to <{}>".format(misdetected_modules_output_dir))
+ save_modules_into_directory(self.misdetected_modules, misdetected_modules_output_dir)
+ Logger.info("Saved all misdetected modules to <{}>".format(misdetected_modules_output_dir))
+
+ _ = QtWidgets.QMessageBox.information(self, "Saved!", "Saved all modules to {}".format(self.output_directory),
+ QtWidgets.QMessageBox.Ok)
+ self.close()
diff --git a/gui/dialogs/thermo_gui.py b/gui/dialogs/thermo_gui_dialog.py
similarity index 76%
rename from gui/dialogs/thermo_gui.py
rename to gui/dialogs/thermo_gui_dialog.py
index e0e2d6d..6490974 100644
--- a/gui/dialogs/thermo_gui.py
+++ b/gui/dialogs/thermo_gui_dialog.py
@@ -2,98 +2,14 @@
import cv2
import numpy as np
-
from PyQt5 import QtGui, QtCore, QtWidgets
-from PyQt5.QtCore import QThread
from PyQt5.QtGui import QImage
+from simple_logger import Logger
import thermography as tg
from gui.design import Ui_ThermoGUI_main_window
-from gui.dialogs.about_dialog import AboutDialog
-from gui.dialogs.webcam_dialog import WebCamWindow
-
-
-class ThermoGuiThread(QThread):
- iteration_signal = QtCore.pyqtSignal(int)
- finish_signal = QtCore.pyqtSignal(bool)
- last_frame_signal = QtCore.pyqtSignal(np.ndarray)
- edge_frame_signal = QtCore.pyqtSignal(np.ndarray)
- segment_frame_signal = QtCore.pyqtSignal(np.ndarray)
- rectangle_frame_signal = QtCore.pyqtSignal(np.ndarray)
- module_map_frame_signal = QtCore.pyqtSignal(np.ndarray)
-
- def __init__(self):
- """
- Initializes the Thermo Thread.
- """
- super(ThermoGuiThread, self).__init__()
-
- self.camera_param_file_name = None
- self.input_file_name = None
-
- self.pause_time = 50
- self.is_paused = False
-
- self.webcam_port = None
- self.cap = None
- self.should_use_webcam = False
-
- self.load_default_paths()
-
- self.app = tg.App(input_video_path=self.input_file_name, camera_param_file=self.camera_param_file_name)
-
- def use_webcam(self, webcam_port: int):
- self.webcam_port = webcam_port
- self.cap = cv2.VideoCapture(self.webcam_port)
- self.should_use_webcam = True
-
- def load_default_paths(self):
- # Load camera parameters.
- settings_dir = tg.settings.get_settings_dir()
-
- self.camera_param_file_name = os.path.join(settings_dir, "camera_parameters.json")
- tg.settings.set_data_dir("Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/")
- self.input_file_name = os.path.join(tg.settings.get_data_dir(), "Ispez Termografica Ghidoni 1.mov")
-
- def load_video(self, start_frame: int, end_frame: int):
- self.app = tg.App(input_video_path=self.input_file_name, camera_param_file=self.camera_param_file_name)
- self.app.load_video(start_frame=start_frame, end_frame=end_frame)
-
- def run(self):
- if self.should_use_webcam:
- frame_id = 0
- while True:
- while self.is_paused:
- self.msleep(self.pause_time)
-
- ret, frame = self.cap.read()
- if ret:
- self.app.step(frame_id, frame)
-
- self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
- self.edge_frame_signal.emit(self.app.last_edges_frame)
- self.segment_frame_signal.emit(self.app.create_segment_image())
- self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
- self.module_map_frame_signal.emit(self.app.create_module_map_image())
- frame_id += 1
-
- self.app.reset()
- else:
- for frame_id, frame in enumerate(self.app.frames):
- while self.is_paused:
- self.msleep(self.pause_time)
-
- self.app.step(frame_id, frame)
- self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
- self.edge_frame_signal.emit(self.app.last_edges_frame)
- self.segment_frame_signal.emit(self.app.create_segment_image())
- self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
- self.module_map_frame_signal.emit(self.app.create_module_map_image())
- self.iteration_signal.emit(frame_id)
-
- self.app.reset()
-
- self.finish_signal.emit(True)
+from gui.dialogs import AboutDialog, WebcamDialog
+from gui.threads import ThermoGuiThread
class ThermoGUI(QtWidgets.QMainWindow, Ui_ThermoGUI_main_window):
@@ -103,9 +19,9 @@ class ThermoGUI(QtWidgets.QMainWindow, Ui_ThermoGUI_main_window):
def __init__(self):
super(self.__class__, self).__init__()
+ Logger.info("Creating thermoGUI")
self.setupUi(self)
self.set_logo_icon()
-
self.thermo_thread = ThermoGuiThread()
self.is_stoppable = True
@@ -120,12 +36,13 @@ def __init__(self):
def set_logo_icon(self):
gui_path = os.path.join(os.path.join(tg.settings.get_thermography_root_dir(), os.pardir), "gui")
logo_path = os.path.join(gui_path, "img/logo.png")
+ Logger.debug("Setting logo {}".format(logo_path))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(logo_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
def connect_widgets(self):
-
+ Logger.debug("Connecting all widgets")
# File buttons
self.file_about.triggered.connect(self.open_about_window)
self.file_exit.triggered.connect(self.deleteLater)
@@ -142,10 +59,15 @@ def connect_widgets(self):
self.image_scaling_slider.valueChanged.connect(self.update_image_scaling)
- # Preprocessing and Edge extraction.
+ # Preprocessing
self.undistort_image_box.stateChanged.connect(self.update_image_distortion)
- self.angle_value.valueChanged.connect(self.update_image_angle)
- self.blur_value.valueChanged.connect(self.update_blur_value)
+
+ self.image_scaling_slider.valueChanged.connect(self.update_preprocessing_params)
+ self.angle_value.valueChanged.connect(self.update_preprocessing_params)
+ self.blur_value.valueChanged.connect(self.update_preprocessing_params)
+ self.temperature_value.valueChanged.connect(self.update_preprocessing_params)
+
+ # Edge extraction.
self.max_histeresis_value.valueChanged.connect(self.update_histeresis_params)
self.min_histeresis_value.valueChanged.connect(self.update_histeresis_params)
self.dilation_value.valueChanged.connect(self.update_dilation_steps)
@@ -176,15 +98,20 @@ def connect_widgets(self):
self.expected_ratio_value.valueChanged.connect(self.update_rectangle_detection_params)
self.ratio_max_deviation_value.valueChanged.connect(self.update_rectangle_detection_params)
self.min_area_value.valueChanged.connect(self.update_rectangle_detection_params)
+ Logger.debug("Widgets connected")
def connect_thermo_thread(self):
+ Logger.debug("Connecting thermo thread")
self.thermo_thread.last_frame_signal.connect(lambda x: self.display_image(x))
+ self.thermo_thread.attention_frame_signal.connect(lambda x: self.display_attention(x))
self.thermo_thread.edge_frame_signal.connect(lambda x: self.display_canny_edges(x))
self.thermo_thread.segment_frame_signal.connect(lambda x: self.display_segment_image(x))
self.thermo_thread.rectangle_frame_signal.connect(lambda x: self.display_rectangle_image(x))
+ self.thermo_thread.classes_frame_signal.connect(lambda x: self.display_classes_image(x))
self.thermo_thread.module_map_frame_signal.connect(lambda x: self.display_module_map_image(x))
self.thermo_thread.finish_signal.connect(self.video_finished)
+ Logger.debug("Thermo thread connected")
def open_about_window(self):
about = AboutDialog(parent=self)
@@ -197,6 +124,7 @@ def load_video_from_file(self):
video_file_name, _ = QtWidgets.QFileDialog.getOpenFileName(caption="Select a video",
filter="Videos (*.mov *.mp4 *.avi)",
directory=open_directory)
+ Logger.debug("Selected video path: <{}>".format(video_file_name))
if video_file_name == "":
return
self.last_folder_opened = os.path.dirname(video_file_name)
@@ -209,6 +137,8 @@ def load_video_from_file(self):
end_frame = self.video_to_index.value()
if end_frame == -1:
end_frame = None
+
+ Logger.debug("Start frame: {}, end frame: {}".format(start_frame, end_frame))
self.thermo_thread.load_video(start_frame=start_frame, end_frame=end_frame)
self.global_progress_bar.setMinimum(0)
@@ -217,11 +147,13 @@ def load_video_from_file(self):
self.thermo_thread.iteration_signal.connect(self.update_global_progress_bar)
def play_all_frames(self):
+ Logger.debug("Playing all frames")
self.thermo_thread.is_paused = False
self.image_scaling_slider.setEnabled(False)
self.update_image_scaling()
- self.image_scaling_label.setText("Input image scaling: {:0.2f}".format(self.thermo_thread.app.image_scaling))
+ self.image_scaling_label.setText(
+ "Input image scaling: {:0.2f}".format(self.thermo_thread.app.preprocessing_parameters.image_scaling))
self.play_video_button.setEnabled(False)
self.pause_video_button.setEnabled(True)
if self.is_stoppable:
@@ -229,10 +161,12 @@ def play_all_frames(self):
self.thermo_thread.start()
def stop_all_frames(self):
+ Logger.debug("Stopped frames execution")
self.thermo_thread.terminate()
self.video_finished(True)
def pause_all_frames(self):
+ Logger.debug("Pausing all frames")
self.thermo_thread.is_paused = True
self.play_video_button.setEnabled(True)
if self.is_stoppable:
@@ -245,9 +179,26 @@ def update_global_progress_bar(self, frame_index: int):
def update_image_scaling(self):
image_scaling = self.image_scaling_slider.value() * 0.1
if self.thermo_thread is not None:
- self.thermo_thread.app.image_scaling = image_scaling
+ self.thermo_thread.app.preprocessing_parameters.image_scaling = image_scaling
self.image_scaling_label.setText("Input image scaling: {:0.2f}".format(image_scaling))
+ def update_image_angle(self):
+ self.thermo_thread.app.preprocessing_parameters.image_rotation = self.angle_value.value() * np.pi / 180
+ if self.angle_value.value() == 360:
+ self.angle_value.setValue(0)
+
+ def update_blur_value(self):
+ self.thermo_thread.app.preprocessing_parameters.gaussian_blur = self.blur_value.value()
+
+ def update_temperature_value(self):
+ self.thermo_thread.app.preprocessing_parameters.red_threshold = self.temperature_value.value()
+
+ def update_preprocessing_params(self):
+ self.update_image_scaling()
+ self.update_image_angle()
+ self.update_blur_value()
+ self.update_temperature_value()
+
def update_histeresis_params(self):
min_value = self.min_histeresis_value.value()
max_value = self.max_histeresis_value.value()
@@ -263,14 +214,6 @@ def update_dilation_steps(self):
def update_image_distortion(self):
self.thermo_thread.app.should_undistort_image = self.undistort_image_box.isChecked()
- def update_image_angle(self):
- self.thermo_thread.app.image_rotating_angle = self.angle_value.value() * np.pi / 180
- if self.angle_value.value() == 360:
- self.angle_value.setValue(0)
-
- def update_blur_value(self):
- self.thermo_thread.app.gaussian_blur = self.blur_value.value()
-
def update_edge_params(self):
self.thermo_thread.app.segment_detection_parameters.d_rho = self.delta_rho_value.value()
self.thermo_thread.app.segment_detection_parameters.d_theta = np.pi / 180 * self.delta_theta_value.value()
@@ -305,36 +248,54 @@ def update_rectangle_detection_params(self):
self.thermo_thread.app.rectangle_detection_parameters.min_area = self.min_area_value.value()
def display_image(self, frame: np.ndarray):
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
image = image.scaled(self.video_view.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
pixmap = QtGui.QPixmap.fromImage(image)
self.video_view.setPixmap(pixmap)
+ def display_attention(self, frame: np.ndarray):
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+ image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
+ image = image.scaled(self.video_view.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
+ pixmap = QtGui.QPixmap.fromImage(image)
+ self.attention_view.setPixmap(pixmap)
+
def display_canny_edges(self, frame: np.ndarray):
- frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
+ frame = cv2.cvtColor(frame, cv2.COLOR_GRAY2RGB)
image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
image = image.scaled(self.video_view.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
pixmap = QtGui.QPixmap.fromImage(image)
self.canny_edges_view.setPixmap(pixmap)
def display_segment_image(self, frame: np.ndarray):
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
image = image.scaled(self.video_view.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
pixmap = QtGui.QPixmap.fromImage(image)
self.segment_image_view.setPixmap(pixmap)
def display_rectangle_image(self, frame: np.ndarray):
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
image = image.scaled(self.video_view.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
pixmap = QtGui.QPixmap.fromImage(image)
self.rectangle_image_view.setPixmap(pixmap)
def display_module_map_image(self, frame: np.ndarray):
- self.resize_video_view(frame.shape, self.module_image_view)
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
+ image = image.scaled(self.video_view.size(), QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
pixmap = QtGui.QPixmap.fromImage(image)
self.module_image_view.setPixmap(pixmap)
+ def display_classes_image(self, frame: np.ndarray):
+ frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+ self.resize_video_view(frame.shape, self.class_image_view)
+ image = QImage(frame.data, frame.shape[1], frame.shape[0], frame.strides[0], QImage.Format_RGB888)
+ pixmap = QtGui.QPixmap.fromImage(image)
+ self.class_image_view.setPixmap(pixmap)
+
@staticmethod
def resize_video_view(size, view):
view.setFixedSize(size[1], size[0])
@@ -346,6 +307,7 @@ def video_finished(self, finished: bool):
self.image_scaling_slider.setEnabled(finished)
def set_webcam_port(self, port):
+ Logger.debug("Setting webcam port {}".format(port))
self.webcam_port = port
self.thermo_thread.use_webcam(self.webcam_port)
self.is_stoppable = False
@@ -353,7 +315,7 @@ def set_webcam_port(self, port):
self.play_all_frames()
def load_webcam(self):
- self.capture = WebCamWindow(parent=self)
+ self.capture = WebcamDialog(parent=self)
self.capture.webcam_port_signal.connect(lambda port: self.set_webcam_port(port))
self.capture.show()
self.capture.start()
@@ -361,6 +323,7 @@ def load_webcam(self):
self.undistort_image_box.setChecked(False)
def reset_app(self):
+ Logger.debug("Resetting app")
self.thermo_thread.terminate()
self.thermo_thread = ThermoGuiThread()
self.image_scaling_slider.setValue(10)
diff --git a/gui/dialogs/webcam_dialog.py b/gui/dialogs/webcam_dialog.py
index a463288..e9eb688 100644
--- a/gui/dialogs/webcam_dialog.py
+++ b/gui/dialogs/webcam_dialog.py
@@ -1,16 +1,19 @@
-import cv2
import os
+
+import cv2
from PyQt5 import QtGui, QtCore, QtWidgets
+from simple_logger import Logger
import thermography as tg
from gui.design import Ui_WebCam
-class WebCamWindow(QtWidgets.QMainWindow, Ui_WebCam):
+class WebcamDialog(QtWidgets.QMainWindow, Ui_WebCam):
webcam_port_signal = QtCore.pyqtSignal(int)
def __init__(self, parent=None):
super(self.__class__, self).__init__(parent=parent)
+ Logger.info("Opened Webcam dialog")
self.setupUi(self)
self.set_logo_icon()
@@ -21,21 +24,22 @@ def __init__(self, parent=None):
self.previous_button.clicked.connect(self.decrease_webcam_value)
self.ok_button.clicked.connect(self.current_webcam_value_found)
- self.set_logo_icon()
-
def set_logo_icon(self):
gui_path = os.path.join(os.path.join(tg.settings.get_thermography_root_dir(), os.pardir), "gui")
logo_path = os.path.join(gui_path, "img/logo-webcam.png")
+ Logger.debug("Setting logo <{}>".format(logo_path))
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(logo_path), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.setWindowIcon(icon)
def increase_webcam_value(self):
+ Logger.debug("Increasing webcam port value to {}".format(self.webcam_value + 1))
self.webcam_value += 1
self.previous_button.setEnabled(True)
self.set_webcam()
def decrease_webcam_value(self):
+ Logger.debug("Decreasing webcam port value to {}".format(self.webcam_value - 1))
self.webcam_value -= 1
if self.webcam_value == 0:
self.previous_button.setEnabled(False)
@@ -76,4 +80,4 @@ def stop(self):
def deleteLater(self):
self.cap.release()
- super(QtGui.QWidget, self).deleteLater()
+ super(WebcamDialog, self).deleteLater()
diff --git a/gui/threads/__init__.py b/gui/threads/__init__.py
new file mode 100644
index 0000000..6a92a97
--- /dev/null
+++ b/gui/threads/__init__.py
@@ -0,0 +1,2 @@
+from .thermo_thread import ThermoGuiThread
+from .thermo_thread_dataset_creation import ThermoDatasetCreationThread
\ No newline at end of file
diff --git a/gui/threads/thermo_thread.py b/gui/threads/thermo_thread.py
new file mode 100644
index 0000000..622497b
--- /dev/null
+++ b/gui/threads/thermo_thread.py
@@ -0,0 +1,108 @@
+import os
+
+import cv2
+import numpy as np
+from PyQt5 import QtCore
+from PyQt5.QtCore import QThread
+from simple_logger import Logger
+
+import thermography as tg
+
+
+class ThermoGuiThread(QThread):
+ iteration_signal = QtCore.pyqtSignal(int)
+ finish_signal = QtCore.pyqtSignal(bool)
+ last_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ attention_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ edge_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ segment_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ rectangle_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ module_map_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ classes_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ module_list_signal = QtCore.pyqtSignal(list)
+
+ def __init__(self):
+ """
+ Initializes the Thermo Thread.
+ """
+ super(ThermoGuiThread, self).__init__()
+ Logger.info("Created thermoGUI thread")
+
+ self.camera_param_file_name = None
+ self.input_file_name = None
+
+ self.pause_time = 50
+ self.is_paused = False
+
+ self.webcam_port = None
+ self.cap = None
+ self.should_use_webcam = False
+
+ self.load_default_paths()
+
+ self.app = tg.App(input_video_path=self.input_file_name, camera_param_file=self.camera_param_file_name)
+
+ def use_webcam(self, webcam_port: int):
+ Logger.debug("Thermo thread uses webcam port {}".format(webcam_port))
+ self.webcam_port = webcam_port
+ self.cap = cv2.VideoCapture(self.webcam_port)
+ self.should_use_webcam = True
+
+ def load_default_paths(self):
+ # Load camera parameters.
+ settings_dir = tg.settings.get_settings_dir()
+
+ self.camera_param_file_name = os.path.join(settings_dir, "camera_parameters.json")
+ tg.settings.set_data_dir("Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/")
+ self.input_file_name = os.path.join(tg.settings.get_data_dir(), "Ispez Termografica Ghidoni 1.mov")
+ Logger.debug("Using default camera param file: {}\n"
+ "Default input file name: {}".format(self.camera_param_file_name, self.input_file_name))
+
+ def load_video(self, start_frame: int, end_frame: int):
+ self.app = tg.App(input_video_path=self.input_file_name, camera_param_file=self.camera_param_file_name)
+ self.app.load_video(start_frame=start_frame, end_frame=end_frame)
+
+ def run(self):
+ if self.should_use_webcam:
+ frame_id = 0
+ while True:
+ while self.is_paused:
+ self.msleep(self.pause_time)
+
+ ret, frame = self.cap.read()
+ if ret:
+ Logger.debug("Using webcam frame {}".format(frame_id))
+ self.app.step(frame_id, frame)
+
+ self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
+ self.edge_frame_signal.emit(self.app.last_edges_frame)
+ self.segment_frame_signal.emit(self.app.create_segment_image())
+ self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
+ self.module_map_frame_signal.emit(self.app.create_module_map_image())
+ frame_id += 1
+
+ self.app.reset()
+ else:
+ for frame_id, frame in enumerate(self.app.frames):
+ while self.is_paused:
+ self.msleep(self.pause_time)
+
+ Logger.debug("Using video frame {}".format(frame_id))
+ # Perform one step in the input video (i.e. analyze one frame)
+ self.app.step(frame_id, frame)
+ # Perform inference (classification on the detected modules)
+ self.app.classify_detected_modules()
+
+ self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
+ self.attention_frame_signal.emit(self.app.last_attention_image)
+ self.edge_frame_signal.emit(self.app.last_edges_frame)
+ self.segment_frame_signal.emit(self.app.create_segment_image())
+ self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
+ self.module_map_frame_signal.emit(self.app.create_module_map_image())
+ self.classes_frame_signal.emit(self.app.create_classes_image())
+ self.iteration_signal.emit(frame_id)
+ self.module_list_signal.emit(self.app.create_module_list())
+
+ self.app.reset()
+
+ self.finish_signal.emit(True)
diff --git a/gui/threads/thermo_thread_dataset_creation.py b/gui/threads/thermo_thread_dataset_creation.py
new file mode 100644
index 0000000..d4b60cc
--- /dev/null
+++ b/gui/threads/thermo_thread_dataset_creation.py
@@ -0,0 +1,62 @@
+import os
+
+import numpy as np
+from PyQt5 import QtCore
+from PyQt5.QtCore import QThread
+from simple_logger import Logger
+
+import thermography as tg
+
+
+class ThermoDatasetCreationThread(QThread):
+ iteration_signal = QtCore.pyqtSignal(int)
+ last_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ edge_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ segment_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ rectangle_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ module_map_frame_signal = QtCore.pyqtSignal(np.ndarray)
+ module_list_signal = QtCore.pyqtSignal(list)
+
+ def __init__(self):
+ """
+ Initializes the Thermo Thread for dataset creation.
+ """
+ super(self.__class__, self).__init__()
+ Logger.info("Created dataset creation ThermoThread")
+ self.camera_param_file_name = None
+
+ self.load_default_paths()
+
+ self.app = tg.App(input_video_path=None, camera_param_file=self.camera_param_file_name)
+
+ self.processing_frame = None
+ self.processing_frame_id = None
+
+ def load_default_paths(self):
+ # Load camera parameters.
+ settings_dir = tg.settings.get_settings_dir()
+
+ self.camera_param_file_name = os.path.join(settings_dir, "camera_parameters.json")
+ Logger.debug("Using default camera param file: {}".format(self.camera_param_file_name))
+ tg.settings.set_data_dir("Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/")
+
+ def run(self):
+ if self.processing_frame_id is None:
+ Logger.error("Processing frame id is None!")
+ return
+ if self.processing_frame is None:
+ Logger.error("Processing frame is None")
+ return
+
+ Logger.debug("Processing frame id {}".format(self.processing_frame_id))
+ self.app.step(self.processing_frame_id, self.processing_frame)
+
+ self.last_frame_signal.emit(self.app.last_scaled_frame_rgb)
+ self.edge_frame_signal.emit(self.app.last_edges_frame)
+ self.segment_frame_signal.emit(self.app.create_segment_image())
+ self.rectangle_frame_signal.emit(self.app.create_rectangle_image())
+ self.module_map_frame_signal.emit(self.app.create_module_map_image())
+ self.iteration_signal.emit(self.processing_frame_id)
+ self.module_list_signal.emit(self.app.create_module_list())
+
+ self.app.reset()
diff --git a/logs/.gitkeep b/logs/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/main_app.py b/main_app.py
index ac32ded..cd4bf81 100644
--- a/main_app.py
+++ b/main_app.py
@@ -1,4 +1,5 @@
import thermography as tg
+from thermography.io import setup_logger, LogLevel
import os
@@ -11,9 +12,10 @@ def _main():
app = tg.App(input_video_path=IN_FILE_NAME, camera_param_file=camera_param_file)
- app.load_video(start_frame=1500, end_frame=1800)
+ app.load_video(start_frame=1700, end_frame=1900)
app.run()
if __name__ == '__main__':
+ setup_logger(console_log_level=LogLevel.INFO, file_log_level=LogLevel.DEBUG)
_main()
diff --git a/main_create_dataset.py b/main_create_dataset.py
new file mode 100644
index 0000000..c5292df
--- /dev/null
+++ b/main_create_dataset.py
@@ -0,0 +1,13 @@
+from PyQt5 import QtWidgets
+import sys
+
+from thermography.io import setup_logger, LogLevel
+from gui import CreateDatasetGUI
+
+if __name__ == '__main__':
+ setup_logger(console_log_level=LogLevel.INFO, file_log_level=LogLevel.DEBUG)
+
+ app = QtWidgets.QApplication(sys.argv)
+ form = CreateDatasetGUI()
+ form.show()
+ app.exec_()
diff --git a/gui.py b/main_thermogui.py
similarity index 58%
rename from gui.py
rename to main_thermogui.py
index 942ce4e..32a137d 100644
--- a/gui.py
+++ b/main_thermogui.py
@@ -1,8 +1,12 @@
from PyQt5 import QtWidgets
import sys
+
+from thermography.io import setup_logger, LogLevel
from gui import ThermoGUI
if __name__ == '__main__':
+ setup_logger(console_log_level=LogLevel.INFO, file_log_level=LogLevel.DEBUG)
+
app = QtWidgets.QApplication(sys.argv)
form = ThermoGUI()
form.show()
diff --git a/main_training.py b/main_training.py
new file mode 100644
index 0000000..a684815
--- /dev/null
+++ b/main_training.py
@@ -0,0 +1,277 @@
+import os
+import timeit
+from datetime import datetime
+
+import numpy as np
+import tensorflow as tf
+
+from thermography.classification.dataset import ThermoDataset, ThermoClass, create_directory_list
+from thermography.classification.models import ThermoNet3x3, ThermoNet
+
+
+def main():
+ ########################### Input and output paths ###########################
+
+ dataset_path = "Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/padded_dataset"
+ dataset_directories = create_directory_list(dataset_path)
+
+ print("Input dataset directories:")
+ for path_index, path in enumerate(dataset_directories):
+ print(" ({}) {}".format(path_index, path))
+ print()
+
+ output_data_path = "Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/output"
+ print("Output data path:\n {}".format(output_data_path))
+ print()
+
+ # Path for tf.summary.FileWriter and to store model checkpoints
+ summary_path = os.path.join(output_data_path, "tensorboard")
+ checkpoint_path = os.path.join(output_data_path, "checkpoints")
+ print("Checkpoint directory: {}\nSummary directory: {}".format(checkpoint_path, summary_path))
+ print()
+
+ ############################ Thermography classes ############################
+
+ working_class = ThermoClass("working", 0)
+ broken_class = ThermoClass("broken", 1)
+ misdetected_class = ThermoClass("misdetected", 2)
+ thermo_class_list = [working_class, broken_class, misdetected_class]
+
+ ############################# Runtime parameters #############################
+
+ # Dataset params
+ load_all_data = True
+ normalize_images = True
+
+ # Learning params
+ num_epochs = 100000
+ batch_size = 128
+ global_step = tf.Variable(0, name="global_step")
+ learning_rate = 0.00025
+
+ # Network params
+ image_shape = np.array([96, 120, 1])
+ keep_probability = 0.5
+
+ # Summary params
+ write_train_summaries_every_n_steps = 501
+ write_histograms_every_n_steps = 1001
+ write_kernel_images_every_n_steps = 1001
+ write_test_summaries_every_n_epochs = 20
+ save_model_every_n_epochs = 20
+
+ ############################# Loading the dataset ############################
+
+ # Place data loading and preprocessing on the cpu.
+ with tf.device('/cpu:0'):
+ with tf.name_scope("dataset"):
+ with tf.name_scope("loading"):
+ dataset = ThermoDataset(batch_size=batch_size, balance_data=True, img_shape=image_shape,
+ normalize_images=normalize_images)
+ dataset.set_train_test_validation_fraction(train_fraction=0.8, test_fraction=0.2,
+ validation_fraction=0.0)
+
+ dataset.load_dataset(root_directory_list=dataset_directories, class_list=thermo_class_list,
+ load_all_data=load_all_data)
+ dataset.print_info()
+
+ with tf.name_scope("iterator"):
+ train_iterator = dataset.get_train_iterator()
+ next_train_batch = train_iterator.get_next()
+ test_iterator = dataset.get_test_iterator()
+ next_test_batch = test_iterator.get_next()
+
+ ############################### Net construction #############################
+
+ with tf.name_scope("placeholders"):
+ # TF placeholder for graph input and output
+ input_images = tf.placeholder(tf.float32, [None, *image_shape], name="input_image")
+ input_one_hot_labels = tf.placeholder(tf.int32, [None, dataset.num_classes], name="input_one_hot_labels")
+ input_labels = tf.argmax(input_one_hot_labels, axis=1, name="input_labels")
+ keep_prob = tf.placeholder(tf.float32, name="keep_probability")
+
+ # Initialize model
+ model = ThermoNet3x3(x=input_images, image_shape=image_shape, num_classes=dataset.num_classes, keep_prob=keep_prob)
+
+ # Operation for calculating the loss
+ with tf.name_scope("cross_ent"):
+ # Link variable to model output
+ logits = model.logits
+ loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=input_one_hot_labels))
+
+ # Add the loss to summary
+ tf.summary.scalar('train/cross_entropy', loss, collections=["train"])
+ tf.summary.scalar('test/cross_entropy', loss, collections=["test"])
+
+ # Train operation
+ with tf.name_scope("train"):
+ optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name="adam")
+ train_op = optimizer.minimize(loss, global_step=global_step)
+
+ # Predict operation
+ with tf.name_scope("prediction"):
+ class_prediction_op = tf.argmax(logits, axis=1, name="class_predictions")
+ correct_pred = tf.equal(class_prediction_op, input_labels, name="correct_predictions")
+ accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")
+
+ # Add the accuracy to the summary
+ tf.summary.scalar('train/accuracy', accuracy, collections=["train"])
+ tf.summary.scalar('test/accuracy', accuracy, collections=["test"])
+
+ # Merge all summaries together
+ train_summaries = tf.summary.merge_all(key="train")
+ test_summaries = tf.summary.merge_all(key="test")
+ histogram_summaries = tf.summary.merge_all(key="histogram")
+ kernel_summaries = tf.summary.merge_all(key="kernels")
+
+ # Initialize the FileWriter
+ writer = tf.summary.FileWriter(summary_path)
+
+ # Initialize a saver to store model checkpoints
+ saver = tf.train.Saver()
+
+ # Start Tensorflow session
+ with tf.Session() as sess:
+
+ # Initialize all variables
+ sess.run(tf.global_variables_initializer())
+
+ # Add the model graph to TensorBoard only if we did not load the entire dataset!
+ if not load_all_data:
+ writer.add_graph(sess.graph)
+
+ print("{} Start training...".format(datetime.now()))
+ print("{} Open Tensorboard at --logdir={}".format(datetime.now(), summary_path))
+
+ train_steps_per_epoch = int(np.ceil(dataset.train_size / dataset.batch_size))
+ print("{} Number of training steps per epoch: {}".format(datetime.now(), train_steps_per_epoch))
+ test_steps_per_epoch = int(np.ceil(dataset.test_size / dataset.batch_size))
+ print("{} Number of test steps per epoch: {}".format(datetime.now(), test_steps_per_epoch))
+ print()
+
+ # Loop over number of epochs
+ for epoch in range(num_epochs):
+
+ print("=======================================================")
+ print("{} Starting epoch number: {}".format(datetime.now(), epoch))
+
+ # Initialize iterator with the training and test dataset.
+ sess.run(train_iterator.initializer)
+ sess.run(test_iterator.initializer)
+
+ all_train_predictions = []
+ all_train_labels = []
+ train_epoch_step = 0
+ while True:
+ step_start_time = timeit.default_timer()
+
+ # get next batch of data
+ try:
+ img_batch, label_batch = sess.run(next_train_batch)
+ except tf.errors.OutOfRangeError:
+ print("{} Ended training epoch number {}".format(datetime.now(), epoch))
+ break
+
+ # And run the training op
+ _, predictions = sess.run([train_op, class_prediction_op], feed_dict={input_images: img_batch,
+ input_one_hot_labels: label_batch,
+ keep_prob: keep_probability})
+ all_train_predictions.extend(predictions)
+ all_train_labels.extend(np.argmax(label_batch, axis=1))
+
+ if sess.run(global_step) % write_train_summaries_every_n_steps == 0:
+ print("{} Writing training summary".format(datetime.now()))
+ train_s = sess.run(train_summaries,
+ feed_dict={input_images: img_batch, input_one_hot_labels: label_batch,
+ keep_prob: keep_probability})
+ writer.add_summary(train_s, sess.run(global_step))
+
+ if sess.run(global_step) % write_histograms_every_n_steps == 0:
+ print("{} Writing histogram summary".format(datetime.now()))
+ histogram_s = sess.run(histogram_summaries)
+ writer.add_summary(histogram_s, sess.run(global_step))
+
+ if sess.run(global_step) % write_kernel_images_every_n_steps == 0:
+ print("{} Writing kernel summary".format(datetime.now()))
+ kernel_s = sess.run(kernel_summaries)
+ writer.add_summary(kernel_s, sess.run(global_step))
+
+ step_end_time = timeit.default_timer()
+
+ train_epoch_step += 1
+ print("{} Global step {}, Epoch: {}, Epoch step {}/{}, ETA: {:.3g} s."
+ .format(datetime.now(), sess.run(global_step), epoch, train_epoch_step, train_steps_per_epoch,
+ step_end_time - step_start_time))
+
+ cm = tf.confusion_matrix(labels=all_train_labels, predictions=all_train_predictions,
+ num_classes=dataset.num_classes).eval()
+ print("{} Training confusion matrix:\n{}".format(datetime.now(), cm))
+
+ print("-------------------------------------------------------")
+ print("{} Starting evaluation on test set.".format(datetime.now()))
+ # Evaluate on test dataset
+ all_test_predictions = []
+ all_test_labels = []
+ test_summaries_written = False
+ test_epoch_steps = 0
+ wrongly_classified = []
+ while True:
+ step_start_time = timeit.default_timer()
+ try:
+ img_batch, label_batch = sess.run(next_test_batch)
+ except tf.errors.OutOfRangeError:
+ print("{} Test evaluation terminated.".format(datetime.now()))
+ break
+
+ predictions, predicted_correctly = sess.run([class_prediction_op, correct_pred],
+ feed_dict={input_images: img_batch,
+ input_one_hot_labels: label_batch,
+ keep_prob: 1.0})
+ all_test_predictions.extend(predictions)
+ all_test_labels.extend(np.argmax(label_batch, axis=1))
+
+ for img, p, l in zip(img_batch[~predicted_correctly], predictions[~predicted_correctly],
+ np.argmax(label_batch[~predicted_correctly, :], axis=1)):
+ wrongly_classified.append({"img": img, "prediction": p, "label": l})
+
+ step_end_time = timeit.default_timer()
+ test_epoch_steps += 1
+ print("{} Epoch: {}, Test epoch step {}/{}, ETA: {:.3g} s."
+ .format(datetime.now(), epoch, test_epoch_steps, test_steps_per_epoch,
+ step_end_time - step_start_time))
+
+ if not test_summaries_written and epoch % write_test_summaries_every_n_epochs == 0:
+ print("{} Writing test summary".format(datetime.now()))
+ test_summaries_written = True
+ test_s = sess.run(test_summaries,
+ feed_dict={input_images: img_batch, input_one_hot_labels: label_batch,
+ keep_prob: 1.0})
+ writer.add_summary(test_s, sess.run(global_step))
+
+ cm = tf.confusion_matrix(labels=all_test_labels, predictions=all_test_predictions,
+ num_classes=dataset.num_classes).eval()
+ print("{} Test confusion matrix:\n{}".format(datetime.now(), cm))
+
+ if epoch % write_test_summaries_every_n_epochs == 0:
+ with tf.name_scope('image_prediction'):
+ if len(wrongly_classified) > 10:
+ wrongly_classified = wrongly_classified[0:10]
+ for i, wrong in enumerate(wrongly_classified):
+ image_summary = tf.summary.image(
+ "{}: True {} pred {}".format(i, wrong["label"], wrong["prediction"]),
+ np.array([wrong["img"]]))
+ image_s = sess.run(image_summary)
+ writer.add_summary(image_s, sess.run(global_step))
+
+ if epoch % save_model_every_n_epochs == 0:
+ print("{} Saving checkpoint of model".format(datetime.now()))
+
+ # save checkpoint of the model
+ checkpoint_name = os.path.join(checkpoint_path, model.name)
+ save_path = saver.save(sess, checkpoint_name, global_step=epoch, write_meta_graph=False)
+
+ print("{} Model checkpoint saved at {}".format(datetime.now(), save_path))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/main_training_restorer.py b/main_training_restorer.py
new file mode 100644
index 0000000..e7882e7
--- /dev/null
+++ b/main_training_restorer.py
@@ -0,0 +1,88 @@
+import os
+import random
+
+import cv2
+import numpy as np
+import tensorflow as tf
+
+from thermography.classification.models import ThermoNet
+
+output_path = "Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/output"
+checkpoint_path = os.path.join(output_path, "checkpoints")
+input_folder = "Z:/SE/SEI/Servizi Civili/Del Don Carlo/termografia/dataset/Ghidoni/0-1000"
+num_images = 300
+
+if __name__ == '__main__':
+ image_shape = np.array([96, 120, 1])
+ num_classes = 3
+
+ with tf.name_scope("placeholders"):
+ # TF placeholder for graph input and output
+ x = tf.placeholder(tf.float32, [None, *image_shape], name="input_image")
+
+ model = ThermoNet(x=x, image_shape=image_shape, num_classes=num_classes, keep_prob=1.0)
+
+ with tf.name_scope("predict"):
+ predict_op = tf.argmax(model.logits, axis=1, name="model_predictions")
+ probabilities = tf.nn.softmax(model.logits)
+
+ # Add ops to save and restore all the variables.
+ saver = tf.train.Saver()
+
+ class_name = {0: "working", 1: "broken", 2: "misdetected"}
+
+ # Later, launch the model, use the saver to restore variables from disk, and
+ # do some work with the model.
+ with tf.Session() as sess:
+ # Restore variables from disk.
+ saver.restore(sess, tf.train.latest_checkpoint(checkpoint_path))
+ print("Model restored.")
+
+ input_images = []
+ print("Loading images..")
+ image_per_class = num_images / 3
+ for class_type in os.listdir(input_folder):
+ true_label = class_type
+ folder_path = os.path.join(input_folder, class_type)
+ image_count = 0
+ for img_name in os.listdir(folder_path):
+ img_path = os.path.join(folder_path, img_name)
+ input_images.append(
+ {"image": cv2.imread(img_path, cv2.IMREAD_COLOR), "true_label": true_label, "file_name": img_path})
+ image_count += 1
+ if image_count > image_per_class:
+ break
+
+ random.shuffle(input_images)
+ print("{} images loaded!".format(len(input_images)))
+
+ for input_image in input_images:
+ img = input_image["image"]
+ true_label = input_image["true_label"]
+ image_name = input_image["file_name"]
+
+ resized_img = cv2.resize(img, (image_shape[1], image_shape[0]), interpolation=cv2.INTER_AREA)
+ normalized_img = cv2.normalize(resized_img.astype('float'), None, 0.0, 1.0, cv2.NORM_MINMAX)
+ class_probabilities, predicted_label = sess.run([probabilities, predict_op],
+ feed_dict={x: [normalized_img]})
+
+ predicted_correctly = class_name[predicted_label[0]] == true_label
+ if predicted_correctly:
+ font_color = (40, 200, 40)
+ else:
+ font_color = (0, 0, 255)
+ font_scale = 1.0
+ thickness = 2
+ cv2.putText(img, "True lab: {}".format(true_label), (10, 30), cv2.FONT_HERSHEY_SIMPLEX, font_scale,
+ font_color, thickness)
+ cv2.putText(img, "Predicted: {}".format(class_name[predicted_label[0]]), (10, 60), cv2.FONT_HERSHEY_SIMPLEX,
+ font_scale, font_color, thickness)
+ np.set_printoptions(precision=3, suppress=True)
+ cv2.putText(img, "Logits: {}".format(class_probabilities[0]), (10, 90), cv2.FONT_HERSHEY_SIMPLEX,
+ font_scale, font_color, thickness)
+ print("Image {}".format(image_name))
+ cv2.imshow("Module", img)
+
+ cv2.waitKey(700)
+ if (true_label == "broken" and predicted_label[0]!=1) or (true_label!="broken" and predicted_label==1):
+ cv2.waitKey()
diff --git a/requirements.txt b/requirements.txt
index 0be059c..693a862 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,9 @@ numpy>=1.0
opencv-python>=3.0
scikit-learn>=0.19.0
scipy>=0.19.1
+tensorflow>=1.3.0
sphinx_rtd_theme
-git+git://github.com/coagulant/progressbar-python3.git
\ No newline at end of file
+git+git://github.com/coagulant/progressbar-python3.git
+git+git://github.com/cdeldon/simple-logger.git@master
diff --git a/resources/.gitkeep b/resources/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/resources/weights/.gitkeep b/resources/weights/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/thermography/classification/__init__.py b/thermography/classification/__init__.py
new file mode 100644
index 0000000..8703d76
--- /dev/null
+++ b/thermography/classification/__init__.py
@@ -0,0 +1 @@
+from .inference import Inference
\ No newline at end of file
diff --git a/thermography/classification/dataset/__init__.py b/thermography/classification/dataset/__init__.py
new file mode 100644
index 0000000..4c1b652
--- /dev/null
+++ b/thermography/classification/dataset/__init__.py
@@ -0,0 +1,3 @@
+from .thermo_dataset import ThermoDataset
+from .thermo_class import ThermoClass
+from .create_directory_list import create_directory_list
\ No newline at end of file
diff --git a/thermography/classification/dataset/create_directory_list.py b/thermography/classification/dataset/create_directory_list.py
new file mode 100644
index 0000000..56e4a88
--- /dev/null
+++ b/thermography/classification/dataset/create_directory_list.py
@@ -0,0 +1,37 @@
+import os
+
+
def create_directory_list(root_dir: str):
    """
    Creates a list of directories for dataset loading.

    The dataset root directory must be of the following form:
    ::
        root_dir
        |__video1
        |  |__0-1000
        |  |__1000_2000
        |__video2
        |  |__0-500
        |  |__500-1000
        |  |__1000-1200
        |__video3
           |__0-1000

    and each folder 'xxxx-yyyy' must contain three directories associated to the classes of the dataset.

    :param root_dir: Absolute path to the root directory of the dataset.
    :return: A list of absolute paths to the class directories containing the dataset images.
    :raises FileNotFoundError: If root_dir does not exist.
    """
    if not os.path.exists(root_dir):
        raise FileNotFoundError("Directory {} does not exist".format(root_dir))

    # List all directories associated to different videos.
    # BUGFIX: keep directories only — stray files in the root (e.g. '.gitkeep')
    # would make the os.listdir call below crash.
    recording_path_list = [os.path.join(root_dir, f) for f in os.listdir(root_dir)
                           if os.path.isdir(os.path.join(root_dir, f))]

    input_data_path = []
    for recording_path in recording_path_list:
        # Append the different directories associated to different video frame intervals.
        input_data_path.extend(os.path.join(recording_path, f) for f in os.listdir(recording_path)
                               if os.path.isdir(os.path.join(recording_path, f)))

    return input_data_path
diff --git a/thermography/classification/dataset/thermo_class.py b/thermography/classification/dataset/thermo_class.py
new file mode 100644
index 0000000..f5b98fb
--- /dev/null
+++ b/thermography/classification/dataset/thermo_class.py
@@ -0,0 +1,14 @@
+from typing import List
+
+
class ThermoClass:
    """Describes a single dataset class: its name, numeric label and folder."""

    def __init__(self, class_name: str, class_value: int, class_folder: str = None):
        """Builds the class description.

        :param class_name: Human-readable name of the class.
        :param class_value: Integer label associated with the class.
        :param class_folder: Name of the folder containing the images of this
            class; defaults to the class name when not given.
        """
        self.class_name = class_name
        self.class_value = class_value
        # Fall back to the class name when no explicit folder is provided.
        self.class_folder = class_folder if class_folder is not None else class_name


# Convenience alias for a list of ThermoClass objects.
ThermoClassList = List[ThermoClass]
diff --git a/thermography/classification/dataset/thermo_dataset.py b/thermography/classification/dataset/thermo_dataset.py
new file mode 100644
index 0000000..637f1cc
--- /dev/null
+++ b/thermography/classification/dataset/thermo_dataset.py
@@ -0,0 +1,293 @@
+import os
+
+import cv2
+import numpy as np
+import tensorflow as tf
+from tensorflow.contrib.data import Dataset
+from tensorflow.python.framework import dtypes
+from tensorflow.python.framework.ops import convert_to_tensor
+
+from .thermo_class import ThermoClassList
+
+
class ThermoDataset:
    """Loads labeled module images from disk and exposes them as train, test
    and validation `Dataset` objects.

    Images are discovered through :attr:`root_directory_list` and
    :attr:`thermo_class_list`, optionally balanced across classes, globally
    shuffled, and split according to the fractions set via
    :meth:`set_train_test_validation_fraction`.
    """

    def __init__(self, img_shape: np.ndarray, batch_size: int = 32, balance_data: bool = True,
                 normalize_images: bool = True):
        """
        :param img_shape: Shape [height, width, channels] to which the images are resized.
        :param batch_size: Batch size used by the generated datasets.
        :param balance_data: If True, majority classes are subsampled so all classes have the same cardinality.
        :param normalize_images: If True, each image is standardized (zero mean, unit variance).
        """
        self.image_shape = img_shape
        self.batch_size = batch_size
        self.balance_data = balance_data
        self.normalize_images = normalize_images

        self.__train_dataset = None
        self.__test_dataset = None
        self.__validation_dataset = None
        self.__root_directory_list = None
        self.__thermo_class_list = None
        self.num_classes = None
        self.__samples_per_class = []

        # Default split fractions; override via set_train_test_validation_fraction.
        self.__train_fraction = 0.6
        self.__test_fraction = 0.2
        self.__validation_fraction = 0.2

        self.__image_file_names = None
        self.__labels = None

    @property
    def image_shape(self):
        """Shape [height, width, channels] of the dataset images."""
        return self.__image_shape

    @image_shape.setter
    def image_shape(self, l: list):
        if len(l) != 3:
            raise ValueError("Image shape passed to dataset must be of length 3! Passed: {}".format(l))
        self.__image_shape = l

    @property
    def rgb(self):
        """True when the dataset images carry three channels."""
        return self.image_shape[2] == 3

    @property
    def data_size(self):
        """Total number of loaded samples."""
        return len(self.__labels)

    @property
    def train_size(self):
        """Number of samples in the train split."""
        return int(self.data_size * self.__train_fraction)

    @property
    def test_size(self):
        """Number of samples in the test split."""
        return int(self.data_size * self.__test_fraction)

    @property
    def validation_size(self):
        """Number of samples in the validation split."""
        return int(self.data_size * self.__validation_fraction)

    @property
    def train(self):
        """Train dataset (None until load_dataset has been called)."""
        return self.__train_dataset

    @property
    def test(self):
        """Test dataset (None until load_dataset has been called)."""
        return self.__test_dataset

    @property
    def validation(self):
        """Validation dataset (None until load_dataset has been called)."""
        return self.__validation_dataset

    @property
    def split_fraction(self):
        """Train/test/validation fractions as a numpy array (sums to 1)."""
        return np.array([self.__train_fraction, self.__test_fraction, self.__validation_fraction])

    def dataset_from_id(self, index):
        """Returns the dataset associated to an integer id (0: train, 1: test, 2: validation)."""
        return {0: self.__train_dataset, 1: self.__test_dataset, 2: self.__validation_dataset}[index]

    @property
    def root_directory_list(self):
        """List of root directories the images are loaded from."""
        return self.__root_directory_list

    @root_directory_list.setter
    def root_directory_list(self, dir_list: list):
        # Accept a single directory as a convenience.
        if type(dir_list) is str:
            dir_list = [dir_list]
        for directory in dir_list:
            if not os.path.exists(directory):
                raise ValueError(
                    "Directory <{}> passed to 'root_directory_list' property does not exist".format(directory))
        self.__root_directory_list = dir_list

    @property
    def thermo_class_list(self):
        """List of ThermoClass objects describing the dataset classes."""
        if self.__thermo_class_list is None:
            raise ValueError("Property 'thermo_class_list' has not been set yet!")
        return self.__thermo_class_list

    @thermo_class_list.setter
    def thermo_class_list(self, thermo_class_list):
        if self.__root_directory_list is None:
            raise ValueError("Must set property 'root_directory_list' before setting the class list!")
        # NOTE(review): the directory check uses class_name, while load_dataset
        # reads images from class_folder — confirm these always agree.
        directories_which_must_be_contained_in_root_directory = [thermo_class.class_name for thermo_class in
                                                                 thermo_class_list]

        for directory in directories_which_must_be_contained_in_root_directory:
            for dir in self.__root_directory_list:
                if directory not in os.listdir(dir):
                    raise ValueError("Root directory {} does not contain subdirectory {}".format(dir, directory))

        self.num_classes = len(thermo_class_list)
        thermo_class_labels = [thermo_class.class_value for thermo_class in thermo_class_list]
        for class_label in range(self.num_classes):
            if class_label not in thermo_class_labels:
                raise ValueError(
                    "Class label {} is not present in thermo classes: {}".format(class_label, thermo_class_labels))

        # BUGFIX: store the validated class list. It was previously never
        # assigned, so the getter always raised "has not been set yet".
        self.__thermo_class_list = thermo_class_list

    def load_dataset(self, root_directory_list: list, class_list: ThermoClassList, load_all_data: bool = False):
        """Loads the dataset from disk and builds the train/test/validation splits.

        :param root_directory_list: List of directories containing one subdirectory per class.
        :param class_list: List of ThermoClass objects describing the dataset classes.
        :param load_all_data: If True, all images are decoded into memory immediately;
            otherwise images are decoded lazily by the tensorflow input pipeline.
        """
        self.root_directory_list = root_directory_list
        self.thermo_class_list = class_list

        self.__image_file_names = np.array([], dtype=str)
        self.__labels = np.array([], dtype=np.int32)
        sample_per_class = {}
        for thermo_class in sorted(class_list, key=lambda t: t.class_value):
            for root_dir in self.root_directory_list:
                directory = os.path.join(root_dir, thermo_class.class_folder)
                image_names = np.array([os.path.join(directory, img_name) for img_name in os.listdir(directory)
                                        if img_name.endswith(".jpg")], dtype=str)
                self.__image_file_names = np.concatenate((self.__image_file_names, image_names))
                self.__labels = np.concatenate(
                    (self.__labels, np.ones(shape=(len(image_names)), dtype=np.int32) * thermo_class.class_value))
                if thermo_class.class_value not in sample_per_class:
                    sample_per_class[thermo_class.class_value] = len(image_names)
                else:
                    sample_per_class[thermo_class.class_value] += len(image_names)

        self.__samples_per_class = [sample_per_class[thermo_class.class_value] for thermo_class in class_list]

        if self.balance_data:
            self.__balance_data()

        # Global shuffle before splitting into train/test/validation.
        permutation = np.random.permutation(len(self.__image_file_names))
        self.__image_file_names = self.__image_file_names[permutation]
        self.__labels = self.__labels[permutation]

        self.__create_internal_dataset(load_all_data)

    def __balance_data(self):
        """Subsamples the majority classes so that every class contains as many
        samples as the smallest one."""
        # Shuffle each class independently (useful with multiple root directories:
        # random elements of all roots are discarded instead of only elements of
        # the last listed root directory).
        start_index = 0
        for class_id, num_samples_in_this_class in enumerate(self.__samples_per_class):
            permutation = np.random.permutation(num_samples_in_this_class)
            self.__image_file_names[start_index:start_index + num_samples_in_this_class] = \
                self.__image_file_names[start_index:start_index + num_samples_in_this_class][permutation]
            start_index += num_samples_in_this_class

        class_with_min_samples = np.argmin(self.__samples_per_class)
        num_min_samples = self.__samples_per_class[class_with_min_samples]

        # Remove trailing elements of the majority classes to match the minority class.
        start_index = 0
        elements_to_delete = []
        for num_samples_in_this_class in self.__samples_per_class:
            elements_to_delete.extend(
                range(start_index + num_min_samples, start_index + num_samples_in_this_class))
            start_index += num_samples_in_this_class

        self.__labels = np.delete(self.__labels, elements_to_delete)
        self.__image_file_names = np.delete(self.__image_file_names, elements_to_delete)

        # Sanity check for class balance.
        # BUGFIX: use num_classes instead of the previously hard-coded
        # three-class assumption (np.zeros(shape=3) / range(2)).
        cumulator = np.zeros(shape=self.num_classes)
        for label in self.__labels:
            cumulator[label] += 1
        for i in range(self.num_classes - 1):
            if cumulator[i] != cumulator[i + 1]:
                raise RuntimeError("Error in data balancing: resulting label distribution: {}".format(cumulator))

        self.__samples_per_class = [num_min_samples for _ in range(self.num_classes)]

    def set_train_test_validation_fraction(self, train_fraction, test_fraction, validation_fraction):
        """Sets the dataset split fractions; values are normalized to sum to 1."""
        total = train_fraction + test_fraction + validation_fraction
        self.__train_fraction = float(train_fraction) / total
        self.__test_fraction = float(test_fraction) / total
        self.__validation_fraction = float(validation_fraction) / total

    def __parse_image(self, image_path: str, image_label: int):
        """Tensorflow op which decodes, resizes and (optionally) standardizes one image."""
        one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)
        img_file = tf.read_file(image_path)
        img_decoded = tf.image.decode_jpeg(img_file, channels=self.image_shape[2])
        img_decoded = tf.image.resize_images(img_decoded, self.image_shape[0:2])
        img_decoded = tf.cast(img_decoded, tf.float32)
        if self.normalize_images:
            img_decoded = tf.image.per_image_standardization(img_decoded)

        return img_decoded, one_hot

    def __parse_image_load(self, image_path: str, image_label: int):
        """Eager (opencv-based) counterpart of __parse_image, used when load_all_data is True."""
        one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)
        flag = cv2.IMREAD_COLOR if self.rgb else cv2.IMREAD_GRAYSCALE

        img = cv2.imread(image_path, flags=flag)
        # cv2.resize expects the target size as (width, height).
        img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(
            np.float32)

        if self.normalize_images:
            img_mean = np.mean(img, axis=(0, 1))
            img_std = np.std(img, axis=(0, 1))
            img = (img - img_mean) / img_std

        return img, one_hot

    def __create_internal_dataset(self, load_all_data: bool):
        """Splits the loaded samples into train/test/validation Dataset objects."""
        dataset_names = {0: "TRAIN", 1: "TEST", 2: "VALIDAT."}
        cumulative_fraction = 0.0
        for dataset_id in range(3):
            fraction = self.split_fraction[dataset_id]
            min_index = int(np.floor(cumulative_fraction * self.data_size))
            max_index = int(np.floor((cumulative_fraction + fraction) * self.data_size))
            cumulative_fraction += fraction

            if load_all_data:
                images = []
                labels = []
                # BUGFIX: range(min_index, max_index) yields exactly
                # max_index - min_index images (the report was off by one).
                num_images = max_index - min_index
                print("Loading {} images for {} dataset.".format(num_images, dataset_names[dataset_id]))
                for image_num, image_index in enumerate(range(min_index, max_index)):
                    image_path = self.__image_file_names[image_index]
                    image_label = self.__labels[image_index]
                    if (image_num + 1) % 100 == 0:
                        print("Loaded {} images of {}".format(image_num + 1, num_images))
                    im, l = self.__parse_image_load(image_path, image_label)
                    images.append(im)
                    labels.append(l)
                print("Loaded all {} images".format(dataset_names[dataset_id]))
                images = np.array(images)
                if not self.rgb:
                    # Restore the channel axis dropped by grayscale decoding.
                    images = images[..., np.newaxis]
                print("Images shape: {}".format(images.shape))
                images = convert_to_tensor(images, dtypes.float32)
                labels = convert_to_tensor(labels, dtypes.int32)
            else:
                images = convert_to_tensor(self.__image_file_names[min_index:max_index], dtypes.string)
                labels = convert_to_tensor(self.__labels[min_index:max_index], dtypes.int32)

            data = Dataset.from_tensor_slices((images, labels))
            if not load_all_data:
                data = data.map(self.__parse_image)

            # Create a new dataset with batches of images.
            data = data.batch(self.batch_size)
            if dataset_id == 0:
                self.__train_dataset = data
            elif dataset_id == 1:
                self.__test_dataset = data
            else:
                self.__validation_dataset = data

    def get_train_iterator(self):
        """Returns an initializable iterator over the train dataset."""
        return self.train.make_initializable_iterator()

    def get_test_iterator(self):
        """Returns an initializable iterator over the test dataset."""
        return self.test.make_initializable_iterator()

    def get_validation_iterator(self):
        """Returns an initializable iterator over the validation dataset."""
        return self.validation.make_initializable_iterator()

    def print_info(self):
        """Prints a human-readable summary of the loaded dataset."""
        print("Num samples (train/test/val): {} tot: {}\n"
              "Samples per class: {}\n"
              "Sample type {}\n"
              "Sample shape: {}\n"
              "Label type {}\n"
              "Label shape: {}\n"
              "Root dirs: {}".format([int(np.floor(frac * len(self.__labels))) for frac in self.split_fraction],
                                     len(self.__labels),
                                     self.__samples_per_class,
                                     self.train.output_types[0], self.train.output_shapes[0][1:],
                                     self.train.output_types[1], self.train.output_shapes[1][1:],
                                     self.__root_directory_list))
diff --git a/thermography/classification/inference.py b/thermography/classification/inference.py
new file mode 100644
index 0000000..7e75695
--- /dev/null
+++ b/thermography/classification/inference.py
@@ -0,0 +1,83 @@
+import cv2
+import numpy as np
+import tensorflow as tf
+from simple_logger import Logger
+
+from .models.base_net import BaseNet
+
+
class Inference:
    """Restores a trained classification model from a checkpoint and performs
    inference on module images."""

    def __init__(self, checkpoint_dir: str, model_class: type, image_shape: np.ndarray, num_classes: int):
        """
        :param checkpoint_dir: Directory containing the tensorflow checkpoint of the trained model.
        :param model_class: Class of the model to restore (must derive from BaseNet).
        :param image_shape: Shape [height, width, channels] expected by the model input.
        :param num_classes: Number of output classes of the model.
        """
        self.checkpoint_dir = checkpoint_dir
        self.image_shape = image_shape
        self.num_classes = num_classes

        # Build the inference graph in its own tf.Graph so it does not clash
        # with other graphs living in the same process.
        self.graph = tf.Graph()
        with self.graph.as_default():
            self.x = tf.placeholder(tf.float32, [None, *self.image_shape], name="input_image")
            self.keep_probability = tf.placeholder(tf.float32, name="keep_probability")
            self.model = model_class(x=self.x, image_shape=self.image_shape, num_classes=self.num_classes,
                                     keep_prob=self.keep_probability)

            self.logits = self.model.logits
            self.probabilities = tf.nn.softmax(self.logits)

        # Add ops to save and restore all the variables.
        self.sess = tf.Session(graph=self.graph)

        # Restore variables from the latest checkpoint on disk.
        with self.sess.as_default():
            with self.graph.as_default():
                self.saver = tf.train.Saver()
                self.saver.restore(self.sess, tf.train.latest_checkpoint(self.checkpoint_dir))

        Logger.info("Model restored.")

    def __del__(self):
        # Close the tensorflow session to release its resources.
        Logger.info("Deleting inference object")
        self.sess.close()

    @property
    def model(self):
        """The restored model instance (derives from BaseNet)."""
        return self.__model

    @model.setter
    def model(self, m: BaseNet):
        if not isinstance(m, BaseNet):
            raise TypeError("Model passed to {} is not deriving from BaseNet".format(self.__class__.__name__))
        self.__model = m

    def classify(self, image_list: list) -> np.ndarray:
        """Classifies a list of module images.

        Each image is resized and channel-converted to the model input shape,
        then standardized (zero mean, unit variance) before being fed to the network.

        :param image_list: List of images (np.ndarray) to classify.
            NOTE(review): images are assumed to have a channel axis (shape
            indexed as img.shape[2]) — a plain 2D grayscale input would raise;
            confirm against the callers.
        :return: Array of class probabilities, one row per input image;
            an empty array when image_list is empty.
        """
        if len(image_list) == 0:
            return np.empty(shape=[0])

        img_tensor = []
        for img in image_list:
            if (img.shape[0:2] != self.image_shape[0:2]).any():
                shape = img.shape
                img = img.astype(np.float32)
                Logger.warning("Image is of size {}, should be {}, resizing".format(shape, self.image_shape))
                img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA)
            if img.shape[2] != self.image_shape[2]:
                # BUGFIX: removed a no-op 'img = img' statement in this branch.
                if self.image_shape[2] == 1:
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                elif self.image_shape[2] == 3:
                    img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

            # Per-image standardization, matching the normalization applied at
            # training time (see ThermoDataset).
            img_mean = np.mean(img, axis=(0, 1))
            img_std = np.std(img, axis=(0, 1))
            img = (img - img_mean) / img_std

            img_tensor.append(img)

        img_tensor = np.array(img_tensor)

        # Grayscale images lose their channel axis; restore it for the network.
        if len(img_tensor.shape) == 3:
            img_tensor = img_tensor[..., np.newaxis]

        Logger.debug("Classifying {} module image{}".format(
            img_tensor.shape[0], "" if img_tensor.shape[0] == 1 else "s"))

        class_probabilities = self.sess.run(self.probabilities,
                                            feed_dict={self.x: img_tensor, self.keep_probability: 1.0})
        return class_probabilities
diff --git a/thermography/classification/models/__init__.py b/thermography/classification/models/__init__.py
new file mode 100644
index 0000000..afaab14
--- /dev/null
+++ b/thermography/classification/models/__init__.py
@@ -0,0 +1,2 @@
+from .thermo_net import ThermoNet
+from .thermo_net_3x3 import ThermoNet as ThermoNet3x3
diff --git a/thermography/classification/models/base_net.py b/thermography/classification/models/base_net.py
new file mode 100644
index 0000000..14bda62
--- /dev/null
+++ b/thermography/classification/models/base_net.py
@@ -0,0 +1,84 @@
+from abc import ABC, abstractmethod
+
+import numpy as np
+import tensorflow as tf
+
+
class BaseNet(ABC):
    """
    Base interface for nets used by the thermography package.

    Subclasses must implement :meth:`create`, which builds the network graph
    and stores its output tensor in :attr:`logits`.
    """

    def __init__(self, x: tf.Tensor, image_shape: np.ndarray, num_classes: int, name: str = "ThermoNet"):
        """
        :param x: Input placeholder of the network.
        :param image_shape: Shape [height, width, channels] of the input images.
        :param num_classes: Number of output classes.
        :param name: Name of the network (used as variable scope by subclasses).
        """
        self.x = x
        self.image_shape = image_shape
        self.__num_classes = num_classes
        self.__name = name
        self.__logits = None

    @property
    def x(self):
        """Input placeholder of the network."""
        return self.__x

    @x.setter
    def x(self, x_: tf.Tensor):
        if type(x_) is not tf.Tensor:
            raise TypeError("__x in {} must be a tensorflow placeholder!".format(self.__class__.__name__))
        self.__x = x_

    @property
    def image_shape(self):
        """Shape [height, width, channels] of the network input."""
        return self.__image_shape

    @image_shape.setter
    def image_shape(self, shape):
        # BUGFIX: typo in the error messages ('there' -> 'three').
        if type(shape) is not np.ndarray:
            raise TypeError("__image_shape in {} must be a np.ndarray of three elements".format(self.__class__.__name__))
        if len(shape) != 3:
            raise ValueError("__image_shape in {} must be a np.ndarray of three elements".format(self.__class__.__name__))
        self.__image_shape = shape

    @property
    def channels(self):
        """Number of channels of the input images."""
        return self.image_shape[2]

    @property
    def name(self):
        """Name of the network."""
        return self.__name

    @property
    def num_classes(self):
        """Number of output classes."""
        if self.__num_classes is None:
            # BUGFIX: grammar of the error message ('is has not been overridden!').
            raise RuntimeError("__num_classes in {} has not been set!".format(self.__class__.__name__))
        return self.__num_classes

    @num_classes.setter
    def num_classes(self, n: int):
        if type(n) is not int:
            raise TypeError("Num classes in {} must be an integer!".format(self.__class__.__name__))
        if n <= 0:
            raise ValueError("Num classes in {} must be strictly positive!".format(self.__class__.__name__))
        self.__num_classes = n

    @property
    def logits(self):
        """Output tensor of the network; subclasses must assign it in create()."""
        if self.__logits is None:
            # BUGFIX: grammar of the error message ('is has not been overridden!').
            raise RuntimeError("__logits in {} has not been set!".format(self.__class__.__name__))
        return self.__logits

    @logits.setter
    def logits(self, l: tf.Tensor):
        self.__logits = l

    @abstractmethod
    def create(self) -> None:
        """Builds the network graph; implementations must assign self.logits."""
        pass

    @staticmethod
    def update_shape(current_shape: np.ndarray, scale: int):
        """Returns the spatial shape resulting from downscaling current_shape by
        'scale' with 'SAME' padding semantics (ceiling division)."""
        assert (len(current_shape) == 2)
        return (np.ceil(current_shape.astype(np.float32) / scale)).astype(np.int32)

    @property
    def flat_shape(self):
        """Spatial shape [height, width] of the input images."""
        return self.image_shape[0:2]
diff --git a/thermography/classification/models/operations.py b/thermography/classification/models/operations.py
new file mode 100644
index 0000000..c163afa
--- /dev/null
+++ b/thermography/classification/models/operations.py
@@ -0,0 +1,42 @@
+import tensorflow as tf
+
+from ..utils import kernel_to_image_summary
+
+__all__ = ["weight_variable",
+ "bias_variable",
+ "conv2d",
+ "conv_relu",
+ "max_pool_2x2",
+ "max_pool_4x4",
+ "max_pool_kxk"]
+
+
def weight_variable(name, shape):
    """Creates (or reuses, under variable scoping) a weight variable of the given
    shape, initialized from a truncated normal distribution (mean 0, stddev 0.1)."""
    return tf.get_variable(name=name, shape=shape, initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.1))
+
+
def bias_variable(name, shape):
    """Creates (or reuses, under variable scoping) a bias variable of the given
    shape, initialized to the constant 0.1."""
    return tf.get_variable(name=name, shape=shape, initializer=tf.constant_initializer(value=0.1))
+
+
def conv2d(name, x, W):
    """2D convolution of x with kernel W, stride 1 and 'SAME' (zero) padding."""
    return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME', name=name)
+
+
def conv_relu(x, kernel_shape, bias_shape, name: str = ""):
    """Convolution followed by a ReLU non-linearity.

    Creates weight and bias variables in the current variable scope, registers
    an image summary of the kernels, and returns relu(conv2d(x, W) + b).

    :param x: Input tensor.
    :param kernel_shape: Shape [height, width, in_channels, out_channels] of the kernel.
    :param bias_shape: Shape of the bias variable (typically [out_channels]).
    :param name: Suffix appended to the created variable/op names.
    """
    weights = weight_variable(name="W" + name, shape=kernel_shape)
    # Visualize the learned kernels in tensorboard.
    kernel_to_image_summary(kernel=weights, summary_name="kernels")
    biases = bias_variable(name="b" + name, shape=bias_shape)
    return tf.nn.relu(conv2d(name="conv2d" + name, x=x, W=weights) + biases)
+
+
def max_pool_2x2(name, x):
    """2x2 max pooling with stride 2 (halves the spatial resolution)."""
    return max_pool_kxk(name=name, x=x, k=2)
+
+
def max_pool_4x4(name, x):
    """4x4 max pooling with stride 4 (quarters the spatial resolution)."""
    return max_pool_kxk(name=name, x=x, k=4)
+
+
def max_pool_kxk(name, x, k: int):
    """k x k max pooling with stride k and 'SAME' padding."""
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME', name=name)
diff --git a/thermography/classification/models/thermo_net.py b/thermography/classification/models/thermo_net.py
new file mode 100644
index 0000000..839054f
--- /dev/null
+++ b/thermography/classification/models/thermo_net.py
@@ -0,0 +1,46 @@
+import numpy as np
+import tensorflow as tf
+
+from .base_net import BaseNet
+from .operations import *
+
+
class ThermoNet(BaseNet):
    """Two-convolution CNN used to classify thermal module images."""

    def __init__(self, x: tf.Tensor, image_shape: np.ndarray, num_classes: int, keep_prob: float, *args, **kwargs):
        """
        :param x: Input placeholder of the network.
        :param image_shape: Shape [height, width, channels] of the input images.
        :param num_classes: Number of output classes.
        :param keep_prob: Keep probability used by the dropout layer.
        """
        # BUGFIX: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(ThermoNet, self).__init__(x=x, image_shape=image_shape, num_classes=num_classes, name="ThermoNet")
        self.keep_probability = keep_prob

        self.create()

    def create(self):
        """Builds the network graph and stores the class scores in self.logits."""
        with tf.variable_scope(self.name):
            current_shape = self.flat_shape
            with tf.variable_scope('conv_1'):
                h_conv1_0 = conv_relu(x=self.x, kernel_shape=[5, 5, self.image_shape[2], 8], bias_shape=[8], name="_0")
                self.h_pool1 = max_pool_4x4(name="max_pool", x=h_conv1_0)
                current_shape = self.update_shape(current_shape, 4)
                # 24 30

            with tf.variable_scope('conv_2'):
                h_conv2_0 = conv_relu(x=self.h_pool1, kernel_shape=[5, 5, 8, 16], bias_shape=[16], name="_0")
                self.h_pool2 = max_pool_4x4(name="max_pool", x=h_conv2_0)
                current_shape = self.update_shape(current_shape, 4)
                # 6 8

            with tf.variable_scope('full_connected_1'):
                flattened = tf.reshape(self.h_pool2, [-1, np.prod(current_shape) * 16])
                shape = flattened.get_shape().as_list()

                W_fc1 = weight_variable(name="W", shape=[shape[1], 256])
                b_fc1 = bias_variable(name="b", shape=[256])

                h_fc1 = tf.nn.relu(tf.matmul(flattened, W_fc1) + b_fc1)

            with tf.variable_scope('drop_out_1'):
                self.h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob=self.keep_probability, name="dropout")

            with tf.variable_scope('full_connected_2'):
                W_fc2 = weight_variable(name="W", shape=[256, self.num_classes])
                b_fc2 = bias_variable(name="b", shape=[self.num_classes])

                self.logits = tf.add(tf.matmul(self.h_fc1_drop, W_fc2), b_fc2, name="logits")
diff --git a/thermography/classification/models/thermo_net_3x3.py b/thermography/classification/models/thermo_net_3x3.py
new file mode 100644
index 0000000..f420899
--- /dev/null
+++ b/thermography/classification/models/thermo_net_3x3.py
@@ -0,0 +1,61 @@
+import numpy as np
+import tensorflow as tf
+
+from .base_net import BaseNet
+from .operations import *
+
+
class ThermoNet(BaseNet):
    """Three-convolution CNN (3x3 kernels) used to classify thermal module images."""

    def __init__(self, x: tf.Tensor, image_shape: np.ndarray, num_classes: int, keep_prob: float, *args, **kwargs):
        """
        :param x: Input placeholder of the network.
        :param image_shape: Shape [height, width, channels] of the input images.
        :param num_classes: Number of output classes.
        :param keep_prob: Keep probability used by the dropout layers.
        """
        # BUGFIX: super(self.__class__, self) recurses infinitely when this
        # class is subclassed; name the class explicitly instead.
        super(ThermoNet, self).__init__(x=x, image_shape=image_shape, num_classes=num_classes, name="ThermoNet")
        self.keep_probability = keep_prob

        self.create()

    def create(self):
        """Builds the network graph and stores the class scores in self.logits."""
        with tf.variable_scope(self.name):
            current_shape = self.flat_shape
            with tf.variable_scope('conv_1'):
                h_conv1_0 = conv_relu(x=self.x, kernel_shape=[5, 5, self.image_shape[2], 8], bias_shape=[8], name="_0")
                self.h_pool1 = max_pool_2x2(name="max_pool", x=h_conv1_0)
                current_shape = self.update_shape(current_shape, 2)
                # 48 60

            with tf.variable_scope('conv_2'):
                h_conv2_0 = conv_relu(x=self.h_pool1, kernel_shape=[3, 3, 8, 16], bias_shape=[16], name="_0")
                self.h_pool2 = max_pool_4x4(name="max_pool", x=h_conv2_0)
                current_shape = self.update_shape(current_shape, 4)
                # 12 15

            with tf.variable_scope('conv_3'):
                h_conv3_0 = conv_relu(x=self.h_pool2, kernel_shape=[3, 3, 16, 32], bias_shape=[32], name="_0")
                self.h_pool3 = max_pool_2x2(name="max_pool", x=h_conv3_0)
                current_shape = self.update_shape(current_shape, 2)
                # 6 8

            with tf.variable_scope('drop_out_1'):
                self.h_pool3_drop = tf.nn.dropout(self.h_pool3, keep_prob=self.keep_probability, name="dropout")

            with tf.variable_scope('full_connected_1'):
                flattened = tf.reshape(self.h_pool3_drop, [-1, np.prod(current_shape) * 32])
                shape = flattened.get_shape().as_list()

                W_fc1 = weight_variable(name="W", shape=[shape[1], 256])
                b_fc1 = bias_variable(name="b", shape=[256])

                self.h_fc1 = tf.nn.relu(tf.matmul(flattened, W_fc1) + b_fc1)

            with tf.variable_scope('full_connected_2'):
                W_fc2 = weight_variable(name="W", shape=[256, 32])
                b_fc2 = bias_variable(name="b", shape=[32])

                h_fc2 = tf.nn.relu(tf.matmul(self.h_fc1, W_fc2) + b_fc2)

            with tf.variable_scope('drop_out_2'):
                self.h_fc2_drop = tf.nn.dropout(h_fc2, keep_prob=self.keep_probability, name="dropout")

            with tf.variable_scope('full_connected_3'):
                W_fc3 = weight_variable(name="W", shape=[32, self.num_classes])
                b_fc3 = bias_variable(name="b", shape=[self.num_classes])

                self.logits = tf.add(tf.matmul(self.h_fc2_drop, W_fc3), b_fc3, name="logits")
diff --git a/thermography/classification/utils/__init__.py b/thermography/classification/utils/__init__.py
new file mode 100644
index 0000000..0df7f53
--- /dev/null
+++ b/thermography/classification/utils/__init__.py
@@ -0,0 +1 @@
+from .kernel_summaries import kernel_to_image_summary
diff --git a/thermography/classification/utils/kernel_summaries.py b/thermography/classification/utils/kernel_summaries.py
new file mode 100644
index 0000000..510bcab
--- /dev/null
+++ b/thermography/classification/utils/kernel_summaries.py
@@ -0,0 +1,27 @@
+import tensorflow as tf
+
+
def kernel_to_image_summary(kernel: tf.Tensor, summary_name: str, max_images=3, collection: str = "kernels"):
    """
    Converts a kernel tensor of shape [width, height, in_channels, out_channels] to an image summary.

    :param kernel: Tensor representing the convolutional kernel.
    :param summary_name: Name to give to the summary.
    :param max_images: Maximal number of images to extract from the kernel tensor (slices).
    :param collection: Summary collection where the image summary is added.
    """
    # Normalize the input kernel to the 0-1 range.
    # NOTE(review): if the kernel is constant, x_max == x_min and this division
    # produces NaNs in the summary image — confirm whether that case matters.
    x_min = tf.reduce_min(kernel)
    x_max = tf.reduce_max(kernel)
    weights_0_to_1 = (kernel - x_min) / (x_max - x_min)

    # Rearrange weights such that they are ordered as [out_channels, width, height, in_channels]
    weights_transposed = tf.transpose(weights_0_to_1, [3, 0, 1, 2])
    # Unstack the in_channels axis --> [0: [out_channels, width, height], 1: [...], ..., in_channels-1: [...]]
    weights_transposed = tf.unstack(weights_transposed, axis=3)
    # Concatenate the unstacked channels: --> [out_channels * in_channels, width, height]
    weights_transposed = tf.concat(weights_transposed, axis=0)
    # Add an empty dimension at the end of the tensor [out_channels * in_channels, width, height, 1]
    weights_transposed = tf.expand_dims(weights_transposed, axis=-1)

    tf.summary.image(summary_name, weights_transposed, max_outputs=max_images, collections=[collection])
diff --git a/thermography/detection/__init__.py b/thermography/detection/__init__.py
index 2fd3883..77b05c2 100644
--- a/thermography/detection/__init__.py
+++ b/thermography/detection/__init__.py
@@ -2,6 +2,7 @@
from .intersection_detection import *
from .motion_detection import *
from .rectangle_detection import *
+from .preprocessing import *
from .segment_clustering import *
from .segment_detection import *
@@ -9,5 +10,6 @@
"IntersectionDetector", "IntersectionDetectorParams",
"MotionDetector",
"RectangleDetector", "RectangleDetectorParams",
+ "PreprocessingParams", "FramePreprocessor",
"SegmentClusterer", "SegmentClustererParams", "ClusterCleaningParams",
"SegmentDetector", "SegmentDetectorParams"]
diff --git a/thermography/detection/edge_detection.py b/thermography/detection/edge_detection.py
index 3a96353..b5efb08 100644
--- a/thermography/detection/edge_detection.py
+++ b/thermography/detection/edge_detection.py
@@ -1,5 +1,6 @@
import cv2
import numpy as np
+from simple_logger import Logger
__all__ = ["EdgeDetectorParams", "EdgeDetector"]
@@ -26,9 +27,11 @@ def detect(self):
"""
canny = cv2.Canny(image=self.input_image, threshold1=self.params.hysteresis_min_thresh,
threshold2=self.params.hysteresis_max_thresh, apertureSize=3)
+ Logger.debug("Canny edges computed")
dilated = cv2.dilate(canny, self.params.kernel,
iterations=self.params.dilation_steps)
+ Logger.debug("Dilate canny edges with {} steps".format(self.params.dilation_steps))
size = np.size(dilated)
skel = np.zeros(dilated.shape, np.uint8)
@@ -37,6 +40,7 @@ def detect(self):
done = False
while not done:
+ Logger.debug("Eroding canny edges")
eroded = cv2.erode(img, self.params.kernel)
temp = cv2.dilate(eroded, self.params.kernel)
temp = cv2.subtract(img, temp)
diff --git a/thermography/detection/intersection_detection.py b/thermography/detection/intersection_detection.py
index 9a6d5f0..55db997 100644
--- a/thermography/detection/intersection_detection.py
+++ b/thermography/detection/intersection_detection.py
@@ -1,5 +1,7 @@
-from thermography.utils.geometry import angle, angle_diff, segment_segment_intersection
import numpy as np
+from simple_logger import Logger
+
+from thermography.utils.geometry import angle, angle_diff, segment_segment_intersection
__all__ = ["IntersectionDetector", "IntersectionDetectorParams"]
@@ -25,11 +27,14 @@ def detect(self):
Detects the intersections between the segments passed to the constructor using the parameters passed to the
constructor.
"""
+ Logger.debug("Detecting intersection")
self.cluster_cluster_intersections = {}
self.raw_intersections = []
num_clusters = len(self.segments)
for cluster_index_i in range(num_clusters):
- for cluster_index_j in range(num_clusters):
+ for cluster_index_j in range(cluster_index_i+1, num_clusters):
+ Logger.debug("Detecting intersections between cluster {} and cluster {}".format(cluster_index_i,
+ cluster_index_j))
self.__detect_intersections_between_clusters(cluster_index_i, cluster_index_j)
def __detect_intersections_between_clusters(self, cluster_index_i: int, cluster_index_j: int):
diff --git a/thermography/detection/motion_detection.py b/thermography/detection/motion_detection.py
index 4d17c66..5726f64 100644
--- a/thermography/detection/motion_detection.py
+++ b/thermography/detection/motion_detection.py
@@ -1,5 +1,6 @@
import cv2
import numpy as np
+from simple_logger import Logger
from thermography.utils import scale_image
@@ -29,7 +30,7 @@ def motion_estimate(self, frame: np.ndarray) -> np.ndarray:
:param frame: New frame of the sequence.
:return: The estimation of the mean motion between self.last_frame and the frame passed as argument. The motion estimate is expressed in pixel units.
"""
-
+ Logger.debug("Detecting motion")
frame = scale_image(frame, self.scaling)
if self.last_frame is None:
self.last_frame = frame.copy()
@@ -39,6 +40,7 @@ def motion_estimate(self, frame: np.ndarray) -> np.ndarray:
cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
mean_flow = np.mean(self.flow, axis=(0, 1))
+ Logger.debug("Mean motion is {}".format(mean_flow))
self.last_frame = frame.copy()
diff --git a/thermography/detection/preprocessing.py b/thermography/detection/preprocessing.py
new file mode 100644
index 0000000..8ef397e
--- /dev/null
+++ b/thermography/detection/preprocessing.py
@@ -0,0 +1,97 @@
+import cv2
+import numpy as np
+
+from thermography.utils import scale_image, rotate_image
+
+__all__ = ["PreprocessingParams", "FramePreprocessor"]
+
+
+class PreprocessingParams:
+ def __init__(self):
+ self.gaussian_blur = 2
+ self.image_scaling = 1.0
+ self.image_rotation = 0.0
+ self.red_threshold = 200
+ self.min_area = 60 * 60
+
+
+class FramePreprocessor:
+ def __init__(self, input_image: np.ndarray, params: PreprocessingParams = PreprocessingParams()):
+ self.input_image = input_image
+ self.params = params
+ self.preprocessed_image = None
+ self.scaled_image_rgb = None
+ self.scaled_image = None
+ self.attention_image = None
+
+ @property
+ def channels(self):
+ if len(self.input_image.shape) < 3:
+ return 1
+ elif len(self.input_image.shape) == 3:
+ return 3
+ else:
+ raise ValueError("Input image has {} channels.".format(len(self.input_image.shape)))
+
+ @property
+ def gray_scale(self):
+ if self.channels == 1:
+ return True
+ elif self.channels == 3:
+ return (self.input_image[:, :, 0] == self.input_image[:, :, 1]).all() and \
+ (self.input_image[:, :, 0] == self.input_image[:, :, 2]).all()
+ else:
+ raise ValueError("Input image has {} channels.".format(len(self.input_image.shape)))
+
+ def preprocess(self) -> None:
+ scaled_image = scale_image(self.input_image, self.params.image_scaling)
+ rotated_frame = rotate_image(scaled_image, self.params.image_rotation)
+
+ if self.params.gaussian_blur > 0:
+ self.scaled_image = cv2.blur(self.scaled_image, (self.params.gaussian_blur, self.params.gaussian_blur))
+
+ if self.channels == 1:
+ self.scaled_image = rotated_frame
+ self.scaled_image_rgb = cv2.cvtColor(self.scaled_image, cv2.COLOR_GRAY2BGR)
+ self.preprocessed_image = self.scaled_image.astype(np.uint8)
+ mask = np.ones_like(self.scaled_image).astype(np.uint8) * 255
+ else:
+ if self.gray_scale:
+ self.scaled_image_rgb = rotated_frame
+ self.scaled_image = rotated_frame[:, :, 0]
+ self.preprocessed_image = self.scaled_image.astype(np.uint8)
+ mask = np.ones_like(self.scaled_image).astype(np.uint8) * 255
+ else:
+ self.scaled_image_rgb = rotated_frame
+ self.scaled_image = cv2.cvtColor(self.scaled_image_rgb, cv2.COLOR_BGR2GRAY)
+
+            # Pixels whose red channel is greater than or equal to params.red_threshold are color-coded white in
+            # the binary image; all other pixels are black.
+ red_channel = self.scaled_image_rgb[:, :, 2]
+ _, thresholded_image = cv2.threshold(red_channel, self.params.red_threshold, 255, 0, cv2.THRESH_BINARY)
+
+ # Perform dilation and erosion on the thresholded image to remove holes and small islands.
+ kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
+ closing = cv2.morphologyEx(thresholded_image, cv2.MORPH_CLOSE, kernel)
+ opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel)
+
+ # contours is a python list of all detected contours which are represented as numpy arrays of (x,y) coords.
+ image, contours, hierarchy = cv2.findContours(opening, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
+ areas = [cv2.contourArea(contour) for contour in contours]
+ discarded_contours = [area < self.params.min_area for area in areas]
+ contours = [contours[i] for i in range(len(contours)) if not discarded_contours[i]]
+
+ mask = np.zeros_like(self.scaled_image)
+ cv2.drawContours(mask, contours, -1, (255), cv2.FILLED)
+
+ mask = cv2.dilate(mask, kernel, iterations=5)
+ mask = cv2.blur(mask, (25, 25))
+ mask = mask.astype(np.float) / 255.
+ self.preprocessed_image = (self.scaled_image * mask).astype(np.uint8)
+
+ mask = (mask * 255).astype(np.uint8)
+
+ attention_mask = cv2.applyColorMap(mask, cv2.COLORMAP_WINTER)
+ self.attention_image = np.empty_like(self.scaled_image_rgb)
+ cv2.addWeighted(cv2.cvtColor(self.scaled_image, cv2.COLOR_GRAY2BGR), 0.7, attention_mask, 0.3, 0,
+ self.attention_image)
diff --git a/thermography/detection/rectangle_detection.py b/thermography/detection/rectangle_detection.py
index ca032e0..8703b3b 100644
--- a/thermography/detection/rectangle_detection.py
+++ b/thermography/detection/rectangle_detection.py
@@ -1,5 +1,7 @@
-from thermography.utils.geometry import aspect_ratio, area
import numpy as np
+from simple_logger import Logger
+
+from thermography.utils.geometry import aspect_ratio, area, sort_rectangle
__all__ = ["RectangleDetector", "RectangleDetectorParams"]
@@ -7,7 +9,7 @@
class RectangleDetectorParams:
def __init__(self):
self.aspect_ratio = 1.5
- self.aspect_ratio_relative_deviation = 0.35
+ self.aspect_ratio_relative_deviation = 0.35
self.min_area = 20 * 40
@@ -20,11 +22,14 @@ def __init__(self, input_intersections: dict, params: RectangleDetectorParams =
self.rectangles = []
def detect(self):
+ Logger.debug("Detecting rectangles")
# Iterate over each pair of clusters.
- num_clusters = int((np.sqrt(8 * len(self.intersections) + 1) - 1) / 2)
+ num_clusters = int((np.sqrt(8 * len(self.intersections) + 1) + 1) / 2)
for cluster_index_i in range(num_clusters):
for cluster_index_j in range(cluster_index_i + 1, num_clusters):
if (cluster_index_i, cluster_index_j) in self.intersections:
+ Logger.debug("Detecting rectangles between cluster {} and cluster {}".format(cluster_index_i,
+ cluster_index_j))
self.__detect_rectangles_between_clusters(cluster_index_i, cluster_index_j)
@staticmethod
@@ -55,9 +60,10 @@ def __detect_rectangles_between_clusters(self, cluster_index_i: int, cluster_ind
coord3 = intersections_with_i_plus[segment_index_j]
coord4 = intersections_with_i_plus[segment_index_j + 1]
rectangle = np.array([coord1, coord2, coord4, coord3])
+ rectangle = sort_rectangle(rectangle)
if self.fulfills_ratio(rectangle, self.params.aspect_ratio,
- self.params.aspect_ratio_relative_deviation) and area(
- rectangle) >= self.params.min_area:
+ self.params.aspect_ratio_relative_deviation) and \
+ area(rectangle) >= self.params.min_area:
rectangles_between_cluster_i_j.append(rectangle)
self.rectangles.extend(rectangles_between_cluster_i_j)
diff --git a/thermography/detection/segment_clustering.py b/thermography/detection/segment_clustering.py
index c81f676..3301f21 100644
--- a/thermography/detection/segment_clustering.py
+++ b/thermography/detection/segment_clustering.py
@@ -1,5 +1,6 @@
import numpy as np
from matplotlib import pylab as plt
+from simple_logger import Logger
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import normalize
@@ -50,7 +51,9 @@ def cluster_segments(self):
Clusters the input segments based on the parameters passed as argument. The features that can be used to cluster
the segments are their mean coordinates, and their angle.
"""
+ Logger.debug("Clustering segments")
if self.params.cluster_type not in ["gmm", "knn"]:
+ Logger.fatal("Invalid value for cluster type: {}".format(self.params.cluster_type))
raise ValueError("Invalid value for 'cluster_type': {} "
"'cluster_type' should be in ['gmm', 'knn']".format(self.params.cluster_type))
@@ -88,9 +91,11 @@ def cluster_segments(self):
cluster_prediction = None
if self.params.cluster_type is "knn":
+ Logger.debug("Clustering segments using KNN")
cluster_prediction = KMeans(n_clusters=self.params.num_clusters, n_init=self.params.num_init,
random_state=0).fit_predict(features)
elif self.params.cluster_type is "gmm":
+ Logger.debug("Clustering segments using GMM")
best_gmm = None
lowest_bic = np.infty
bic = []
diff --git a/thermography/detection/segment_detection.py b/thermography/detection/segment_detection.py
index d2a111d..e56d0a7 100644
--- a/thermography/detection/segment_detection.py
+++ b/thermography/detection/segment_detection.py
@@ -1,5 +1,6 @@
import cv2
import numpy as np
+from simple_logger import Logger
__all__ = ["SegmentDetector", "SegmentDetectorParams"]
@@ -45,6 +46,7 @@ def detect(self):
Detects the segments in the input image using the parameters passed as argument. Furthermore the detected
segments are extended on each side by a few pixels as defined in the parameters.
"""
+ Logger.debug("Detecting segments")
self.segments = cv2.HoughLinesP(image=self.input_image, rho=self.params.d_rho,
theta=self.params.d_theta,
threshold=self.params.min_num_votes,
@@ -53,6 +55,7 @@ def detect(self):
# If no segments have been found, return an empty array.
if self.segments is None:
+ Logger.warning("No segments were detected")
self.segments = np.empty(shape=(0, 4))
return
diff --git a/thermography/io/__init__.py b/thermography/io/__init__.py
index 1fa4f68..2bb4ff1 100644
--- a/thermography/io/__init__.py
+++ b/thermography/io/__init__.py
@@ -1,3 +1,5 @@
+from .modes import LogLevel, Modality
+from .logger import setup_logger
from .io import *
__all__ = ["ImageLoader",
diff --git a/thermography/io/io.py b/thermography/io/io.py
index c2137c9..08dcbb7 100644
--- a/thermography/io/io.py
+++ b/thermography/io/io.py
@@ -1,7 +1,10 @@
-import cv2
import os
+
+import cv2
import progressbar
-from .modes import Modality
+from simple_logger import Logger
+
+from . import Modality
__all__ = ["ImageLoader", "VideoLoader"]
@@ -14,7 +17,7 @@ def __init__(self, image_path: str, mode: Modality = Modality.DEFAULT):
:param image_path: Absolute path to the image file to be loaded.
:param mode: Modality to be used when loading the image.
"""
-
+ Logger.debug("Loading image at {}".format(image_path))
self.image_path = image_path
self.mode = mode
self.image_raw = cv2.imread(self.image_path, self.mode)
@@ -49,10 +52,12 @@ def __init__(self, video_path: str, start_frame: int = 0, end_frame: int = None)
:param start_frame: Start frame of the video to be considered (inclusive).
:param end_frame: End frame of the video to be considered (non inclusive).
"""
+ Logger.debug("Loading video at {}".format(video_path))
self.video_path = video_path
self.start_frame = start_frame
self.end_frame = end_frame
+ Logger.debug("Start frame: {}, end frame: {}".format(self.start_frame, self.end_frame))
self.frames = []
self.__load_video(cv2.VideoCapture(self.video_path))
@@ -80,36 +85,34 @@ def video_path(self):
@video_path.setter
def video_path(self, path: str):
if not os.path.exists(path):
- raise FileExistsError("Video file {} not found".format(self.video_path))
+ Logger.fatal("Video path {} does not exist".format(path))
+ raise FileNotFoundError("Video file {} not found".format(path))
self.__video_path = path
def __load_video(self, video_raw: cv2.VideoCapture):
if not video_raw.isOpened():
- print("Unable to read {} feed".format(self.video_path))
+ Logger.error("Unable to read {} feed".format(self.video_path))
self.frames = []
num_video_frames = int(video_raw.get(cv2.CAP_PROP_FRAME_COUNT))
- if self.end_frame is None:
+ if self.end_frame is None or self.end_frame > num_video_frames:
+ Logger.warning("Setting end_frame to {}".format(num_video_frames))
self.end_frame = num_video_frames
num_frames = 0
- num_total_frames = self.end_frame - self.start_frame
# Skip the first frames until the self_start frame.
video_raw.set(cv2.CAP_PROP_POS_FRAMES, self.start_frame)
- print("Loading {} frames...".format(self.end_frame - self.start_frame))
- bar = progressbar.ProgressBar(maxval=num_total_frames,
+ Logger.info("Loading {} frames...".format(self.end_frame - self.start_frame))
+ bar = progressbar.ProgressBar(maxval=self.num_frames,
widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
for i in range(self.end_frame - self.start_frame):
- if i + self.start_frame >= num_video_frames:
- RuntimeWarning(
- "end_frame={} passed to VideoLoader is greater than video size {}."
- "Closing video stream now.".format(self.end_frame, num_video_frames))
ret = video_raw.grab()
if not ret:
+ Logger.error("Could not load frame {}".format(i + self.start_frame))
raise ValueError("Could not load frame {}".format(i + self.start_frame))
self.frames.append(video_raw.retrieve()[1])
diff --git a/thermography/io/logger.py b/thermography/io/logger.py
new file mode 100644
index 0000000..6854449
--- /dev/null
+++ b/thermography/io/logger.py
@@ -0,0 +1,26 @@
+import os
+from datetime import datetime
+
+from simple_logger import Logger
+
+from thermography.settings import get_log_dir
+from . import LogLevel
+
+
+def setup_logger(console_log_level: LogLevel = LogLevel.INFO, file_log_level: LogLevel = LogLevel.DEBUG,
+ log_file_name: str = None):
+ """
+ Sets up the simple logger.
+ :param console_log_level: Log level associated to the streaming log.
+ :param file_log_level: Log level associated to the file log.
+    :param log_file_name: If set, the file log is written to this file; otherwise a new log file is created in the log directory returned by get_log_dir().
+ """
+ if log_file_name is None:
+ log_directory = get_log_dir()
+ name = "logging_{}.log".format(datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
+ log_file_name = os.path.join(log_directory, name)
+
+ Logger.set_file_logging_level(file_log_level)
+ Logger.set_log_file(log_file_name)
+ Logger.set_console_logging_level(console_log_level)
+ Logger.init()
diff --git a/thermography/io/modes.py b/thermography/io/modes.py
index 11116e6..a5ae8ef 100644
--- a/thermography/io/modes.py
+++ b/thermography/io/modes.py
@@ -1,6 +1,7 @@
-import cv2
from enum import IntEnum
+import cv2
+
class Modality(IntEnum):
"""
@@ -12,3 +13,15 @@ class Modality(IntEnum):
# Set the default loading modality to RGB.
DEFAULT = RGB
+
+
+class LogLevel:
+ """
+ Log levels used for the simple_logger.
+ """
+ DEBUG = "DEBUG"
+ INFO = "INFO"
+ WARNING = "WARNING"
+ WARN = WARNING
+ ERROR = "ERROR"
+ FATAL = "FATAL"
diff --git a/thermography/module_map.py b/thermography/module_map.py
index ab7587a..76bfc12 100644
--- a/thermography/module_map.py
+++ b/thermography/module_map.py
@@ -1,19 +1,23 @@
-import cv2
import numpy as np
-from thermography.utils import ID, rectangle_contains
+from simple_logger import Logger
+
+from thermography.utils import ID, rectangle_contains, area_between_rectangles, area
class ModuleMap:
class __ModuleInMap:
def __init__(self, ID: int, rectangle: np.ndarray, frame_id: int):
+ Logger.debug("Creating a new module inside the map with ID {}".format(ID))
self.ID = ID
self.last_rectangle = None
self.last_center = None
+ self.last_area = None
self.frame_id_history = []
self.rectangle_history = {}
self.cumulated_motion = np.array([0, 0], dtype=np.float32)
+ self.__all_probabilities = []
self.add(rectangle, frame_id)
@@ -34,6 +38,7 @@ def __repr__(self):
def add(self, rectangle: np.ndarray, frame_id: int):
self.last_rectangle = rectangle
self.last_center = np.mean(self.last_rectangle, axis=0)
+ self.last_area = area(self.last_rectangle)
self.frame_id_history.append(frame_id)
self.rectangle_history[frame_id] = rectangle
@@ -44,7 +49,21 @@ def add_motion(self, frame_id: int, motion_estimate: np.ndarray):
if frame_id != self.frame_id_history[-1]:
self.cumulated_motion += motion_estimate
+ def update_probability(self, prob: np.ndarray) -> None:
+ """
+ Updates the current probability distribution over the class labels of this module.
+ :param prob: A 1D numpy array of size 'num_classes' representing the classification probability.
+ """
+ self.__all_probabilities.append(prob)
+
+ @property
+ def mean_probability(self):
+ if len(self.__all_probabilities) == 0:
+ raise RuntimeError("No probabilities assigned to current module {}".format(self.ID))
+ return np.mean(self.__all_probabilities, axis=0)
+
def __init__(self):
+ Logger.debug("Creating the module map")
# A dictionary of modules and their centers keyed by their ID.
self.global_module_map = {}
self.module_database = []
@@ -64,6 +83,7 @@ def insert(self, rectangle_list: list, frame_id: int, motion_estimate: np.ndarra
:param motion_estimate: Motion estimate between the last frame (ID-1) and the frame containing the rectangles.
"""
+ Logger.debug("Inserting a new rectangle list into the module map at frame {}".format(frame_id))
# When no information about the motion estimate is given, assume no motion.
if motion_estimate is None:
motion_estimate = np.array([0.0, 0.0])
@@ -83,15 +103,20 @@ def insert(self, rectangle_list: list, frame_id: int, motion_estimate: np.ndarra
# Shift the rectangle center using the motion estimate.
rectangle_center -= motion_estimate
- # Compute the ID of the rectangle in the global map which is closest to the current rectangle.
- nearest_ID = self.__find_closest_module(rectangle_center)
+ # Compute the ID of the rectangle in the global map which is most similar to the current rectangle.
+ # This computation involves the evaluation of the surface between the corresponding edges of the
+ # rectangles of interest.
+ most_similar_ID = self.__find_most_similar_module(rectangle, area_threshold_ratio=0.5)
- # If this rectangle's center is inside the nearest rectangle, set it as a correspondence.
- closest_rectangle = self.global_module_map[nearest_ID].last_rectangle
- if rectangle_contains(closest_rectangle, rectangle_center):
- associations.append(nearest_ID)
- else:
+ if most_similar_ID is None:
associations.append(None)
+ else:
+ # If this rectangle's center is inside the nearest rectangle, set it as a correspondence.
+ closest_rectangle = self.global_module_map[most_similar_ID].last_rectangle
+ if rectangle_contains(closest_rectangle, rectangle_center):
+ associations.append(most_similar_ID)
+ else:
+ associations.append(None)
for rectangle_index, correspondence in enumerate(associations):
if correspondence is None:
@@ -106,18 +131,41 @@ def insert(self, rectangle_list: list, frame_id: int, motion_estimate: np.ndarra
self.__store_old_modules(frame_id)
- def __find_closest_module(self, rectangle_center: np.ndarray) -> int:
- min_distance = np.infty
+ def update_class_belief(self, probabilities: dict) -> None:
+ """
+ Updates the current class probability for the modules being detected in the last step.
+
+ :param probabilities: A dictionary keyed by the module ID, whose value is a probability distribution over the classes.
+ """
+ for module_id, prob in probabilities.items():
+ self.global_module_map[module_id].update_probability(prob)
+
+ def __find_most_similar_module(self, rectangle: np.ndarray, area_threshold_ratio: float) -> int:
+ """
+ Finds the most similar rectangle in the global map by computing the surface between each rectangle stored in the
+ module map, and the one passed as argument.
+
+ :param rectangle: Query rectangle in the form [[x0, y0], [x1, y1], [x2,y2], [x3, y3]]
+    :param area_threshold_ratio: The most similar module is accepted only if the relative deviation between the area
+    of the query rectangle and the area of the candidate is smaller than this parameter.
+ :return: Index of the most similar rectangle in the module map.
+ """
+ rectangle_area = area(rectangle)
+ rectangle_center = np.mean(rectangle, axis=0)
+ min_surface_between_rect = np.infty
best_id = None
for module_id, module_in_map in self.global_module_map.items():
- dist = np.linalg.norm(rectangle_center - (module_in_map.last_center - module_in_map.cumulated_motion))
- if dist < min_distance:
- min_distance = dist
+ if not rectangle_contains(module_in_map.last_rectangle, rectangle_center):
+ continue
+ surface_between_rect = area_between_rectangles(rectangle, module_in_map.last_rectangle)
+ surface_diff = module_in_map.last_area - rectangle_area
+ if surface_between_rect < min_surface_between_rect and surface_diff / rectangle_area < area_threshold_ratio:
+ min_surface_between_rect = surface_between_rect
best_id = module_id
return best_id
- def __store_old_modules(self, current_frame_id : int):
+ def __store_old_modules(self, current_frame_id: int):
old_rectangles_indices = []
max_time_distance = 10
for rect_id, rectangle_in_map in self.global_module_map.items():
diff --git a/thermography/settings/__init__.py b/thermography/settings/__init__.py
index b0a7b2e..628ea20 100644
--- a/thermography/settings/__init__.py
+++ b/thermography/settings/__init__.py
@@ -1,11 +1,12 @@
import os
from .camera import Camera
-from .modules import Modules
SETTINGS_DIR = os.path.dirname(os.path.abspath(__file__))
THERMOGRAPHY_ROOT_DIR = os.path.dirname(SETTINGS_DIR)
DATA_DIR = ""
TEST_DIR = os.path.join(THERMOGRAPHY_ROOT_DIR, "test")
+LOG_DIR = os.path.join(os.path.join(THERMOGRAPHY_ROOT_DIR, os.pardir), "logs")
+RES_DIR = os.path.join(os.path.join(THERMOGRAPHY_ROOT_DIR, os.pardir), "resources")
def get_settings_dir() -> str:
@@ -56,9 +57,26 @@ def get_test_dir() -> str:
return TEST_DIR
+def get_log_dir() -> str:
+ """
+ Returns the absolute path to the log directory.
+ :return: Absolute path to the log directory.
+ """
+ return LOG_DIR
+
+def get_resources_dir() -> str:
+ """
+ Returns the absolute path to the resources directory.
+ :return: Absolute path to the resources directory.
+ """
+ return RES_DIR
+
+
__all__ = ["Camera",
"get_data_dir",
+ "get_log_dir",
"get_settings_dir",
"get_thermography_root_dir",
"get_test_dir",
- "set_data_dir"]
+ "set_data_dir",
+ "get_resources_dir"]
diff --git a/thermography/settings/camera.py b/thermography/settings/camera.py
index 6a33d40..12c2d37 100644
--- a/thermography/settings/camera.py
+++ b/thermography/settings/camera.py
@@ -1,7 +1,9 @@
import json
-import numpy as np
import os
+import numpy as np
+from simple_logger import Logger
+
class Camera:
def __init__(self, camera_path: str):
@@ -16,6 +18,8 @@ def __init__(self, camera_path: str):
with open(self.camera_path) as param_file:
self.camera_params = json.load(param_file)
+ Logger.debug("Camera parameter file is: \n{}".format(str(self)))
+
def __str__(self):
return "Image size: {},\n" \
"Focal length: {}\n" \
@@ -73,7 +77,9 @@ def camera_path(self):
@camera_path.setter
def camera_path(self, path: str):
if not os.path.exists(path):
- raise FileExistsError("Camera config file {} not found".format(self.camera_path))
+ Logger.fatal("Camera config file {} not found".format(self.camera_path))
+ raise FileNotFoundError("Camera config file {} not found".format(self.camera_path))
if not path.endswith("json"):
+ Logger.fatal("Can only parse '.json' files")
raise ValueError("Can only parse '.json' files, passed camera file is {}".format(path))
self.__camera_path = path
diff --git a/thermography/settings/camera_parameters.json b/thermography/settings/camera_parameters.json
index cf5023e..f9b68ae 100644
--- a/thermography/settings/camera_parameters.json
+++ b/thermography/settings/camera_parameters.json
@@ -1,8 +1,14 @@
{
- "image_size": [640,512],
+ "image_size": [
+ 640,
+ 512
+ ],
"focal_length": 529.412,
- "principal_point": [320, 256],
- "distortion" : {
+ "principal_point": [
+ 320,
+ 256
+ ],
+ "distortion": {
"radial": {
"r1": -0.283796,
"r2": 0.100312,
diff --git a/thermography/settings/modules.py b/thermography/settings/modules.py
deleted file mode 100644
index ef9ebaa..0000000
--- a/thermography/settings/modules.py
+++ /dev/null
@@ -1,18 +0,0 @@
-import numpy as np
-
-
-class Modules:
- def __init__(self):
- """
- Initializes the module object with default parameters.
- """
-
- self.dimensions = np.array([1.5, 1.0])
-
- def __str__(self):
- return "Module dimensions: {},\n" \
- "Aspect ratio: {}".format(self.dimensions, self.aspect_ratio)
-
- @property
- def aspect_ratio(self):
- return self.dimensions[0] / self.dimensions[1]
diff --git a/thermography/test/test_ID.py b/thermography/test/test_ID.py
index 5a42140..d4eb7f5 100644
--- a/thermography/test/test_ID.py
+++ b/thermography/test/test_ID.py
@@ -1,6 +1,7 @@
-import numpy as np
import unittest
+import numpy as np
+
from thermography.utils import ID
diff --git a/thermography/test/test_geometry.py b/thermography/test/test_geometry.py
index e5e2944..59de919 100644
--- a/thermography/test/test_geometry.py
+++ b/thermography/test/test_geometry.py
@@ -1,7 +1,8 @@
import collections
-import numpy as np
import unittest
+import numpy as np
+
from thermography.utils.geometry import *
@@ -82,6 +83,21 @@ def test_area(self):
polygon = np.array([point1, point5, point3, point4])
self.assertEqual(area(points=polygon), 1.5)
+ def test_area_between_rectangles(self):
+ """
+ Tests the 'area_between_rectangles' function which computes the surface between two rectangles.
+ """
+ rect1 = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
+ self.assertEqual(area_between_rectangles(rect1, rect1), 0.0)
+
+ rect2 = np.array([[0.25, .25], [0.75, 0.25], [0.75, 0.75], [0.25, 0.75]])
+ self.assertEqual(area_between_rectangles(rect1, rect2), 0.75)
+ self.assertEqual(area_between_rectangles(rect2, rect1), 0.75)
+
+ rect3 = rect1 + np.array([0.2, 0.2])
+ self.assertAlmostEqual(area_between_rectangles(rect1, rect3), 0.8)
+ self.assertAlmostEqual(area_between_rectangles(rect3, rect1), 0.8)
+
def test_aspect_ratio(self):
"""
Tests the 'aspect_ratio' function which computes the aspect ratio of a rectangle.
@@ -345,6 +361,27 @@ def test_segment_sorting(self):
sorted_segments_indices = sort_segments(segments)
self.assertListEqual([*sorted_segments_indices], [3, 0, 1, 2])
+ def test_sort_rectangle(self):
+ p0 = np.array([0.0, 0.0])
+ p1 = np.array([1.0, 0.0])
+ p2 = np.array([1.0, 1.0])
+ p3 = np.array([0.0, 1.0])
+
+ r_final = np.array([p0, p1, p2, p3])
+
+ r0123 = np.array([p0, p1, p2, p3])
+ r1230 = np.array([p1, p2, p3, p0])
+ r2301 = np.array([p2, p3, p0, p1])
+ r3012 = np.array([p3, p0, p1, p2])
+ r3210 = np.array([p3, p2, p1, p0])
+ r2103 = np.array([p2, p1, p0, p3])
+ r1032 = np.array([p1, p0, p3, p2])
+ r0321 = np.array([p0, p3, p2, p1])
+
+ for rec in [r0123, r1230, r2301, r3012, r3210, r2103, r1032, r0321]:
+ sorted_rec = sort_rectangle(rec)
+ self.assertTrue((r_final == sorted_rec).all(), msg="Original:\n{}\nSorted:\n{}".format(rec, sorted_rec))
+
if __name__ == '__main__':
unittest.main()
diff --git a/thermography/test/test_images.py b/thermography/test/test_images.py
index 4fb2678..c1e6d82 100644
--- a/thermography/test/test_images.py
+++ b/thermography/test/test_images.py
@@ -1,6 +1,7 @@
-import numpy as np
import unittest
+import numpy as np
+
from thermography.utils.images import *
diff --git a/thermography/thermo_app.py b/thermography/thermo_app.py
index 4d4ec49..40b6d67 100644
--- a/thermography/thermo_app.py
+++ b/thermography/thermo_app.py
@@ -1,12 +1,17 @@
-from . import ModuleMap
-from .io import VideoLoader
-from .detection import *
-from .settings import Camera, Modules
-from .utils import rotate_image, scale_image
-from .utils.display import *
+import os
import cv2
import numpy as np
+from simple_logger import Logger
+
+from . import ModuleMap
+from .classification import Inference
+from .classification.models import ThermoNet3x3
+from .detection import *
+from .io import VideoLoader
+from .settings import Camera, get_resources_dir
+from .utils import aspect_ratio
+from .utils.display import *
class ThermoApp:
@@ -21,13 +26,18 @@ def __init__(self, input_video_path, camera_param_file):
:param input_video_path: Absolute path to the input video.
:param camera_param_file: Parameter file of the camera.
"""
-
+ Logger.debug("Starting thermo app")
self.input_video_path = input_video_path
self.camera_param_file = camera_param_file
- # Camera and Modules object containing the corresponding parameters.
+ self.image_shape = np.array([96, 120, 1])
+ self.num_classes = 3
+ checkpoint_dir = os.path.join(get_resources_dir(), "weights")
+ self.inference = Inference(checkpoint_dir=checkpoint_dir, model_class=ThermoNet3x3,
+ image_shape=self.image_shape, num_classes=self.num_classes)
+
+ # Camera object containing the corresponding parameters.
self.camera = None
- self.modules = None
# Object responsible for loading the video passed as parameter.
self.video_loader = None
@@ -38,6 +48,8 @@ def __init__(self, input_video_path, camera_param_file):
# Objects referring to the items computed during the last frame.
self.last_input_frame = None
+ self.last_preprocessed_image = None
+ self.last_attention_image = None
self.last_scaled_frame_rgb = None
self.last_scaled_frame = None
self.last_edges_frame = None
@@ -48,12 +60,11 @@ def __init__(self, input_video_path, camera_param_file):
self.last_rectangles = None
self.last_mean_motion = None
self.last_frame_id = 0
+ self.last_probabilities = {}
# Runtime parameters for detection.
self.should_undistort_image = True
- self.image_rotating_angle = 0.0
- self.image_scaling = 1.0
- self.gaussian_blur = 3
+ self.preprocessing_parameters = PreprocessingParams()
self.edge_detection_parameters = EdgeDetectorParams()
self.segment_detection_parameters = SegmentDetectorParams()
self.segment_clustering_parameters = SegmentClustererParams()
@@ -69,6 +80,7 @@ def frames(self):
return self.video_loader.frames
def create_segment_image(self):
+ Logger.debug("Creating segment image")
base_image = self.last_scaled_frame_rgb.copy()
if self.last_cluster_list is None:
return base_image
@@ -87,22 +99,42 @@ def create_segment_image(self):
return base_image
def create_rectangle_image(self):
+ Logger.debug("Creating rectangle image")
base_image = self.last_scaled_frame_rgb.copy()
- if self.last_rectangles is not None and len(self.last_rectangles) > 0:
- mean_color = np.mean(base_image, axis=(0, 1))
- mask = np.zeros_like(base_image)
- if mean_color[0] == mean_color[1] == mean_color[2]:
- mean_color = np.array([255, 255, 0])
- opposite_color = np.array([255, 255, 255]) - mean_color
- opposite_color = (int(opposite_color[0]), int(opposite_color[1]), int(opposite_color[2]))
- for rectangle in self.last_rectangles:
- cv2.polylines(base_image, np.int32([rectangle]), True, opposite_color, 1, cv2.LINE_AA)
- cv2.fillConvexPoly(mask, np.int32([rectangle]), (255, 0, 0), cv2.LINE_4)
-
- cv2.addWeighted(base_image, 1, mask, 0.3, 0, base_image)
+ mask = np.zeros_like(base_image)
+
+ for module_id, module in self.module_map.global_module_map.items():
+ if module.frame_id_history[-1] == self.last_frame_id:
+
+ module_coords = module.last_rectangle - np.int32(module.cumulated_motion)
+ mean_prob = module.mean_probability
+ color = color_from_probabilities(mean_prob)
+
+ cv2.polylines(base_image, np.int32([module_coords]), True, color, 1, cv2.LINE_AA)
+ cv2.fillConvexPoly(mask, np.int32([module_coords]), color, cv2.LINE_4)
+ else:
+ continue
+
+ cv2.addWeighted(base_image, 1.0, mask, 0.4, 0, base_image)
+ return base_image
+
+ def create_classes_image(self):
+ Logger.debug("Creating classes image")
+ base_image = self.last_scaled_frame_rgb.copy()
+
+ for module_id, module in self.module_map.global_module_map.items():
+ module_coords = module.last_rectangle - np.int32(module.cumulated_motion)
+ module_center = module.last_center - np.int32(module.cumulated_motion)
+ mean_prob = module.mean_probability
+ color = color_from_probabilities(mean_prob)
+
+ cv2.circle(base_image, (int(module_center[0]), int(module_center[1])), 6, color, cv2.FILLED, cv2.LINE_AA)
+ cv2.polylines(base_image, np.int32([module_coords]), True, color, 1, cv2.LINE_AA)
+
return base_image
def create_module_map_image(self):
+ Logger.debug("Creating module map image")
base_image = self.last_scaled_frame_rgb.copy()
for rect_id, rectangle in self.module_map.global_module_map.items():
rect_shift = rectangle.last_rectangle - np.int32(rectangle.cumulated_motion)
@@ -120,17 +152,50 @@ def create_module_map_image(self):
return base_image
+ def create_module_list(self):
+ Logger.debug("Creating module list")
+ module_list = []
+ module_width = 90
+ module_height = 66
+ padding = 15
+ image_width = module_width + 2 * padding
+ image_height = module_height + 2 * padding
+ module_image_size = (image_width, image_height)
+
+ for rectangle_id, rectangle in self.module_map.global_module_map.items():
+ # Only iterate over the last detected rectangles.
+ if rectangle.frame_id_history[-1] != self.last_frame_id:
+ continue
+
+ module_coordinates = rectangle.last_rectangle
+ module_aspect_ratio = aspect_ratio(module_coordinates)
+ is_horizontal = module_aspect_ratio >= 1.0
+ if is_horizontal:
+ projection_rectangle = np.float32([[0 + padding, 0 + padding],
+ [image_width - 1 - padding, 0 + padding],
+ [image_width - 1 - padding, image_height - 1 - padding],
+ [0 + padding, image_height - 1 - padding]])
+ else:
+ projection_rectangle = np.float32([[0 + padding, image_height - 1 - padding],
+ [0 + padding, 0 + padding],
+ [image_width - 1 - padding, 0 + padding],
+ [image_width - 1 - padding, image_height - 1 - padding]])
+
+ transformation_matrix = cv2.getPerspectiveTransform(np.float32(module_coordinates),
+ projection_rectangle)
+ extracted = cv2.warpPerspective(self.last_scaled_frame_rgb, transformation_matrix, module_image_size)
+
+ module_list.append({"coordinates": rectangle.last_rectangle, "image": extracted, "id": rectangle.ID})
+
+ return module_list
+
def __load_params(self):
"""
- Load the parameters related to camera and modules.
+        Load the parameters related to the camera.
"""
self.camera = Camera(camera_path=self.camera_param_file)
- self.modules = Modules()
- print("Using camera parameters:\n{}".format(self.camera))
- print()
- print("Using module parameters:\n{}".format(self.modules))
- print()
+ Logger.info("Using camera parameters:\n{}".format(self.camera))
def load_video(self, start_frame: int, end_frame: int):
"""
@@ -141,8 +206,17 @@ def load_video(self, start_frame: int, end_frame: int):
"""
self.video_loader = VideoLoader(video_path=self.input_video_path, start_frame=start_frame, end_frame=end_frame)
+ def preprocess_frame(self):
+ frame_preprocessor = FramePreprocessor(input_image=self.last_input_frame, params=self.preprocessing_parameters)
+ frame_preprocessor.preprocess()
+
+ self.last_scaled_frame_rgb = frame_preprocessor.scaled_image_rgb
+ self.last_scaled_frame = frame_preprocessor.scaled_image
+ self.last_preprocessed_image = frame_preprocessor.preprocessed_image
+ self.last_attention_image = frame_preprocessor.attention_image
+
def detect_edges(self):
- edge_detector = EdgeDetector(input_image=self.last_scaled_frame, params=self.edge_detection_parameters)
+ edge_detector = EdgeDetector(input_image=self.last_preprocessed_image, params=self.edge_detection_parameters)
edge_detector.detect()
self.last_edges_frame = edge_detector.edge_image
@@ -171,12 +245,25 @@ def detect_intersections(self):
self.last_intersections = intersection_detector.cluster_cluster_intersections
def detect_rectangles(self):
- self.rectangle_detection_parameters.aspect_ratio = self.modules.aspect_ratio
rectangle_detector = RectangleDetector(input_intersections=self.last_intersections,
params=self.rectangle_detection_parameters)
rectangle_detector.detect()
self.last_rectangles = rectangle_detector.rectangles
+ def classify_detected_modules(self):
+ """
+ Classifies the modules in the global module map which have been detected in the current frame. This function
+ must be called after inserting the modules in the global module map!
+ """
+ assert (self.inference is not None)
+
+ module_list = self.create_module_list()
+ probabilities = self.inference.classify([m["image"] for m in module_list])
+ for module, prob in zip(module_list, probabilities):
+ self.last_probabilities[module["id"]] = prob
+
+ self.module_map.update_class_belief(self.last_probabilities)
+
def step(self, frame_id, frame):
self.last_frame_id = frame_id
self.last_input_frame = frame
@@ -186,20 +273,14 @@ def step(self, frame_id, frame):
distCoeffs=self.camera.distortion_coeff)
else:
undistorted_image = distorted_image
+ self.last_input_frame = undistorted_image
- scaled_image = scale_image(undistorted_image, self.image_scaling)
-
- rotated_frame = rotate_image(scaled_image, self.image_rotating_angle)
- self.last_scaled_frame_rgb = rotated_frame
-
- gray = cv2.cvtColor(src=rotated_frame, code=cv2.COLOR_BGR2GRAY)
- gray = cv2.blur(gray, (self.gaussian_blur, self.gaussian_blur))
-
- self.last_scaled_frame = gray
+ self.preprocess_frame()
self.detect_edges()
self.detect_segments()
if len(self.last_segments) < 3:
+ Logger.warning("Found less than three segments!")
return False
self.cluster_segments()
@@ -212,10 +293,16 @@ def step(self, frame_id, frame):
# Add the detected rectangles to the global map.
self.module_map.insert(self.last_rectangles, frame_id, self.last_mean_motion)
+ if len(self.last_rectangles) == 0:
+ Logger.warning("No rectangles detected!")
+ return False
+
return True
def reset(self):
self.last_input_frame = None
+ self.last_preprocessed_image = None
+ self.last_attention_image = None
self.last_scaled_frame_rgb = None
self.last_scaled_frame = None
self.last_edges_frame = None
@@ -226,6 +313,8 @@ def reset(self):
self.last_rectangles = None
self.last_mean_motion = None
+ self.last_probabilities = {}
+
def run(self):
for frame_id, frame in enumerate(self.video_loader.frames):
@@ -262,11 +351,4 @@ def run(self):
cv2.imshow("Global map", global_map)
cv2.waitKey(1)
-
- # Rectangle extraction.
- # default_rect = np.float32([[629, 10], [10, 10], [10, 501], [629, 501]])
- # for rectangle in rectangle_detector.rectangles:
- # M = cv2.getPerspectiveTransform(np.float32(rectangle), default_rect)
- # extracted = cv2.warpPerspective(rectangle, M, (640, 512))
-
self.reset()
diff --git a/thermography/utils/display.py b/thermography/utils/display.py
index 6427ba1..4c431e0 100644
--- a/thermography/utils/display.py
+++ b/thermography/utils/display.py
@@ -2,7 +2,7 @@
import numpy as np
__all__ = ["draw_intersections", "draw_motion", "draw_rectangles", "draw_segments",
- "random_color"]
+ "random_color", "color_from_probabilities"]
def draw_intersections(intersections: list, base_image: np.ndarray, windows_name: str):
@@ -126,3 +126,14 @@ def random_color() -> tuple:
"""
c = np.random.randint(0, 255, 3)
return int(c[0]), int(c[1]), int(c[2])
+
+
+def color_from_probabilities(prob: np.ndarray) -> tuple:
+ """
+ Constructs a color tuple given the probability distribution prob.
+
+    :param prob: A one-dimensional, three-element numpy array containing class probabilities.
+ :return: The color associated to the probability distribution.
+ """
+ color = np.diag(prob).dot(np.ones(shape=[3, 1]) * 255.0)
+ return (int(color[2]), int(color[0]), int(color[1]))
diff --git a/thermography/utils/geometry.py b/thermography/utils/geometry.py
index 7942392..4c9996f 100644
--- a/thermography/utils/geometry.py
+++ b/thermography/utils/geometry.py
@@ -5,6 +5,7 @@
"angle_diff",
"aspect_ratio",
"area",
+ "area_between_rectangles",
"line_estimate",
"mean_segment_angle",
"merge_segments",
@@ -14,6 +15,7 @@
"segment_line_intersection",
"segment_min_distance",
"segment_segment_intersection",
+ "sort_rectangle",
"sort_segments"]
@@ -60,9 +62,38 @@ def area(points: np.ndarray) -> float:
"""
x = points[:, 0]
y = points[:, 1]
+
return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
+def area_between_rectangles(rect1: np.ndarray, rect2: np.ndarray) -> float:
+ """
+    Computes the cumulative surface between the corresponding edges of the two rectangles passed as arguments.
+ ::
+
+ *--------------------*
+ |####################|
+ |###*-------------*##|
+ |###| |##|
+ |###| |##|
+ |###| |##|
+ |###*-------------*##|
+ |####################|
+ *--------------------*
+ :param rect1: First rectangle's coordinates [[x0,y0],[x1,y1],[x2,y2],[x3,y3]]
+ :param rect2: Second rectangle's coordinates [[x'0,y'0],[x'1,y'1],[x'2,y'2],[x'3,y'3]]
+ :return: The surface between the rectangles' corresponding edges.
+ """
+
+ r0 = sort_rectangle(np.array([*rect1[0:2], *rect2[1::-1]]))
+ r1 = sort_rectangle(np.array([*rect1[1:3], *rect2[2:0:-1]]))
+ r2 = sort_rectangle(np.array([*rect1[2:4], *rect2[3:1:-1]]))
+ r3 = sort_rectangle(np.array([rect1[3], rect1[0], rect2[0], rect2[3]]))
+
+ a0, a1, a2, a3 = area(r0), area(r1), area(r2), area(r3)
+ return a0 + a1 + a2 + a3
+
+
def aspect_ratio(rectangle: np.ndarray) -> float:
"""
Computes the aspect ratio of a rectangle.
@@ -383,6 +414,31 @@ def segment_segment_intersection(seg1: np.ndarray, seg2: np.ndarray) -> np.ndarr
return False
+def sort_rectangle(rectangle: np.ndarray) -> np.ndarray:
+ """
+ Sorts the coordinates in the rectangle such that the final indexing corresponds to the following structure:
+ ::
+ +-----------> x
+ | 3 2
+ | *-------------*
+ | | |
+ v | |
+ y | |
+ *-------------*
+ 0 1
+
+ :param rectangle: numpy array of coordinates with form: [[x0, y0], [x1, y1], [x2, y2], [x3, y3]]
+ :return: A rectangle whose vertices are sorted.
+ """
+
+ center = np.mean(rectangle, axis=0)
+ diff = rectangle - center
+ angles = np.arctan2(diff[:, 1], diff[:, 0])
+
+ order = np.argsort(angles)
+ return rectangle[order]
+
+
def sort_segments(segment_list: list) -> np.ndarray:
"""
Sorts the segments passed as argument based on the normal associated to the mean angle.