Merge pull request #44 from julrog/consistent_typing
Consistent typing
julrog authored Jan 1, 2023
2 parents 8f0a473 + f34e671 commit 25dd721
Showing 74 changed files with 678 additions and 667 deletions.
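
The changes follow one pattern throughout: parameters that default to `None` gain an `Optional[...]` annotation, procedures gain an explicit `-> None`, and `X or Y` expressions in annotations are replaced with real `typing` constructs. A minimal before/after sketch of the pattern, reusing a signature from `data/mnist_data_handler.py` below (body elided):

```python
from typing import Any, List, Optional, Tuple

# Before: since mypy 0.990, a None default no longer makes a parameter
# implicitly Optional, so the old signature is rejected:
#
#   def get_prepared_data(class_selection: List[int] = None): ...

# After: the None default is spelled out with Optional, and the return
# type is written in full.
def get_prepared_data(
    class_selection: Optional[List[int]] = None,
) -> Tuple[Tuple[Any, Any], Tuple[Any, Any], Any, Any]:
    ...
```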
11 changes: 5 additions & 6 deletions .pre-commit-config.yaml
@@ -1,4 +1,9 @@
repos:
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v0.991
hooks:
- id: mypy
args: [--ignore-missing-imports, --disallow-untyped-defs, --disallow-incomplete-defs, --disallow-untyped-calls]
- repo: https://github.com/timothycrosley/isort
rev: 5.10.1
hooks:
@@ -27,16 +32,10 @@ repos:
- id: check-merge-conflict
- id: mixed-line-ending
args: ["--fix=crlf"]
#- repo: https://github.com/markdownlint/markdownlint
# rev: v0.12.0
# hooks:
# - id: markdownlint
#args: [ "-r", "~MD002,~MD013,~MD024,~MD029,~MD033,~MD034,~MD036" ]
- repo: https://github.com/codespell-project/codespell
rev: v2.2.2
hooks:
- id: codespell
#args: ["--skip", "*.ipynb,tools/data/hvu/label_map.json", "-L", "te,nd,thre,Gool,gool"]
- repo: https://github.com/hadialqattan/pycln
rev: v2.1.2
hooks:
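
The four strictness arguments passed to the new mypy hook together require every function in the tree to be fully annotated. Roughly what each one rejects, sketched with illustrative function names (not repo code):

```python
def untyped(x):               # rejected by --disallow-untyped-defs:
    return x                  # no annotations at all

def incomplete(x: int, y):    # rejected by --disallow-incomplete-defs:
    return x                  # only partly annotated

def typed(x: int) -> int:     # fully annotated: accepted
    return x

def caller() -> int:
    return untyped(1)         # rejected by --disallow-untyped-calls:
                              # annotated code may not call untyped code
```

The remaining flag, `--ignore-missing-imports`, keeps mypy from erroring on third-party packages that ship without type stubs.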
2 changes: 1 addition & 1 deletion README.md
@@ -28,7 +28,7 @@ The visualization tool `start_tool.py` can be used to render and/or process neur

### VR

See [VR_TOOL.md](./VR_TOOL.md)) for more info.
See [VR_TOOL.md](./VR_TOOL.md) for more info.

### Example
<img align="center" src="./docs/images/compare_network_parameters.jpg" />
2 changes: 1 addition & 1 deletion configs/processing.json
@@ -1,7 +1,7 @@
{
"edge_bandwidth_reduction": 0.9,
"edge_importance_type": 0,
"layer_distance": 0.5,
"layer_distance": 0.4,
"layer_width": 1.0,
"node_bandwidth_reduction": 0.95,
"prune_percentage": 0.0,
Empty file added data/__init__.py
Empty file.
58 changes: 30 additions & 28 deletions data/mnist_data_handler.py
@@ -1,6 +1,6 @@
import logging
import os
from typing import Any, List, Tuple
from typing import Any, List, Optional, Tuple

import numpy as np
from tensorflow import keras
@@ -31,7 +31,7 @@ def get_basic_data(categorical: bool = False) -> Tuple[Tuple[Any, Any], Tuple[An
return (x_train, y_train), (x_test, y_test), input_shape, num_classes


def get_prepared_data(class_selection: List[int] = None) -> Tuple[Tuple[Any, Any], Tuple[Any, Any], Any, Any]:
def get_prepared_data(class_selection: Optional[List[int]] = None) -> Tuple[Tuple[Any, Any], Tuple[Any, Any], Any, Any]:
(x_train, y_train), (x_test, y_test), input_shape, num_classes = get_basic_data()

if class_selection is not None:
@@ -61,14 +61,17 @@ def get_prepared_data(class_selection: List[int] = None) -> Tuple[Tuple[Any, Any
return (x_train, y_train), (x_test, y_test), input_shape, num_classes


def get_unbalance_data(main_class: int, other_class_percentage: float, class_selection: List[int] = None) \
def get_unbalance_data(main_class: int, other_class_percentage: float, class_selection: Optional[List[int]] = None) \
-> Tuple[Tuple[Any, Any], Tuple[Any, Any], Any, Any]:
(x_train, y_train), (x_test, y_test), input_shape, num_classes = get_basic_data()

x_unbalanced_train = []
y_unbalanced_train = []

considered_class_count: int = num_classes if class_selection is None else len(
class_selection)
other_class_samples: int = int(
other_class_percentage * x_train.shape[0] * (len(class_selection) / num_classes))
other_class_percentage * x_train.shape[0] * (considered_class_count / num_classes))

for result, image in zip(y_train, x_train):
if result == main_class:
@@ -108,67 +111,66 @@ def get_unbalance_data(main_class: int, other_class_percentage: float, class_sel
return (x_train, y_train), (x_test, y_test), input_shape, num_classes


def split_mnist_data(class_selection: List[int] = None):
def split_mnist_data(class_selection: Optional[List[int]] = None) -> None:
(x_train, y_train), (x_test, y_test), input_shape, num_classes = get_basic_data()
logging.info('splitting %i train examples' % x_train.shape[0])
logging.info('splitting %i test examples' % x_test.shape[0])

separated_train_data: List[Tuple[np.array or List[Any], np.array or List[Any]]] = [([], []) for _ in
range(num_classes)]
separated_test_data: List[Tuple[np.array or List[Any], np.array or List[Any]]] = [([], []) for _ in
range(num_classes)]

if class_selection is None:
class_selection = range(num_classes)
separated_train_data: List[Tuple[np.array, np.array]] = [([], []) for _ in
range(num_classes)]
separated_test_data: List[Tuple[np.array, np.array]] = [([], []) for _ in
range(num_classes)]
ensured_class_selection: List[int] = list(range(
num_classes)) if class_selection is None else class_selection

for result, image in zip(y_train, x_train):
if result in class_selection:
if result in ensured_class_selection:
separated_train_data[result][0].append(image)
separated_train_data[result][1].append(0)

for result, image in zip(y_test, x_test):
if result in class_selection:
if result in ensured_class_selection:
separated_test_data[result][0].append(image)
separated_test_data[result][1].append(0)

for i, class_id in enumerate(class_selection):
for i, class_id in enumerate(ensured_class_selection):
separated_train_data[i] = (np.array(separated_train_data[class_id][0]).reshape([-1, input_shape[0], 1]),
np.array(separated_train_data[class_id][1]).reshape([-1, 1]))
separated_test_data[i] = (np.array(separated_test_data[class_id][0]).reshape([-1, input_shape[0], 1]),
np.array(separated_test_data[class_id][1]).reshape([-1, 1]))

processed_separated_train_data: List[Tuple[np.array, np.array]] = [
([], []) for _ in range(len(class_selection))]
([], []) for _ in range(len(ensured_class_selection))]
processed_separated_test_data: List[Tuple[np.array, np.array]] = [
([], []) for _ in range(len(class_selection))]
for i in range(len(class_selection)):
([], []) for _ in range(len(ensured_class_selection))]
for i in range(len(ensured_class_selection)):
processed_separated_train_data[i] = (
np.copy(separated_train_data[i][0]), np.copy(separated_train_data[i][1]))
processed_separated_test_data[i] = (
np.copy(separated_test_data[i][0]), np.copy(separated_test_data[i][1]))

for i in range(len(class_selection)):
for j in range(len(class_selection)):
for i in range(len(ensured_class_selection)):
for j in range(len(ensured_class_selection)):
np.random.shuffle(separated_train_data[j][0])
split_portion: int = int(
len(separated_train_data[j][0]) / len(class_selection))
split_portion = int(
len(separated_train_data[j][0]) / len(ensured_class_selection))
processed_separated_train_data[i] = (
np.append(
processed_separated_train_data[i][0], separated_train_data[j][0][0:split_portion], axis=0),
np.append(processed_separated_train_data[i][1], np.ones(
split_portion).reshape(-1, 1), axis=0)
)
np.random.shuffle(separated_test_data[j][0])
split_portion: int = int(
len(separated_test_data[j][0]) / len(class_selection))
split_portion = int(
len(separated_test_data[j][0]) / len(ensured_class_selection))
processed_separated_test_data[i] = (
np.append(
processed_separated_test_data[i][0], separated_test_data[j][0][0:split_portion], axis=0),
np.append(processed_separated_test_data[i][1], np.ones(
split_portion).reshape(-1, 1), axis=0)
)

for i, class_id in enumerate(class_selection):
for i, class_id in enumerate(ensured_class_selection):
logging.info('%i train examples for class #%i' %
(processed_separated_train_data[i][0].shape[0], class_id))
logging.info('%i test examples for class #%i' %
@@ -178,15 +180,15 @@ def split_mnist_data(class_selection: List[int] = None):
if not os.path.exists(data_path):
os.makedirs(data_path)

if len(class_selection) == num_classes:
if len(ensured_class_selection) == num_classes:
np.savez('%s/mnist_train_split' %
data_path, processed_separated_train_data)
np.savez('%s/mnist_test_split' %
data_path, processed_separated_test_data)
else:
np.savez('%s/mnist_train_split_%s' % (data_path, ''.join(str(e) + '_' for e in class_selection)),
np.savez('%s/mnist_train_split_%s' % (data_path, ''.join(str(e) + '_' for e in ensured_class_selection)),
processed_separated_train_data)
np.savez('%s/mnist_test_split_%s' % (data_path, ''.join(str(e) + '_' for e in class_selection)),
np.savez('%s/mnist_test_split_%s' % (data_path, ''.join(str(e) + '_' for e in ensured_class_selection)),
processed_separated_test_data)

logging.info("saved split data to \"%s\"" % data_path)
31 changes: 15 additions & 16 deletions data/model_data.py
@@ -1,6 +1,6 @@
import os
from enum import Enum
from typing import Any, Dict, List
from typing import Any, Dict, List, Optional

from tensorflow import keras
from tensorflow.keras.models import Model
@@ -20,21 +20,20 @@ class ModelTrainType(Enum):


class ModelData:
def __init__(self, name: str, description: str = None, model: Model = None):
def __init__(self, name: str, description: Optional[str] = None, model: Optional[Model] = None) -> None:
self.name: str = name
self.model: Model = model if model is not None else keras.models.load_model(
self.get_model_path())
self.check_model_supported_layer()
self.description: str = description
self.data: dict = dict()
self.data['name'] = self.name
if description is not None:
self.data['description'] = self.description
self.data['description'] = description
self.data_file: EvaluationFile = EvaluationFile(self.name)
self.data_file.read_data()

def set_parameter(self, batch_size: int, epochs: int, layer_data: List[int], learning_rate: float,
training_samples: int, test_samples: int):
training_samples: int, test_samples: int) -> None:
self.data['batch_size'] = batch_size
self.data['epochs'] = epochs
self.data['layer_data'] = layer_data
@@ -44,20 +43,20 @@ def set_parameter(self, batch_size: int, epochs: int, layer_data: List[int], lea
self.data['test_samples'] = test_samples

def set_initial_performance(self, test_loss: float, test_accuracy: float, train_loss: float, train_accuracy: float,
classification_report: Any):
classification_report: Any) -> None:
self.data['test_loss'] = str(test_loss)
self.data['test_accuracy'] = str(test_accuracy)
self.data['train_loss'] = str(train_loss)
self.data['train_accuracy'] = str(train_accuracy)
self.data['classification_report'] = classification_report

def set_class_selection(self, class_selection: List[int]):
def set_class_selection(self, class_selection: Optional[List[int]]) -> None:
importance: dict = dict()
importance['class_selection'] = class_selection
self.data_file.append_main_data('processed', 'importance', importance)
self.data_file.write_data()

def set_importance_type(self, importance_type: int):
def set_importance_type(self, importance_type: int) -> None:
importance: dict = dict()
importance['importance_type'] = importance_type
self.data_file.append_main_data('processed', 'importance', importance)
@@ -66,31 +65,31 @@ def set_importance_type(self, importance_type: int):
def get_num_classes(self) -> int:
return self.data_file.data_cache['overall']['basic_model']['num_classes']

def get_class_selection(self) -> List[int] or None:
def get_class_selection(self) -> Optional[List[int]]:
return self.data_file.data_cache['processed']['importance']['class_selection']

def get_importance_type(self) -> int:
return self.data_file.data_cache['processed']['importance']['importance_type']

def store_model_data(self):
def store_model_data(self) -> None:
self.data_file.append_main_data('overall', 'basic_model', self.data)
self.data_file.write_data()

def store_main_data(self, key: str, sub_key: str, data: Dict[Any, Any]):
def store_main_data(self, key: str, sub_key: str, data: Dict[Any, Any]) -> None:
self.data_file.append_main_data(key, sub_key, data)
self.data_file.write_data()

def store_data(self, key: str, sub_key: str, sub_sub_key: str, data: Dict[Any, Any]):
def store_data(self, key: str, sub_key: str, sub_sub_key: str, data: Dict[Any, Any]) -> None:
self.data_file.append_data(key, sub_key, sub_sub_key, data)
self.data_file.write_data()

def save_model(self):
def save_model(self) -> None:
path: str = DATA_PATH + 'model/' + self.name + '/tf_model'
if not os.path.exists(path):
os.makedirs(path)
self.model.save(path)

def reload_model(self):
def reload_model(self) -> None:
self.model = keras.models.load_model(self.get_model_path())
self.check_model_supported_layer()

@@ -100,10 +99,10 @@ def get_model_path(self) -> str:
def get_path(self) -> str:
return DATA_PATH + 'model/' + self.name + '/'

def save_data(self):
def save_data(self) -> None:
self.data_file.write_data()

def check_model_supported_layer(self):
def check_model_supported_layer(self) -> None:
for index, layer in enumerate(self.model.layers):
if layer.__class__.__name__ not in SUPPORTED_LAYER and layer.__class__.__name__ not in IGNORED_LAYER:
raise Exception(
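
`model_data.py` also retires the `-> List[int] or None` return annotation. An `or` inside an annotation is evaluated as an ordinary expression before any type checker sees it; the generic alias is truthy, so the `None` arm silently disappears. `Optional` expresses the union that was actually meant. A quick illustration (not repo code):

```python
from typing import List, Optional

old = List[int] or None        # collapses at runtime to just List[int]
new = Optional[List[int]]      # a genuine Union[List[int], None]

print(old)  # typing.List[int] -- the None alternative is gone
print(new)  # typing.Optional[typing.List[int]]
```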
40 changes: 38 additions & 2 deletions definitions.py
@@ -1,4 +1,8 @@
import os
from enum import IntEnum, IntFlag, auto
from typing import Generator, Iterable, List

from pyrr import Vector3

BASE_PATH = os.path.dirname(os.path.realpath(__file__))
DATA_PATH = BASE_PATH + '/storage/data/'
@@ -7,7 +11,39 @@
ADDITIONAL_EDGE_BUFFER_DATA: int = 8


def pairwise(it, size: int):
class ProcessRenderMode(IntFlag):
FINAL = auto()
NODE_ITERATIONS = auto()
EDGE_ITERATIONS = auto()
SMOOTHING = auto()


class CameraPose(IntEnum):
FRONT = 0
RIGHT = 1
LEFT = 2
LOWER_BACK_RIGHT = 3
BACK_RIGHT = 4
UPPER_BACK_LEFT = 5
UPPER_BACK_RIGHT = 6
BACK = 7
DEFAULT = 8


CAMERA_POSE_POSITION: List[Vector3] = [
Vector3([3.5, 0.0, 0.0]),
Vector3([0.0, 0.0, 2.5]),
Vector3([0.0, 0.0, -2.5]),
Vector3([-2.75, -1.0, 1.25]),
Vector3([-2.5, 0.0, 2.5]),
Vector3([-2.0, 2.0, -2.0]),
Vector3([-2.0, 2.0, 2.0]),
Vector3([-4.0, 0.0, 0.0]),
Vector3([-3.5, 0.0, 0.0])
]


def pairwise(it: Iterable, size: int) -> Generator:
it = iter(it)
while True:
try:
@@ -18,7 +54,7 @@ def pairwise(it, size: int):
return


def vec4wise(it):
def vec4wise(it: Iterable) -> Generator:
it = iter(it)
while True:
try:
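
`definitions.py` gains more than annotations: a `ProcessRenderMode` flag set, a `CameraPose` enum paired with a parallel list of `Vector3` positions, and typed generator helpers. A hypothetical usage sketch; the grouping behavior of `pairwise` is assumed from its signature, since its body is truncated above:

```python
from definitions import CAMERA_POSE_POSITION, CameraPose, pairwise

# An IntEnum member can index straight into the parallel position list:
front = CAMERA_POSE_POSITION[CameraPose.FRONT]  # Vector3([3.5, 0.0, 0.0])

# Presumably yields successive groups of `size` items from the iterable:
for group in pairwise([1, 2, 3, 4, 5, 6], 2):
    print(group)
```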
Empty file added evaluation/__init__.py
Empty file.