Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[MPA] Class-incremental Learning for instance-segmentation #1142

Merged
merged 14 commits into from
Jun 29, 2022
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,281 @@
description: Configuration for an instance segmentation task
header: Configuration for an instance segmentation task
# Training-time hyper-parameters exposed to the platform UI.
# Each leaf parameter carries UI metadata (header, editability, bounds)
# alongside its default_value and current value.
learning_parameters:
  batch_size:
    affects_outcome_of: TRAINING
    default_value: 5
    description:
      The number of training samples seen in each iteration of training.
      Increasing this value improves training time and may make the training more
      stable. A larger batch size has higher memory requirements.
    editable: true
    header: Batch size
    max_value: 512
    min_value: 1
    type: INTEGER
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 5
    visible_in_ui: true
    warning:
      Increasing this value may cause the system to use more memory than available,
      potentially causing out of memory errors, please update with caution.
  description: Learning Parameters
  header: Learning Parameters
  learning_rate:
    affects_outcome_of: TRAINING
    default_value: 0.01
    description:
      Increasing this value will speed up training convergence but might
      make it unstable.
    editable: true
    header: Learning rate
    max_value: 0.1
    min_value: 1.0e-07
    type: FLOAT
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 0.01
    visible_in_ui: true
    warning: null
  learning_rate_warmup_iters:
    affects_outcome_of: TRAINING
    default_value: 100
    description: ""
    editable: true
    header: Number of iterations for learning rate warmup
    max_value: 10000
    min_value: 0
    type: INTEGER
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 100
    visible_in_ui: true
    warning: null
  num_checkpoints:
    affects_outcome_of: NONE
    default_value: 5
    description: ""
    editable: true
    header: Number of checkpoints that is done during the single training round
    max_value: 100
    min_value: 1
    type: INTEGER
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 5
    visible_in_ui: true
    warning: null
  num_iters:
    affects_outcome_of: TRAINING
    default_value: 1
    description:
      Increasing this value causes the results to be more robust but training
      time will be longer.
    editable: true
    header: Number of training iterations
    max_value: 100000
    min_value: 1
    type: INTEGER
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 1
    visible_in_ui: true
    warning: null
  num_workers:
    affects_outcome_of: NONE
    # NOTE(review): default_value is 5 but the shipped value below is 0 —
    # confirm the mismatch is intentional (0 means data loading runs in the
    # main training thread, per the description).
    default_value: 5
    description:
      Increasing this value might improve training speed however it might
      cause out of memory errors. If the number of workers is set to zero, data loading
      will happen in the main training thread.
    editable: true
    header: Number of cpu threads to use during batch generation
    max_value: 8
    min_value: 0
    type: INTEGER
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 0
    visible_in_ui: true
    warning: null
  type: PARAMETER_GROUP
  visible_in_ui: true
# Inference-time postprocessing parameters.
postprocessing:
  confidence_threshold:
    affects_outcome_of: INFERENCE
    default_value: 0.35
    description:
      This threshold only takes effect if the threshold is not set based
      on the result.
    editable: true
    header: Confidence threshold
    max_value: 1
    min_value: 0
    type: FLOAT
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    # Keep the shipped value in sync with default_value. The previous 0.01
    # (with "# value: 0.35" commented out right above it) was a debugging
    # leftover that would flood inference results with low-confidence instances.
    value: 0.35
    visible_in_ui: true
    warning: null
  description: Postprocessing
  header: Postprocessing
  result_based_confidence_threshold:
    affects_outcome_of: INFERENCE
    default_value: true
    description: Confidence threshold is derived from the results
    editable: true
    header: Result based confidence threshold
    type: BOOLEAN
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: true
    visible_in_ui: true
    warning: null
  type: PARAMETER_GROUP
  visible_in_ui: true
# Algorithm-backend (MPA) parameters.
algo_backend:
  description: parameters for algo backend
  header: Algo backend parameters
  train_type:
    affects_outcome_of: NONE
    default_value: Incremental
    # Fixed copy-pasted description: the old text ("Quantization preset that
    # defines quantization scheme") belonged to the POT preset parameter.
    description: Training scheme option that determines how to train the model
    editable: false
    enum_name: TrainType
    header: train type
    options:
      SemiSupervised: "SemiSupervised"
      SelfSupervised: "SelfSupervised"
      Incremental: "Incremental"
    type: SELECTABLE
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    # value must match default_value here: the parameter is not editable, so a
    # stray "SelfSupervised" value (debug leftover) would silently override the
    # intended class-incremental training scheme.
    value: Incremental
    visible_in_ui: true
    warning: null
  type: PARAMETER_GROUP
  visible_in_ui: true
type: CONFIGURABLE_PARAMETERS
visible_in_ui: true
# Post-training optimization (POT) parameters.
# Booleans normalized to lowercase true/false for consistency with the rest
# of the file (yamllint "truthy" rule); parsed values are unchanged.
pot_parameters:
  description: POT Parameters
  header: POT Parameters
  preset:
    affects_outcome_of: NONE
    default_value: Performance
    description: Quantization preset that defines quantization scheme
    editable: true
    enum_name: POTQuantizationPreset
    header: Preset
    options:
      MIXED: Mixed
      PERFORMANCE: Performance
    type: SELECTABLE
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: Performance
    visible_in_ui: true
    warning: null
  stat_subset_size:
    affects_outcome_of: NONE
    default_value: 300
    description: Number of data samples used for post-training optimization
    editable: true
    header: Number of data samples
    # Effectively unbounded (max int64).
    max_value: 9223372036854775807
    min_value: 1
    type: INTEGER
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 300
    visible_in_ui: true
    warning: null
  type: PARAMETER_GROUP
  visible_in_ui: true
# NNCF compression parameters.
# Booleans normalized to lowercase true/false for consistency with the rest
# of the file (yamllint "truthy" rule); parsed values are unchanged.
nncf_optimization:
  description: Optimization by NNCF
  header: Optimization by NNCF
  enable_quantization:
    affects_outcome_of: INFERENCE
    default_value: true
    description: Enable quantization algorithm
    editable: false
    header: Enable quantization algorithm
    type: BOOLEAN
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: true
    visible_in_ui: false
    warning: null
  enable_pruning:
    affects_outcome_of: INFERENCE
    default_value: false
    description: Enable filter pruning algorithm
    editable: true
    header: Enable filter pruning algorithm
    type: BOOLEAN
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: false
    visible_in_ui: true
    warning: null
  maximal_accuracy_degradation:
    affects_outcome_of: NONE
    default_value: 1.0
    description: The maximal allowed accuracy metric drop in absolute values
    editable: true
    header: Maximum accuracy degradation
    max_value: 100.0
    min_value: 0.0
    type: FLOAT
    ui_rules:
      action: DISABLE_EDITING
      operator: AND
      rules: []
      type: UI_RULES
    value: 1.0
    visible_in_ui: true
    warning: null
  type: PARAMETER_GROUP
  visible_in_ui: true
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
# mmdetection-style dataset configuration for COCO instance segmentation.
dataset_type = 'CocoDataset'
img_size = (1024, 1024)

# Caffe-style normalization: BGR channel order (to_rgb=False), per-channel
# mean subtraction only (std of 1.0 leaves the scale unchanged).
img_norm_cfg = dict(
    mean=(103.53, 116.28, 123.675), std=(1.0, 1.0, 1.0), to_rgb=False)

# Training pipeline: load image with boxes and masks (bitmap masks,
# poly2mask=False keeps polygon format), resize to a fixed 1024x1024
# (keep_ratio=False stretches), random horizontal flip, normalize,
# pad to a stride-32 multiple, then bundle tensors for the model.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True,
         with_mask=True, poly2mask=False),
    dict(type='Resize', img_scale=img_size, keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]

# Test pipeline: single scale, no flip augmentation.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_size,
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]

# Double-underscore prefix marks these as file-local helper variables
# by convention in these recipe configs.
# NOTE(review): __dataset_type duplicates the dataset_type defined at the top
# of the file — confirm both names are needed by downstream config merging.
__dataset_type = 'CocoDataset'
__data_root = 'data/coco/'

__samples_per_gpu = 4

data = dict(
    samples_per_gpu=__samples_per_gpu,
    workers_per_gpu=2,
    train=dict(
        type=__dataset_type,
        ann_file=__data_root + 'annotations/instances_train2017.json',
        img_prefix=__data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=__dataset_type,
        ann_file=__data_root + 'annotations/instances_val2017.json',
        img_prefix=__data_root + 'val2017/',
        test_mode=True,
        pipeline=test_pipeline),
    test=dict(
        type=__dataset_type,
        ann_file=__data_root + 'annotations/instances_val2017.json',
        img_prefix=__data_root + 'val2017/',
        test_mode=True,
        pipeline=test_pipeline)
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
# Hyper-parameter optimization (HPO) settings: optimize mAP via
# sequential model-based optimization (smbo).
metric: mAP
search_algorithm: smbo
# NOTE(review): unquoted None parses as the STRING "None", not YAML null.
# Confirm the HPO reader expects the literal string; otherwise write `null`.
early_stop: None
hp_space:
  learning_parameters.learning_rate:
    param_type: quniform
    # [lower bound, upper bound, quantization step]
    range:
      - 0.001
      - 0.1
      - 0.001
  learning_parameters.batch_size:
    param_type: qloguniform
    # [lower bound, upper bound, quantization step], sampled on a log scale
    range:
      - 2
      - 4
      - 2
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Model definition by composition: inherit the EfficientNet-B2B backbone
# config and the Mask R-CNN detector recipe from the MPA submodule.
_base_ = [
    '../../../submodule/samples/cfgs/models/backbones/efficientnet_b2b.yaml',
    '../../../submodule/recipes/stages/_base_/models/detectors/efficientnetb2b_maskrcnn.custom.py'
]
Loading