20230116 A Transform to ignore artefact points #52

Merged (10 commits, Feb 7, 2023)
29 changes: 13 additions & 16 deletions .github/workflows/cicd.yaml
@@ -36,42 +36,39 @@ jobs:
pytest -rA -v
--ignore=actions-runner


# Always run with --ipc=host and --shm-size=2gb (at least) to enable sufficient shared memory when predicting on large data
# predict.subtile_overlap specifies overlap between adjacent samples (in meters).
- name: Example inference run via Docker with inference-time subtile overlap to smooth out results.
# IMPORTANT: Always run images with --ipc=host and --shm-size=2gb (at least) to enable
# sufficient shared memory when predicting on large files.
- name: Example inference run via Docker with default config and checkpoint
run: >
docker run
-v /var/data/cicd/CICD_github_assets/myria3d_V3.2.0/inputs/:/inputs/
-v /var/data/cicd/CICD_github_assets/myria3d_V3.2.0/outputs/:/outputs/
-v /var/data/cicd/CICD_github_assets/myria3d_V3.3.0/inputs/:/inputs/
-v /var/data/cicd/CICD_github_assets/myria3d_V3.3.0/outputs/:/outputs/
--ipc=host
--shm-size=2gb
myria3d
python run.py
--config-path /inputs/
--config-name proto151_V2.0_epoch_100_Myria3DV3.1.0_predict_config_V3.2.0
predict.ckpt_path=/inputs/proto151_V2.0_epoch_100_Myria3DV3.1.0.ckpt
predict.src_las=/inputs/792000_6272000_subset_buildings.las
predict.output_dir=/outputs/
predict.subtile_overlap=25
datamodule.batch_size=10
predict.interpolator.probas_to_save=[building,unclassified]
task.task_name=predict

- name: Example inference run via Docker with default config and checkpoint
# predict.subtile_overlap specifies overlap between adjacent samples (in meters).
- name: Example inference run via Docker with inference-time subtile overlap to smooth out results.
run: >
docker run
-v /var/data/cicd/CICD_github_assets/myria3d_V3.2.0/inputs/:/inputs/
-v /var/data/cicd/CICD_github_assets/myria3d_V3.2.0/outputs/:/outputs/
-v /var/data/cicd/CICD_github_assets/myria3d_V3.3.0/inputs/:/inputs/
-v /var/data/cicd/CICD_github_assets/myria3d_V3.3.0/outputs/:/outputs/
--ipc=host
--shm-size=2gb
myria3d
python run.py
--config-path /inputs/
--config-name proto151_V2.0_epoch_100_Myria3DV3.1.0_predict_config_V3.3.0
predict.ckpt_path=/inputs/proto151_V2.0_epoch_100_Myria3DV3.1.0.ckpt
predict.src_las=/inputs/792000_6272000_subset_buildings.las
predict.output_dir=/outputs/
predict.subtile_overlap=25
datamodule.batch_size=10
predict.interpolator.probas_to_save=[building,unclassified]
predict.interpolator.probas_to_save=[building,ground]
task.task_name=predict

- name: Check code neatness (linter)
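For context on the `predict.subtile_overlap` override used in the second run above: it sets the overlap, in meters, between adjacent inference samples, so points near subtile borders are predicted more than once and the seams can be smoothed. A minimal sketch of the geometry, with a hypothetical 50 m subtile width (not a value taken from this PR):

```python
# Illustrative sketch only: an overlap shrinks the stride between
# adjacent subtile origins. The tile and subtile widths are hypothetical.
tile_extent = 1000      # hypothetical tile width in meters
subtile_width = 50      # hypothetical sample width in meters
subtile_overlap = 25    # as in predict.subtile_overlap=25 above

stride = subtile_width - subtile_overlap
origins = list(range(0, tile_extent, stride))
# With a 25 m overlap, each point falls inside two subtiles, so its
# predicted probabilities can be averaged to smooth out tile seams.
print(origins[:4])  # [0, 25, 50, 75]
```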
2 changes: 1 addition & 1 deletion configs/config.yaml
@@ -29,7 +29,7 @@ defaults:
- datamodule: hdf5_datamodule.yaml
- dataset_description: 20220607_151_dalles_proto.yaml # describes input features and classes
- callbacks: default.yaml # set this to null if you don't want to use callbacks
- model: pyg_randla_net_model.yaml # other option is pyg_randla_net_model
- model: pyg_randla_net_model.yaml

- logger: comet # set logger here or use command line (e.g. `python run.py logger=wandb`)
- task: default.yaml
2 changes: 1 addition & 1 deletion configs/datamodule/transforms/default.yaml
@@ -1,5 +1,5 @@
defaults:
- preparations: default.yaml
- preparations: points_budget.yaml
- augmentations: none.yaml
- normalizations: default.yaml

@@ -1,12 +1,16 @@
# default preparations with grid sampling and random sampling.

train:

TargetTransform:
_target_: myria3d.pctl.transforms.transforms.TargetTransform
_args_:
- ${dataset_description.classification_preprocessing_dict}
- ${dataset_description.classification_dict}

DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass

GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
@@ -23,6 +27,10 @@ train:
_target_: torch_geometric.transforms.Center

eval:

CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos

TargetTransform:
_target_: myria3d.pctl.transforms.transforms.TargetTransform
_args_:
@@ -32,8 +40,8 @@ eval:
CopyFullPreparedTargets:
_target_: myria3d.pctl.transforms.transforms.CopyFullPreparedTargets

CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos
DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass

GridSampling:
_target_: torch_geometric.transforms.GridSampling
@@ -55,9 +63,13 @@ eval:
_target_: torch_geometric.transforms.Center

predict:

CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos

DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass

GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
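The `DropPointsByClass` transform added to each pipeline above is the heart of this PR. A minimal sketch of what such a transform could look like, assuming (not verified against myria3d's source) that artefact points carry the sentinel class 65, matching the `ignore_index: 65` convention introduced in the criterion configs below:

```python
import torch
from torch_geometric.data import Data


class DropPointsByClass:
    """Sketch of a transform that filters out artefact points.

    Assumption: artefacts are marked with the sentinel class 65, per the
    `ignore_index: 65` convention used by the loss in this PR.
    """

    ARTEFACT_CLASS = 65

    def __call__(self, data: Data) -> Data:
        if getattr(data, "y", None) is None:
            return data  # nothing to filter on (e.g. unlabeled cloud)
        keep = data.y != self.ARTEFACT_CLASS
        data.pos = data.pos[keep]
        data.y = data.y[keep]
        if getattr(data, "x", None) is not None:
            data.x = data.x[keep]
        return data
```

Note that the pipelines also copy the full positions (`CopyFullPos`), so dropped artefact points are only excluded from the network's input; per the dataset_description comment below, they are still present in the final output cloud, onto which predictions are interpolated.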
@@ -1,12 +1,16 @@
# default preparations with grid sampling and random sampling.

train:

TargetTransform:
_target_: myria3d.pctl.transforms.transforms.TargetTransform
_args_:
- ${dataset_description.classification_preprocessing_dict}
- ${dataset_description.classification_dict}

DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass

GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
@@ -26,18 +30,22 @@ train:
_target_: torch_geometric.transforms.Center

eval:

TargetTransform:
_target_: myria3d.pctl.transforms.transforms.TargetTransform
_args_:
- ${dataset_description.classification_preprocessing_dict}
- ${dataset_description.classification_dict}

CopyFullPreparedTargets:
_target_: myria3d.pctl.transforms.transforms.CopyFullPreparedTargets
DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass

CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos

CopyFullPreparedTargets:
_target_: myria3d.pctl.transforms.transforms.CopyFullPreparedTargets

GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
@@ -53,14 +61,17 @@ eval:
_args_:
- 40000

# For interpolation
CopySampledPos:
_target_: myria3d.pctl.transforms.transforms.CopySampledPos

Center:
_target_: torch_geometric.transforms.Center

predict:

DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass

CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos

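This second preparations file (presumably the new `points_budget.yaml` selected in the transforms defaults above) replaces a fixed sample size with a 300 to 40,000 point budget. A rough sketch of what `MinimumNumNodes` and `MaximumNumNodes` could do, assuming padding by duplication and random subsampling; the real implementations may differ:

```python
import torch
from torch_geometric.data import Data


def _select(data: Data, idx: torch.Tensor) -> Data:
    # Index every per-point tensor consistently (positions, features, targets).
    for key in ("pos", "x", "y"):
        val = getattr(data, key, None)
        if val is not None:
            setattr(data, key, val[idx])
    return data


class MinimumNumNodes:
    """Sketch (assumed): pad clouds below `num` points by duplicating
    randomly chosen points, so very sparse samples remain batchable."""

    def __init__(self, num: int):
        self.num = num

    def __call__(self, data: Data) -> Data:
        n = data.pos.shape[0]
        if n >= self.num:
            return data
        extra = torch.randint(0, n, (self.num - n,))
        return _select(data, torch.cat([torch.arange(n), extra]))


class MaximumNumNodes:
    """Sketch (assumed): randomly subsample clouds above `num` points,
    capping the memory footprint of a single sample."""

    def __init__(self, num: int):
        self.num = num

    def __call__(self, data: Data) -> Data:
        n = data.pos.shape[0]
        if n <= self.num:
            return data
        return _select(data, torch.randperm(n)[: self.num])
```

Compared to the previous `FixedPoints` sampling to exactly 12,500 points (see the old default_config.yml below), a budget keeps dense clouds intact up to 40,000 points and only pads very sparse ones.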
9 changes: 3 additions & 6 deletions configs/dataset_description/20220607_151_dalles_proto.yaml
@@ -5,12 +5,9 @@ _convert_: all # For omegaconf struct to be converted to python dictionaries
# 160: antenna -> lasting_above
# 161: wind_turbines -> lasting_above
# 162: pylon -> lasting_above

# Expected classification dict:
# classification_preprocessing_dict: {3: 5, 4: 5, 64:1, 65:1, 160: 64, 161: 64, 162: 64}

# Additionally, artefacts as well as synthetic points (65, 66) are set to "unclassified"
classification_preprocessing_dict: {3: 5, 4: 5, 160: 64, 161: 64, 162: 64, 0: 1, 7: 1, 46: 1, 47: 1, 48: 1, 49: 1, 50: 1, 51: 1, 52: 1, 53: 1, 54: 1, 55: 1, 56: 1, 57: 1, 58: 1, 64: 1, 65: 1, 66: 1, 67: 1, 77: 1, 155: 1, 204: 1}
# 65: noise --> -1 (to ignore them in the inference process, but they will still be included in the final output cloud).
# Some trash classes were left in this dataset. We do not drop them (i.e. map them to -1) to avoid unintended conflicts in production.
classification_preprocessing_dict: {3: 5, 4: 5, 160: 64, 161: 64, 162: 64, 0: 1, 7: 1, 46: 1, 47: 1, 48: 1, 49: 1, 50: 1, 51: 1, 52: 1, 53: 1, 54: 1, 55: 1, 56: 1, 57: 1, 58: 1, 64: 1, 66: 1, 67: 1, 77: 1, 155: 1, 204: 1}

# classification_dict = {code_int: name_str, ...} and MUST be sorted (increasing order).
classification_dict: {1: "unclassified", 2: "ground", 5: vegetation, 6: "building", 9: water, 17: bridge, 64: lasting_above}
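To make the remapping semantics concrete, here is an illustrative sketch of applying a `classification_preprocessing_dict` to raw classification codes (the actual remapping is done by `TargetTransform`, which receives this dict; the mapping below is truncated):

```python
import numpy as np

# Truncated excerpt of the mapping above, for illustration only.
classification_preprocessing_dict = {3: 5, 4: 5, 160: 64, 161: 64, 162: 64, 0: 1}


def remap_classification(codes: np.ndarray, mapping: dict) -> np.ndarray:
    """Replace each source code by its target code; unmapped codes pass through."""
    out = codes.copy()
    for src, dst in mapping.items():
        out[codes == src] = dst
    return out


codes = np.array([3, 4, 161, 2, 65])
print(remap_classification(codes, classification_preprocessing_dict))
# -> [ 5  5 64  2 65]: note that 65 is no longer remapped to "unclassified";
#    it is now left for DropPointsByClass / ignore_index to handle.
```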
@@ -8,7 +8,7 @@ defaults:

logger:
comet:
experiment_name: "RandLaNetOverfit"
experiment_name: "RandLaNet-Overfit"

trainer:
min_epochs: 100
25 changes: 0 additions & 25 deletions configs/experiment/RandLaNet-PyG-Overfit-NoRS.yaml

This file was deleted.

@@ -1,6 +1,6 @@
# @package _global_
defaults:
- RandLaNet_base_run_FR_pyg_randla_net.yaml
- RandLaNet_base_run_FR.yaml

logger:
comet:
@@ -1,11 +1,12 @@
# @package _global_
defaults:
- RandLaNet_base_run_FR_pyg_randla_net.yaml
- RandLaNet_base_run_FR.yaml
- override /model/criterion: WeightedCrossEntropyLoss.yaml

logger:
comet:
experiment_name: "RandLaNet_base_run_FR_pyg_randla_net-SQRT-ICFW"
experiment_name: "RandLaNet_base_run_FR-SQRT-ICFW"

dataset_description:
# Sqrt(Inverse Frequency) of classes in default dataset (a.k.a. `151proto`).
class_weights: [0.19,0.08,0.08,0.36,1.13,3.11,2.05]
@@ -1,11 +1,11 @@
# @package _global_
defaults:
- RandLaNet_base_run_FR_pyg_randla_net.yaml
- override /datamodule/transforms/preparations: no_random_subsampling.yaml
- RandLaNet_base_run_FR.yaml
- override /datamodule/transforms/augmentations: light.yaml

logger:
comet:
experiment_name: "RandLaNet_base_run_FR_pyg_randla_net_NoRS-(BS10xMAX40000pts)"
experiment_name: "RandLaNet_base_run_FR-(BatchSize10xBudget(300pts-40000pts))"


# Smaller BS : 10 x 40 000 (max) == 400 000 pts i.e. previous budget of 32 x 12 500pts.
@@ -16,5 +16,5 @@ trainer:
num_sanity_val_steps: 2
min_epochs: 100
max_epochs: 150
accumulate_grad_batches: 3 # b/c larger clouds will not fit in memory with original BS.
accumulate_grad_batches: 3 # b/c larger clouds will not fit in memory with original Batch Size
# gpus: [1]
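A quick arithmetic check of the budget comment above, restating the config values in plain Python:

```python
# Points-per-batch budget, from the comment above:
new_budget = 10 * 40_000   # batch size 10 x max 40,000 pts = 400,000 pts
old_budget = 32 * 12_500   # previous: 32 x 12,500 pts      = 400,000 pts
assert new_budget == old_budget

# With accumulate_grad_batches=3, each optimizer step sees up to three
# batches, i.e. up to 1,200,000 points, while per-batch GPU memory stays
# bounded by the 400,000-point budget.
effective_points_per_step = 3 * new_budget  # 1_200_000
```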
3 changes: 2 additions & 1 deletion configs/model/criterion/CrossEntropyLoss.yaml
@@ -1,2 +1,3 @@
_target_: torch.nn.CrossEntropyLoss
label_smoothing: 0.0
label_smoothing: 0.0
ignore_index: 65 # artefacts are mapped to 65 by convention
1 change: 1 addition & 0 deletions configs/model/criterion/WeightedCrossEntropyLoss.yaml
@@ -1,5 +1,6 @@
_target_: torch.nn.CrossEntropyLoss
label_smoothing: 0.0
ignore_index: 65 # artefacts are mapped to 65 by convention
weight:
_target_: torch.FloatTensor
_args_:
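Both criterion configs now pass `ignore_index: 65` to `torch.nn.CrossEntropyLoss`, so any artefact point that reaches the loss contributes nothing to it or to the gradients. A small self-contained demonstration of this standard PyTorch behavior:

```python
import torch

criterion = torch.nn.CrossEntropyLoss(label_smoothing=0.0, ignore_index=65)

logits = torch.randn(4, 7, requires_grad=True)  # 4 points, 7 classes
targets = torch.tensor([0, 3, 65, 65])          # last two are artefacts

loss = criterion(logits, targets)   # averaged over the 2 valid points only
loss.backward()                     # ignored rows receive zero gradient
print(logits.grad[2:].abs().sum())  # tensor(0.)
```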
48 changes: 30 additions & 18 deletions docs/source/apidoc/default_config.yml
@@ -25,16 +25,20 @@ datamodule:
_args_:
- ${dataset_description.classification_preprocessing_dict}
- ${dataset_description.classification_dict}
DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass
GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
- 0.25
FixedPoints:
_target_: torch_geometric.transforms.FixedPoints
MinimumNumNodes:
_target_: myria3d.pctl.transforms.transforms.MinimumNumNodes
_args_:
- 12500
replace: false
allow_duplicates: true
- 300
MaximumNumNodes:
_target_: myria3d.pctl.transforms.transforms.MaximumNumNodes
_args_:
- 40000
Center:
_target_: torch_geometric.transforms.Center
eval:
@@ -43,37 +43,45 @@ datamodule:
_args_:
- ${dataset_description.classification_preprocessing_dict}
- ${dataset_description.classification_dict}
CopyFullPreparedTargets:
_target_: myria3d.pctl.transforms.transforms.CopyFullPreparedTargets
DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass
CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos
CopyFullPreparedTargets:
_target_: myria3d.pctl.transforms.transforms.CopyFullPreparedTargets
GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
- 0.25
FixedPoints:
_target_: torch_geometric.transforms.FixedPoints
MinimumNumNodes:
_target_: myria3d.pctl.transforms.transforms.MinimumNumNodes
_args_:
- 300
MaximumNumNodes:
_target_: myria3d.pctl.transforms.transforms.MaximumNumNodes
_args_:
- 12500
replace: false
allow_duplicates: true
- 40000
CopySampledPos:
_target_: myria3d.pctl.transforms.transforms.CopySampledPos
Center:
_target_: torch_geometric.transforms.Center
predict:
DropPointsByClass:
_target_: myria3d.pctl.transforms.transforms.DropPointsByClass
CopyFullPos:
_target_: myria3d.pctl.transforms.transforms.CopyFullPos
GridSampling:
_target_: torch_geometric.transforms.GridSampling
_args_:
- 0.25
FixedPoints:
_target_: torch_geometric.transforms.FixedPoints
MinimumNumNodes:
_target_: myria3d.pctl.transforms.transforms.MinimumNumNodes
_args_:
- 300
MaximumNumNodes:
_target_: myria3d.pctl.transforms.transforms.MaximumNumNodes
_args_:
- 12500
replace: false
allow_duplicates: true
- 40000
CopySampledPos:
_target_: myria3d.pctl.transforms.transforms.CopySampledPos
Center:
@@ -137,7 +149,6 @@ dataset_description:
57: 1
58: 1
64: 1
65: 1
66: 1
67: 1
77: 1
@@ -208,6 +219,7 @@ model:
criterion:
_target_: torch.nn.CrossEntropyLoss
label_smoothing: 0.0
ignore_index: 65
_target_: myria3d.models.model.Model
d_in: ${dataset_description.d_in}
num_classes: ${dataset_description.num_classes}
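The entries in this generated config follow Hydra's instantiation convention: `_target_` names a class and `_args_` gives its positional arguments. A hedged sketch of how such nodes become transform objects (myria3d's own loading code may differ in details):

```python
from hydra.utils import instantiate
from omegaconf import OmegaConf
from torch_geometric.transforms import Compose

# Two nodes copied from the predict pipeline above.
cfg = OmegaConf.create(
    """
    GridSampling:
      _target_: torch_geometric.transforms.GridSampling
      _args_: [0.25]
    Center:
      _target_: torch_geometric.transforms.Center
    """
)

# Each named node instantiates into a callable transform; composing them
# yields (part of) the predict-time preparation pipeline.
transforms = Compose([instantiate(node) for node in cfg.values()])
```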