Merge pull request #91 from IGNF/pyproject_toml
Migrate to Pyproject.toml and unify length of lines
CharlesGaydon authored Oct 12, 2023
2 parents 8103d80 + 652f894 commit 2daf8b9
Showing 20 changed files with 144 additions and 164 deletions.
10 changes: 10 additions & 0 deletions .flake8
@@ -0,0 +1,10 @@
[flake8]
max_line_length = 99
show_source = True
format = pylint
extend-ignore = E203,E501
exclude =
.git
__pycache__
logs/*
.vscode/*
2 changes: 1 addition & 1 deletion .github/workflows/cicd.yaml
@@ -71,7 +71,7 @@ jobs:
task.task_name=predict
- name: Check code neatness (linter)
run: docker run myria3d flake8
run: docker run myria3d python -m flake8

# Everything ran so we tag the valid docker image to keep it
# This happens for push events, which are in particular
6 changes: 6 additions & 0 deletions CHANGELOG.md
@@ -1,5 +1,11 @@
# main

### 3.4.11
- Unification of max length of lines (99) by applying black everywhere.

### 3.4.10
- Migrate from setup.cfg to pyproject.toml and .flake8.

### 3.4.9
- Support edge-case where source LAZ has no valid subtile (i.e. pre_filter=False for all candidate subtiles) during hdf5 creation

1 change: 0 additions & 1 deletion myria3d/callbacks/logging_callbacks.py
@@ -127,7 +127,6 @@ def __init__(
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
) -> None:

self.class_of_interest_idx = class_of_interest_idx

super().__init__(
13 changes: 10 additions & 3 deletions myria3d/models/interpolation.py
@@ -47,7 +47,10 @@ def __init__(
self.probas_to_save = probas_to_save

# Maps ascending index (0,1,2,...) back to conventional LAS classification codes (6=buildings, etc.)
self.reverse_mapper: Dict[int, int] = {class_index: class_code for class_index, class_code in enumerate(classification_dict.keys())}
self.reverse_mapper: Dict[int, int] = {
class_index: class_code
for class_index, class_code in enumerate(classification_dict.keys())
}

self.logits: List[torch.Tensor] = []
self.idx_in_full_cloud_list: List[np.ndarray] = []
@@ -70,7 +73,9 @@ def load_full_las_for_update(self, src_las: str) -> np.ndarray:
# Copy from Classification to preserve data type
# Also preserves values of artefacts.
if self.predicted_classification_channel != "Classification":
pipeline |= pdal.Filter.ferry(dimensions=f"Classification=>{self.predicted_classification_channel}")
pipeline |= pdal.Filter.ferry(
dimensions=f"Classification=>{self.predicted_classification_channel}"
)

if self.entropy_channel:
pipeline |= pdal.Filter.ferry(dimensions=f"=>{self.entropy_channel}")
@@ -166,7 +171,9 @@ def reduce_predictions_and_save(self, raw_path: str, output_dir: str) -> str:
out_f = os.path.abspath(out_f)
log.info(f"Updated LAS ({basename}) will be saved to: \n {output_dir}\n")
log.info("Saving...")
pipeline = pdal.Writer.las(filename=out_f, extra_dims="all", minor_version=4, dataformat_id=8).pipeline(las)
pipeline = pdal.Writer.las(
filename=out_f, extra_dims="all", minor_version=4, dataformat_id=8
).pipeline(las)
pipeline.execute()
log.info("Saved.")

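For illustration, a minimal sketch of the reverse mapping built in the `interpolation.py` `__init__` above, assuming a hypothetical two-class `classification_dict` (the real dictionary comes from the project configuration):

```python
# Hypothetical classification_dict: LAS class codes -> class names.
classification_dict = {2: "ground", 6: "building"}

# Same comprehension as in the diff: ascending model index -> LAS code.
reverse_mapper = {
    class_index: class_code
    for class_index, class_code in enumerate(classification_dict.keys())
}
print(reverse_mapper)  # {0: 2, 1: 6}
```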
27 changes: 6 additions & 21 deletions myria3d/models/model.py
@@ -62,9 +62,7 @@ def __init__(self, **kwargs):
# it also allows to access params with 'self.hparams' attribute
self.save_hyperparameters()

neural_net_class = get_neural_net_class(
self.hparams.neural_net_class_name
)
neural_net_class = get_neural_net_class(self.hparams.neural_net_class_name)
self.model = neural_net_class(**self.hparams.neural_net_hparams)

self.softmax = nn.Softmax(dim=1)
@@ -100,9 +98,7 @@ def forward(self, batch: Batch) -> torch.Tensor:
# During evaluation on test data and inference, we interpolate predictions back to original positions
# KNN is way faster on CPU than on GPU by a 3 to 4 factor.
logits = logits.cpu()
batch_y = self._get_batch_tensor_by_enumeration(
batch.idx_in_original_cloud
)
batch_y = self._get_batch_tensor_by_enumeration(batch.idx_in_original_cloud)
logits = knn_interpolate(
logits.cpu(),
batch.copies["pos_sampled_copy"].cpu(),
@@ -139,9 +135,7 @@ def training_step(self, batch: Batch, batch_idx: int) -> dict:
targets, logits = self.forward(batch)
self.criterion = self.criterion.to(logits.device)
loss = self.criterion(logits, targets)
self.log(
"train/loss", loss, on_step=True, on_epoch=True, prog_bar=False
)
self.log("train/loss", loss, on_step=True, on_epoch=True, prog_bar=False)

with torch.no_grad():
preds = torch.argmax(logits.detach(), dim=1)
@@ -177,9 +171,7 @@ def validation_step(self, batch: Batch, batch_idx: int) -> dict:
preds = torch.argmax(logits.detach(), dim=1)
self.val_iou = self.val_iou.to(preds.device)
self.val_iou(preds, targets)
self.log(
"val/iou", self.val_iou, on_step=True, on_epoch=True, prog_bar=True
)
self.log("val/iou", self.val_iou, on_step=True, on_epoch=True, prog_bar=True)
return {"loss": loss, "logits": logits, "targets": targets}

def on_validation_epoch_end(self) -> None:
@@ -257,15 +249,8 @@ def configure_optimizers(self):
"monitor": self.hparams.monitor,
}

def _get_batch_tensor_by_enumeration(
self, pos_x: torch.Tensor
) -> torch.Tensor:
def _get_batch_tensor_by_enumeration(self, pos_x: torch.Tensor) -> torch.Tensor:
"""Get batch tensor (e.g. [0,0,1,1,2,2,...,B-1,B-1] )
from shape B,N,... to shape (N,...).
"""
return torch.cat(
[
torch.full((len(sample_pos),), i)
for i, sample_pos in enumerate(pos_x)
]
)
return torch.cat([torch.full((len(sample_pos),), i) for i, sample_pos in enumerate(pos_x)])
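As a quick illustration of what `_get_batch_tensor_by_enumeration` returns, a sketch with hypothetical inputs (not part of the commit):

```python
import torch

# Hypothetical per-sample index arrays with 2 and 3 points respectively.
pos_x = [torch.tensor([10, 11]), torch.tensor([20, 21, 22])]

# Same one-liner as above: one batch id per point, concatenated.
batch = torch.cat(
    [torch.full((len(sample_pos),), i) for i, sample_pos in enumerate(pos_x)]
)
print(batch)  # tensor([0, 0, 1, 1, 1])
```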
51 changes: 12 additions & 39 deletions myria3d/models/modules/pyg_randla_net.py
@@ -49,9 +49,7 @@ def __init__(
self.fp3 = FPModule(1, SharedMLP([256 + 128, 128]))
self.fp2 = FPModule(1, SharedMLP([128 + 32, 32]))
self.fp1 = FPModule(1, SharedMLP([32 + 32, d_bottleneck]))
self.mlp_classif = SharedMLP(
[d_bottleneck, 64, 32], dropout=[0.0, 0.5]
)
self.mlp_classif = SharedMLP([d_bottleneck, 64, 32], dropout=[0.0, 0.5])
self.fc_classif = Linear(32, num_classes)

def forward(self, x, pos, batch, ptr):
@@ -117,19 +115,15 @@ class LocalFeatureAggregation(MessagePassing):
def __init__(self, channels):
super().__init__(aggr="add")
self.mlp_encoder = SharedMLP([10, channels // 2])
self.mlp_attention = SharedMLP(
[channels, channels], bias=False, act=None, norm=None
)
self.mlp_attention = SharedMLP([channels, channels], bias=False, act=None, norm=None)
self.mlp_post_attention = SharedMLP([channels, channels])

def forward(self, edge_index, x, pos):
out = self.propagate(edge_index, x=x, pos=pos) # N, d_out
out = self.mlp_post_attention(out) # N, d_out
return out

def message(
self, x_j: Tensor, pos_i: Tensor, pos_j: Tensor, index: Tensor
) -> Tensor:
def message(self, x_j: Tensor, pos_i: Tensor, pos_j: Tensor, index: Tensor) -> Tensor:
"""Local Spatial Encoding (locSE) and attentive pooling of features.
Args:
@@ -146,13 +140,9 @@ def message(
# Encode local neighborhood structural information
pos_diff = pos_j - pos_i
distance = torch.sqrt((pos_diff * pos_diff).sum(1, keepdim=True))
relative_infos = torch.cat(
[pos_i, pos_j, pos_diff, distance], dim=1
) # N * K, d
relative_infos = torch.cat([pos_i, pos_j, pos_diff, distance], dim=1) # N * K, d
local_spatial_encoding = self.mlp_encoder(relative_infos) # N * K, d
local_features = torch.cat(
[x_j, local_spatial_encoding], dim=1
) # N * K, 2d
local_features = torch.cat([x_j, local_spatial_encoding], dim=1) # N * K, 2d

# Attention will weight the different features of x
# along the neighborhood dimension.
@@ -199,9 +189,7 @@ def forward(self, x, pos, batch):
return x, pos, batch


def decimation_indices(
ptr: LongTensor, decimation_factor: Number
) -> Tuple[Tensor, LongTensor]:
def decimation_indices(ptr: LongTensor, decimation_factor: Number) -> Tuple[Tensor, LongTensor]:
"""Get indices which downsample each point cloud by a decimation factor.
Decimation happens separately for each cloud to prevent emptying smaller
@@ -225,21 +213,12 @@ def decimation_indices(

batch_size = ptr.size(0) - 1
bincount = ptr[1:] - ptr[:-1]
decimated_bincount = torch.div(
bincount, decimation_factor, rounding_mode="floor"
)
decimated_bincount = torch.div(bincount, decimation_factor, rounding_mode="floor")
# Decimation should not empty clouds completely.
decimated_bincount = torch.max(
torch.ones_like(decimated_bincount), decimated_bincount
)
decimated_bincount = torch.max(torch.ones_like(decimated_bincount), decimated_bincount)
idx_decim = torch.cat(
[
(
ptr[i]
+ torch.randperm(bincount[i], device=ptr.device)[
: decimated_bincount[i]
]
)
(ptr[i] + torch.randperm(bincount[i], device=ptr.device)[: decimated_bincount[i]])
for i in range(batch_size)
],
dim=0,
@@ -301,15 +280,9 @@ def main():
transform=transform,
pre_transform=pre_transform,
)
test_dataset = ShapeNet(
path, category, split="test", pre_transform=pre_transform
)
train_loader = DataLoader(
train_dataset, batch_size=12, shuffle=True, num_workers=6
)
test_loader = DataLoader(
test_dataset, batch_size=12, shuffle=False, num_workers=6
)
test_dataset = ShapeNet(path, category, split="test", pre_transform=pre_transform)
train_loader = DataLoader(train_dataset, batch_size=12, shuffle=True, num_workers=6)
test_loader = DataLoader(test_dataset, batch_size=12, shuffle=False, num_workers=6)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = PyGRandLANet(3, category_num_classes).to(device)
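To make the per-cloud decimation in `decimation_indices` concrete, a small worked example with a hypothetical `ptr` (two clouds of 100 and 150 points, decimation factor 4):

```python
import torch

# Hypothetical ptr delimiting two point clouds of 100 and 150 points.
ptr = torch.tensor([0, 100, 250])
decimation_factor = 4

bincount = ptr[1:] - ptr[:-1]  # tensor([100, 150])
decimated_bincount = torch.div(bincount, decimation_factor, rounding_mode="floor")
# As in the function above, never empty a cloud completely.
decimated_bincount = torch.max(torch.ones_like(decimated_bincount), decimated_bincount)
print(decimated_bincount)  # tensor([25, 37])
```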
18 changes: 14 additions & 4 deletions myria3d/pctl/datamodule/hdf5.py
@@ -64,13 +64,19 @@ def __init__(
t = transforms
self.preparation_train_transform: TRANSFORMS_LIST = t.get("preparations_train_list", [])
self.preparation_eval_transform: TRANSFORMS_LIST = t.get("preparations_eval_list", [])
self.preparation_predict_transform: TRANSFORMS_LIST = t.get("preparations_predict_list", [])
self.preparation_predict_transform: TRANSFORMS_LIST = t.get(
"preparations_predict_list", []
)
self.augmentation_transform: TRANSFORMS_LIST = t.get("augmentations_list", [])
self.normalization_transform: TRANSFORMS_LIST = t.get("normalizations_list", [])

@property
def train_transform(self) -> CustomCompose:
return CustomCompose(self.preparation_train_transform + self.normalization_transform + self.augmentation_transform)
return CustomCompose(
self.preparation_train_transform
+ self.normalization_transform
+ self.augmentation_transform
)

@property
def eval_transform(self) -> CustomCompose:
@@ -85,9 +91,13 @@ def prepare_data(self, stage: Optional[str] = None):

if stage in ["fit", "test"] or stage is None:
if self.split_csv_path and self.data_dir:
las_paths_by_split_dict = get_las_paths_by_split_dict(self.data_dir, self.split_csv_path)
las_paths_by_split_dict = get_las_paths_by_split_dict(
self.data_dir, self.split_csv_path
)
else:
log.warning("cfg.data_dir and cfg.split_csv_path are both null. Precomputed HDF5 dataset is used.")
log.warning(
"cfg.data_dir and cfg.split_csv_path are both null. Precomputed HDF5 dataset is used."
)
las_paths_by_split_dict = None
# Create the dataset in prepare_data, so that it is done on a single GPU.
self.las_paths_by_split_dict = las_paths_by_split_dict
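The `train_transform` property above simply chains the three transform lists: preparations first, then normalizations, then augmentations. A minimal sketch of that ordering, with plain functions standing in for the real transforms and `CustomCompose` simplified to sequential application (all names below are hypothetical stand-ins):

```python
from typing import Callable, List


def compose(transforms: List[Callable]) -> Callable:
    # Simplified stand-in for CustomCompose: apply each transform in order.
    def _composed(data):
        for transform in transforms:
            data = transform(data)
        return data

    return _composed


# Hypothetical transforms dict, mirroring the keys read in the diff above.
t = {
    "preparations_train_list": [lambda d: d + ["prepared"]],
    "normalizations_list": [lambda d: d + ["normalized"]],
    "augmentations_list": [lambda d: d + ["augmented"]],
}

train_transform = compose(
    t.get("preparations_train_list", [])
    + t.get("normalizations_list", [])
    + t.get("augmentations_list", [])
)
print(train_transform([]))  # ['prepared', 'normalized', 'augmented']
```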
18 changes: 4 additions & 14 deletions myria3d/pctl/dataset/copc.py
@@ -36,20 +36,15 @@ def __init__(
data_dir=None,
add_original_index: bool = True,
):

if len(tiles_basenames) == 0:
raise KeyError("Given list of files is empty")

processed_basenames = [
b.replace(".las", ".copc.laz") for b in tiles_basenames
]
processed_basenames = [b.replace(".las", ".copc.laz") for b in tiles_basenames]
self.copc_paths = [osp.join(copc_dir, b) for b in processed_basenames]

if data_dir:
# CONVERSION TO COPC IF NEEDED
raw_paths = [
find_file_in_dir(data_dir, b) for b in tiles_basenames
]
raw_paths = [find_file_in_dir(data_dir, b) for b in tiles_basenames]
try:
# IndexError if no file is found in dir.
[find_file_in_dir(copc_dir, b) for b in processed_basenames]
@@ -75,7 +70,6 @@ def load_points(idx) -> np.ndarray:
raise NotImplementedError()

def __getitem__(self, idx):

points = self.load_points(idx)

# filter if empty
@@ -96,9 +90,7 @@ def __getitem__(self, idx):
data = self.transform(data)

# filter if empty
if data is None or (
self.pre_filter is not None and self.pre_filter(data)
):
if data is None or (self.pre_filter is not None and self.pre_filter(data)):
return None

return data
@@ -245,9 +237,7 @@ def __init__(
)


def write_las_to_copc_laz(
las_path: str, copc_laz_path: str, add_original_index: bool = False
):
def write_las_to_copc_laz(las_path: str, copc_laz_path: str, add_original_index: bool = False):
"""Convert from LAS to COPC, for optimized later loading.
Resulting data starts at 0 on x and y.