Skip to content

Commit

Permalink
Revise data model for MethodConfig
Browse files Browse the repository at this point in the history
  • Loading branch information
cmutel committed Oct 27, 2023
1 parent 9f6fb67 commit 764d74a
Show file tree
Hide file tree
Showing 5 changed files with 194 additions and 45 deletions.
4 changes: 1 addition & 3 deletions bw2calc/least_squares.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,7 @@ def load_lci_data(self) -> None:

def solve_linear_system(self, solver=lsmr) -> np.ndarray:
if self.technosphere_matrix.shape[0] == self.technosphere_matrix.shape[1]:
warnings.warn(
"Don't use LeastSquaresLCA for square matrices", EfficiencyWarning
)
warnings.warn("Don't use LeastSquaresLCA for square matrices", EfficiencyWarning)
self.solver_results = solver(self.technosphere_matrix, self.demand_array)
if self.solver_results[1] not in {1, 2}:
warnings.warn(
Expand Down
71 changes: 45 additions & 26 deletions bw2calc/method_config.py
Original file line number Diff line number Diff line change
@@ -1,56 +1,75 @@
from typing import Iterable, Optional
from typing import Optional, Sequence

from pydantic import BaseModel, model_validator


class MethodConfig(BaseModel):
    """Data model tying together impact categories, normalizations, and weightings.

    All identifiers are tuples of strings, e.g. ``("foo", "a")``.

    Attributes:
        impact_categories: Identifiers of the impact categories to be calculated.
        normalizations: Maps each normalization identifier to the list of impact
            category identifiers it normalizes.
        weightings: Maps each weighting identifier to the list of normalization
            or impact category identifiers it weights.

    The validators below enforce two invariants: every cross-reference must
    resolve to a declared identifier, and the three identifier namespaces must
    not collide with one another.
    """

    impact_categories: Sequence[tuple[str, ...]]
    normalizations: Optional[dict[tuple[str, ...], list[tuple[str, ...]]]] = None
    weightings: Optional[dict[tuple[str, ...], list[tuple[str, ...]]]] = None

    @model_validator(mode="after")
    def normalizations_reference_impact_categories(self):
        """Raise ``ValueError`` if a normalization refers to an undeclared impact category."""
        if not self.normalizations:
            return self
        references = {ic for targets in self.normalizations.values() for ic in targets}
        difference = references.difference(set(self.impact_categories))
        if difference:
            raise ValueError(
                f"Impact categories in `normalizations` not present in `impact_categories`: {difference}"
            )
        return self

    @model_validator(mode="after")
    def normalizations_unique_from_impact_categories(self):
        """Raise ``ValueError`` if a normalization identifier reuses an impact category identifier.

        Compares the normalization keys against *all* declared impact
        categories — not only those referenced by some normalization — so an
        unreferenced impact category identifier can't be reused as a
        normalization key either (mirrors `weightings_unique_from_impact_categories`).
        """
        if not self.normalizations:
            return self
        overlap = set(self.normalizations).intersection(set(self.impact_categories))
        if overlap:
            raise ValueError(
                f"Normalization identifiers overlap impact category identifiers: {overlap}"
            )
        return self

    @model_validator(mode="after")
    def weightings_reference_impact_categories_or_normalizations(self):
        """Raise ``ValueError`` if a weighting refers to an undeclared identifier.

        Weightings may point at either normalizations or impact categories.
        """
        if not self.weightings:
            return self
        possibles = set(self.impact_categories)
        if self.normalizations:
            # dict iteration yields keys, i.e. the normalization identifiers
            possibles.update(self.normalizations)
        references = {ident for targets in self.weightings.values() for ident in targets}
        difference = references.difference(possibles)
        if difference:
            raise ValueError(
                f"`weightings` refers to missing impact categories or normalizations: {difference}"
            )
        return self

    @model_validator(mode="after")
    def weightings_unique_from_impact_categories(self):
        """Raise ``ValueError`` if a weighting identifier reuses an impact category identifier."""
        if not self.weightings:
            return self
        overlap = set(self.weightings).intersection(set(self.impact_categories))
        if overlap:
            raise ValueError(
                f"Weighting identifiers overlap impact category identifiers: {overlap}"
            )
        return self

    @model_validator(mode="after")
    def weightings_unique_from_normalizations(self):
        """Raise ``ValueError`` if a weighting identifier reuses a normalization identifier."""
        if not self.weightings:
            return self
        if self.normalizations:
            overlap = set(self.weightings).intersection(set(self.normalizations))
            if overlap:
                raise ValueError(
                    f"Weighting identifiers overlap normalization identifiers: {overlap}"
                )
        return self
15 changes: 8 additions & 7 deletions bw2calc/multi_lca.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,14 @@
import warnings
from collections.abc import Mapping
from pathlib import Path
from typing import Iterable, Optional, Union
from typing import Iterable, Optional, Union, Sequence

import bw_processing as bwp
import matrix_utils as mu
import numpy as np
from fs.base import FS
from scipy import sparse
from pydantic import BaseModel
from scipy import sparse

from . import PYPARDISO, __version__
from .dictionary_manager import DictionaryManager
Expand Down Expand Up @@ -101,8 +101,8 @@ class MultiLCA(LCABase):

def __init__(
self,
demands: Iterable[Mapping],
method_config: Union[dict, MethodConfig],
demands: Sequence[Mapping],
method_config: dict,
inventory_data_objs: Iterable[Union[Path, FS, bwp.DatapackageBase]],
method_data_objs: Optional[Iterable[Union[Path, FS, bwp.DatapackageBase]]] = None,
normalization_data_objs: Optional[Iterable[Union[Path, FS, bwp.DatapackageBase]]] = None,
Expand All @@ -116,9 +116,10 @@ def __init__(
):
# Resolve potential iterator
self.demands = list(demands)
for i, fu in enumerate(self.demands):
if not isinstance(fu, Mapping):
raise ValueError(f"Demand section {i}: {fu} not a dictionary")

# Validation checks
DemandsValidator(demands)
MethodConfig(method_config)

self.packages = [get_datapackage(obj) for obj in inventory_data_objs]

Expand Down
4 changes: 1 addition & 3 deletions bw2calc/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,6 +61,4 @@ def get_datapackage(obj):
return bwp.load_datapackage(OSFS(Path(obj)))

else:
raise TypeError(
"Unknown input type for loading datapackage: {}: {}".format(type(obj), obj)
)
raise TypeError("Unknown input type for loading datapackage: {}: {}".format(type(obj), obj))
145 changes: 139 additions & 6 deletions tests/test_method_config.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,154 @@
import pytest
from pydantic import ValidationError

from bw2calc.method_config import MethodConfig


def test_method_config_valid():
    """Well-formed configs — categories alone, plus normalizations, plus weightings — validate."""
    data = {
        "impact_categories": [("foo", "a"), ("foo", "b")],
    }
    assert MethodConfig(**data)

    data = {
        "impact_categories": [("foo", "a"), ("foo", "b")],
        "normalizations": {("norm", "standard"): [("foo", "a"), ("foo", "b")]},
    }
    assert MethodConfig(**data)

    data = {
        "impact_categories": [("foo", "a"), ("foo", "b")],
        "normalizations": {("norm", "standard"): [("foo", "a"), ("foo", "b")]},
        "weightings": {("weighting",): [("norm", "standard")]},
    }
    assert MethodConfig(**data)


def test_method_config_len_one_tuples_valid():
    """Single-element tuple identifiers are accepted everywhere identifiers appear."""
    assert MethodConfig(impact_categories=[("a",), ("b",)])

    assert MethodConfig(
        impact_categories=[("a",), ("b",)],
        normalizations={("norm",): [("a",), ("b",)]},
    )

    assert MethodConfig(
        impact_categories=[("a",), ("b",)],
        normalizations={("norm",): [("a",), ("b",)]},
        weightings={("weighting",): [("norm",)]},
    )


def test_method_config_weighting_can_refer_impact_category():
    """A weighting may point directly at an impact category, not only at a normalization."""
    data = {
        "impact_categories": [("a",), ("b",)],
        "normalizations": {("norm",): [("a",), ("b",)]},
        "weightings": {("weighting",): [("a",)]},
    }
    assert MethodConfig(**data)


def test_method_config_weighting_can_refer_normalization():
    """A weighting may point at a normalization identifier."""
    data = {
        "impact_categories": [("a",), ("b",)],
        "normalizations": {("norm",): [("a",), ("b",)]},
        "weightings": {("weighting",): [("norm",)]},
    }
    assert MethodConfig(**data)


def test_method_config_wrong_tuple_types():
    """Non-string tuple members (or non-tuple entries) are rejected wherever
    identifiers appear: impact categories, normalization references, weightings."""
    invalid_configs = [
        {"impact_categories": [("a",), (1,)]},
        {"impact_categories": [("a",), 1]},
        {
            "impact_categories": [("a",), ("b",)],
            "normalizations": {("norm",): [("a",), (1,)]},
        },
        {
            "impact_categories": [("a",), ("b",)],
            "normalizations": {("norm",): [(1,), ("b",)]},
        },
        {
            "impact_categories": [("a",), ("b",)],
            "normalizations": {("norm",): [("a",), ("b",)]},
            "weightings": {("norm",): (1,)},
        },
        {
            "impact_categories": [("a",), ("b",)],
            "normalizations": {("norm",): [("a",), ("b",)]},
            "weightings": {("norm",): 1},
        },
    ]
    for config in invalid_configs:
        with pytest.raises(ValidationError):
            assert MethodConfig(**config)


def test_method_config_missing_normalization_reference():
    """A normalization pointing at an undeclared impact category is rejected."""
    with pytest.raises(ValueError):
        assert MethodConfig(
            impact_categories=[("foo", "a"), ("foo", "b")],
            normalizations={("norm", "standard"): [("foo", "c")]},
        )


def test_method_config_normalization_overlaps_impact_categories():
    """A normalization identifier may not reuse an impact category identifier."""
    with pytest.raises(ValueError):
        assert MethodConfig(
            impact_categories=[("foo", "a"), ("foo", "b")],
            normalizations={("foo", "a"): [("foo", "a")]},
        )


def test_method_config_weighting_overlaps_impact_categories():
    """A weighting identifier may not reuse an impact category identifier."""
    with pytest.raises(ValueError):
        assert MethodConfig(
            impact_categories=[("foo", "a"), ("foo", "b")],
            normalizations={("normalization",): [("foo", "a")]},
            weightings={("foo", "a"): [("foo", "a")]},
        )


def test_method_config_weighting_overlaps_normalizations():
    """A weighting identifier may not reuse a normalization identifier."""
    with pytest.raises(ValueError):
        assert MethodConfig(
            impact_categories=[("foo", "a"), ("foo", "b")],
            normalizations={("normalization",): [("foo", "a")]},
            weightings={("normalization",): [("normalization",)]},
        )


def test_method_config_weighting_missing_reference():
    """A weighting pointing at an undeclared identifier is rejected."""
    with pytest.raises(ValueError):
        assert MethodConfig(
            impact_categories=[("foo", "a"), ("foo", "b")],
            normalizations={("normalization",): [("foo", "a")]},
            weightings={("normalization",): [("foo", "c")]},
        )

0 comments on commit 764d74a

Please sign in to comment.