WIP: Flatten explainer hierarchy #50

Merged 8 commits on May 2, 2019
8 changes: 4 additions & 4 deletions alibi/explainers/__init__.py
@@ -2,10 +2,10 @@
The 'alibi.explainers' module includes feature importance, counterfactual and anchor-based explainers.
"""

-from .anchor.anchor_tabular import AnchorTabular
-from .anchor.anchor_text import AnchorText
-from .anchor.anchor_image import AnchorImage
-from .cem.cem import CEM
+from .anchor_tabular import AnchorTabular
+from .anchor_text import AnchorText
+from .anchor_image import AnchorImage
+from .cem import CEM

__all__ = ["AnchorTabular",
"AnchorText",
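For context, a minimal sketch of what the flattening means for imports (module paths taken from this diff; nothing else assumed):

```python
# Before this PR the explainers lived in nested subpackages:
#   from alibi.explainers.anchor.anchor_tabular import AnchorTabular
# After flattening, each module sits directly under alibi.explainers:
from alibi.explainers.anchor_tabular import AnchorTabular
# ...and the package-level re-exports in __init__.py keep working as before:
from alibi.explainers import AnchorText, AnchorImage, CEM
```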
Empty file.
Empty file.
File renamed without changes.
@@ -1,5 +1,5 @@
-from . import anchor_base
-from . import anchor_explanation
+from .anchor_base import AnchorBaseBeam
+from .anchor_explanation import AnchorExplanation
import logging
import numpy as np
from typing import Any, Callable, Tuple
@@ -250,9 +250,9 @@ def explain(self, image: np.ndarray, threshold: float = 0.95, delta: float = 0.1
segments, sample_fn = self.get_sample_fn(image, p_sample=p_sample)

# get anchors and add metadata
-exp = anchor_base.AnchorBaseBeam.anchor_beam(sample_fn, delta=delta,
-epsilon=tau, batch_size=batch_size,
-desired_confidence=threshold, **kwargs) # type: Any
+exp = AnchorBaseBeam.anchor_beam(sample_fn, delta=delta,
+epsilon=tau, batch_size=batch_size,
+desired_confidence=threshold, **kwargs) # type: Any
exp['instance'] = image
exp['prediction'] = self.predict_fn(np.expand_dims(image, axis=0))[0]

@@ -265,7 +265,7 @@ def explain(self, image: np.ndarray, threshold: float = 0.95, delta: float = 0.1
for i in range(ex[opt].shape[0])]
ex[opt] = tmp

-exp = anchor_explanation.AnchorExplanation('image', exp)
+exp = AnchorExplanation('image', exp)

# output explanation dictionary
explanation = {}
@@ -1,6 +1,6 @@
-from . import anchor_base
-from . import anchor_explanation
-from .discretizer import Discretizer
+from .anchor_base import AnchorBaseBeam
+from .anchor_explanation import AnchorExplanation
+from alibi.utils.discretizer import Discretizer
import numpy as np
from typing import Callable, Tuple, Dict, Any, Set

@@ -300,13 +300,13 @@ def explain(self, X: np.ndarray, threshold: float = 0.95, delta: float = 0.1,
sample_fn, mapping = self.get_sample_fn(X, desired_label=desired_label)

# get anchors and add metadata
-exp = anchor_base.AnchorBaseBeam.anchor_beam(sample_fn, delta=delta, epsilon=tau,
-batch_size=batch_size, desired_confidence=threshold,
-max_anchor_size=max_anchor_size, **kwargs) # type: Any
+exp = AnchorBaseBeam.anchor_beam(sample_fn, delta=delta, epsilon=tau,
+batch_size=batch_size, desired_confidence=threshold,
+max_anchor_size=max_anchor_size, **kwargs) # type: Any
self.add_names_to_exp(exp, mapping)
exp['instance'] = X
exp['prediction'] = self.predict_fn(X.reshape(1, -1))[0]
-exp = anchor_explanation.AnchorExplanation('tabular', exp)
+exp = AnchorExplanation('tabular', exp)

# output explanation dictionary
explanation = {}
@@ -1,5 +1,5 @@
-from . import anchor_base
-from . import anchor_explanation
+from .anchor_base import AnchorBaseBeam
+from .anchor_explanation import AnchorExplanation
import logging
import numpy as np
from typing import Any, Callable, Tuple, Dict
@@ -173,6 +173,7 @@ def sample_fn(present: list, num_samples: int, compute_labels: bool = True,
labels = (self.predict_fn(raw_data) == true_label).astype(int)
raw_data = np.array(raw_data).reshape(-1, 1)
return raw_data, data, labels
+
return words, positions, sample_fn

def explain(self, text: str, threshold: float = 0.95, delta: float = 0.1,
@@ -230,17 +231,17 @@ def explain(self, text: str, threshold: float = 0.95, delta: float = 0.1,
data_type = '<U' + str(int(total_len))

# get anchors and add metadata
-exp = anchor_base.AnchorBaseBeam.anchor_beam(sample_fn, delta=delta,
-epsilon=tau, batch_size=batch_size,
-desired_confidence=threshold,
-stop_on_first=True, data_type=data_type,
-**kwargs) # type: Any
+exp = AnchorBaseBeam.anchor_beam(sample_fn, delta=delta,
+epsilon=tau, batch_size=batch_size,
+desired_confidence=threshold,
+stop_on_first=True, data_type=data_type,
+**kwargs) # type: Any

exp['names'] = [words[x] for x in exp['feature']]
exp['positions'] = [positions[x] for x in exp['feature']]
exp['instance'] = text
exp['prediction'] = self.predict_fn([text])[0]
-exp = anchor_explanation.AnchorExplanation('text', exp)
+exp = AnchorExplanation('text', exp)

# output explanation dictionary
explanation = {}
File renamed without changes.
Empty file removed alibi/explainers/cem/__init__.py
Empty file.
Empty file.
@@ -8,7 +8,7 @@
from sklearn.model_selection import train_test_split
import spacy
from alibi.explainers import AnchorText
-from alibi.explainers.anchor.anchor_text import Neighbors
+from alibi.explainers.anchor_text import Neighbors
from alibi.datasets import movie_sentiment
from alibi.utils.download import spacy_model

File renamed without changes.
2 changes: 2 additions & 0 deletions doc/source/conf.py
@@ -65,6 +65,8 @@
apidoc_output_dir = 'api'
apidoc_excluded_paths = ['**/*test*']
apidoc_module_first = True
+apidoc_separate_modules = True
+apidoc_extra_args = ['-d 6']
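(A note on these settings, drawn from the sphinxcontrib-apidoc documentation rather than this diff: `apidoc_separate_modules = True` generates a separate documentation page per module instead of one per package, and `apidoc_extra_args = ['-d 6']` passes `-d 6` to `sphinx-apidoc`, raising the maximum depth of the generated tables of contents. Both fit a layout where each explainer is now its own top-level module.)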

# Napoleon settings
napoleon_google_docstring = True
2 changes: 1 addition & 1 deletion doc/source/methods/Anchors.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"[[source]](../api/alibi.explainers.anchor.rst)"
"[[source]](../api/alibi.explainers.anchor_tabular.rst)"
]
},
{
2 changes: 1 addition & 1 deletion doc/source/methods/Trust Scores.ipynb
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"[[source]](../api/alibi.confidence.rst)"
"[[source]](../api/alibi.confidence.trustscore.rst)"
]
},
{
9 changes: 8 additions & 1 deletion doc/source/overview/algorithms.md
@@ -31,10 +31,17 @@ instance that would result in a different prediction). [Documentation](../method

## Model Confidence
These algorithms provide instance-specific scores measuring the model confidence for making a
-certain prediction.
+particular prediction.

|Algorithm|Classification|Regression|Categorical features|Tabular|Text|Images|Needs training set|
|---|---|---|---|---|---|---|---|
|[Trust Scores](../methods/Trust\ Scores.ipynb)|✔|✘|✘|✔|✔[^1]|✔[^2]|Yes|

**Trust scores**: produce a "trust score" of a classifier's prediction. The trust score is the ratio
of the distance to the nearest class different from the predicted class to the distance to the
predicted class; higher scores correspond to more trustworthy predictions.
[Documentation](../methods/Trust\ Scores.ipynb),
[tabular example](../examples/trustscore_iris.nblink).
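To make the ratio concrete, a minimal sketch of the computation described above (illustrative only: `trust_score`, its arguments, and the plain nearest-point distance are assumptions here, not alibi's actual `TrustScore` API):

```python
import numpy as np

def trust_score(x, X_train, y_train, predicted_class):
    """Ratio of the distance to the nearest class other than the predicted
    one over the distance to the predicted class (higher = more trustworthy)."""
    # distance from x to the closest training point of each class
    d = {c: np.min(np.linalg.norm(X_train[y_train == c] - x, axis=1))
         for c in np.unique(y_train)}
    d_pred = d[predicted_class]
    d_other = min(v for c, v in d.items() if c != predicted_class)
    return d_other / (d_pred + 1e-12)  # guard against zero distance
```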

[^1]: Depending on model
[^2]: May require dimensionality reduction