From c8df34e9ebfb1c5a7862e3f1e55f1fbbd122b4c1 Mon Sep 17 00:00:00 2001
From: Adelin CONSTANS
Date: Mon, 29 May 2023 09:37:09 +0200
Subject: [PATCH 001/138] example-based: add cole method

---
 xplique/example_based/cole.py | 411 ++++++++++++++++++++++++++++++++++
 1 file changed, 411 insertions(+)
 create mode 100644 xplique/example_based/cole.py

diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py
new file mode 100644
index 00000000..63fa69c6
--- /dev/null
+++ b/xplique/example_based/cole.py
@@ -0,0 +1,411 @@
+"""
+Module related to the Case Base Explainer
+"""
+
+
+import matplotlib.pyplot as plt
+import numpy as np
+from sklearn.metrics import DistanceMetric
+from sklearn.neighbors import KDTree
+import tensorflow as tf
+
+from ..plots.image import _standardize_image
+from ..types import Callable, Union, Optional
+
+
+class Cole:
+    """
+    Used to compute the Case-Based Explainer system, a twin system that uses an ANN
+    and a k-NN on the same dataset.
+
+    Ref. Eoin M. Kenny and Mark T. Keane.
+    Twin-Systems to Explain Artificial Neural Networks using Case-Based Reasoning:
+    Comparative Tests of Feature-Weighting Methods in ANN-CBR Twins for XAI. (2019)
+    https://www.ijcai.org/proceedings/2019/376
+    """
+
+    def __init__(
+        self,
+        model: Callable,
+        case_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_train: np.ndarray,
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+        distance_function: DistanceMetric = None,
+        weights_extraction_function: Callable = None,
+        k: Optional[int] = 3,
+    ):
+        """
+        Parameters
+        ----------
+        model
+            The model from which we want to obtain explanations.
+        case_dataset
+            The dataset used to train the model,
+            also used by the explainer to compute the closest examples.
+        labels_train
+            Labels of the `case_dataset`.
+        targets
+            Labels predicted by the model on the `case_dataset`.
+        distance_function
+            The function used to compute the distance between the inputs and the
+            elements of the dataset (e.g. euclidean, manhattan, minkowski...).
+        weights_extraction_function
+            The function used to compute the weight of every feature.
+            Many kinds of methods can be used, the relevant choice depends
+            on the type of your dataset.
+            example:
+                def my_function(inputs, targets):
+                    # outputs.shape == inputs.shape
+                    return outputs
+        k
+            Number of nearest neighbours to return.
+        """
+        # set attributes
+        self.model = model
+        self.case_dataset = case_dataset
+        self.weights_extraction_function = weights_extraction_function
+        self.k_neighbors = k
+        self.labels_train = labels_train
+
+        # verify targets parameter
+        if targets is None:
+            targets = model(case_dataset)
+            nb_classes = targets.shape[1]
+            targets = tf.argmax(targets, axis=1)
+            targets = tf.one_hot(
+                targets, nb_classes
+            )  # nb_classes is normally the second argument, here it is 10.
+
+        # verify distance_function parameter
+        if distance_function is None:
+            distance_function = DistanceMetric.get_metric("euclidean")
+
+        # verify weights_extraction_function parameter
+        if weights_extraction_function is None:
+            self.weights_extraction_function = self._get_default_weights_extraction_function()
+
+        # compute case dataset weights (used in distance)
+        # the weight extraction function may need the predictions to extract the weights
+        case_dataset_weight = self.weights_extraction_function(case_dataset, targets)
+        # for images, channels may disappear
+        if len(case_dataset_weight.shape) != len(case_dataset.shape):
+            case_dataset_weight = tf.expand_dims(case_dataset_weight, -1)
+        self.case_dataset_weight = case_dataset_weight
+
+        # apply weights to the case dataset (weighted distance)
+        weighted_case_dataset = tf.math.multiply(case_dataset_weight, case_dataset)
+        # flatten features for kdtree
+        weighted_case_dataset = tf.reshape(
+            weighted_case_dataset, [weighted_case_dataset.shape[0], -1]
+        )
+
+        # create kdtree instance with weighted case dataset
+        # will be called to estimate closest examples
+        self.knn = KDTree(weighted_case_dataset, metric=distance_function)
+
+    def extract_element_from_indices(
+        self,
+        labels_train: np.ndarray,
+        examples_indice: np.ndarray,
+    ):
+        """
+        Extracts the examples, their weights, and their labels from the dataset,
+        using the indices computed by the knn query in the explain function.
+
+        Parameters
+        ----------
+        labels_train
+            Labels of the `case_dataset`.
+        examples_indice
+            Indices of the k nearest neighbours of the inputs.
+
+        Returns
+        -------
+        examples
+            The k nearest neighbours of the inputs.
+        examples_weights
+            Feature weights of the examples.
+        labels_examples
+            Labels of the examples.
+        """
+        all_examples = []
+        all_weight_examples = []
+        all_labels_examples = []
+        for sample_examples_indice in examples_indice:
+            sample_examples = []
+            weight_ex = []
+            label_ex = []
+            for indice in sample_examples_indice:
+                sample_examples.append(self.case_dataset[indice])
+                weight_ex.append(self.case_dataset_weight[indice])
+                label_ex.append(labels_train[indice])
+            # (k, h, w, 1)
+            all_examples.append(tf.stack(sample_examples, axis=0))
+            all_weight_examples.append(tf.stack(weight_ex, axis=0))
+            all_labels_examples.append(tf.stack(label_ex, axis=0))
+        # (n, k, h, w, 1)
+        examples = tf.stack(all_examples, axis=0)
+        examples_weights = tf.stack(all_weight_examples, axis=0)
+        labels_examples = tf.stack(all_labels_examples, axis=0)
+
+        return examples, examples_weights, labels_examples
+
+    def explain(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Union[tf.Tensor, np.ndarray] = None,
+    ):
+        """
+        Computes the indices of the k closest examples for each input,
+        then calls `extract_element_from_indices` to extract the examples
+        from those indices.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N,W), (N,T,W), (N,W,H,C).
+        targets
+            Tensor or Array. Corresponding to the predictions of the model for the samples.
+            shape: (n, nb_classes)
+            Used by the `weights_extraction_function` if it is an Xplique attribution function.
+            For more details, please refer to the explain methods documentation.
+
+        Returns
+        -------
+        examples
+            The k nearest neighbours of the inputs.
+        examples_distance
+            Distances between the inputs and the examples.
+        examples_weights
+            Feature weights of the examples.
+        inputs_weights
+            Feature weights of the inputs.
+        examples_labels
+            Labels of the examples.
+        """
+
+        # verify targets parameter
+        if targets is None:
+            targets = self.model(inputs)
+            nb_classes = targets.shape[1]
+            targets = tf.argmax(targets, axis=1)
+            targets = tf.one_hot(targets, nb_classes)
+
+        # compute weights (used in distance)
+        # the weight extraction function may need the predictions to extract the weights
+        inputs_weights = self.weights_extraction_function(inputs, targets)
+
+        # for images, channels may disappear
+        if len(inputs_weights.shape) != len(inputs.shape):
+            inputs_weights = tf.expand_dims(inputs_weights, -1)
+
+        # apply weights to the inputs
+        weighted_inputs = tf.math.multiply(inputs_weights, inputs)
+        # flatten features for knn query
+        weighted_inputs = tf.reshape(weighted_inputs, [weighted_inputs.shape[0], -1])
+
+        # query the kdtree instance with the weighted inputs
+        # and the number of closest examples (k)
+        examples_distance, examples_indice = self.knn.query(
+            weighted_inputs, k=self.k_neighbors
+        )
+
+        # call the extract_element_from_indices function
+        examples, examples_weights, examples_labels = self.extract_element_from_indices(
+            self.labels_train, examples_indice
+        )
+
+        return (
+            examples,
+            examples_distance,
+            examples_weights,
+            inputs_weights,
+            examples_labels,
+        )
+
+    @staticmethod
+    def _get_default_weights_extraction_function():
+        """
+        Returns the default weight extraction function (uniform weights).
+        """
+        return lambda inputs, targets: tf.ones(inputs.shape)
+
+    def show_result_images(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        examples: Union[tf.Tensor, np.ndarray],
+        examples_distance: float,
+        inputs_weights: np.ndarray,
+        examples_weights: np.ndarray,
+        indice_original: int,
+        examples_labels: np.ndarray,
+        labels_test: np.ndarray,
+        clip_percentile: Optional[float] = 0.2,
+        cmapimages: Optional[str] = "gray",
+        cmapexplanation: Optional[str] = "coolwarm",
+        alpha: Optional[float] = 0.5,
+    ):
+        """
+        This function is for image data, it shows the returns of the explain function.
+
+        Parameters
+        ---------
+        inputs
+            Tensor or Array. Input samples, to be shown next to the examples.
+            Expected shape among (N,W), (N,T,W), (N,W,H,C).
+        examples
+            The k nearest neighbours of the inputs.
+        examples_distance
+            Distances between the inputs and the examples.
+        inputs_weights
+            Feature weights of the inputs.
+        examples_weights
+            Feature weights of the examples.
+        indice_original
+            Indices of the inputs, used to show the true labels.
+        examples_labels
+            Labels of the examples.
+        labels_test
+            Labels of the test dataset.
+        clip_percentile
+            Percentile value to use if clipping is needed, e.g. a value of 1 will perform
+            clipping between percentiles 1 and 99.
+            This parameter allows one to avoid outliers in case of too extreme values.
+        cmapimages
+            For images.
+            The Colormap instance or registered colormap name used to map scalar data to colors.
+            This parameter is ignored for RGB(A) data.
+        cmapexplanation
+            For explanations.
+            The Colormap instance or registered colormap name used to map scalar data to colors.
+            This parameter is ignored for RGB(A) data.
+        alpha
+            The alpha blending value, between 0 (transparent) and 1 (opaque).
+            If alpha is an array, the alpha blending values are applied pixel by pixel,
+            and alpha must have the same shape as X.
+        """
+        # pylint: disable=too-many-arguments
+
+        # initialize 'input_and_examples' and 'corresponding_weights';
+        # they will be used to show every closest example and the explanation
+        inputs = tf.expand_dims(inputs, 1)
+        inputs_weights = tf.expand_dims(inputs_weights, 1)
+        input_and_examples = tf.concat([inputs, examples], axis=1)
+        corresponding_weights = tf.concat([inputs_weights, examples_weights], axis=1)
+
+        # compute the predictions for the inputs and examples,
+        # they will be used in the image titles
+        # a loop is necessary because we have n * k elements
+        predicted_labels = []
+        for samples in input_and_examples:
+            predicted = self.model(samples)
+            predicted = tf.argmax(predicted, axis=1)
+            predicted_labels.append(predicted)
+
+        # configure the grid to show all results
+        plt.rcParams["figure.autolayout"] = True
+        plt.rcParams["figure.figsize"] = [20, 10]
+
+        # loop to organize and show all results
+        for j in range(np.asarray(input_and_examples).shape[0]):
+            fig = plt.figure()
+            gridspec = fig.add_gridspec(2, input_and_examples.shape[1])
+            for k in range(len(input_and_examples[j])):
+                fig.add_subplot(gridspec[0, k])
+                if k == 0:
+                    plt.title(
+                        f"Original image\nGround Truth: {labels_test[indice_original[j]]}"\
+                        + f"\nPrediction: {predicted_labels[j][k]}"
+                    )
+                else:
+                    plt.title(
+                        f"Examples\nGround Truth: {examples_labels[j][k-1]}"\
+                        + f"\nPrediction: {predicted_labels[j][k]}"\
+                        + f"\nDistance: {round(examples_distance[j][k-1], 2)}"
+                    )
+                plt.imshow(input_and_examples[j][k], cmap=cmapimages)
+                plt.axis("off")
+                fig.add_subplot(gridspec[1, k])
+                plt.imshow(input_and_examples[j][k], cmap=cmapimages)
+                plt.imshow(
+                    _standardize_image(corresponding_weights[j][k], clip_percentile),
+                    cmap=cmapexplanation,
+                    alpha=alpha,
+                )
+                plt.axis("off")
+        plt.show()
+
+    def show_result_tabular(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        examples: Union[tf.Tensor, np.ndarray],
+        examples_distance: float,
+        indice_original: int,
+        examples_labels: np.ndarray,
+        labels_test: np.ndarray,
+        show_values: bool = False,
+    ):
+        """
+        This function is for tabular data, it shows the returns of the explain function.
+
+        Parameters
+        ---------
+        inputs
+            Tensor or Array. Input samples, to be shown next to the examples.
+            Expected shape among (N,W), (N,T,W), (N,W,H,C).
+        examples
+            The k nearest neighbours of the inputs.
+        examples_distance
+            Distances between the inputs and the examples.
+        indice_original
+            Indices of the inputs, used to show the true labels.
+        examples_labels
+            Labels of the examples.
+        labels_test
+            Labels of the test dataset.
+        show_values
+            Boolean, default False, whether to show the values of the examples.
+        """
+
+        # initialize 'input_and_examples';
+        # it will be used to show every closest example
+        inputs = tf.expand_dims(inputs, 1)
+        input_and_examples = tf.concat([inputs, examples], axis=1)
+
+        # compute the predictions for the inputs and examples,
+        # they will be used in the titles
+        # a loop is necessary because we have n * k elements
+        predicted_labels = []
+        for samples in input_and_examples:
+            predicted = self.model(samples)
+            predicted = tf.argmax(predicted, axis=1)
+            predicted_labels.append(predicted)
+
+        # apply argmax function to labels
+        labels_test = tf.argmax(labels_test, axis=1)
+        examples_labels = tf.argmax(examples_labels, axis=1)
+
+        # define values_string in case show_values is False
+        values_string = ""
+
+        # loop to organize and show all results
+        for i in range(input_and_examples.shape[0]):
+            for j in range(input_and_examples.shape[1]):
+                if show_values is True:
+                    values_string = f"\t\tValues: {input_and_examples[i][j]}"
+                if j == 0:
+                    print(
+                        f"Original data, index: {indice_original[i]}"\
+                        + f"\tDistance: \t\tGround Truth: {labels_test[i]}"\
+                        + f"\t\tPrediction: {predicted_labels[i][j]}"
+                        + values_string
+                    )
+                else:
+                    print(
+                        f"\tExamples: {j}"\
+                        + f"\t\tDistance: {round(examples_distance[i][j-1], 2)}"\
+                        + f"\t\tGround Truth: {examples_labels[i][j-1]}"\
+                        + f"\t\tPrediction: {predicted_labels[i][j]}"
+                        + values_string
+                    )
+            print("\n")

From c331801391a10fe64490018b12df3226bdb7cb79 Mon Sep 17 00:00:00 2001
From: Adelin CONSTANS
Date: Mon, 29 May 2023 09:39:36 +0200
Subject: [PATCH 002/138] tests: unit testing cole

---
 tests/example_based/test_cole.py | 199 +++++++++++++++++++++++++++++++
 1 file changed, 199 insertions(+)
 create mode 100644 tests/example_based/test_cole.py

diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py
new file mode 100644
index 00000000..941df68b
--- /dev/null
+++ b/tests/example_based/test_cole.py
@@ -0,0 +1,199 @@
+"""
+Test Cole
+"""
+from math import prod, sqrt
+
+import numpy as np
+from sklearn.metrics import DistanceMetric
+import tensorflow as tf
+
+from xplique.example_based import Cole
+from xplique.types import Union
+
+from ..utils import generate_data, generate_model, almost_equal, generate_agnostic_model
+
+
+def test_neighbors_distance():
+    """
+    This function tests every output of the explanation method.
+    """
+    # Method parameters initialisation
+    input_shape = (3, 3, 1)
+    nb_labels = 10
+    nb_samples = 10
+    nb_samples_test = 8
+    k = 3
+
+    # Data generation
+    matrix_train = tf.stack([i * tf.ones(input_shape) for i in range(nb_samples)])
+    matrix_test = matrix_train[1:-1]
+    labels_train = tf.range(nb_samples)
+    labels_test = labels_train[1:-1]
+
+    # Model generation
+    model = generate_model(input_shape, nb_labels)
+
+    # Initialisation of weights_extraction_function and distance_function
+    # They will be used in the Cole initialisation
+    distance_function = DistanceMetric.get_metric("euclidean")
+
+    # Cole initialisation
+    method = Cole(
+        model,
+        matrix_train,
+        labels_train,
+        targets=None,
+        distance_function=distance_function,
+        weights_extraction_function=lambda inputs, targets: tf.ones(inputs.shape),
+    )
+
+    # Method explanation
+    (
+        examples,
+        examples_distance,
+        examples_weights,
+        inputs_weights,
+        examples_labels,
+    ) = method.explain(matrix_test, labels_test)
+
+    # test every output shape
+    assert examples.shape == (nb_samples_test, k) + input_shape
+    assert examples_distance.shape == (nb_samples_test, k)
+    assert examples_weights.shape == (nb_samples_test, k) + input_shape
+    assert inputs_weights.shape == (nb_samples_test,) + input_shape
+    assert examples_labels.shape == (nb_samples_test, k)
+
+    for i in range(len(labels_test)):
+        # test examples:
+        assert almost_equal(examples[i][0], matrix_train[i + 1])
+        assert almost_equal(examples[i][1], matrix_train[i + 2]) or almost_equal(
+            examples[i][1], matrix_train[i]
+        )
+        assert almost_equal(examples[i][2], matrix_train[i]) or almost_equal(
+            examples[i][2], matrix_train[i + 2]
+        )
+
+        # test examples_distance
+        assert almost_equal(examples_distance[i][0], 0)
+        assert almost_equal(examples_distance[i][1], sqrt(prod(input_shape)))
+        assert almost_equal(examples_distance[i][2], sqrt(prod(input_shape)))
+
+        # test examples_labels
+        assert almost_equal(examples_labels[i][0], labels_train[i + 1])
+        assert almost_equal(examples_labels[i][1], labels_train[i + 2]) or almost_equal(
+            examples_labels[i][1], labels_train[i]
+        )
+        assert almost_equal(examples_labels[i][2], labels_train[i]) or almost_equal(
+            examples_labels[i][2], labels_train[i + 2]
+        )
+
+
+def weights_attribution(
+    inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]
+):
+    """
+    Custom weights extraction function:
+    zeros everywhere except the target value at (0, 0, 0).
+    """
+    weights = tf.Variable(tf.zeros(inputs.shape, dtype=tf.float32))
+    weights[:, 0, 0, 0].assign(targets)
+    return weights
+
+
+def test_weights_attribution():
+    """
+    Function to test the weights attribution.
+    """
+    # Method parameters initialisation
+    input_shape = (3, 3, 1)
+    nb_labels = 10
+    nb_samples = 10
+
+    # Data generation
+    matrix_train = tf.stack(
+        [i * tf.ones(input_shape, dtype=tf.float32) for i in range(nb_samples)]
+    )
+    matrix_test = matrix_train[1:-1]
+    labels_train = tf.range(nb_samples, dtype=tf.float32)
+    labels_test = labels_train[1:-1]
+
+    # Model generation
+    model = generate_model(input_shape, nb_labels)
+
+    # Initialisation of distance_function
+    # It will be used in the Cole initialisation
+    distance_function = DistanceMetric.get_metric("euclidean")
+
+    # Cole initialisation
+    method = Cole(
+        model,
+        matrix_train,
+        labels_train,
+        targets=labels_train,
+        distance_function=distance_function,
+        weights_extraction_function=weights_attribution,
+    )
+
+    # test case dataset weight
+    assert almost_equal(method.case_dataset_weight[:, 0, 0, 0], method.labels_train)
+    assert almost_equal(
+        tf.reduce_sum(method.case_dataset_weight, axis=[1, 2, 3]), method.labels_train
+    )
+
+    # Method explanation
+    _, _, examples_weights, inputs_weights, examples_labels =\
+        method.explain(matrix_test, labels_test)
+
+    # test examples weights
+    assert almost_equal(examples_weights[:, :, 0, 0, 0], examples_labels)
+    assert almost_equal(
+        tf.reduce_sum(examples_weights, axis=[2, 3, 4]), examples_labels
+    )
+
+    # test inputs weights
+    assert almost_equal(inputs_weights[:, 0, 0, 0], labels_test)
+    assert almost_equal(tf.reduce_sum(inputs_weights, axis=[1, 2, 3]), labels_test)
+
+
+def test_tabular_inputs():
+    """
+    Function to test that the method accepts tabular data inputs.
+    """
+    # Method parameters initialisation
+    data_shape = (3,)
+    input_shape = data_shape
+    nb_labels = 3
+    nb_samples = 20
+    nb_inputs = 5
+    k = 3
+
+    # Data generation
+    dataset, targets = generate_data(data_shape, nb_labels, nb_samples)
+    dataset_train = dataset[:-nb_inputs]
+    dataset_test = dataset[-nb_inputs:]
+    targets_train = targets[:-nb_inputs]
+    targets_test = targets[-nb_inputs:]
+
+    # Model generation
+    model = generate_agnostic_model(input_shape, nb_labels)
+
+    # Initialisation of weights_extraction_function and distance_function
+    # They will be used in the Cole initialisation
+    distance_function = DistanceMetric.get_metric("euclidean")
+
+    # Cole initialisation
+    method = Cole(
+        model,
+        dataset_train,
+        targets_train,
+        targets=targets_train,
+        distance_function=distance_function,
+        weights_extraction_function=lambda inputs, targets: tf.ones(inputs.shape),
+        k=k,
+    )
+
+    # Method explanation
+    examples, _, _, _, _ = method.explain(dataset_test, targets_test)
+
+    # test examples shape
+    assert examples.shape == (nb_inputs, k) + input_shape
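For reference, a minimal usage sketch of the `Cole` API added above. The toy data, the toy model, and the choice of `Saliency` as the weight-extraction function are illustrative assumptions, not part of the patch; any callable with the `(inputs, targets)` signature would work:

```python
import numpy as np
import tensorflow as tf
from sklearn.metrics import DistanceMetric

from xplique.attributions import Saliency
from xplique.example_based import Cole

# toy data and model, for illustration only
x_train = np.random.rand(50, 8, 8, 1).astype(np.float32)
y_train = tf.one_hot(np.random.randint(10, size=50), depth=10).numpy()
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(8, 8, 1)),
    tf.keras.layers.Dense(10, activation="softmax"),
])

# an Xplique attribution explainer can serve as the weight-extraction function,
# since it is called as `weights_extraction_function(inputs, targets)`
explainer = Cole(
    model,
    x_train,
    labels_train=y_train,
    distance_function=DistanceMetric.get_metric("euclidean"),
    weights_extraction_function=Saliency(model),
    k=3,
)

# retrieve the 3 nearest (saliency-weighted) training examples for each test input
examples, distances, examples_weights, inputs_weights, examples_labels = (
    explainer.explain(x_train[:5])
)
```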
From e1e4d82307464be8e462b99e9540f44e298fc6e4 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Mon, 29 May 2023 09:50:25 +0200
Subject: [PATCH 003/138] example based: introduce projections

---
 xplique/example_based/projections/__init__.py |   8 +
 .../example_based/projections/attributions.py | 156 ++++++++++++++++++
 xplique/example_based/projections/base.py     | 151 +++++++++++++++++
 xplique/example_based/projections/custom.py   |  90 ++++++++++
 .../example_based/projections/latent_space.py |  48 ++++++
 5 files changed, 453 insertions(+)
 create mode 100644 xplique/example_based/projections/__init__.py
 create mode 100644 xplique/example_based/projections/attributions.py
 create mode 100644 xplique/example_based/projections/base.py
 create mode 100644 xplique/example_based/projections/custom.py
 create mode 100644 xplique/example_based/projections/latent_space.py

diff --git a/xplique/example_based/projections/__init__.py b/xplique/example_based/projections/__init__.py
new file mode 100644
index 00000000..d5d4cf90
--- /dev/null
+++ b/xplique/example_based/projections/__init__.py
@@ -0,0 +1,8 @@
+"""
+Projections
+"""
+
+from .attributions import AttributionProjection
+from .base import Projection
+from .custom import CustomProjection
+from .latent_space import LatentSpaceProjection

diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py
new file mode 100644
index 00000000..7f9f624f
--- /dev/null
+++ b/xplique/example_based/projections/attributions.py
@@ -0,0 +1,156 @@
+"""
+Attribution, a projection from the example-based module
+"""
+
+
+import tensorflow as tf
+import numpy as np
+
+from ...attributions.base import BlackBoxExplainer
+from ...attributions import Saliency
+from ...commons import find_layer
+from ...types import Callable, Union, Optional
+
+from .base import Projection
+
+
+class AttributionProjection(Projection):
+    """
+    Projection built on an attribution function to provide local projections.
+    This class is used as the projection of the `Cole` similar-examples method.
+
+    Depending on the `latent_layer`, the model will be split between
+    the feature extractor and the predictor.
+    The feature extractor will become the `space_projection()` method, then
+    the predictor will be used to build the attribution method, and
+    its `explain()` method will become the `get_weights()` method.
+
+    If no `latent_layer` is provided, the model is not split,
+    the `space_projection()` is the identity function, and
+    the attributions (`get_weights()`) are computed on the whole model.
+
+    Parameters
+    ----------
+    model
+        The model from which we want to obtain explanations.
+    latent_layer
+        Layer used to split the model: the first part will be used for projection and
+        the second to compute the attributions. By default, the model is not split.
+        For such a split, the `model` should be a `tf.keras.Model`.
+
+        Layer to target for the outputs (e.g. logits or after softmax).
+        If an `int` is provided it will be interpreted as a layer index.
+        If a `string` is provided it will look for the layer name.
+
+        The method, as described in the paper, applies the split on the last
+        convolutional layer. To do so, the `"last_conv"` value will extract it.
+        Otherwise, `-1` could be used for the last layer before softmax.
+    method
+        Class of the attribution method to use for projection.
+        It should inherit from `xplique.attributions.base.BlackBoxExplainer`.
+        Ignored if a projection is given.
+    attribution_kwargs
+        Parameters to be passed at the construction of the attribution method.
+    """
+
+    def __init__(
+        self,
+        model: Callable,
+        method: BlackBoxExplainer = Saliency,
+        latent_layer: Optional[Union[str, int]] = None,
+        **attribution_kwargs
+    ):
+        self.model = model
+
+        if latent_layer is None:
+            # no split
+            self.latent_layer = None
+            space_projection = lambda inputs: inputs
+            get_weights = method(model, **attribution_kwargs)
+        else:
+            # split the model if a latent_layer is provided
+            if latent_layer == "last_conv":
+                self.latent_layer = next(
+                    layer for layer in model.layers[::-1] if hasattr(layer, "filters")
+                )
+            else:
+                self.latent_layer = find_layer(model, latent_layer)
+
+            space_projection = tf.keras.Model(
+                model.input, self.latent_layer.output, name="features_extractor"
+            )
+            self.predictor = tf.keras.Model(
+                self.latent_layer.output, model.output, name="predictor"
+            )
+            get_weights = method(self.predictor, **attribution_kwargs)
+
+        # set methods
+        super().__init__(get_weights, space_projection)
+
+        # attribution methods output do not have a channel dimension
+        # we wrap get_weights to expand dimensions if needed
+        self.__wrap_get_weights_to_extend_channels(self.get_weights)
+
+    def __wrap_get_weights_to_extend_channels(self, get_weights: Callable):
+        """
+        Expand the channel dimension if there is a mismatch between inputs and weights.
+        """
+
+        def wrapped_get_weights(inputs, targets):
+            weights = get_weights(inputs, targets)
+            weights = tf.cond(
+                pred=weights.shape == inputs.shape,
+                true_fn=lambda: weights,
+                false_fn=lambda: tf.expand_dims(weights, axis=-1),
+            )
+            return weights

+        self.get_weights = wrapped_get_weights
+
+    def get_input_weights(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """
+        For visualization purposes (and only for them), we may want to project weights
+        back from the projected space to the input space.
+        This is applied only if there is a difference in dimension.
+        We assume here that we are treating images, and an upsampling is applied.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Additional parameter for the `self.get_weights` function.
+
+        Returns
+        -------
+        input_weights
+            Tensor with the same dimension as `inputs` modulo the channels.
+            They are an upsampled version of the actual weights used in the projection.
+        """
+        projected_inputs = self.space_projection(inputs)
+        weights = self.get_weights(projected_inputs, targets)
+
+        # take mean over channels for images
+        channel_mean_fn = lambda: tf.reduce_mean(weights, axis=-1, keepdims=True)
+        weights = tf.cond(
+            pred=tf.shape(weights).shape[0] < 4,
+            true_fn=lambda: weights,
+            false_fn=channel_mean_fn,
+        )
+
+        # resizing
+        resize_fn = lambda: tf.image.resize(
+            weights, inputs.shape[1:-1], method="bicubic"
+        )
+        input_weights = tf.cond(
+            pred=projected_inputs.shape == inputs.shape,
+            true_fn=lambda: weights,
+            false_fn=resize_fn,
+        )
+        return input_weights
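A short, hypothetical sketch of how `AttributionProjection` can be instantiated; the toy model is an assumption for illustration:

```python
import tensorflow as tf

from xplique.attributions import Saliency
from xplique.example_based.projections import AttributionProjection

# any tf.keras classifier works; this toy model is an assumption
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation="relu", input_shape=(16, 16, 3)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation="softmax"),
])

# no `latent_layer`: identity space projection, Saliency weights on the whole model
projection = AttributionProjection(model, method=Saliency)

inputs = tf.random.uniform((4, 16, 16, 3))
targets = tf.one_hot([0, 1, 2, 3], depth=10)
weighted_inputs = projection(inputs, targets)  # saliency-weighted inputs
```

Passing `latent_layer="last_conv"` instead would split the model and apply the attribution only on the predictor part, as described in the docstring above.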
diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py
new file mode 100644
index 00000000..debe261a
--- /dev/null
+++ b/xplique/example_based/projections/base.py
@@ -0,0 +1,151 @@
+"""
+Base projection for similar examples in the example-based module
+"""
+
+from abc import ABC
+
+import tensorflow as tf
+import numpy as np
+
+from ...commons import sanitize_inputs_targets
+from ...types import Callable, Union, Optional
+
+
+class Projection(ABC):
+    """
+    Base class used by `NaturalExampleBasedExplainer` to project samples into a space
+    that is meaningful for the model to explain.
+
+    A projection has two parts, a `space_projection` and `weights`: to apply a projection,
+    the samples are first projected into a new space and then weighted.
+    Either the `space_projection` or the `weights` could be `None` but,
+    if both are, the projection is an identity function.
+
+    At least one of the two parts should include the model in the computation,
+    so that distances between projected elements make sense for the model.
+
+    Note that the cost of this projection should be limited
+    as it will be applied to all samples of the train dataset.
+
+    Parameters
+    ----------
+    get_weights
+        Callable, a function that returns the weights (Tensor) for a given input (Tensor).
+        Weights should have the same shape as the input (possible difference on channels).
+
+        Example of `get_weights()` function:
+        ```
+        def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray),
+                                targets: Union(tf.Tensor, np.ndarray) = None):
+            '''
+            Example of function to get weights,
+            projected_inputs are the elements for which weights are computed.
+            targets are optional additional parameters for weights computation.
+            '''
+            weights = ...  # do some magic with inputs and targets, it should use the model.
+            return weights
+        ```
+    space_projection
+        Callable that takes samples and returns a Tensor in the projected space.
+        An example of projected space is the latent space of a model. See `LatentSpaceProjection`.
+    """
+
+    def __init__(self, get_weights: Callable = None, space_projection: Callable = None):
+        assert get_weights is not None or space_projection is not None, (
+            "At least one of `get_weights` and `space_projection`"
+            + " should not be `None`."
+        )
+
+        # set get_weights
+        if get_weights is None:
+            # no weights
+            get_weights = lambda inputs, _: tf.ones(tf.shape(inputs))
+        if not hasattr(get_weights, "__call__"):
+            raise TypeError(
+                f"`get_weights` should be a `Callable`, not a {type(get_weights)}"
+            )
+        self.get_weights = get_weights
+
+        # set space_projection
+        if space_projection is None:
+            space_projection = lambda inputs: inputs
+        if not hasattr(space_projection, "__call__"):
+            raise TypeError(
+                f"`space_projection` should be a `Callable`, not a {type(space_projection)}"
+            )
+        self.space_projection = space_projection
+
+    def get_input_weights(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """
+        Depending on the projection, we may not be able to visualize weights
+        as they are after the space projection. In this case, this method should be overwritten,
+        as in `AttributionProjection` which applies an upsampling.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Additional parameter for the `self.get_weights` function.
+
+        Returns
+        -------
+        input_weights
+            Tensor with the same dimension as `inputs` modulo the channels.
+            They are an upsampled version of the actual weights used in the projection.
+        """
+        projected_inputs = self.space_projection(inputs)
+        assert tf.reduce_all(tf.equal(projected_inputs, inputs)), (
+            "Weights cannot be interpreted in the input space"
+            + " if `space_projection()` is not an identity."
+            + " Either remove 'weights' from the returns or"
+            + " make your own projection and overwrite `get_input_weights`."
+        )
+
+        weights = self.get_weights(projected_inputs, targets)
+
+        return weights
+
+    @sanitize_inputs_targets
+    def project(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """
+        Project samples into a space meaningful for the model,
+        either by weighting the inputs, projecting into a latent space, or both.
+        This function should be called at the init and for each explanation.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Additional parameter for the `self.get_weights` function.
+
+        Returns
+        -------
+        projected_samples
+            The samples projected in the new space.
+        """
+        projected_inputs = self.space_projection(inputs)
+        weights = self.get_weights(projected_inputs, targets)
+
+        return tf.multiply(weights, projected_inputs)
+
+    def __call__(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """project alias"""
+        return self.project(inputs, targets)
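To make the `get_weights` contract concrete, here is a minimal sketch of a `Projection` built from a gradient-based weighting; the toy model and the function name are assumptions for illustration:

```python
import tensorflow as tf

from xplique.example_based.projections import Projection

# toy regression model, for illustration only
model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(16,))])

def gradient_weights(projected_inputs, targets=None):
    # weight each feature by the magnitude of the model's gradient w.r.t. it
    projected_inputs = tf.convert_to_tensor(projected_inputs)
    with tf.GradientTape() as tape:
        tape.watch(projected_inputs)
        scores = model(projected_inputs)
    return tf.abs(tape.gradient(scores, projected_inputs))

# identity space projection, gradient-magnitude weighting
projection = Projection(get_weights=gradient_weights)

weighted = projection(tf.random.uniform((8, 16)))  # shape (8, 16)
```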
diff --git a/xplique/example_based/projections/custom.py b/xplique/example_based/projections/custom.py
new file mode 100644
index 00000000..966c6ada
--- /dev/null
+++ b/xplique/example_based/projections/custom.py
@@ -0,0 +1,90 @@
+"""
+Custom, a projection from the example-based module
+"""
+
+import tensorflow as tf
+import numpy as np
+
+from ...types import Callable, Union
+
+from .base import Projection
+
+
+class CustomProjection(Projection):
+    """
+    Projection used by `NaturalExampleBasedExplainer` to project samples into a space
+    that is meaningful for the model to explain.
+
+    A projection has two parts, a `space_projection` and `weights`: to apply a projection,
+    the samples are first projected into a new space and then weighted.
+    Either the `space_projection` or the `weights` could be `None` but,
+    if both are, the projection is an identity function.
+
+    At least one of the two parts should include the model in the computation,
+    so that distances between projected elements make sense for the model.
+
+    Note that the cost of this projection should be limited
+    as it will be applied to all samples of the train dataset.
+
+    Parameters
+    ----------
+    weights
+        Either a Tensor or a Callable.
+        - In the case of a Tensor, weights are applied in the projected space
+        (after `space_projection`).
+        Hence weights should have the same shape as a `projected_input`.
+        - In the case of a Callable, the function should return the weights (a Tensor)
+        when called.
+        It is pertinent in the case of weights depending on the inputs, i.e. local weighting.
+
+        Example of `get_weights()` function:
+        ```
+        def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray),
+                                targets: Union(tf.Tensor, np.ndarray) = None):
+            '''
+            Example of function to get weights,
+            projected_inputs are the elements for which weights are computed.
+            targets are optional additional parameters for weights computation.
+            '''
+            weights = ...  # do some magic with inputs and targets, it should use the model.
+            return weights
+        ```
+    space_projection
+        Callable that takes samples and returns a Tensor in the projected space.
+        An example of projected space is the latent space of a model.
+        In this case, the model should be split and `space_projection` should be
+        the first part of the model (the feature extractor).
+    """
+
+    def __init__(
+        self,
+        weights: Union[Callable, tf.Tensor, np.ndarray] = None,
+        space_projection: Callable = None,
+    ):
+        # set the weights
+        if weights is None or hasattr(weights, "__call__"):
+            # weights is already a function or there are no weights
+            get_weights = weights
+        elif isinstance(weights, (tf.Tensor, np.ndarray)):
+            # weights is a tensor
+            if isinstance(weights, np.ndarray):
+                weights = tf.convert_to_tensor(weights, dtype=tf.float32)
+
+            # define a function that returns the weights
+            def get_weights(inputs, _ = None):
+                nweights = tf.expand_dims(weights, axis=0)
+                return tf.repeat(nweights, tf.shape(inputs)[0], axis=0)
+
+        else:
+            raise TypeError(
+                "`weights` should be a tensor or a `Callable`,"
+                + f" not a {type(weights)}"
+            )
+
+        # set space_projection
+        if space_projection is not None and not hasattr(space_projection, "__call__"):
+            raise TypeError(
+                "`space_projection` should be a `Callable`,"
+                + f" not a {type(space_projection)}"
+            )
+
+        super().__init__(get_weights, space_projection)
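And a sketch of the tensor branch of `CustomProjection`: a fixed mask applied to every sample (the mask values are illustrative):

```python
import numpy as np
import tensorflow as tf

from xplique.example_based.projections import CustomProjection

# fixed mask focusing the distance on the center of 8x8 single-channel images
mask = np.zeros((8, 8, 1), dtype=np.float32)
mask[2:6, 2:6, :] = 1.0

projection = CustomProjection(weights=mask)

inputs = tf.random.uniform((3, 8, 8, 1))
weighted = projection(inputs)  # shape (3, 8, 8, 1), borders zeroed out
```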
diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py
new file mode 100644
index 00000000..04ce0304
--- /dev/null
+++ b/xplique/example_based/projections/latent_space.py
@@ -0,0 +1,48 @@
+"""
+Latent space projection, a projection from the example-based module
+"""
+
+import tensorflow as tf
+
+from ...commons import find_layer
+from ...types import Callable, Union
+
+from .base import Projection
+
+
+class LatentSpaceProjection(Projection):
+    """
+    Projection that projects inputs into the latent space of the model.
+    It does not apply any weighting.
+
+    Parameters
+    ----------
+    model
+        The model from which we want to obtain explanations.
+    latent_layer
+        Layer used to split the `model`.
+
+        Layer to target for the outputs (e.g. logits or after softmax).
+        If an `int` is provided it will be interpreted as a layer index.
+        If a `string` is provided it will look for the layer name.
+
+        To separate after the last convolution, `"last_conv"` can be used.
+        Otherwise, `-1` could be used for the last layer before softmax.
+    """
+
+    def __init__(self, model: Callable, latent_layer: Union[str, int] = -1):
+        self.model = model
+
+        # split the model if a latent_layer is provided
+        if latent_layer == "last_conv":
+            self.latent_layer = next(
+                layer for layer in model.layers[::-1] if hasattr(layer, "filters")
+            )
+        else:
+            self.latent_layer = find_layer(model, latent_layer)
+
+        latent_space_projection = tf.keras.Model(
+            model.input, self.latent_layer.output, name="features_extractor"
+        )
+
+        super().__init__(space_projection=latent_space_projection)
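Finally, a sketch of the latent-space variant; the toy model is an assumption:

```python
import tensorflow as tf

from xplique.example_based.projections import LatentSpaceProjection

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10),
])

# project into the output space of the penultimate layer (here the Flatten layer)
projection = LatentSpaceProjection(model, latent_layer=-2)

latent = projection(tf.random.uniform((4, 28, 28, 1)))  # (4, 26 * 26 * 8)
```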
From eda33170c54a914a05da54a9673818d0ad36c683 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Mon, 29 May 2023 09:50:46 +0200
Subject: [PATCH 004/138] example based: introduce search methods

---
 .../example_based/search_methods/__init__.py |   8 +
 xplique/example_based/search_methods/base.py | 180 +++++++++++++++
 xplique/example_based/search_methods/knn.py  | 207 ++++++++++++++++++
 3 files changed, 395 insertions(+)
 create mode 100644 xplique/example_based/search_methods/__init__.py
 create mode 100644 xplique/example_based/search_methods/base.py
 create mode 100644 xplique/example_based/search_methods/knn.py

diff --git a/xplique/example_based/search_methods/__init__.py b/xplique/example_based/search_methods/__init__.py
new file mode 100644
index 00000000..228e1acd
--- /dev/null
+++ b/xplique/example_based/search_methods/__init__.py
@@ -0,0 +1,8 @@
+"""
+Search methods
+"""
+
+from .base import BaseSearchMethod
+
+# from .sklearn_knn import SklearnKNN
+from .knn import KNN

diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py
new file mode 100644
index 00000000..13a05f6a
--- /dev/null
+++ b/xplique/example_based/search_methods/base.py
@@ -0,0 +1,180 @@
+"""
+Base search method for the example-based module
+"""
+
+from abc import ABC, abstractmethod
+
+import tensorflow as tf
+import numpy as np
+
+from ...types import Callable, Union, Optional, List
+
+from ...commons import sanitize_dataset
+
+from ..projections.base import Projection
+
+
+def _sanitize_returns(returns: Optional[Union[List[str], str]] = None,
+                      possibilities: List[str] = None,
+                      default: Union[List[str], str] = None):
+    """
+    Factorization of `set_returns` for `BaseSearchMethod` and `SimilarExamples`.
+    It cleans the `returns` parameter.
+    The result is a sublist of `possibilities`.
+
+    Parameters
+    ----------
+    returns
+        The value to verify and put in the `instance.returns` attribute.
+    possibilities
+        List of possible unit values for `instance.returns`.
+    default
+        Value used in case `returns` is None.
+
+    Returns
+    -------
+    returns
+        The cleaned `returns` value.
+    """
+    if possibilities is None:
+        possibilities = ["examples"]
+    if default is None:
+        default = ["examples"]
+
+    if returns is None:
+        returns = default
+    elif isinstance(returns, str):
+        if returns == "all":
+            returns = possibilities
+        elif returns in possibilities:
+            returns = [returns]
+        else:
+            raise ValueError(f"{returns} should belong to {possibilities}")
+    elif isinstance(returns, list):
+        pass  # already in the right format.
+    else:
+        raise ValueError(f"{returns} should either be `str` or `List[str]`")
+
+    return returns
+
+
+class BaseSearchMethod(ABC):
+    """
+    Base class used by `NaturalExampleBasedExplainer` to search examples in
+    a space meaningful for the model. It can also be used alone but will not provide
+    model explanations.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        For natural example-based methods it is the train dataset.
+    targets_dataset
+        Targets associated to the cases_dataset for dataset projection.
+        See `projection` for details.
+    k
+        The number of examples to retrieve.
+    projection
+        Projection or Callable that projects samples from the input space to the search space.
+        The search space should be a space where distances make sense for the model.
+        It should not be `None`, otherwise
+        all examples could be computed only with the `search_method`.
+
+        Example of Callable:
+        ```
+        def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None):
+            '''
+            Example of projection,
+            inputs are the elements to project.
+            targets are optional parameters to orient the projection.
+            '''
+            projected_inputs = # do some magic on inputs, it should use the model.
+            return projected_inputs
+        ```
+    search_returns
+        String or list of strings with the elements to return in `self.find_examples()`.
+        See `self.set_returns()` for details.
+    batch_size
+        Number of samples treated simultaneously.
+        It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`.
+    """
+
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        search_returns: Optional[Union[List[str], str]] = None,
+        batch_size: Optional[int] = 32,
+    ):  # pylint: disable=R0801
+        # set batch size
+        if hasattr(cases_dataset, "_batch_size"):
+            self.batch_size = cases_dataset._batch_size
+        else:
+            self.batch_size = batch_size
+
+        self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size)
+        self.targets_dataset = sanitize_dataset(targets_dataset, self.batch_size)
+        if self.targets_dataset is None:
+            # the `find_examples()` method needs to be able to iterate on `self.targets_dataset`
+            self.targets_dataset = [None] * self.cases_dataset.cardinality().numpy()
+
+        self.set_k(k)
+        self.set_returns(search_returns)
+        self.projection = projection
+
+    def set_k(self, k: int):
+        """
+        Change the value of k without constructing a new `BaseSearchMethod`.
+        It is useful because the constructor can be computationally expensive.
+
+        Parameters
+        ----------
+        k
+            The number of examples to retrieve.
+        """
+        assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}"
+        self.k = k
+
+    def set_returns(self, returns: Optional[Union[List[str], str]] = None):
+        """
+        Set `self.returns` used to define returned elements in `self.find_examples()`.
+
+        Parameters
+        ----------
+        returns
+            Most elements are useful in `xplique.plots.plot_examples()`.
+            `returns` can be set to 'all' for all possible elements to be returned.
+                - 'examples' correspond to the expected examples,
+                  the inputs may be included in the first position. (n, k(+1), ...)
+                - 'indices' the indices of the examples in the `search_set`.
+                  Used to retrieve the original examples and labels. (n, k, ...)
+                - 'distances' the distances between the inputs and the corresponding examples.
+                  They are associated to the examples. (n, k, ...)
                - 'include_inputs' specifies if inputs should be included in the returned
+                  elements. Note that it changes the number of returned elements from k to k+1.
+        """
+        possibilities = ["examples", "indices", "distances", "include_inputs"]
+        default = "examples"
+        self.returns = _sanitize_returns(returns, possibilities, default)
+
+    @abstractmethod
+    def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]):
+        """
+        Search the samples to return as examples. Called by the explain methods.
+        It may also return the indices corresponding to the samples,
+        based on the `self.returns` value.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Assumed to have been already projected.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+        """
+        raise NotImplementedError()
+
+    def __call__(self, inputs: Union[tf.Tensor, np.ndarray]):
+        """find_examples alias"""
+        return self.find_examples(inputs)
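The contract a concrete search method must fulfil is small; as an illustration, a deliberately naive subclass (an assumption, not part of the patch) could look like:

```python
import tensorflow as tf

from xplique.example_based.search_methods import BaseSearchMethod

class RandomSearch(BaseSearchMethod):
    """Toy search method: returns k random cases for every input."""

    def find_examples(self, inputs):
        # draw one random batch of k cases and tile it for every input
        cases = next(iter(self.cases_dataset.unbatch().shuffle(256).batch(self.k)))
        multiples = tf.concat(
            [[tf.shape(inputs)[0]], tf.ones(tf.rank(cases), tf.int32)], axis=0
        )
        return tf.tile(tf.expand_dims(cases, 0), multiples)  # (n, k, ...)
```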
diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py
new file mode 100644
index 00000000..ed8d721b
--- /dev/null
+++ b/xplique/example_based/search_methods/knn.py
@@ -0,0 +1,207 @@
+"""
+KNN online search method in the example-based module
+"""
+
+import numpy as np
+import tensorflow as tf
+
+from ...commons import dataset_gather
+from ...types import Callable, List, Union, Optional, Tuple
+
+from .base import BaseSearchMethod
+from ..projections import Projection
+
+
+class KNN(BaseSearchMethod):
+    """
+    KNN method to search examples. The k nearest neighbors are computed with TensorFlow,
+    batch by batch over the cases dataset, following the `BaseSearchMethod` API.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        For natural example-based methods it is the train dataset.
+    targets_dataset
+        Targets associated to the cases_dataset for dataset projection.
+        See `projection` for details.
+    k
+        The number of examples to retrieve.
+    projection
+        Projection or Callable that projects samples from the input space to the search space.
+        The search space should be a space where distances make sense for the model.
+        It should not be `None`, otherwise
+        all examples could be computed only with the `search_method`.
+
+        Example of Callable:
+        ```
+        def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None):
+            '''
+            Example of projection,
+            inputs are the elements to project.
+            targets are optional parameters to orient the projection.
+            '''
+            projected_inputs = # do some magic on inputs, it should use the model.
+            return projected_inputs
+        ```
+    search_returns
+        String or list of strings with the elements to return in `self.find_examples()`.
+        See `self.set_returns()` for details.
+    batch_size
+        Number of samples treated simultaneously.
+        It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`.
+    distance
+        Either a Callable, or a value supported by the `ord` parameter of `tf.norm`.
+        Its documentation (https://www.tensorflow.org/api_docs/python/tf/norm) says:
+        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
+        yielding the corresponding p-norm."
+    """
+
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        search_returns: Optional[Union[List[str], str]] = None,
+        batch_size: Optional[int] = 32,
+        distance: Union[int, str, Callable] = "euclidean",
+    ):  # pylint: disable=R0801
+        super().__init__(
+            cases_dataset, targets_dataset, k, projection, search_returns, batch_size
+        )
+
+        if hasattr(distance, "__call__"):
+            self.distance_fn = distance
+        elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance(
+            distance, int
+        ):
+            self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance)
+        else:
+            raise AttributeError(
+                "The `distance` parameter is expected to be either a Callable or in"
+                + " ['fro', 'euclidean', 1, 2, np.inf], "
+                + f"but {distance} was received."
+            )
+
+        self.distance_fn_over_all_x2 = lambda x1, x2: tf.map_fn(
+            fn=lambda x2: self.distance_fn(x1, x2),
+            elems=x2,
+        )
+
+        # Computes crossed distances between two tensors x1(shape=(n1, ...)) and x2(shape=(n2, ...))
+        # The result is a distance matrix of size (n1, n2)
+        self.crossed_distances_fn = lambda x1, x2: tf.vectorized_map(
+            fn=lambda a1: self.distance_fn_over_all_x2(a1, x2),
+            elems=x1
+        )
+
+    def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]:
+        """
+        Compute the k nearest neighbors of each tensor of `inputs` within `self.cases_dataset`.
+        Here `self.cases_dataset` is a `tf.data.Dataset`, hence computations are done by batches.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples on which knn are computed.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+
+        Returns
+        -------
+        best_distances
+            Tensor of distances between the knn and the inputs with dimension (n, k).
+            The n inputs times their k nearest neighbors.
+        best_indices
+            Tensor of indices of the knn in `self.cases_dataset` with dimension (n, k, 2).
+            Here, n represents the number of inputs and k the number of corresponding examples.
+            The index of each element is encoded by two values,
+            the batch index and the index of the element in the batch.
+            Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`.
+        """
+        nb_inputs = tf.shape(inputs)[0]
+
+        # initialize
+        # (n, k, 2)
+        best_indices = tf.Variable(tf.fill((nb_inputs, self.k, 2), -1))
+        # (n, k)
+        best_distances = tf.Variable(tf.fill((nb_inputs, self.k), np.inf))
+        # (n, bs)
+        batch_indices = tf.expand_dims(tf.range(self.batch_size, dtype=tf.int32), axis=0)
+        batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1))
+
+        # iterate on batches
+        for batch_index, (cases, targets) in enumerate(
+            zip(self.cases_dataset, self.targets_dataset)
+        ):
+            # project batch of dataset cases
+            if self.projection is not None:
+                projected_cases = self.projection.project(cases, targets)
+            else:
+                projected_cases = cases
+
+            # add new elements
+            # (n, current_bs, 2)
+            indices = batch_indices[:, : tf.shape(projected_cases)[0]]
+            new_indices = tf.stack(
+                [tf.fill(indices.shape, tf.cast(batch_index, tf.int32)), indices], axis=-1
+            )
+
+            # compute distances
+            # (n, current_bs)
+            distances = self.crossed_distances_fn(inputs, projected_cases)
+
+            # (n, k+current_bs, 2)
+            concatenated_indices = tf.concat([best_indices, new_indices], axis=1)
+            # (n, k+current_bs)
+            concatenated_distances = tf.concat([best_distances, distances], axis=1)
+
+            # sort all
+            # (n, k)
+            sort_order = tf.argsort(
+                concatenated_distances, axis=1, direction="ASCENDING"
+            )[:, : self.k]
+
+            best_indices.assign(
+                tf.gather(concatenated_indices, sort_order, axis=1, batch_dims=1)
+            )
+            best_distances.assign(
+                tf.gather(concatenated_distances, sort_order, axis=1, batch_dims=1)
+            )
+
+        return best_distances, best_indices
+
+    def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]):
+        """
+        Search the samples to return as examples. Called by the explain methods.
+        It may also return the indices corresponding to the samples,
+        based on the `self.returns` value.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Assumed to have been already projected.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+        """
+        # compute neighbors
+        examples_distances, examples_indices = self.kneighbors(inputs)
+
+        # set values in the return dict
+        return_dict = {}
+        if "examples" in self.returns:
+            return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices)
+            if "include_inputs" in self.returns:
+                inputs = tf.expand_dims(inputs, axis=1)
+                return_dict["examples"] = tf.concat(
+                    [inputs, return_dict["examples"]], axis=1
+                )
+        if "indices" in self.returns:
+            return_dict["indices"] = examples_indices
+        if "distances" in self.returns:
+            return_dict["distances"] = examples_distances
+
+        # return a dict only if several variables are returned
+        if len(return_dict) == 1:
+            return list(return_dict.values())[0]
+        return return_dict
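A usage sketch for the batched KNN search; the data and the identity projection are assumptions for illustration:

```python
import tensorflow as tf

from xplique.example_based.projections import CustomProjection
from xplique.example_based.search_methods import KNN

cases = tf.random.uniform((100, 16))
targets = tf.one_hot(tf.random.uniform((100,), maxval=10, dtype=tf.int32), depth=10)

knn = KNN(
    cases_dataset=cases,
    targets_dataset=targets,
    k=5,
    projection=CustomProjection(space_projection=lambda x: x),  # identity projection
    search_returns=["examples", "distances"],
    batch_size=25,
    distance="euclidean",
)

results = knn(cases[:3])  # inputs are assumed to be already projected
examples, distances = results["examples"], results["distances"]  # (3, 5, 16), (3, 5)
```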
From 4b0a3406ee66b5fec54532d0f196913cff2cebc3 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Mon, 29 May 2023 09:51:58 +0200
Subject: [PATCH 005/138] example based: add base class

---
 xplique/example_based/similar_examples.py | 380 ++++++++++++++++++++++
 1 file changed, 380 insertions(+)
 create mode 100644 xplique/example_based/similar_examples.py

diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py
new file mode 100644
index 00000000..2961b1e0
--- /dev/null
+++ b/xplique/example_based/similar_examples.py
@@ -0,0 +1,380 @@
+"""
+Base model for example-based
+"""
+
+import math
+
+import tensorflow as tf
+import numpy as np
+
+from ..types import Callable, Dict, List, Optional, Type, Union
+
+from ..commons import sanitize_inputs_targets
+from ..commons import sanitize_dataset, dataset_gather
+from .search_methods import KNN, BaseSearchMethod
+from .projections import Projection
+
+from .search_methods.base import _sanitize_returns
+
+
+class SimilarExamples:
+    """
+    Base class for natural example-based methods explaining models.
+    It projects the cases_dataset into a space pertinent for the model with a `Projection`,
+    then calls a `BaseSearchMethod` on it.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        `tf.data.Dataset` are assumed to be batched, as tensorflow provides no method to verify it.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration; be sure that it is
+        not the case for your dataset, otherwise the examples will not make sense.
+    labels_dataset
+        Labels associated to the examples in the dataset. Indices should match with cases_dataset.
+        `tf.data.Dataset` are assumed to be batched, as tensorflow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration; be sure that it is
+        not the case for your dataset, otherwise the examples will not make sense.
+    targets_dataset
+        Targets associated to the cases_dataset for dataset projection.
+        See `projection` for details.
+        `tf.data.Dataset` are assumed to be batched, as tensorflow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration; be sure that it is
+        not the case for your dataset, otherwise the examples will not make sense.
+    search_method
+        An algorithm to search the examples in the projected space.
+    k
+        The number of examples to retrieve.
+    projection
+        Projection or Callable that projects samples from the input space to the search space.
+        The search space should be a space where distances make sense for the model.
+        It should not be `None`, otherwise
+        all examples could be computed only with the `search_method`.
+
+        Example of Callable:
+        ```
+        def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None):
+            '''
+            Example of projection,
+            inputs are the elements to project.
+            targets are optional parameters to orient the projection.
+            '''
+            projected_inputs = # do some magic on inputs, it should use the model.
+            return projected_inputs
+        ```
+    case_returns
+        String or list of strings with the elements to return in `self.explain()`.
+        See `self.set_returns()` for details.
+    batch_size
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+    search_method_kwargs
+        Parameters to be passed at the construction of the `search_method`.
+    """
+
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        search_method: Type[BaseSearchMethod] = KNN,
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        case_returns: Union[List[str], str] = "examples",
+        batch_size: Optional[int] = 32,
+        **search_method_kwargs,
+    ):
+        assert (
+            projection is not None
+        ), "`SimilarExamples` without `projection` is a `BaseSearchMethod`."
+
+        # set attributes
+        batch_size = self.__initialize_cases_dataset(
+            cases_dataset, labels_dataset, targets_dataset, batch_size
+        )
+        self.k = k
+        self.set_returns(case_returns)
+        self.projection = projection
+
+        # set `search_returns` if not provided and overwrite it otherwise
+        search_method_kwargs["search_returns"] = ["indices", "distances"]
+
+        # initiate search_method
+        self.search_method = search_method(
+            cases_dataset=cases_dataset,
+            targets_dataset=targets_dataset,
+            k=k,
+            projection=projection,
+            batch_size=batch_size,
+            **search_method_kwargs,
+        )
+
+    def __initialize_cases_dataset(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]],
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]],
+        batch_size: Optional[int],
+    ) -> int:
+        """
+        Factorization of the `__init__()` method for dataset-related attributes.
+
+        Parameters
+        ----------
+        cases_dataset
+            The dataset used to train the model, examples are extracted from this dataset.
+        labels_dataset
+            Labels associated to the examples in the dataset.
+            Indices should match with cases_dataset.
+        targets_dataset
+            Targets associated to the cases_dataset for dataset projection.
+            See `projection` for details.
+        batch_size
+            Number of samples treated simultaneously when using the datasets.
+            Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+
+        Returns
+        -------
+        batch_size
+            Number of samples treated simultaneously when using the datasets.
+            Extracted from the datasets in case they are `tf.data.Dataset`.
+            Otherwise, the input value.
+ """ + # at least one dataset provided + if isinstance(cases_dataset, tf.data.Dataset): + # set batch size (ignore provided argument) and cardinality + if isinstance(cases_dataset.element_spec, tuple): + batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy() + else: + batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() + + cardinality = cases_dataset.cardinality().numpy() + else: + # if case_dataset is not a `tf.data.Dataset`, then neither should the other. + assert not isinstance(labels_dataset, tf.data.Dataset) + assert not isinstance(targets_dataset, tf.data.Dataset) + # set batch size and cardinality + batch_size = min(batch_size, len(cases_dataset)) + cardinality = math.ceil(len(cases_dataset) / batch_size) + + # verify cardinality and create datasets from the tensors + self.cases_dataset = sanitize_dataset( + cases_dataset, batch_size, cardinality + ) + self.labels_dataset = sanitize_dataset( + labels_dataset, batch_size, cardinality + ) + self.targets_dataset = sanitize_dataset( + targets_dataset, batch_size, cardinality + ) + + # if the provided `cases_dataset` has several columns + if isinstance(self.cases_dataset.element_spec, tuple): + # switch case on the number of columns of `cases_dataset` + if len(self.cases_dataset.element_spec) == 2: + assert self.labels_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels." + + "Hence, `labels_dataset` should be empty." + ) + self.labels_dataset = self.cases_dataset.map(lambda x, y: y) + self.cases_dataset = self.cases_dataset.map(lambda x, y: x) + + elif len(self.cases_dataset.element_spec) == 3: + assert self.labels_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels." + + "Hence, `labels_dataset` should be empty." + ) + assert self.targets_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels." + + "Hence, `labels_dataset` should be empty." + ) + self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) + self.labels_dataset = self.cases_dataset.map(lambda x, y, t: y) + self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) + else: + raise AttributeError( + "`cases_dataset` cannot possess more than 3 columns," + + f"{len(self.cases_dataset.element_spec)} were detected." + ) + + self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) + if self.labels_dataset is not None: + self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) + if self.targets_dataset is not None: + self.targets_dataset = self.targets_dataset.prefetch(tf.data.AUTOTUNE) + + return batch_size + + def set_k(self, k: int): + """ + Setter for the k parameter. + + Parameters + ---------- + k + Number of examples to return, it should be a positive integer. + """ + assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" + self.k = k + self.search_method.set_k(k) + + def set_returns(self, returns: Union[List[str], str]): + """ + Set `self.returns` used to define returned elements in `self.explain()`. + + Parameters + ---------- + returns + Most elements are useful in `xplique.plots.plot_examples()`. + `returns` can be set to 'all' for all possible elements to be returned. + - 'examples' correspond to the expected examples, + the inputs may be included in first position. (n, k(+1), ...) + - 'weights' the weights in the input space used in the projection. + They are associated to the input and the examples. (n, k(+1), ...) + - 'distances' the distances between the inputs and the corresponding examples. 
+
+    def set_returns(self, returns: Union[List[str], str]):
+        """
+        Set `self.returns`, used to define the elements returned by `self.explain()`.
+
+        Parameters
+        ----------
+        returns
+            Most elements are useful in `xplique.plots.plot_examples()`.
+            `returns` can be set to 'all' for all possible elements to be returned.
+                - 'examples' corresponds to the expected examples,
+                the inputs may be included in first position. (n, k(+1), ...)
+                - 'weights' the weights in the input space used in the projection.
+                They are associated to the inputs and the examples. (n, k(+1), ...)
+                - 'distances' the distances between the inputs and the corresponding examples.
+                They are associated to the examples. (n, k, ...)
+                - 'labels' if provided through `labels_dataset`,
+                they are the labels associated with the examples. (n, k, ...)
+                - 'include_inputs' specifies if inputs should be included in the returned elements.
+                Note that it changes the number of returned elements from k to k+1.
+        """
+        possibilities = ["examples", "weights", "distances", "labels", "include_inputs"]
+        default = "examples"
+        self.returns = _sanitize_returns(returns, possibilities, default)
+
+    @sanitize_inputs_targets
+    def explain(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """
+        Compute examples to explain the inputs.
+        It projects inputs into the search space with `self.projection`
+        and finds examples with `self.search_method`.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Tensor or Array passed to the projection function.
+
+        Returns
+        -------
+        return_dict
+            Dictionary with the elements listed in `self.returns`.
+            If only one element is present, the element itself is returned.
+            The elements that can be returned are:
+            examples, weights, distances, indices, and labels.
+        """
+        # project inputs
+        projected_inputs = self.projection(inputs, targets)
+
+        # look for the closest elements to the projected inputs
+        search_output = self.search_method(projected_inputs)
+
+        # manage returned elements
+        return self.format_search_output(search_output, inputs, targets)
+
+    def __call__(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """explain() alias"""
+        return self.explain(inputs, targets)
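+    # usage sketch (illustrative): requesting several elements yields a dict
+    #     explainer.set_returns(["examples", "distances", "include_inputs"])
+    #     outputs = explainer(inputs)       # __call__ aliases explain()
+    #     outputs["examples"]               # (n, k + 1, ...), inputs in first position
+    #     outputs["distances"]              # (n, k)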
+ """ + return_dict = {} + + examples = dataset_gather(self.cases_dataset, search_output["indices"]) + examples_labels = dataset_gather(self.labels_dataset, search_output["indices"]) + examples_targets = dataset_gather( + self.targets_dataset, search_output["indices"] + ) + + # add examples and weights + if "examples" in self.returns or "weights" in self.returns: + if "include_inputs" in self.returns: + # include inputs + inputs = tf.expand_dims(inputs, axis=1) + examples = tf.concat([inputs, examples], axis=1) + if targets is not None: + targets = tf.expand_dims(targets, axis=1) + examples_targets = tf.concat([targets, examples_targets], axis=1) + else: + examples_targets = [None] * len(examples) + if "examples" in self.returns: + return_dict["examples"] = examples + if "weights" in self.returns: + # get weights of examples (n, k, ...) + # we iterate on the inputs dimension through maps + # and ask weights for batch of examples + weights = [] + for ex, ex_targ in zip(examples, examples_targets): + if isinstance(self.projection, Projection): + # get weights in the input space + weights.append(self.projection.get_input_weights(ex, ex_targ)) + else: + raise AttributeError( + "Cannot extract weights from the provided projection function" + + "Either remove 'weights' from the `case_returns` or" + + "inherit from `Projection` and overwrite `get_input_weights`." + ) + + return_dict["weights"] = tf.stack(weights, axis=0) + + # optimization test TODO + # return_dict["weights"] = tf.vectorized_map( + # fn=lambda x: self.projection.get_input_weights(x[0], x[1]), + # elems=(examples, examples_targets), + # # fn_output_signature=tf.float32, + # ) + + # add indices, distances, and labels + if "distances" in self.returns: + return_dict["distances"] = search_output["distances"] + if "labels" in self.returns: + assert ( + examples_labels is not None + ), "The method cannot return labels without a label dataset." + return_dict["labels"] = examples_labels + + # return a dict only different variables are returned + if len(return_dict) == 1: + return list(return_dict.values())[0] + return return_dict From 0daf2817abc8ce8d6d31ff1bd7d1f76954e1c95d Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:36:52 +0100 Subject: [PATCH 006/138] cole: update and improve --- xplique/__init__.py | 3 +- xplique/example_based/__init__.py | 6 + xplique/example_based/cole.py | 492 ++++++------------------------ xplique/types/__init__.py | 2 +- 4 files changed, 109 insertions(+), 394 deletions(-) create mode 100644 xplique/example_based/__init__.py diff --git a/xplique/__init__.py b/xplique/__init__.py index 32ee5166..8ab3377a 100644 --- a/xplique/__init__.py +++ b/xplique/__init__.py @@ -9,9 +9,10 @@ __version__ = '1.3.3' from . import attributions +from . import commons from . import concepts +from . import example_based from . import features_visualizations -from . import commons from . 

From 0daf2817abc8ce8d6d31ff1bd7d1f76954e1c95d Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Wed, 14 Feb 2024 16:36:52 +0100
Subject: [PATCH 006/138] cole: update and improve

---
 xplique/__init__.py               |   3 +-
 xplique/example_based/__init__.py |   6 +
 xplique/example_based/cole.py     | 492 ++++++------------------------
 xplique/types/__init__.py         |   2 +-
 4 files changed, 109 insertions(+), 394 deletions(-)
 create mode 100644 xplique/example_based/__init__.py

diff --git a/xplique/__init__.py b/xplique/__init__.py
index 32ee5166..8ab3377a 100644
--- a/xplique/__init__.py
+++ b/xplique/__init__.py
@@ -9,9 +9,10 @@
 __version__ = '1.3.3'
 
 from . import attributions
+from . import commons
 from . import concepts
+from . import example_based
 from . import features_visualizations
-from . import commons
 from . import plots
 
 from .commons import Tasks

diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py
new file mode 100644
index 00000000..a958a62b
--- /dev/null
+++ b/xplique/example_based/__init__.py
@@ -0,0 +1,6 @@
+"""
+Example-based methods available
+"""
+
+from .cole import Cole
+from .similar_examples import SimilarExamples
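With these exports in place, both explainers become importable from the package root; a quick sanity check of the new layout (illustrative, relying on the Cole rework shown in the next diff):

```python
from xplique.example_based import Cole, SimilarExamples

# the reworked Cole is rebuilt on top of the new base class
assert issubclass(Cole, SimilarExamples)
```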

diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py
index 63fa69c6..85c4c2d6 100644
--- a/xplique/example_based/cole.py
+++ b/xplique/example_based/cole.py
@@ -1,411 +1,119 @@
 """
-Module related to Case Base Explainer
+Implementation of the Cole method, a similar-examples method from the example-based module
 """
 
-
-import matplotlib.pyplot as plt
 import numpy as np
-from sklearn.metrics import DistanceMetric
-from sklearn.neighbors import KDTree
 import tensorflow as tf
 
-from ..plots.image import _standardize_image
-from ..types import Callable, Union, Optional
+from ..attributions.base import BlackBoxExplainer
+from ..attributions import Saliency
+from ..types import Callable, List, Optional, Union, Type
+
+from .similar_examples import SimilarExamples
+from .projections import AttributionProjection
+from .search_methods import KNN
+from .search_methods import BaseSearchMethod
 
 
-class Cole:
+class Cole(SimilarExamples):
     """
-    Used to compute the Case Based Explainer sytem, a twins sytem that use ANN and knn with
-    the same dataset.
+    Cole is a similar-examples method that gives the most similar examples to a query.
+    Cole uses the model to build a search space so that distances are meaningful for the model.
+    It uses attribution methods to weight the inputs.
+    Those attributions may be computed in the latent space for complex data types like images.
 
-    Ref. Eoin M. Kenny and Mark T. Keane.
+    It is an implementation of a method proposed by Kenny and Keane in 2019,
     Twin-Systems to Explain Artificial Neural Networks using Case-Based Reasoning:
-    Comparative Tests of Feature-Weighting Methods in ANN-CBR Twins for XAI. (2019)
-    https://www.ijcai.org/proceedings/2019/376
+    https://researchrepository.ucd.ie/handle/10197/11064
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        `tf.data.Dataset` instances are assumed to be batched,
+        as TensorFlow provides no method to verify it.
+        Be careful, `tf.data.Dataset` instances are often reshuffled at each iteration;
+        make sure it is not the case for your dataset, otherwise examples will not make sense.
+    labels_dataset
+        Labels associated to the examples in the dataset. Indices should match with `cases_dataset`.
+        `tf.data.Dataset` instances are assumed to be batched,
+        as TensorFlow provides no method to verify it.
+        Batch size and cardinality should match those of `cases_dataset`.
+        Be careful, `tf.data.Dataset` instances are often reshuffled at each iteration;
+        make sure it is not the case for your dataset, otherwise examples will not make sense.
+    targets_dataset
+        Targets associated to the `cases_dataset` for dataset projection.
+        See `projection` for details.
+        `tf.data.Dataset` instances are assumed to be batched,
+        as TensorFlow provides no method to verify it.
+        Batch size and cardinality should match those of `cases_dataset`.
+        Be careful, `tf.data.Dataset` instances are often reshuffled at each iteration;
+        make sure it is not the case for your dataset, otherwise examples will not make sense.
+    search_method
+        An algorithm to search the examples in the projected space.
+    k
+        The number of examples to retrieve. Default value is `1`.
+    distance
+        Either a Callable, or a value supported by the `ord` parameter of `tf.norm`.
+        Its documentation (https://www.tensorflow.org/api_docs/python/tf/norm) says:
+        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
+        yielding the corresponding p-norm."
+    case_returns
+        String or list of strings with the elements to return in `self.explain()`.
+        See `self.set_returns()` from the parent class `SimilarExamples` for details.
+        By default, the `explain()` method will only return the examples.
+    batch_size
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+    latent_layer
+        Layer used to split the model, the first part will be used for projection and
+        the second to compute the attributions. By default, the model is not split.
+        For such a split, the `model` should be a `tf.keras.Model`.
+
+        Layer to target for the outputs (e.g. logits or after softmax).
+        If an `int` is provided, it will be interpreted as a layer index.
+        If a `string` is provided, it will look for the layer name.
+
+        The method as described in the paper applies the split at the last convolutional layer.
+        To do so, the `"last_conv"` value will extract it.
+        Otherwise, `-1` could be used for the last layer before softmax.
+    attribution_method
+        Class of the attribution method to use for projection.
+        It should inherit from `xplique.attributions.base.BlackBoxExplainer`.
+        Ignored if a projection is given.
+    attribution_kwargs
+        Parameters to be passed at the construction of the `attribution_method`.
     """
 
     def __init__(
         self,
-        model: Callable,
-        case_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-        labels_train: np.ndarray,
-        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
-        distance_function: DistanceMetric = None,
-        weights_extraction_function: Callable = None,
-        k: Optional[int] = 3,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        model: tf.keras.Model,
+        targets_dataset: Union[tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.Tensor, np.ndarray]] = None,
+        search_method: Type[BaseSearchMethod] = KNN,
+        k: int = 1,
+        distance: Union[str, Callable] = "euclidean",
+        case_returns: Optional[Union[List[str], str]] = "examples",
+        batch_size: Optional[int] = 32,
+        latent_layer: Optional[Union[str, int]] = None,
+        attribution_method: Type[BlackBoxExplainer] = Saliency,
+        **attribution_kwargs,
     ):
-        """
-        Parameters
-        ----------
-        model
-            The model from wich we want to obtain explanations.
-        case_dataset
-            The dataset used to train the model,
-            also use by the function to calcul the closest examples.
-        labels_train
-            labels define by the dataset.
-        targets
-            labels predict by the model from the dataset.
-        distance_function
-            The function to calcul the distance between the inputs and all the dataset.
-            (Can use : euclidean, manhattan, minkowski etc...)
-        weights_extraction_function
-            The function to calcul the weight of every features,
-            many type of methode can be use but it will depend of
-            what type of dataset you've got.
-            examples:
-                def my_function(inputs, targets):
-                    # outputs.shape == inputs.shape
-                    return outputs
-        k
-            Represante how many nearest neighbours you want to be returns.
- """ - # set attributes - self.model = model - self.case_dataset = case_dataset - self.weights_extraction_function = weights_extraction_function - self.k_neighbors = k - self.labels_train = labels_train - - # verify targets parametre - if targets is None: - targets = model(case_dataset) - nb_classes = targets.shape[1] - targets = tf.argmax(targets, axis=1) - targets = tf.one_hot( - targets, nb_classes - ) # nb_classes normalement en second argument mais la du coup 10. - - # verify distance_function parametre - if distance_function is None: - distance_function = DistanceMetric.get_metric("euclidean") - - # verify weight_extraction_function parametre - if weights_extraction_function is None: - self.weights_extraction_function = self._get_default_weights_extraction_function() - - # compute case dataset weights (used in distance) - # the weight extraction function may need the predictions to extract the weights - case_dataset_weight = self.weights_extraction_function(case_dataset, targets) - # for images, channels may disappear - if len(case_dataset_weight.shape) != len(case_dataset.shape): - case_dataset_weight = tf.expand_dims(case_dataset_weight, -1) - self.case_dataset_weight = case_dataset_weight - - # apply weights to the case dataset (weighted distance) - weighted_case_dataset = tf.math.multiply(case_dataset_weight, case_dataset) - # flatten features for kdtree - weighted_case_dataset = tf.reshape( - weighted_case_dataset, [weighted_case_dataset.shape[0], -1] + # buil attribution projection + projection = AttributionProjection( + model=model, + method=attribution_method, + latent_layer=latent_layer, + **attribution_kwargs, ) - # create kdtree instance with weighted case dataset - # will be called to estimate closest examples - self.knn = KDTree(weighted_case_dataset, metric=distance_function) - - def extract_element_from_indices( - self, - labels_train: np.ndarray, - examples_indice: np.ndarray, - ): - """ - This function has to extract every example and weights from the dataset - by the indice calculate with de knn query in the explain function - - Parameters - ---------- - labels_train - labels define by the dataset. - examples_indice - Represente the indice of the K nearust neighbours of the input. - - Returns - ------- - examples - Represente the K nearust neighbours of the input. - examples_weights - features weight of the examples. - labels_examples - labels of the examples. - """ - all_examples = [] - all_weight_examples = [] - all_labels_examples = [] - for sample_examples_indice in examples_indice: - sample_examples = [] - weight_ex = [] - label_ex = [] - for indice in sample_examples_indice: - sample_examples.append(self.case_dataset[indice]) - weight_ex.append(self.case_dataset_weight[indice]) - label_ex.append(labels_train[indice]) - # (k, h, w, 1) - all_examples.append(tf.stack(sample_examples, axis=0)) - all_weight_examples.append(tf.stack(weight_ex, axis=0)) - all_labels_examples.append(tf.stack(label_ex, axis=0)) - # (n, k, h, w, 1) - examples = tf.stack(all_examples, axis=0) - examples_weights = tf.stack(all_weight_examples, axis=0) - labels_examples = tf.stack(all_labels_examples, axis=0) - - return examples, examples_weights, labels_examples - - def explain( - self, - inputs: Union[tf.Tensor, np.ndarray], - targets: Union[tf.Tensor, np.ndarray] = None, - ): - """ - This function calculates the indice of the k closest example of the different inputs. - Then calls extract_element_from_indice to extract the examples from those indices. 
- - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N,W), (N,T,W), (N,W,H,C). - targets - Tensor or Array. Corresponding to the prediction of the samples by the model. - shape: (n, nb_classes) - Used by the `weights_extraction_function` if it is an Xplique attribution function, - For more details, please refer to the explain methods documentation. - - Returns - ------- - examples - Represente the K nearust neighbours of the input. - examples_distance - distance between the input and the examples. - examples_weight - features weight of the examples. - inputs_weights - features weight of the inputs. - examples_labels - labels of the examples. - """ - - # verify targets parametre - if targets is None: - targets = self.model(inputs) - nb_classes = targets.shape[1] - targets = tf.argmax(targets, axis=1) - targets = tf.one_hot(targets, nb_classes) - - # compute weight (used in distance) - # the weight extraction function may need the prediction to extract the weights - inputs_weights = self.weights_extraction_function(inputs, targets) - - # for images, channels may disappear - if len(inputs_weights.shape) != len(inputs.shape): - inputs_weights = tf.expand_dims(inputs_weights, -1) - - # apply weights to the inputs - weighted_inputs = tf.math.multiply(inputs_weights, inputs) - # flatten features for knn query - weighted_inputs = tf.reshape(weighted_inputs, [weighted_inputs.shape[0], -1]) - - # kdtree instance call with knn.query, - # call with the weighted inputs and the number of closest examples (k) - examples_distance, examples_indice = self.knn.query( - weighted_inputs, k=self.k_neighbors + assert targets_dataset is not None + + super().__init__( + cases_dataset, + labels_dataset, + targets_dataset, + search_method, + k, + projection, + case_returns, + batch_size, + distance=distance, ) - - # call the extract_element_from_indices function - examples, examples_weights, examples_labels = self.extract_element_from_indices( - self.labels_train, examples_indice - ) - - return ( - examples, - examples_distance, - examples_weights, - inputs_weights, - examples_labels, - ) - - @staticmethod - def _get_default_weights_extraction_function(): - """ - This function allows you to get the default weight extraction function. - """ - return lambda inputs, targets: tf.ones(inputs.shape) - - def show_result_images( - self, - inputs: Union[tf.Tensor, np.ndarray], - examples: Union[tf.Tensor, np.ndarray], - examples_distance: float, - inputs_weights: np.ndarray, - examples_weights: np.ndarray, - indice_original: int, - examples_labels: np.ndarray, - labels_test: np.ndarray, - clip_percentile: Optional[float] = 0.2, - cmapimages: Optional[str] = "gray", - cmapexplanation: Optional[str] = "coolwarm", - alpha: Optional[float] = 0.5, - ): - """ - This function is for image data, it show the returns of the explain function. - - Parameters - --------- - inputs - Tensor or Array. Input samples to be show next to examples. - Expected shape among (N,W), (N,T,W), (N,W,H,C). - examples - Represente the K nearust neighbours of the input. - examples_distance - Distance between input data and examples. - inputs_weights - features weight of the inputs. - examples_weight - features weight of the examples. - indice_original - Represente the indice of the inputs to show the true labels. - examples_labels - labels of the examples. - labels_test - Corresponding to labels of the dataset test. 
- clip_percentile - Percentile value to use if clipping is needed, e.g a value of 1 will perform a clipping - between percentile 1 and 99. - This parameter allows to avoid outliers in case of too extreme values. - cmapimages - For images. - The Colormap instance or registered colormap name used to map scalar data to colors. - This parameter is ignored for RGB(A) data. - cmapexplanation - For explanation. - The Colormap instance or registered colormap name used to map scalar data to colors. - This parameter is ignored for RGB(A) data. - alpha - The alpha blending value, between 0 (transparent) and 1 (opaque). - If alpha is an array, the alpha blending values are applied pixel by pixel, - and alpha must have the same shape as X. - """ - # pylint: disable=too-many-arguments - - # Initialize 'input_and_examples' and 'corresponding_weights' that they - # will be use to show every closest examples and the explanation - inputs = tf.expand_dims(inputs, 1) - inputs_weights = tf.expand_dims(inputs_weights, 1) - input_and_examples = tf.concat([inputs, examples], axis=1) - corresponding_weights = tf.concat([inputs_weights, examples_weights], axis=1) - - # calcul the prediction of input and examples - # that they will be used at title of the image - # nevessary loop becaue we have n * k elements - predicted_labels = [] - for samples in input_and_examples: - predicted = self.model(samples) - predicted = tf.argmax(predicted, axis=1) - predicted_labels.append(predicted) - - # configure the grid to show all results - plt.rcParams["figure.autolayout"] = True - plt.rcParams["figure.figsize"] = [20, 10] - - # loop to organize and show all results - for j in range(np.asarray(input_and_examples).shape[0]): - fig = plt.figure() - gridspec = fig.add_gridspec(2, input_and_examples.shape[1]) - for k in range(len(input_and_examples[j])): - fig.add_subplot(gridspec[0, k]) - if k == 0: - plt.title( - f"Original image\nGround Truth: {labels_test[indice_original[j]]}"\ - + f"\nPrediction: {predicted_labels[j][k]}" - ) - else: - plt.title( - f"Examples\nGround Truth: {examples_labels[j][k-1]}"\ - + f"\nPrediction: {predicted_labels[j][k]}"\ - + f"\nDistance: {round(examples_distance[j][k-1], 2)}" - ) - plt.imshow(input_and_examples[j][k], cmap=cmapimages) - plt.axis("off") - fig.add_subplot(gridspec[1, k]) - plt.imshow(input_and_examples[j][k], cmap=cmapimages) - plt.imshow( - _standardize_image(corresponding_weights[j][k], clip_percentile), - cmap=cmapexplanation, - alpha=alpha, - ) - plt.axis("off") - plt.show() - - def show_result_tabular( - self, - inputs: Union[tf.Tensor, np.ndarray], - examples: Union[tf.Tensor, np.ndarray], - examples_distance: float, - indice_original: int, - examples_labels: np.ndarray, - labels_test: np.ndarray, - show_values: bool = False, - ): - """ - This function is for image data, it show the returns of the explain function. - - Parameters - --------- - inputs - Tensor or Array. Input samples to be show next to examples. - Expected shape among (N,W), (N,T,W), (N,W,H,C). - examples - Represente the K nearust neighbours of the input. - examples_weight - features weight of the examples. - indice_original - Represente the indice of the inputs to show the true labels. - examples_labels - labels of the examples. - labels_test - Corresponding to labels of the dataset test. - show_values - boolean default at False, to show the values of examples. 
- """ - - # Initialize 'input_and_examples' and 'corresponding_weights' that they - # will be use to show every closest examples and the explanation - inputs = tf.expand_dims(inputs, 1) - input_and_examples = tf.concat([inputs, examples], axis=1) - - # calcul the prediction of input and examples - # that they will be used at title of the image - # nevessary loop becaue we have n * k elements - predicted_labels = [] - for samples in input_and_examples: - predicted = self.model(samples) - predicted = tf.argmax(predicted, axis=1) - predicted_labels.append(predicted) - - # apply argmax function to labels - labels_test = tf.argmax(labels_test, axis=1) - examples_labels = tf.argmax(examples_labels, axis=1) - - # define values_string if show_values is at None - values_string = "" - - # loop to organize and show all results - for i in range(input_and_examples.shape[0]): - for j in range(input_and_examples.shape[1]): - if show_values is True: - values_string = f"\t\tValues: {input_and_examples[i][j]}" - if j == 0: - print( - f"Originale_data, indice: {indice_original[i]}"\ - + f"\tDistance: \t\tGround Truth: {labels_test[i]}"\ - + f"\t\tPrediction: {predicted_labels[i][j]}" - + values_string - ) - else: - print( - f"\tExamples: {j}"\ - + f"\t\tDistance: {round(examples_distance[i][j-1], 2)}"\ - + f"\t\tGround Truth: {examples_labels[i][j-1]}"\ - + f"\t\tPrediction: {predicted_labels[i][j]}" - + values_string - ) - print("\n") diff --git a/xplique/types/__init__.py b/xplique/types/__init__.py index 52cca202..ba01d0c2 100644 --- a/xplique/types/__init__.py +++ b/xplique/types/__init__.py @@ -2,5 +2,5 @@ Typing module """ -from typing import Union, Tuple, List, Callable, Dict, Optional, Any +from typing import Union, Tuple, List, Callable, Dict, Optional, Any, Type from .custom_type import OperatorSignature From 62a621d4e6a431d338f3b39f66816a25c8a9ca0d Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 29 May 2023 09:59:04 +0200 Subject: [PATCH 007/138] example based tests: update and complete --- tests/commons/test_tf_dataset_operation.py | 144 +++++++++ tests/example_based/__init__.py | 0 tests/example_based/test_cole.py | 285 ++++++++--------- tests/example_based/test_image_plot.py | 101 ++++++ tests/example_based/test_similar_examples.py | 305 +++++++++++++++++++ tests/example_based/test_split_projection.py | 85 ++++++ tests/utils.py | 8 + xplique/commons/__init__.py | 2 +- xplique/commons/data_conversion.py | 33 +- 9 files changed, 806 insertions(+), 157 deletions(-) create mode 100644 tests/commons/test_tf_dataset_operation.py create mode 100644 tests/example_based/__init__.py create mode 100644 tests/example_based/test_image_plot.py create mode 100644 tests/example_based/test_similar_examples.py create mode 100644 tests/example_based/test_split_projection.py diff --git a/tests/commons/test_tf_dataset_operation.py b/tests/commons/test_tf_dataset_operation.py new file mode 100644 index 00000000..1f9a5f42 --- /dev/null +++ b/tests/commons/test_tf_dataset_operation.py @@ -0,0 +1,144 @@ +""" +Test operations on tf datasets +""" +import os +import sys + +sys.path.append(os.getcwd()) + +import unittest + +import numpy as np +import tensorflow as tf + + +from xplique.commons.tf_dataset_operations import * +from xplique.commons.tf_dataset_operations import _almost_equal + + +def test_are_dataset_first_elems_equal(): + """ + Verify that the function is able to compare the first element of datasets + """ + tf_dataset_up = tf.data.Dataset.from_tensor_slices( + tf.reshape(tf.range(90), (10, 3, 3)) + ) 
+ tf_dataset_up_small = tf.data.Dataset.from_tensor_slices( + tf.reshape(tf.range(45), (5, 3, 3)) + ) + tf_dataset_down = tf.data.Dataset.from_tensor_slices( + tf.reshape(tf.range(90, 0, -1), (10, 3, 3)) + ) + + zipped = tf.data.Dataset.zip((tf_dataset_up, tf_dataset_up)) + zipped_batched_in = tf.data.Dataset.zip( + (tf_dataset_up.batch(3), tf_dataset_up.batch(3)) + ) + + assert are_dataset_first_elems_equal(tf_dataset_up, tf_dataset_up) + assert are_dataset_first_elems_equal(tf_dataset_up.batch(3), tf_dataset_up.batch(3)) + assert are_dataset_first_elems_equal(tf_dataset_up, tf_dataset_up_small) + assert are_dataset_first_elems_equal( + tf_dataset_up.batch(3), tf_dataset_up_small.batch(3) + ) + assert are_dataset_first_elems_equal(zipped, zipped) + assert are_dataset_first_elems_equal(zipped.batch(3), zipped.batch(3)) + assert are_dataset_first_elems_equal(zipped_batched_in, zipped_batched_in) + assert not are_dataset_first_elems_equal(tf_dataset_up, zipped) + assert not are_dataset_first_elems_equal(tf_dataset_up.batch(3), zipped.batch(3)) + assert not are_dataset_first_elems_equal(tf_dataset_up.batch(3), zipped_batched_in) + assert not are_dataset_first_elems_equal(tf_dataset_up, tf_dataset_down) + assert not are_dataset_first_elems_equal( + tf_dataset_up.batch(3), tf_dataset_down.batch(3) + ) + + +def test_is_not_shuffled(): + """ + Verify the function is able to detect dataset that do not provide stable order of elements + """ + tf_dataset = tf.data.Dataset.from_tensor_slices( + tf.reshape(tf.range(90), (10, 3, 3)) + ) + tf_shuffled_once = tf_dataset.shuffle(3, reshuffle_each_iteration=False) + zipped = tf.data.Dataset.zip((tf_dataset, tf_dataset)) + + assert is_not_shuffled(tf_dataset) + assert is_not_shuffled(tf_dataset.batch(3)) + assert is_not_shuffled(tf_shuffled_once) + assert is_not_shuffled(tf_shuffled_once.batch(3)) + assert is_not_shuffled(zipped) + assert is_not_shuffled(zipped.batch(3)) + + +def test_batch_size_matches(): + """ + Test that the function is able to detect incoherence between dataset and batch_size + """ + tf_dataset = tf.data.Dataset.from_tensor_slices( + tf.reshape(tf.range(90), (10, 3, 3)) + ) + tf_dataset_b2 = tf_dataset.batch(2) + tf_dataset_b5 = tf_dataset.batch(5) + tf_dataset_b25 = tf_dataset_b5.batch(2) + tf_dataset_b52 = tf_dataset_b2.batch(5) + tf_dataset_b32 = tf_dataset.batch(32) + + assert batch_size_matches(tf_dataset, 3) + assert batch_size_matches(tf_dataset_b2, 2) + assert batch_size_matches(tf_dataset_b5, 5) + assert batch_size_matches(tf_dataset_b25, 2) + assert batch_size_matches(tf_dataset_b52, 5) + assert batch_size_matches(tf_dataset_b32, 10) + + +def test_sanitize_dataset(): + """ + Test that verifies that the function harmonize inputs into datasets + """ + tf_tensor = tf.reshape(tf.range(90), (10, 3, 3)) + np_array = np.array(tf_tensor) + tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) + tf_dataset_b4 = tf_dataset.batch(4) + + # test convertion + assert sanitize_dataset(None, 1) is None + assert are_dataset_first_elems_equal(tf_dataset, tf_dataset) + assert are_dataset_first_elems_equal(tf_dataset_b4, tf_dataset_b4) + assert are_dataset_first_elems_equal( + sanitize_dataset(tf_tensor, 4, 3), tf_dataset_b4 + ) + assert are_dataset_first_elems_equal( + sanitize_dataset(np_array, 4, 3), tf_dataset_b4 + ) + + # test catch assertion errors + test_raise_assertion_error = unittest.TestCase().assertRaises + test_raise_assertion_error( + AssertionError, sanitize_dataset, tf_dataset.shuffle(2).batch(4), 4 + ) + 
test_raise_assertion_error(AssertionError, sanitize_dataset, tf_dataset_b4, 3) + test_raise_assertion_error(AssertionError, sanitize_dataset, tf_dataset_b4, 4, 4) + test_raise_assertion_error(AssertionError, sanitize_dataset, np_array[:6], 4, 4) + + +def test_dataset_gather(): + """ + Test dataset gather function + """ + # (5, 2, 3, 3) + tf_dataset = tf.data.Dataset.from_tensor_slices( + tf.reshape(tf.range(90), (10, 3, 3)) + ).batch(2) + + indices_1 = np.array([[[0, 0], [1, 1]], [[2, 1], [0, 0]]]) + # (2, 2, 3, 3) + results_1 = dataset_gather(tf_dataset, indices_1) + assert np.all(tf.shape(results_1).numpy() == np.array([2, 2, 3, 3])) + assert _almost_equal(results_1[0, 0], results_1[1, 1]) + + indices_2 = tf.constant([[[1, 1]]]) + # (1, 1, 3, 3) + results_2 = dataset_gather(tf_dataset, indices_2) + assert np.all(tf.shape(results_2).numpy() == np.array([1, 1, 3, 3])) + assert _almost_equal(results_1[0, 1], results_2[0, 0]) diff --git a/tests/example_based/__init__.py b/tests/example_based/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index 941df68b..9fb1b73b 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -1,199 +1,182 @@ """ Test Cole """ +import os + +import sys + +sys.path.append(os.getcwd()) + from math import prod, sqrt import numpy as np -from sklearn.metrics import DistanceMetric +import scipy import tensorflow as tf -from xplique.example_based import Cole +from xplique.attributions import Occlusion, Saliency + +from xplique.example_based import Cole, SimilarExamples +from xplique.example_based.projections import CustomProjection +from xplique.example_based.search_methods import KNN from xplique.types import Union -from ..utils import generate_data, generate_model, almost_equal, generate_agnostic_model +from tests.utils import ( + generate_data, + generate_model, + almost_equal, + generate_timeseries_model, +) -def test_neighbors_distance(): +def get_setup(input_shape, nb_samples=10, nb_labels=10): """ - The function test every output of the explanation method + Generate data and model for Cole """ - # Method parameters initialisation - input_shape = (3, 3, 1) - nb_labels = 10 - nb_samples = 10 - nb_samples_test = 8 - k = 3 - # Data generation - matrix_train = tf.stack([i * tf.ones(input_shape) for i in range(nb_samples)]) - matrix_test = matrix_train[1:-1] - labels_train = tf.range(nb_samples) - labels_test = labels_train[1:-1] + x_train = tf.stack( + [i * tf.ones(input_shape, tf.float32) for i in range(nb_samples)] + ) + x_test = x_train[1:-1] + y_train = tf.one_hot(tf.range(len(x_train)) % nb_labels, depth=nb_labels) # Model generation model = generate_model(input_shape, nb_labels) - # Initialisation of weights_extraction_function and distance_function - # They will be used in CaseBasedExplainer initialisation - distance_function = DistanceMetric.get_metric("euclidean") - - # CaseBasedExplainer initialisation - method = Cole( - model, - matrix_train, - labels_train, - targets=None, - distance_function=distance_function, - weights_extraction_function=lambda inputs, targets: tf.ones(inputs.shape), - ) - - # Method explanation - ( - examples, - examples_distance, - examples_weights, - inputs_weights, - examples_labels, - ) = method.explain(matrix_test, labels_test) - - # test every outputs shape - assert examples.shape == (nb_samples_test, k) + input_shape - assert examples_distance.shape == (nb_samples_test, k) - assert examples_weights.shape == 
(nb_samples_test, k) + input_shape - assert inputs_weights.shape == (nb_samples_test,) + input_shape - assert examples_labels.shape == (nb_samples_test, k) - - for i in range(len(labels_test)): - # test examples: - assert almost_equal(examples[i][0], matrix_train[i + 1]) - assert almost_equal(examples[i][1], matrix_train[i + 2]) or almost_equal( - examples[i][1], matrix_train[i] - ) - assert almost_equal(examples[i][2], matrix_train[i]) or almost_equal( - examples[i][2], matrix_train[i + 2] - ) - - # test examples_distance - assert almost_equal(examples_distance[i][0], 0) - assert almost_equal(examples_distance[i][1], sqrt(prod(input_shape))) - assert almost_equal(examples_distance[i][2], sqrt(prod(input_shape))) - - # test examples_labels - assert almost_equal(examples_labels[i][0], labels_train[i + 1]) - assert almost_equal(examples_labels[i][1], labels_train[i + 2]) or almost_equal( - examples_labels[i][1], labels_train[i] - ) - assert almost_equal(examples_labels[i][2], labels_train[i]) or almost_equal( - examples_labels[i][2], labels_train[i + 2] - ) - - -def weights_attribution( - inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray] -): - """ - Custom weights extraction function - Zeros everywhere and target at 0, 0, 0 - """ - weights = tf.Variable(tf.zeros(inputs.shape, dtype=tf.float32)) - weights[:, 0, 0, 0].assign(targets) - return weights + return model, x_train, x_test, y_train -def test_weights_attribution(): +def test_cole_attribution(): """ - Function to test the weights attribution + Test Cole attribution projection. + It should be the same as a manual projection. + Test that the distance has an impact. """ - # Method parameters initialisation - input_shape = (3, 3, 1) + # Setup + nb_samples = 20 + input_shape = (5, 5) nb_labels = 10 - nb_samples = 10 - - # Data generation - matrix_train = tf.stack( - [i * tf.ones(input_shape, dtype=tf.float32) for i in range(nb_samples)] + k = 3 + x_train = tf.random.uniform( + (nb_samples,) + input_shape, minval=-1, maxval=1, seed=0 ) - matrix_test = matrix_train[1:-1] - labels_train = tf.range(nb_samples, dtype=tf.float32) - labels_test = labels_train[1:-1] + x_test = tf.random.uniform((nb_samples,) + input_shape, minval=-1, maxval=1, seed=2) + labels = tf.one_hot( + indices=tf.repeat(input=tf.range(nb_labels), repeats=[nb_samples // nb_labels]), + depth=nb_labels, + ) + y_train = labels + y_test = tf.random.shuffle(labels, seed=1) # Model generation - model = generate_model(input_shape, nb_labels) + model = generate_timeseries_model(input_shape, nb_labels) - # Initialisation of distance_function - # It will be used in CaseBasedExplainer initialisation - distance_function = DistanceMetric.get_metric("euclidean") + # Cole with attribution method constructor + method_constructor = Cole( + cases_dataset=x_train, + targets_dataset=y_train, + search_method=KNN, + k=k, + batch_size=7, + distance="euclidean", + model=model, + attribution_method=Saliency, + ) - # CaseBasedExplainer initialisation - method = Cole( - model, - matrix_train, - labels_train, - targets=labels_train, - distance_function=distance_function, - weights_extraction_function=weights_attribution, + # Cole with attribution explain + projection = CustomProjection(weights=Saliency(model)) + + euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z))) + method_call = SimilarExamples( + cases_dataset=x_train, + targets_dataset=y_train, + search_method=KNN, + k=k, + distance=euclidean_dist, + projection=projection, ) - # test case dataset weigth - 
assert almost_equal(method.case_dataset_weight[:, 0, 0, 0], method.labels_train) - assert almost_equal( - tf.reduce_sum(method.case_dataset_weight, axis=[1, 2, 3]), method.labels_train + method_different_distance = Cole( + cases_dataset=x_train, + targets_dataset=y_train, + search_method=KNN, + k=k, + batch_size=2, + distance=np.inf, # infinity norm based distance + model=model, + attribution_method=Saliency, ) - # Method explanation - _, _, examples_weights, inputs_weights, examples_labels =\ - method.explain(matrix_test, labels_test) + # Generate explanation + examples_constructor = method_constructor.explain(x_test, y_test) + examples_call = method_call.explain(x_test, y_test) + examples_different_distance = method_different_distance(x_test, y_test) + + # Verifications + # Shape should be (n, k, h, w, c) + assert examples_constructor.shape == (len(x_test), k) + input_shape + assert examples_call.shape == (len(x_test), k) + input_shape + assert examples_different_distance.shape == (len(x_test), k) + input_shape + + # both methods should be the same + assert almost_equal(examples_constructor, examples_call) - # test examples weights - assert almost_equal(examples_weights[:, :, 0, 0, 0], examples_labels) + # a different distance should give different results + assert not almost_equal(examples_constructor, examples_different_distance) + + # check weights are equal to the attribution directly on the input + method_constructor.set_returns(["weights", "include_inputs"]) assert almost_equal( - tf.reduce_sum(examples_weights, axis=[2, 3, 4]), examples_labels + method_constructor.explain(x_test, y_test)[:, 0], + Saliency(model)(x_test, y_test), ) - # test inputs weights - assert almost_equal(inputs_weights[:, 0, 0, 0], labels_test) - assert almost_equal(tf.reduce_sum(inputs_weights, axis=[1, 2, 3]), labels_test) - -def test_tabular_inputs(): +def test_cole_spliting(): """ - Function to test the acceptation of tabular data input in the method + Test Cole with a `latent_layer` provided. + It should split the model. 
""" - # Method parameters initialisation - data_shape = (3,) - input_shape = data_shape - nb_labels = 3 - nb_samples = 20 - nb_inputs = 5 - k = 3 - - # Data generation - dataset, targets = generate_data(data_shape, nb_labels, nb_samples) - dataset_train = dataset[:-nb_inputs] - dataset_test = dataset[-nb_inputs:] - targets_train = targets[:-nb_inputs] - targets_test = targets[-nb_inputs:] + # Setup + nb_samples = 10 + input_shape = (6, 6, 3) + nb_labels = 5 + k = 1 + x_train = tf.random.uniform((nb_samples,) + input_shape, minval=0, maxval=1) + x_test = tf.random.uniform((nb_samples,) + input_shape, minval=0, maxval=1) + labels = tf.one_hot( + indices=tf.repeat(input=tf.range(nb_labels), repeats=[nb_samples // nb_labels]), + depth=nb_labels, + ) + y_train = labels + y_test = tf.random.shuffle(labels) # Model generation - model = generate_agnostic_model(input_shape, nb_labels) - - # Initialisation of weights_extraction_function and distance_function - # They will be used in CaseBasedExplainer initialisation - distance_function = DistanceMetric.get_metric("euclidean") + model = generate_model(input_shape, nb_labels) - # CaseBasedExplainer initialisation + # Cole with attribution method constructor method = Cole( - model, - dataset_train, - targets_train, - targets=targets_train, - distance_function=distance_function, - weights_extraction_function=lambda inputs, targets: tf.ones(inputs.shape), + cases_dataset=x_train, + targets_dataset=y_train, + search_method=KNN, k=k, + case_returns=["examples", "weights", "include_inputs"], + model=model, + latent_layer="last_conv", + attribution_method=Occlusion, + patch_size=2, + patch_stride=1, ) - # Method explanation - examples, _, _, _, _ = method.explain(dataset_test, targets_test) + # Generate explanation + outputs = method.explain(x_test, y_test) + examples, weights = outputs["examples"], outputs["weights"] + + # Verifications + # Shape should be (n, k, h, w, c) + nb_samples_test = x_test.shape[0] + assert examples.shape == (nb_samples_test, k + 1) + input_shape + assert weights.shape[:-1] == (nb_samples_test, k + 1) + input_shape[:-1] + - # test examples shape - assert examples.shape == (nb_inputs, k) + input_shape +# test_cole_attribution() +# test_cole_spliting() diff --git a/tests/example_based/test_image_plot.py b/tests/example_based/test_image_plot.py new file mode 100644 index 00000000..f8254d17 --- /dev/null +++ b/tests/example_based/test_image_plot.py @@ -0,0 +1,101 @@ +""" +Test Cole +""" +import os +import sys + +sys.path.append(os.getcwd()) + +from math import prod, sqrt + +import numpy as np +import scipy +import tensorflow as tf + +from xplique.attributions import Occlusion, Saliency + +from xplique.example_based import Cole, SimilarExamples +from xplique.example_based.projections import CustomProjection +from xplique.example_based.search_methods import KNN +from xplique.plots.image import plot_examples + +from tests.utils import ( + generate_data, + generate_model, + almost_equal, + generate_timeseries_model, +) + + +def get_setup(input_shape, nb_samples=10, nb_labels=10): + """ + Generate data and model for Cole + """ + # Data generation + x_train = tf.stack( + [i * tf.ones(input_shape, tf.float32) for i in range(nb_samples)] + ) + x_test = x_train[1:-1] + y_train = tf.one_hot(tf.range(len(x_train)) % nb_labels, depth=nb_labels) + + # Model generation + model = generate_model(input_shape, nb_labels) + + return model, x_train, x_test, y_train + + +def test_plot_cole_spliting(): + """ + Test examples plot function. 
+ """ + # Setup + nb_samples = 10 + input_shape = (6, 6, 3) + nb_labels = 5 + k = 1 + x_train = tf.random.uniform((nb_samples,) + input_shape, minval=0, maxval=1) + x_test = tf.random.uniform((nb_samples,) + input_shape, minval=0, maxval=1) + labels = tf.one_hot( + indices=tf.repeat(input=tf.range(nb_labels), repeats=[nb_samples // nb_labels]), + depth=nb_labels, + ) + y_train = labels + y_test = tf.random.shuffle(labels) + + # Model generation + model = generate_model(input_shape, nb_labels) + + # Cole with attribution method constructor + method = Cole( + cases_dataset=x_train, + labels_dataset=tf.argmax(y_train, axis=1), + targets_dataset=y_train, + search_method=KNN, + k=k, + case_returns="all", + model=model, + latent_layer="last_conv", + attribution_method=Occlusion, + patch_size=2, + patch_stride=1, + ) + + # Generate explanation + outputs = method.explain(x_test, y_test) + + # get predictions on examples + predicted_labels = tf.map_fn( + fn=lambda x: tf.cast(tf.argmax(model(x), axis=1), tf.int32), + elems=outputs["examples"], + fn_output_signature=tf.int32, + ) + + # test plot + plot_examples( + test_labels=tf.argmax(y_test, axis=1), + predicted_labels=predicted_labels, + **outputs + ) + + +# test_plot_cole_spliting() diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py new file mode 100644 index 00000000..fbc5824e --- /dev/null +++ b/tests/example_based/test_similar_examples.py @@ -0,0 +1,305 @@ +""" +Test Cole +""" +import os +import sys + +sys.path.append(os.getcwd()) + +from math import prod, sqrt +import unittest + +import numpy as np +import tensorflow as tf + +from xplique.commons import sanitize_dataset, are_dataset_first_elems_equal +from xplique.types import Union + +from xplique.example_based import SimilarExamples +from xplique.example_based.projections import CustomProjection +from xplique.example_based.search_methods import KNN + +from tests.utils import almost_equal + + +def get_setup(input_shape, nb_samples=10, nb_labels=10): + """ + Generate data and model for SimilarExamples + """ + # Data generation + x_train = tf.stack( + [i * tf.ones(input_shape, tf.float32) for i in range(nb_samples)] + ) + x_test = x_train[1:-1] + y_train = tf.range(len(x_train), dtype=tf.float32) % nb_labels + + return x_train, x_test, y_train + + +def test_similar_examples_input_datasets_management(): + """ + Test management of dataset init inputs + """ + proj = CustomProjection(space_projection=lambda inputs, targets=None: inputs) + + tf_tensor = tf.reshape(tf.range(90), (10, 3, 3)) + np_array = np.array(tf_tensor) + tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) + too_short_np_array = np_array[:3] + too_long_tf_dataset = tf_dataset.concatenate(tf_dataset) + + tf_dataset_b3 = tf_dataset.batch(3) + tf_dataset_b5 = tf_dataset.batch(5) + too_long_tf_dataset_b5 = too_long_tf_dataset.batch(5) + too_long_tf_dataset_b10 = too_long_tf_dataset.batch(10) + + tf_shuffled = tf_dataset.shuffle(32, 0).batch(4) + tf_one_shuffle = tf_dataset.shuffle(32, 0, reshuffle_each_iteration=False).batch(4) + + # Method initialization that should work + method = SimilarExamples(tf_dataset_b3, None, np_array, projection=proj) + assert are_dataset_first_elems_equal(method.cases_dataset, tf_dataset_b3) + assert are_dataset_first_elems_equal(method.labels_dataset, None) + assert are_dataset_first_elems_equal(method.targets_dataset, tf_dataset_b3) + + method = SimilarExamples(np_array, tf_tensor, None, batch_size=5, projection=proj) + assert 
are_dataset_first_elems_equal(method.cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(method.labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(method.targets_dataset, None) + + method = SimilarExamples( + tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), + None, + np_array, + projection=proj, + ) + assert are_dataset_first_elems_equal(method.cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(method.labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(method.targets_dataset, tf_dataset_b5) + + method = SimilarExamples( + tf.data.Dataset.zip((tf_one_shuffle, tf_one_shuffle)), projection=proj + ) + assert are_dataset_first_elems_equal(method.cases_dataset, tf_one_shuffle) + assert are_dataset_first_elems_equal(method.labels_dataset, tf_one_shuffle) + assert are_dataset_first_elems_equal(method.targets_dataset, None) + + method = SimilarExamples(tf_one_shuffle, projection=proj) + assert are_dataset_first_elems_equal(method.cases_dataset, tf_one_shuffle) + assert are_dataset_first_elems_equal(method.labels_dataset, None) + assert are_dataset_first_elems_equal(method.targets_dataset, None) + + # Method initialization that should not work + test_raise_assertion_error = unittest.TestCase().assertRaises + test_raise_assertion_error(TypeError, SimilarExamples) + test_raise_assertion_error(AssertionError, SimilarExamples, tf_tensor) + test_raise_assertion_error( + AssertionError, SimilarExamples, tf_shuffled, projection=proj + ) + test_raise_assertion_error( + AssertionError, SimilarExamples, tf_dataset, tf_tensor, projection=proj + ) + test_raise_assertion_error( + AssertionError, SimilarExamples, tf_dataset_b3, tf_dataset_b5, projection=proj + ) + test_raise_assertion_error( + AssertionError, + SimilarExamples, + tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), + np_array, + projection=proj, + ) + test_raise_assertion_error( + AssertionError, SimilarExamples, tf_dataset_b3, too_short_np_array + ) + test_raise_assertion_error( + AssertionError, SimilarExamples, tf_dataset, None, too_long_tf_dataset + ) + test_raise_assertion_error( + AssertionError, + SimilarExamples, + tf_dataset_b5, + too_long_tf_dataset_b5, + projection=proj, + ) + test_raise_assertion_error( + AssertionError, + SimilarExamples, + too_long_tf_dataset_b10, + tf_dataset_b5, + projection=proj, + ) + + +def test_similar_examples_basic(): + """ + Test the SimilarExamples with an identity projection. + """ + # Setup + input_shape = (4, 4, 1) + k = 3 + x_train, x_test, _ = get_setup(input_shape) + + identity_projection = CustomProjection( + space_projection=lambda inputs, targets=None: inputs + ) + + # Method initialization + method = SimilarExamples( + cases_dataset=x_train, + projection=identity_projection, + search_method=KNN, + k=k, + batch_size=3, + distance="euclidean", + ) + + # Generate explanation + examples = method.explain(x_test) + + # Verifications + # Shape should be (n, k, h, w, c) + assert examples.shape == (len(x_test), k) + input_shape + + for i in range(len(x_test)): + # test examples: + assert almost_equal(examples[i, 0], x_train[i + 1]) + assert almost_equal(examples[i, 1], x_train[i + 2]) or almost_equal( + examples[i, 1], x_train[i] + ) + assert almost_equal(examples[i, 2], x_train[i]) or almost_equal( + examples[i, 2], x_train[i + 2] + ) + + +def test_similar_examples_return_multiple_elements(): + """ + Test the returns attribute. + Test modifying k. 
+ """ + # Setup + input_shape = (5, 5, 1) + k = 3 + x_train, x_test, y_train = get_setup(input_shape) + + nb_samples_test = len(x_test) + assert nb_samples_test + 2 == len(y_train) + + identity_projection = CustomProjection( + space_projection=lambda inputs, targets=None: inputs + ) + + # Method initialization + method = SimilarExamples( + cases_dataset=x_train, + labels_dataset=y_train, + projection=identity_projection, + search_method=KNN, + k=1, + batch_size=3, + distance="euclidean", + ) + + method.set_returns("all") + + method.set_k(k) + + # Generate explanation + method_output = method.explain(x_test) + + assert isinstance(method_output, dict) + + examples = method_output["examples"] + weights = method_output["weights"] + distances = method_output["distances"] + labels = method_output["labels"] + + # test every outputs shape (with the include inputs) + assert examples.shape == (nb_samples_test, k + 1) + input_shape + assert weights.shape == (nb_samples_test, k + 1) + input_shape + # the inputs distance ae zero and indices do not exist + assert distances.shape == (nb_samples_test, k) + assert labels.shape == (nb_samples_test, k) + + for i in range(nb_samples_test): + # test examples: + assert almost_equal(examples[i, 0], x_test[i]) + assert almost_equal(examples[i, 1], x_train[i + 1]) + assert almost_equal(examples[i, 2], x_train[i + 2]) or almost_equal( + examples[i, 2], x_train[i] + ) + assert almost_equal(examples[i, 3], x_train[i]) or almost_equal( + examples[i, 3], x_train[i + 2] + ) + + # test weights + assert almost_equal(weights[i], tf.ones(weights[i].shape, dtype=tf.float32)) + + # test distances + assert almost_equal(distances[i, 0], 0) + assert almost_equal(distances[i, 1], sqrt(prod(input_shape))) + assert almost_equal(distances[i, 2], sqrt(prod(input_shape))) + + # test labels + assert almost_equal(labels[i, 0], y_train[i + 1]) + assert almost_equal(labels[i, 1], y_train[i]) or almost_equal( + labels[i, 1], y_train[i + 2] + ) + assert almost_equal(labels[i, 2], y_train[i]) or almost_equal( + labels[i, 2], y_train[i + 2] + ) + + +def test_similar_examples_weighting(): + """ + Test the application of the projection weighting. 
+ """ + # Setup + input_shape = (4, 4, 1) + nb_samples = 10 + k = 3 + x_train, x_test, y_train = get_setup(input_shape, nb_samples) + + # Define the weighing function + weights = np.zeros(x_train[0].shape) + weights[1] = np.ones(weights[1].shape) + + # create huge noise on non interesting features + noise = np.random.uniform(size=x_train.shape, low=-100, high=100) + x_train = np.float32(weights * np.array(x_train) + (1 - weights) * noise) + + weighting_function = CustomProjection(weights=weights) + + method = SimilarExamples( + cases_dataset=x_train, + labels_dataset=y_train, + projection=weighting_function, + search_method=KNN, + k=k, + batch_size=5, + distance="euclidean", + ) + + # Generate explanation + examples = method.explain(x_test) + + # Verifications + # Shape should be (n, k, h, w, c) + nb_samples_test = x_test.shape[0] + assert examples.shape == (nb_samples_test, k) + input_shape + + for i in range(nb_samples_test): + # test examples: + assert almost_equal(examples[i, 0], x_train[i + 1]) + assert almost_equal(examples[i, 1], x_train[i + 2]) or almost_equal( + examples[i, 1], x_train[i] + ) + assert almost_equal(examples[i, 2], x_train[i]) or almost_equal( + examples[i, 2], x_train[i + 2] + ) + + +# test_similar_examples_input_dataset_management() +# test_similar_examples_basic() +# test_similar_examples_return_multiple_elements() +# test_similar_examples_weighting() diff --git a/tests/example_based/test_split_projection.py b/tests/example_based/test_split_projection.py new file mode 100644 index 00000000..bc560b48 --- /dev/null +++ b/tests/example_based/test_split_projection.py @@ -0,0 +1,85 @@ +import numpy as np +import tensorflow as tf +from tensorflow.keras.layers import ( + Dense, + Conv2D, + Activation, + Dropout, + Flatten, + MaxPooling2D, + Input, +) + +from xplique.example_based.projections import AttributionProjection +from xplique.example_based.projections import LatentSpaceProjection +from ..utils import generate_data, almost_equal + + +def _generate_model(input_shape=(32, 32, 3), output_shape=10): + model = tf.keras.Sequential() + model.add(Input(shape=input_shape)) + model.add(Conv2D(4, kernel_size=(2, 2), activation="relu", name="conv2d_1")) + model.add(Conv2D(4, kernel_size=(2, 2), activation="relu", name="conv2d_2")) + model.add(MaxPooling2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + model.add(Flatten()) + model.add(Dense(output_shape, name="dense")) + model.add(Activation("softmax", name="softmax")) + model.compile(loss="categorical_crossentropy", optimizer="sgd") + + return model + + +def test_attribution_latent_layer(): + """We should target the right layer using either int, string or default procedure""" + tf.keras.backend.clear_session() + + model = _generate_model() + + first_conv_layer = model.get_layer("conv2d_1") + last_conv_layer = model.get_layer("conv2d_2") + flatten_layer = model.get_layer("flatten") + + # default should not include model spliting + projection_default = AttributionProjection(model) + assert projection_default.latent_layer is None + + # last_conv should be recognized + projection_default = AttributionProjection(model, latent_layer="last_conv") + assert projection_default.latent_layer == last_conv_layer + + # target the first conv layer + projection_default = AttributionProjection(model, latent_layer=0) + assert projection_default.latent_layer == first_conv_layer + + # target a random flatten layer + projection_default = AttributionProjection(model, latent_layer="flatten") + assert projection_default.latent_layer == 
+
+
+def test_latent_space_latent_layer():
+    """We should target the right layer using either int, string or default procedure"""
+    tf.keras.backend.clear_session()
+
+    model = _generate_model()
+
+    first_conv_layer = model.get_layer("conv2d_1")
+    last_conv_layer = model.get_layer("conv2d_2")
+    flatten_layer = model.get_layer("flatten")
+    last_layer = model.get_layer("softmax")
+
+    # default should not include model splitting
+    projection_default = LatentSpaceProjection(model)
+    assert projection_default.latent_layer == last_layer
+
+    # last_conv should be recognized
+    projection_default = LatentSpaceProjection(model, latent_layer="last_conv")
+    assert projection_default.latent_layer == last_conv_layer
+
+    # target the first conv layer
+    projection_default = LatentSpaceProjection(model, latent_layer=0)
+    assert projection_default.latent_layer == first_conv_layer
+
+    # target the flatten layer by name
+    projection_default = LatentSpaceProjection(model, latent_layer="flatten")
+    assert projection_default.latent_layer == flatten_layer
diff --git a/tests/utils.py b/tests/utils.py
index 67cf0e36..92d348e2 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -31,6 +31,14 @@ def generate_model(input_shape=(32, 32, 3), output_shape=10):
     return model
 
 
+def generate_agnostic_model(input_shape=(3,), nb_labels=3):
+    model = Sequential()
+    model.add(Input(input_shape))
+    model.add(Flatten())
+    model.add(Dense(nb_labels))
+
+    return model
+
 def generate_timeseries_model(input_shape=(20, 10), output_shape=10):
     model = Sequential()
     model.add(Input(shape=input_shape))
diff --git a/xplique/commons/__init__.py b/xplique/commons/__init__.py
index 94237f90..db9fcf3a 100644
--- a/xplique/commons/__init__.py
+++ b/xplique/commons/__init__.py
@@ -2,7 +2,7 @@
 Utility classes and functions
 """
 
-from .data_conversion import tensor_sanitize, numpy_sanitize
+from .data_conversion import tensor_sanitize, numpy_sanitize, sanitize_inputs_targets
 from .model_override import guided_relu_policy, deconv_relu_policy, override_relu_gradient, \
     find_layer, open_relu_policy
 from .tf_operations import repeat_labels, batch_tensor
diff --git a/xplique/commons/data_conversion.py b/xplique/commons/data_conversion.py
index 517f86ad..ae5d7eeb 100644
--- a/xplique/commons/data_conversion.py
+++ b/xplique/commons/data_conversion.py
@@ -5,11 +5,12 @@
 import tensorflow as tf
 import numpy as np
 
-from ..types import Union, Optional, Tuple
+from ..types import Union, Optional, Tuple, Callable
 
 
 def tensor_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-                    targets: Optional[Union[tf.Tensor, np.ndarray]]) -> Tuple[tf.Tensor, tf.Tensor]:
+                    targets: Optional[Union[tf.Tensor, np.ndarray]] = None
+                    ) -> Tuple[tf.Tensor, tf.Tensor]:
     """
     Ensure the output as tf.Tensor, accept various inputs format including:
     tf.Tensor, List, numpy array, tf.data.Dataset (when label = None).
@@ -35,17 +36,20 @@ def tensor_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
         if hasattr(inputs, '_batch_size'):
             inputs = inputs.unbatch()
         # unpack the dataset, assume we have tuple of (input, target)
-        targets = [target for _, target in inputs]
         inputs = [inp for inp, _ in inputs]
+        if targets is not None:
+            targets = [target for _, target in inputs]
 
     inputs = tf.cast(inputs, tf.float32)
-    targets = tf.cast(targets, tf.float32)
+    if targets is not None:
+        targets = tf.cast(targets, tf.float32)
 
     return inputs, targets
 
 
 def numpy_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-                   targets: Optional[Union[tf.Tensor, np.ndarray]]) -> Tuple[tf.Tensor, tf.Tensor]:
+                   targets: Optional[Union[tf.Tensor, np.ndarray]] = None
+                   ) -> Tuple[tf.Tensor, tf.Tensor]:
     """
     Ensure the output as np.ndarray, accept various inputs format including:
     tf.Tensor, List, numpy array, tf.data.Dataset (when label = None).
@@ -66,3 +70,22 @@ def numpy_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
     """
     inputs, targets = tensor_sanitize(inputs, targets)
     return inputs.numpy(), targets.numpy()
+
+
+def sanitize_inputs_targets(explanation_method: Callable):
+    """
+    Wrap an explanation method to ensure it receives tf.Tensor inputs and targets.
+    Targets may be None.
+
+    explanation_method
+        Function to wrap, should return an tf.tensor.
+    """
+    def sanitize(self, inputs: Union[tf.data.Dataset, tf.Tensor, np.array],
+                 targets: Optional[Union[tf.Tensor, np.array]] = None,
+                 *args):
+        # ensure we have tf.tensor
+        inputs, targets = tensor_sanitize(inputs, targets)
+        # then enter the explanation function
+        return explanation_method(self, inputs, targets, *args)
+
+    return sanitize
From 2aa133dbf824fb0ffd7614a54fb62ab2cf8779c0 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Wed, 14 Feb 2024 16:44:32 +0100
Subject: [PATCH 008/138] plots: add image visualization for example based

---
 xplique/plots/__init__.py |   2 +-
 xplique/plots/image.py    | 118 +++++++++++++++++++++++++++++++++++++-
 2 files changed, 118 insertions(+), 2 deletions(-)

diff --git a/xplique/plots/__init__.py b/xplique/plots/__init__.py
index 12e25eae..c7037f6a 100644
--- a/xplique/plots/__init__.py
+++ b/xplique/plots/__init__.py
@@ -1,6 +1,6 @@
 """
 Utility functions to visualize explanations
 """
-from .image import plot_attributions, plot_attribution, plot_maco
+from .image import plot_attributions, plot_attribution, plot_maco, plot_examples
 from .tabular import plot_feature_impact, plot_mean_feature_impact, summary_plot_tabular
 from .timeseries import plot_timeseries_attributions
diff --git a/xplique/plots/image.py b/xplique/plots/image.py
index ca69b87d..c90d956b 100644
--- a/xplique/plots/image.py
+++ b/xplique/plots/image.py
@@ -171,7 +171,7 @@ def plot_attributions(
     cols
         Number of columns.
     img_size
-        Size of each subplots (in inch), considering we keep aspect ratio
+        Size of each subplots (in inch), considering we keep aspect ratio.
     plot_kwargs
         Additional parameters passed to `plt.imshow()`.
     """
@@ -230,3 +230,119 @@ def plot_maco(image, alpha, percentile_image=1.0, percentile_alpha=80):
 
     plt.imshow(np.concatenate([image, alpha], -1))
     plt.axis('off')
+
+
+def plot_examples(
+    examples: np.ndarray,
+    weights: np.ndarray = None,
+    distances: np.ndarray = None,
+    labels: np.ndarray = None,
+    test_labels: np.ndarray = None,
+    predicted_labels: np.ndarray = None,
+    img_size: float = 2.,
+    **attribution_kwargs,
+):
+    """
+    This function is for image data: it shows the outputs of the explain function.
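+    It displays a grid in which each row contains an input followed by its
+    nearest examples, with an optional second row showing the corresponding
+    attribution weights overlaid on each image.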
+
+    Parameters
+    ----------
+    examples
+        The k nearest neighbours of each input. Shape: (n, k+1, h, w, c).
+    weights
+        Feature weights of the examples.
+    distances
+        Distances between the inputs and their examples.
+    labels
+        Labels of the examples.
+    test_labels
+        Ground-truth labels of the inputs to explain.
+    predicted_labels
+        Labels predicted by the model for the inputs and the examples.
+    img_size
+        Size of each subplot (in inches), considering we keep aspect ratio.
+    attribution_kwargs
+        Additional parameters passed to `xplique.plots.plot_attribution()`.
+    """
+    # pylint: disable=too-many-arguments
+    if weights is not None:
+        assert examples.shape[:2] == weights.shape[:2],\
+            "Number of weights must correspond to the number of examples."
+    if distances is not None:
+        assert examples.shape[0] == distances.shape[0],\
+            "Number of samples treated should match between examples and distances."
+        assert examples.shape[1] == distances.shape[1] + 1,\
+            "Number of distances for each input must correspond to the number of examples minus 1."
+    if labels is not None:
+        assert examples.shape[0] == labels.shape[0],\
+            "Number of samples treated should match between examples and labels."
+        assert examples.shape[1] == labels.shape[1] + 1,\
+            "Number of labels for each input must correspond to the number of examples minus 1."
+
+    # the number of rows depends on whether weights are provided
+    rows_by_input = 1 + (weights is not None)
+    rows = rows_by_input * examples.shape[0]
+    cols = examples.shape[1]
+    # get width and height of our images
+    l_width, l_height = examples.shape[2:4]
+
+    # define the figure margin, width, height in inch
+    margin = 0.3
+    spacing = 0.3
+    figwidth = cols * img_size + (cols-1) * spacing + 2 * margin
+    figheight = rows * img_size * l_height/l_width + (rows-1) * spacing + 2 * margin
+
+    left = margin/figwidth
+    bottom = margin/figheight
+
+    space_with_line = spacing / (3 * img_size)
+
+    fig = plt.figure()
+    fig.set_size_inches(figwidth, figheight)
+
+    fig.subplots_adjust(
+        left = left,
+        bottom = bottom,
+        right = 1.-left,
+        top = 1.-bottom,
+        wspace = spacing/img_size,
+        hspace= spacing/img_size * l_width/l_height
+    )
+
+    # configure the grid to show all results
+    plt.rcParams["figure.autolayout"] = True
+    plt.rcParams["figure.figsize"] = [3 * examples.shape[1], 4 * (1 + (weights is not None))]
+
+    # loop to organize and show all results
+    for i in range(examples.shape[0]):
+        for k in range(examples.shape[1]):
+            plt.subplot(rows, cols, rows_by_input * i * cols + k + 1)
+
+            # set title
+            if k == 0:
+                title = "Original image"
+                title += f"\nGround Truth: {test_labels[i]}" if test_labels is not None else ""
+                title += f"\nPrediction: {predicted_labels[i, k]}"\
+                    if predicted_labels is not None else ""
+            else:
+                title = f"Example {k}"
+                title += f"\nGround Truth: {labels[i, k-1]}" if labels is not None else ""
+                title += f"\nPrediction: {predicted_labels[i, k]}"\
+                    if predicted_labels is not None else ""
+                title += f"\nDistance: {distances[i, k-1]:.4f}" if distances is not None else ""
+            plt.title(title)
+
+            # plot image
+            img = _normalize(examples[i, k])
+            if img.shape[-1] == 1:
+                plt.imshow(img[:,:,0], cmap="gray")
+            else:
+                plt.imshow(img)
+            plt.axis("off")
+
+            # plot weights
+            if weights is not None:
+                plt.subplot(rows, cols, (rows_by_input * i + 1) * cols + k + 1)
+                plot_attribution(weights[i, k], examples[i, k], **attribution_kwargs)
+                plt.axis("off")
+        plt.plot([-1, 1.5], [-space_with_line, -space_with_line],
+                 color='black', lw=1, transform=plt.gca().transAxes, clip_on=False)
+    fig.tight_layout()
From bed4747aa34035826886eb9ed168104a9c7cd4ed Mon Sep 17 00:00:00 2001
From: Antonin
POCHE Date: Wed, 14 Feb 2024 16:50:46 +0100 Subject: [PATCH 009/138] commons: add operations for tf dataset --- xplique/commons/__init__.py | 2 + xplique/commons/data_conversion.py | 22 +-- xplique/commons/tf_dataset_operations.py | 235 +++++++++++++++++++++++ 3 files changed, 247 insertions(+), 12 deletions(-) create mode 100644 xplique/commons/tf_dataset_operations.py diff --git a/xplique/commons/__init__.py b/xplique/commons/__init__.py index db9fcf3a..c5312a2e 100644 --- a/xplique/commons/__init__.py +++ b/xplique/commons/__init__.py @@ -11,3 +11,5 @@ get_inference_function, get_gradient_functions) from .exceptions import no_gradients_available, raise_invalid_operator from .forgrad import forgrad +from .tf_dataset_operations import are_dataset_first_elems_equal, dataset_gather, sanitize_dataset,\ + is_not_shuffled, batch_size_matches diff --git a/xplique/commons/data_conversion.py b/xplique/commons/data_conversion.py index ae5d7eeb..9bcf3309 100644 --- a/xplique/commons/data_conversion.py +++ b/xplique/commons/data_conversion.py @@ -9,8 +9,7 @@ def tensor_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None - ) -> Tuple[tf.Tensor, tf.Tensor]: + targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: """ Ensure the output as tf.Tensor, accept various inputs format including: tf.Tensor, List, numpy array, tf.data.Dataset (when label = None). @@ -36,20 +35,17 @@ def tensor_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray], if hasattr(inputs, '_batch_size'): inputs = inputs.unbatch() # unpack the dataset, assume we have tuple of (input, target) + targets = [target for _, target in inputs] inputs = [inp for inp, _ in inputs] - if targets is not None: - targets = [target for _, target in inputs] inputs = tf.cast(inputs, tf.float32) - if targets is not None: - targets = tf.cast(targets, tf.float32) + targets = tf.cast(targets, tf.float32) return inputs, targets def numpy_sanitize(inputs: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None - ) -> Tuple[tf.Tensor, tf.Tensor]: + targets: Optional[Union[tf.Tensor, np.ndarray]]) -> Tuple[tf.Tensor, tf.Tensor]: """ Ensure the output as np.ndarray, accept various inputs format including: tf.Tensor, List, numpy array, tf.data.Dataset (when label = None). @@ -80,12 +76,14 @@ def sanitize_inputs_targets(explanation_method: Callable): explanation_method Function to wrap, should return an tf.tensor. 
""" - def sanitize(self, inputs: Union[tf.data.Dataset, tf.Tensor, np.array], + def sanitize(self, inputs: Union[tf.Tensor, np.array], targets: Optional[Union[tf.Tensor, np.array]] = None, - *args): + ): # ensure we have tf.tensor - inputs, targets = tensor_sanitize(inputs, targets) + inputs = tf.cast(inputs, tf.float32) + if targets is not None: + targets = tf.cast(targets, tf.float32) # then enter the explanation function - return explanation_method(self, inputs, targets, *args) + return explanation_method(self, inputs, targets) return sanitize diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py new file mode 100644 index 00000000..69e750eb --- /dev/null +++ b/xplique/commons/tf_dataset_operations.py @@ -0,0 +1,235 @@ +""" +Set of functions to manipulated `tf.data.Dataset` +""" +from itertools import product + +import numpy as np +import tensorflow as tf + +from ..types import Optional, Union + + +def _almost_equal(arr1, arr2, epsilon=1e-6): + """Ensure two array are almost equal at an epsilon""" + return np.shape(arr1) == np.shape(arr2) and np.sum(np.abs(arr1 - arr2)) < epsilon + + +def are_dataset_first_elems_equal( + dataset1: Optional[tf.data.Dataset], dataset2: Optional[tf.data.Dataset] +) -> bool: + """ + Test if the first batch of elements of two datasets are the same. + It is used to verify equality between datasets in a lazy way. + + Parameters + ---------- + dataset1 + First `tf.data.Dataset` to compare. + dataset2 + Second `tf.data.Dataset` to compare. + + Returns + ------- + test_result + Boolean value of the equality. + """ + if dataset1 is None: + return dataset2 is None + + if dataset2 is None: + return False + + next1 = next(iter(dataset1)) + next2 = next(iter(dataset2)) + if isinstance(next1, tuple): + next1 = next1[0] + if isinstance(next2, tuple): + next2 = next2[0] + else: + return False + + return _almost_equal(next1, next2) + + +def is_not_shuffled(dataset: Optional[tf.data.Dataset]) -> bool: + """ + Test if the provided dataset reshuffle at each iteration. + Tensorflow do not provide clean way to verify it, + hence we draw two times the first element and compare it. + It may not always detect shuffled datasets, but this is enough of a safety net. + + Parameters + ---------- + dataset + Tensorflow dataset to test. + + Returns + ------- + test_result + Boolean value of the test. + """ + return are_dataset_first_elems_equal(dataset, dataset) + + +def batch_size_matches(dataset: Optional[tf.data.Dataset], batch_size: int) -> bool: + """ + Test if batch size of a tensorflow dataset matches the expected one. + Tensorflow do not provide clean way to verify it, + hence we draw a batch and check its first dimension. + It may fail in some really precise cases, but this is enough of a safety net. + + Parameters + ---------- + dataset + Tensorflow dataset to test. + batch_size + The expected batch size of the dataset. + + Returns + ------- + test_result + Boolean value of the test. + """ + if dataset is None: + # ignored + return True + + first_item = next(iter(dataset)) + if isinstance(first_item, tuple): + return tf.reduce_all( + [tf.shape(item)[0].numpy() == batch_size for item in first_item] + ) + return tf.shape(first_item)[0].numpy() == batch_size + + +def sanitize_dataset( + dataset: Union[tf.data.Dataset, tf.Tensor, np.array], + batch_size: int, + cardinality: Optional[int] = None, +) -> Optional[tf.data.Dataset]: + """ + Function to ensure input dataset match expected format. 
+    It also transforms tensors into `tf.data.Dataset` and verifies the dataset properties.
+    This function verifies that datasets do not reshuffle at each iteration and
+    that their batch size and cardinality match the expected ones.
+    Note that TensorFlow does not provide an easy way to make those tests, hence,
+    for cost reasons, our tests are not perfect.
+
+    Parameters
+    ----------
+    dataset
+        Tensorflow dataset to verify, or tensor to transform into a `tf.data.Dataset` and verify.
+    batch_size
+        The expected batch size used either to verify the input dataset
+        or to batch the transformed tensor.
+    cardinality
+        Expected number of batches in the dataset or in the batched transformed tensor.
+
+    Returns
+    -------
+    dataset
+        Verified dataset or transformed tensor. In both cases a `tf.data.Dataset`
+        that does not reshuffle at each iteration and
+        with batch size and cardinality matching the expected ones.
+    """
+    if dataset is not None:
+        if isinstance(dataset, tf.data.Dataset):
+            assert is_not_shuffled(dataset), (
+                "Datasets should not be shuffled, "
+                + "the order of the elements should stay the same at each iteration."
+            )
+            assert batch_size_matches(
+                dataset, batch_size
+            ), "The batch size should match between datasets."
+        else:
+            dataset = tf.data.Dataset.from_tensor_slices(dataset).batch(batch_size)
+
+        if cardinality is not None and cardinality > 0:
+            dataset_cardinality = dataset.cardinality().numpy()
+            if dataset_cardinality > 0:
+                assert dataset_cardinality == cardinality, (
+                    "The number of batches should match between datasets. "
+                    + f"Received {dataset.cardinality().numpy()} vs {cardinality}. "
+                    + "You may have provided non-batched datasets or datasets with different lengths."
+                )
+
+    return dataset
+
+
+def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor:
+    """
+    Imitation of `tf.gather` for `tf.data.Dataset`,
+    it extracts elements from `dataset` at the given indices.
+    We could see it as returning the `indices` tensor
+    where each index was replaced by the corresponding element in `dataset`.
+    The aim is to use it in the `example_based` module to extract examples from the cases dataset.
+    Hence, `indices` is expected to have dimensions (n, k, 2),
+    where n represents the number of inputs and k the number of corresponding examples.
+    The index of each element is encoded by two values:
+    the batch index and the index of the element in the batch.
+
+    Example of application
+    ```
+    >>> dataset = tf.data.Dataset.from_tensor_slices(
+    ...     tf.reshape(tf.range(20), (-1, 2, 2))
+    ... ).batch(3)  # shape=(None, 2, 2)
+    >>> indices = tf.constant([[[0, 0]], [[1, 0]]])  # shape=(2, 1, 2)
+    >>> dataset_gather(dataset, indices)
+    <tf.Tensor: shape=(2, 1, 2, 2), dtype=int32, numpy=
+    array([[[[ 0,  1], [ 2,  3]]],
+           [[[12, 13], [14, 15]]]], dtype=int32)>
+    ```
+
+    Parameters
+    ----------
+    dataset
+        Tensorflow dataset from which the elements are extracted.
+    indices
+        Tensor of indices of elements to extract from the `dataset`.
+        `indices` should be of dimensions (n, k, 2),
+        this is to match the format of indices in the `example_based` module.
+        Indeed, n represents the number of inputs and k the number of corresponding examples.
+        The index of each element is encoded by two values,
+        the batch index and the index of the element in the batch.
+
+    Returns
+    -------
+    results
+        Tensor of the extracted elements, of shape (n, k, ...),
+        where (...) is the shape of the dataset elements.
+    """
+    if dataset is None:
+        return None
+
+    example = next(iter(dataset))
+    # the dataset yields batches of shape (bs, ...)
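+    # pre-allocate the results tensor: one slot per requested index, each shaped
+    # like a dataset element; it is then filled batch by batch below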
+    results = tf.Variable(
+        tf.zeros(
+            indices.shape[:-1] + example[0].shape, dtype=dataset.element_spec.dtype
+        )
+    )
+
+    # total number of elements to gather
+    nb_results = np.prod(indices.shape[:-1])
+    current_nb_results = 0
+
+    for i, batch in enumerate(dataset):
+        # check if the batch is interesting
+        if not tf.reduce_any(indices[..., 0] == i):
+            continue
+
+        # extract pertinent elements
+        pertinent_indices_location = tf.where(indices[..., 0] == i)
+        samples_index = tf.gather_nd(indices[..., 1], pertinent_indices_location)
+        samples = tf.gather(batch, samples_index)
+
+        # put them at the right place in results
+        for location, sample in zip(pertinent_indices_location, samples):
+            results[location[0], location[1]].assign(sample)
+            current_nb_results += 1
+
+        # test if results are filled to break the loop
+        if current_nb_results == nb_results:
+            break
+    return results
From 9f9acfb27452262631bceca95722e8102e45af8d Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Wed, 14 Feb 2024 16:53:13 +0100
Subject: [PATCH 010/138] pylint: disable similarities for signatures

---
 .pylintrc | 19 +++++++++++++++++++
 setup.cfg |  1 +
 2 files changed, 20 insertions(+)
 create mode 100644 .pylintrc

diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 00000000..91513741
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,19 @@
+[MASTER]
+disable=
+        R0903, # allow classes exposing only one public method
+        R0914, # allow multiple local variables
+        E0401, # pending issue with pylint see pylint#2603
+        E1123, # issues between pylint and tensorflow since 2.2.0
+        E1120, # see pylint#3613
+        C3001, # lambda function as variable
+
+[FORMAT]
+max-line-length=100
+max-args=12
+
+[SIMILARITIES]
+min-similarity-lines=6
+ignore-comments=yes
+ignore-docstrings=yes
+ignore-imports=no
+ignore-signatures=yes
diff --git a/setup.cfg b/setup.cfg
index 3fde7c5a..3a85bd3f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -26,6 +26,7 @@ min-similarity-lines = 6
 ignore-comments = yes
 ignore-docstrings = yes
 ignore-imports = no
+ignore-signatures = yes
 
 [tox:tox]
 envlist = py{37,38,39,310}-lint, py{37,38,39,310}-tf{22,25,28,211}, py{38,39,310}-tf{25,28,211}-torch{111,113,200}
From d29f92f38ac58f67d9979ecd8ea6a336157300e3 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Mon, 12 Feb 2024 16:48:57 +0100
Subject: [PATCH 011/138] example based: introduce base example method
 abstraction

---
 xplique/example_based/base_example_method.py | 380 +++++++++++++++++++
 1 file changed, 380 insertions(+)
 create mode 100644 xplique/example_based/base_example_method.py

diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py
new file mode 100644
index 00000000..eca11a9e
--- /dev/null
+++ b/xplique/example_based/base_example_method.py
@@ -0,0 +1,380 @@
+"""
+Base model for example-based methods
+"""
+
+import math
+
+import tensorflow as tf
+import numpy as np
+
+from ..types import Callable, Dict, List, Optional, Type, Union
+
+from ..commons import sanitize_inputs_targets
+from ..commons import sanitize_dataset, dataset_gather
+from .search_methods import KNN, BaseSearchMethod
+from .projections import Projection
+
+from .search_methods.base import _sanitize_returns
+
+
+class BaseExampleMethod:
+    """
+    Base class for natural example-based methods explaining models,
+    they project the cases_dataset into a pertinent search space for the model with a `Projection`,
+    then they call the `BaseSearchMethod` on it.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from the dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
+        the case for your dataset, otherwise, examples will not make sense.
+    labels_dataset
+        Labels associated to the examples in the dataset. Indices should match with cases_dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Batch size and cardinality of the other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
+        the case for your dataset, otherwise, examples will not make sense.
+    targets_dataset
+        Targets associated to the cases_dataset for dataset projection. See `projection` for detail.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Batch size and cardinality of the other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
+        the case for your dataset, otherwise, examples will not make sense.
+    search_method
+        An algorithm to search the examples in the projected space.
+    k
+        The number of examples to retrieve.
+    projection
+        Projection or Callable that projects samples from the input space to the search space.
+        The search space should be a space where distances make sense for the model.
+        It should not be `None`, otherwise,
+        all examples could be computed only with the `search_method`.
+
+        Example of Callable:
+        ```
+        def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None):
+            '''
+            Example of projection,
+            inputs are the elements to project.
+            targets are optional parameters to orient the projection.
+            '''
+            projected_inputs = # do some magic on inputs, it should use the model.
+            return projected_inputs
+        ```
+    case_returns
+        String or list of strings with the elements to return in `self.explain()`.
+        See `self.set_returns()` for detail.
+    batch_size
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+    search_method_kwargs
+        Parameters passed to the constructor of the `search_method`.
+    """
+
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        search_method: Type[BaseSearchMethod] = KNN,
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        case_returns: Union[List[str], str] = "examples",
+        batch_size: Optional[int] = 32,
+        **search_method_kwargs,
+    ):
+        assert (
+            projection is not None
+        ), "`BaseExampleMethod` without `projection` is a `BaseSearchMethod`."
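+        # a projection is mandatory: without it, explaining would reduce to
+        # applying a `BaseSearchMethod` directly to the raw `cases_dataset`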
+
+        # set attributes
+        batch_size = self.__initialize_cases_dataset(
+            cases_dataset, labels_dataset, targets_dataset, batch_size
+        )
+        self.k = k
+        self.set_returns(case_returns)
+        self.projection = projection
+
+        # set `search_returns` if not provided and overwrite it otherwise
+        search_method_kwargs["search_returns"] = ["indices", "distances"]
+
+        # initiate search_method
+        self.search_method = search_method(
+            cases_dataset=cases_dataset,
+            targets_dataset=targets_dataset,
+            k=k,
+            projection=projection,
+            batch_size=batch_size,
+            **search_method_kwargs,
+        )
+
+    def __initialize_cases_dataset(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]],
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]],
+        batch_size: Optional[int],
+    ) -> int:
+        """
+        Factorization of `__init__()` method for dataset-related attributes.
+
+        Parameters
+        ----------
+        cases_dataset
+            The dataset used to train the model, examples are extracted from the dataset.
+        labels_dataset
+            Labels associated to the examples in the dataset.
+            Indices should match with cases_dataset.
+        targets_dataset
+            Targets associated to the cases_dataset for dataset projection.
+            See `projection` for detail.
+        batch_size
+            Number of samples treated simultaneously when using the datasets.
+            Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+
+        Returns
+        -------
+        batch_size
+            Number of samples treated simultaneously when using the datasets.
+            Extracted from the datasets in case they are `tf.data.Dataset`.
+            Otherwise, the input value.
+        """
+        # at least one dataset provided
+        if isinstance(cases_dataset, tf.data.Dataset):
+            # set batch size (ignore provided argument) and cardinality
+            if isinstance(cases_dataset.element_spec, tuple):
+                batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy()
+            else:
+                batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy()
+
+            cardinality = cases_dataset.cardinality().numpy()
+        else:
+            # if case_dataset is not a `tf.data.Dataset`, then neither should the others be
+            assert not isinstance(labels_dataset, tf.data.Dataset)
+            assert not isinstance(targets_dataset, tf.data.Dataset)
+            # set batch size and cardinality
+            batch_size = min(batch_size, len(cases_dataset))
+            cardinality = math.ceil(len(cases_dataset) / batch_size)
+
+        # verify cardinality and create datasets from the tensors
+        self.cases_dataset = sanitize_dataset(
+            cases_dataset, batch_size, cardinality
+        )
+        self.labels_dataset = sanitize_dataset(
+            labels_dataset, batch_size, cardinality
+        )
+        self.targets_dataset = sanitize_dataset(
+            targets_dataset, batch_size, cardinality
+        )
+
+        # if the provided `cases_dataset` has several columns
+        if isinstance(self.cases_dataset.element_spec, tuple):
+            # switch case on the number of columns of `cases_dataset`
+            if len(self.cases_dataset.element_spec) == 2:
+                assert self.labels_dataset is None, (
+                    "The second column of `cases_dataset` is assumed to be the labels. "
+                    + "Hence, `labels_dataset` should be empty."
+                )
+                self.labels_dataset = self.cases_dataset.map(lambda x, y: y)
+                self.cases_dataset = self.cases_dataset.map(lambda x, y: x)
+
+            elif len(self.cases_dataset.element_spec) == 3:
+                assert self.labels_dataset is None, (
+                    "The second column of `cases_dataset` is assumed to be the labels. "
+                    + "Hence, `labels_dataset` should be empty."
+                )
+                assert self.targets_dataset is None, (
+                    "The third column of `cases_dataset` is assumed to be the targets. "
+ + "Hence, `labels_dataset` should be empty." + ) + self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) + self.labels_dataset = self.cases_dataset.map(lambda x, y, t: y) + self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) + else: + raise AttributeError( + "`cases_dataset` cannot possess more than 3 columns," + + f"{len(self.cases_dataset.element_spec)} were detected." + ) + + self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) + if self.labels_dataset is not None: + self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) + if self.targets_dataset is not None: + self.targets_dataset = self.targets_dataset.prefetch(tf.data.AUTOTUNE) + + return batch_size + + def set_k(self, k: int): + """ + Setter for the k parameter. + + Parameters + ---------- + k + Number of examples to return, it should be a positive integer. + """ + assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" + self.k = k + self.search_method.set_k(k) + + def set_returns(self, returns: Union[List[str], str]): + """ + Set `self.returns` used to define returned elements in `self.explain()`. + + Parameters + ---------- + returns + Most elements are useful in `xplique.plots.plot_examples()`. + `returns` can be set to 'all' for all possible elements to be returned. + - 'examples' correspond to the expected examples, + the inputs may be included in first position. (n, k(+1), ...) + - 'weights' the weights in the input space used in the projection. + They are associated to the input and the examples. (n, k(+1), ...) + - 'distances' the distances between the inputs and the corresponding examples. + They are associated to the examples. (n, k, ...) + - 'labels' if provided through `dataset_labels`, + they are the labels associated with the examples. (n, k, ...) + - 'include_inputs' specify if inputs should be included in the returned elements. + Note that it changes the number of returned elements from k to k+1. + """ + possibilities = ["examples", "weights", "distances", "labels", "include_inputs"] + default = "examples" + self.returns = _sanitize_returns(returns, possibilities, default) + + @sanitize_inputs_targets + def explain( + self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + ): + """ + Compute examples to explain the inputs. + It project inputs with `self.projection` in the search space + and find examples with `self.search_method`. + + Parameters + ---------- + inputs + Tensor or Array. Input samples to be explained. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + More information in the documentation. + targets + Tensor or Array passed to the projection function. + + Returns + ------- + return_dict + Dictionnary with listed elements in `self.returns`. + If only one element is present it returns the element. + The elements that can be returned are: + examples, weights, distances, indices, and labels. 
+ """ + # project inputs + projected_inputs = self.projection(inputs, targets) + + # look for closest elements to projected inputs + search_output = self.search_method(projected_inputs) + + # manage returned elements + return self.format_search_output(search_output, inputs, targets) + + def __call__( + self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + ): + """explain alias""" + return self.explain(inputs, targets) + + def format_search_output( + self, + search_output: Dict[str, tf.Tensor], + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + ): + """ + Format the output of the `search_method` to match the expected returns in `self.returns`. + + Parameters + ---------- + search_output + Dictionnary with the required outputs from the `search_method`. + inputs + Tensor or Array. Input samples to be explained. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + More information in the documentation. + targets + Tensor or Array passed to the projection function. + Here it is used by the explain function of attribution methods. + Refer to the corresponding method documentation for more detail. + Note that the default method is `Saliency`. + + Returns + ------- + return_dict + Dictionnary with listed elements in `self.returns`. + If only one element is present it returns the element. + The elements that can be returned are: + examples, weights, distances, indices, and labels. + """ + return_dict = {} + + examples = dataset_gather(self.cases_dataset, search_output["indices"]) + examples_labels = dataset_gather(self.labels_dataset, search_output["indices"]) + examples_targets = dataset_gather( + self.targets_dataset, search_output["indices"] + ) + + # add examples and weights + if "examples" in self.returns or "weights" in self.returns: + if "include_inputs" in self.returns: + # include inputs + inputs = tf.expand_dims(inputs, axis=1) + examples = tf.concat([inputs, examples], axis=1) + if targets is not None: + targets = tf.expand_dims(targets, axis=1) + examples_targets = tf.concat([targets, examples_targets], axis=1) + else: + examples_targets = [None] * len(examples) + if "examples" in self.returns: + return_dict["examples"] = examples + if "weights" in self.returns: + # get weights of examples (n, k, ...) + # we iterate on the inputs dimension through maps + # and ask weights for batch of examples + weights = [] + for ex, ex_targ in zip(examples, examples_targets): + if isinstance(self.projection, Projection): + # get weights in the input space + weights.append(self.projection.get_input_weights(ex, ex_targ)) + else: + raise AttributeError( + "Cannot extract weights from the provided projection function" + + "Either remove 'weights' from the `case_returns` or" + + "inherit from `Projection` and overwrite `get_input_weights`." + ) + + return_dict["weights"] = tf.stack(weights, axis=0) + + # optimization test TODO + # return_dict["weights"] = tf.vectorized_map( + # fn=lambda x: self.projection.get_input_weights(x[0], x[1]), + # elems=(examples, examples_targets), + # # fn_output_signature=tf.float32, + # ) + + # add indices, distances, and labels + if "distances" in self.returns: + return_dict["distances"] = search_output["distances"] + if "labels" in self.returns: + assert ( + examples_labels is not None + ), "The method cannot return labels without a label dataset." 
+ return_dict["labels"] = examples_labels + + # return a dict only different variables are returned + if len(return_dict) == 1: + return list(return_dict.values())[0] + return return_dict From 69fef126c26117813ea23fdd053d2409f3a6fec2 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 12 Feb 2024 16:49:36 +0100 Subject: [PATCH 012/138] example based: adapt similar examples --- xplique/example_based/similar_examples.py | 325 ++-------------------- 1 file changed, 23 insertions(+), 302 deletions(-) diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index 2961b1e0..2a9634d3 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -13,42 +13,39 @@ from ..commons import sanitize_dataset, dataset_gather from .search_methods import KNN, BaseSearchMethod from .projections import Projection +from .base_example_method import BaseExampleMethod from .search_methods.base import _sanitize_returns -class SimilarExamples: +class SimilarExamples(BaseExampleMethod): """ - Base class for natural example-base methods explaining models, - they project the cases_dataset into a pertinent space for the with a `Projection`, - then they call the `BaseSearchMethod` on it. + Base class for similar examples. Parameters ---------- cases_dataset The dataset used to train the model, examples are extracted from the dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. labels_dataset Labels associated to the examples in the dataset. Indices should match with cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other dataset should match `cases_dataset`. - Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset Targets associated to the cases_dataset for dataset projection. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other dataset should match `cases_dataset`. - Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. - search_method - An algorithm to search the examples in the projected space. k The number of examples to retrieve. projection Projection or Callable that project samples from the input space to the search space. - The search space sould be a space where distance make sense for the model. + The search space should be a space where distance make sense for the model. It should not be `None`, otherwise, all examples could be computed only with the `search_method`. @@ -58,7 +55,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar ''' Example of projection, inputs are the elements to project. - targets are optionnal parameters to orientated the projection. 
+ targets are optional parameters to orientated the projection. ''' projected_inputs = # do some magic on inputs, it should use the model. return projected_inputs @@ -69,8 +66,12 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar batch_size Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - search_method_kwargs - Parameters to be passed at the construction of the `search_method`. + distance + Distance for the knn search method. + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. """ def __init__( @@ -78,303 +79,23 @@ def __init__( cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - search_method: Type[BaseSearchMethod] = KNN, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, - **search_method_kwargs, + distance: Union[int, str, Callable] = "euclidean", ): - assert ( - projection is not None - ), "`SimilarExamples` without `projection` is a `BaseSearchMethod`." - - # set attributes - batch_size = self.__initialize_cases_dataset( - cases_dataset, labels_dataset, targets_dataset, batch_size - ) - self.k = k - self.set_returns(case_returns) - self.projection = projection - - # set `search_returns` if not provided and overwrite it otherwise - search_method_kwargs["search_returns"] = ["indices", "distances"] - - # initiate search_method - self.search_method = search_method( + # the only difference with parent is that the search method is always KNN + search_method = KNN + + super().__init__( cases_dataset=cases_dataset, + labels_dataset=labels_dataset, targets_dataset=targets_dataset, + search_method=search_method, k=k, projection=projection, + case_returns=case_returns, batch_size=batch_size, - **search_method_kwargs, - ) - - def __initialize_cases_dataset( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], - batch_size: Optional[int], - ) -> int: - """ - Factorization of `__init__()` method for dataset related attributes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - labels_dataset - Labels associated to the examples in the dataset. - Indices should match with cases_dataset. - targets_dataset - Targets associated to the cases_dataset for dataset projection. - See `projection` for detail. - batch_size - Number of sample treated simultaneously when using the datasets. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - - Returns - ------- - batch_size - Number of sample treated simultaneously when using the datasets. - Extracted from the datasets in case they are `tf.data.Dataset`. - Otherwise, the input value. 
- """ - # at least one dataset provided - if isinstance(cases_dataset, tf.data.Dataset): - # set batch size (ignore provided argument) and cardinality - if isinstance(cases_dataset.element_spec, tuple): - batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy() - else: - batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() - - cardinality = cases_dataset.cardinality().numpy() - else: - # if case_dataset is not a `tf.data.Dataset`, then neither should the other. - assert not isinstance(labels_dataset, tf.data.Dataset) - assert not isinstance(targets_dataset, tf.data.Dataset) - # set batch size and cardinality - batch_size = min(batch_size, len(cases_dataset)) - cardinality = math.ceil(len(cases_dataset) / batch_size) - - # verify cardinality and create datasets from the tensors - self.cases_dataset = sanitize_dataset( - cases_dataset, batch_size, cardinality - ) - self.labels_dataset = sanitize_dataset( - labels_dataset, batch_size, cardinality - ) - self.targets_dataset = sanitize_dataset( - targets_dataset, batch_size, cardinality - ) - - # if the provided `cases_dataset` has several columns - if isinstance(self.cases_dataset.element_spec, tuple): - # switch case on the number of columns of `cases_dataset` - if len(self.cases_dataset.element_spec) == 2: - assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." - + "Hence, `labels_dataset` should be empty." - ) - self.labels_dataset = self.cases_dataset.map(lambda x, y: y) - self.cases_dataset = self.cases_dataset.map(lambda x, y: x) - - elif len(self.cases_dataset.element_spec) == 3: - assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." - + "Hence, `labels_dataset` should be empty." - ) - assert self.targets_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." - + "Hence, `labels_dataset` should be empty." - ) - self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) - self.labels_dataset = self.cases_dataset.map(lambda x, y, t: y) - self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) - else: - raise AttributeError( - "`cases_dataset` cannot possess more than 3 columns," - + f"{len(self.cases_dataset.element_spec)} were detected." - ) - - self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) - if self.labels_dataset is not None: - self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) - if self.targets_dataset is not None: - self.targets_dataset = self.targets_dataset.prefetch(tf.data.AUTOTUNE) - - return batch_size - - def set_k(self, k: int): - """ - Setter for the k parameter. - - Parameters - ---------- - k - Number of examples to return, it should be a positive integer. - """ - assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" - self.k = k - self.search_method.set_k(k) - - def set_returns(self, returns: Union[List[str], str]): - """ - Set `self.returns` used to define returned elements in `self.explain()`. - - Parameters - ---------- - returns - Most elements are useful in `xplique.plots.plot_examples()`. - `returns` can be set to 'all' for all possible elements to be returned. - - 'examples' correspond to the expected examples, - the inputs may be included in first position. (n, k(+1), ...) - - 'weights' the weights in the input space used in the projection. - They are associated to the input and the examples. (n, k(+1), ...) - - 'distances' the distances between the inputs and the corresponding examples. 
- They are associated to the examples. (n, k, ...) - - 'labels' if provided through `dataset_labels`, - they are the labels associated with the examples. (n, k, ...) - - 'include_inputs' specify if inputs should be included in the returned elements. - Note that it changes the number of returned elements from k to k+1. - """ - possibilities = ["examples", "weights", "distances", "labels", "include_inputs"] - default = "examples" - self.returns = _sanitize_returns(returns, possibilities, default) - - @sanitize_inputs_targets - def explain( - self, - inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, - ): - """ - Compute examples to explain the inputs. - It project inputs with `self.projection` in the search space - and find examples with `self.search_method`. - - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - More information in the documentation. - targets - Tensor or Array passed to the projection function. - - Returns - ------- - return_dict - Dictionnary with listed elements in `self.returns`. - If only one element is present it returns the element. - The elements that can be returned are: - examples, weights, distances, indices, and labels. - """ - # project inputs - projected_inputs = self.projection(inputs, targets) - - # look for closest elements to projected inputs - search_output = self.search_method(projected_inputs) - - # manage returned elements - return self.format_search_output(search_output, inputs, targets) - - def __call__( - self, - inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, - ): - """explain alias""" - return self.explain(inputs, targets) - - def format_search_output( - self, - search_output: Dict[str, tf.Tensor], - inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, - ): - """ - Format the output of the `search_method` to match the expected returns in `self.returns`. - - Parameters - ---------- - search_output - Dictionnary with the required outputs from the `search_method`. - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - More information in the documentation. - targets - Tensor or Array passed to the projection function. - Here it is used by the explain function of attribution methods. - Refer to the corresponding method documentation for more detail. - Note that the default method is `Saliency`. - - Returns - ------- - return_dict - Dictionnary with listed elements in `self.returns`. - If only one element is present it returns the element. - The elements that can be returned are: - examples, weights, distances, indices, and labels. 
- """ - return_dict = {} - - examples = dataset_gather(self.cases_dataset, search_output["indices"]) - examples_labels = dataset_gather(self.labels_dataset, search_output["indices"]) - examples_targets = dataset_gather( - self.targets_dataset, search_output["indices"] + distance=distance ) - - # add examples and weights - if "examples" in self.returns or "weights" in self.returns: - if "include_inputs" in self.returns: - # include inputs - inputs = tf.expand_dims(inputs, axis=1) - examples = tf.concat([inputs, examples], axis=1) - if targets is not None: - targets = tf.expand_dims(targets, axis=1) - examples_targets = tf.concat([targets, examples_targets], axis=1) - else: - examples_targets = [None] * len(examples) - if "examples" in self.returns: - return_dict["examples"] = examples - if "weights" in self.returns: - # get weights of examples (n, k, ...) - # we iterate on the inputs dimension through maps - # and ask weights for batch of examples - weights = [] - for ex, ex_targ in zip(examples, examples_targets): - if isinstance(self.projection, Projection): - # get weights in the input space - weights.append(self.projection.get_input_weights(ex, ex_targ)) - else: - raise AttributeError( - "Cannot extract weights from the provided projection function" - + "Either remove 'weights' from the `case_returns` or" - + "inherit from `Projection` and overwrite `get_input_weights`." - ) - - return_dict["weights"] = tf.stack(weights, axis=0) - - # optimization test TODO - # return_dict["weights"] = tf.vectorized_map( - # fn=lambda x: self.projection.get_input_weights(x[0], x[1]), - # elems=(examples, examples_targets), - # # fn_output_signature=tf.float32, - # ) - - # add indices, distances, and labels - if "distances" in self.returns: - return_dict["distances"] = search_output["distances"] - if "labels" in self.returns: - assert ( - examples_labels is not None - ), "The method cannot return labels without a label dataset." - return_dict["labels"] = examples_labels - - # return a dict only different variables are returned - if len(return_dict) == 1: - return list(return_dict.values())[0] - return return_dict From 539c387a382a166526c32b5eeea9484e6d374ad0 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 12 Feb 2024 16:50:22 +0100 Subject: [PATCH 013/138] example based: adapt cole --- xplique/example_based/cole.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py index 85c4c2d6..ded8fbfd 100644 --- a/xplique/example_based/cole.py +++ b/xplique/example_based/cole.py @@ -45,8 +45,6 @@ class Cole(SimilarExamples): Batch size and cardinality of other dataset should match `cases_dataset`. Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. - search_method - An algorithm to search the examples in the projected space. k The number of examples to retrieve. Default value is `1`. 
distance @@ -87,7 +85,6 @@ def __init__( model: tf.keras.Model, targets_dataset: Union[tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.Tensor, np.ndarray]] = None, - search_method: Type[BaseSearchMethod] = KNN, k: int = 1, distance: Union[str, Callable] = "euclidean", case_returns: Optional[Union[List[str], str]] = "examples", @@ -110,7 +107,6 @@ def __init__( cases_dataset, labels_dataset, targets_dataset, - search_method, k, projection, case_returns, From 7a58c9fd2b7453dd1b6a245d69167b5c3930bfab Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 12 Feb 2024 16:50:59 +0100 Subject: [PATCH 014/138] example based: adapt tests --- tests/example_based/test_cole.py | 4 ---- tests/example_based/test_image_plot.py | 3 +-- tests/example_based/test_similar_examples.py | 3 --- tests/example_based/test_split_projection.py | 2 +- 4 files changed, 2 insertions(+), 10 deletions(-) diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index 9fb1b73b..9d8c63a0 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -74,7 +74,6 @@ def test_cole_attribution(): method_constructor = Cole( cases_dataset=x_train, targets_dataset=y_train, - search_method=KNN, k=k, batch_size=7, distance="euclidean", @@ -89,7 +88,6 @@ def test_cole_attribution(): method_call = SimilarExamples( cases_dataset=x_train, targets_dataset=y_train, - search_method=KNN, k=k, distance=euclidean_dist, projection=projection, @@ -98,7 +96,6 @@ def test_cole_attribution(): method_different_distance = Cole( cases_dataset=x_train, targets_dataset=y_train, - search_method=KNN, k=k, batch_size=2, distance=np.inf, # infinity norm based distance @@ -157,7 +154,6 @@ def test_cole_spliting(): method = Cole( cases_dataset=x_train, targets_dataset=y_train, - search_method=KNN, k=k, case_returns=["examples", "weights", "include_inputs"], model=model, diff --git a/tests/example_based/test_image_plot.py b/tests/example_based/test_image_plot.py index f8254d17..25908f44 100644 --- a/tests/example_based/test_image_plot.py +++ b/tests/example_based/test_image_plot.py @@ -15,7 +15,7 @@ from xplique.attributions import Occlusion, Saliency from xplique.example_based import Cole, SimilarExamples -from xplique.example_based.projections import CustomProjection +from xplique.example_based.projections import Projection from xplique.example_based.search_methods import KNN from xplique.plots.image import plot_examples @@ -70,7 +70,6 @@ def test_plot_cole_spliting(): cases_dataset=x_train, labels_dataset=tf.argmax(y_train, axis=1), targets_dataset=y_train, - search_method=KNN, k=k, case_returns="all", model=model, diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py index fbc5824e..3e6c0401 100644 --- a/tests/example_based/test_similar_examples.py +++ b/tests/example_based/test_similar_examples.py @@ -148,7 +148,6 @@ def test_similar_examples_basic(): method = SimilarExamples( cases_dataset=x_train, projection=identity_projection, - search_method=KNN, k=k, batch_size=3, distance="euclidean", @@ -194,7 +193,6 @@ def test_similar_examples_return_multiple_elements(): cases_dataset=x_train, labels_dataset=y_train, projection=identity_projection, - search_method=KNN, k=1, batch_size=3, distance="euclidean", @@ -274,7 +272,6 @@ def test_similar_examples_weighting(): cases_dataset=x_train, labels_dataset=y_train, projection=weighting_function, - search_method=KNN, k=k, batch_size=5, distance="euclidean", diff --git 
a/tests/example_based/test_split_projection.py b/tests/example_based/test_split_projection.py index bc560b48..db3105d1 100644 --- a/tests/example_based/test_split_projection.py +++ b/tests/example_based/test_split_projection.py @@ -40,7 +40,7 @@ def test_attribution_latent_layer(): last_conv_layer = model.get_layer("conv2d_2") flatten_layer = model.get_layer("flatten") - # default should not include model spliting + # default should not include model splitting projection_default = AttributionProjection(model) assert projection_default.latent_layer is None From 2255896be846f63e7976851f93f00696f5f262d1 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:07:30 +0100 Subject: [PATCH 015/138] base example method: dataset projections in projections --- xplique/example_based/base_example_method.py | 29 +++++++++++++++----- 1 file changed, 22 insertions(+), 7 deletions(-) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index eca11a9e..2c4b99df 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -93,19 +93,34 @@ def __init__( batch_size = self.__initialize_cases_dataset( cases_dataset, labels_dataset, targets_dataset, batch_size ) + self.k = k self.set_returns(case_returns) - self.projection = projection + + assert hasattr(projection, "__call__"), "projection should be a callable." + + # check projection type + if isinstance(projection, Projection): + self.projection = projection + elif hasattr(projection, "__call__"): + self.projection = Projection(get_weights=None, space_projection=projection) + else: + raise AttributeError( + "projection should be a `Projection` or a `Callable`, not a" + + f"{type(projection)}" + ) + + # project dataset + projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, + self.targets_dataset) # set `search_returns` if not provided and overwrite it otherwise search_method_kwargs["search_returns"] = ["indices", "distances"] # initiate search_method self.search_method = search_method( - cases_dataset=cases_dataset, - targets_dataset=targets_dataset, + cases_dataset=projected_cases_dataset, k=k, - projection=projection, batch_size=batch_size, **search_method_kwargs, ) @@ -266,7 +281,7 @@ def explain( Returns ------- return_dict - Dictionnary with listed elements in `self.returns`. + Dictionary with listed elements in `self.returns`. If only one element is present it returns the element. The elements that can be returned are: examples, weights, distances, indices, and labels. @@ -300,7 +315,7 @@ def format_search_output( Parameters ---------- search_output - Dictionnary with the required outputs from the `search_method`. + Dictionary with the required outputs from the `search_method`. inputs Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). @@ -314,7 +329,7 @@ def format_search_output( Returns ------- return_dict - Dictionnary with listed elements in `self.returns`. + Dictionary with listed elements in `self.returns`. If only one element is present it returns the element. The elements that can be returned are: examples, weights, distances, indices, and labels. 
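For reference, here is a minimal usage sketch of the `SimilarExamples` API as it
stands after the patches above. The toy data, the identity projection and the
parameter values are illustrative assumptions, not part of the patch series:

```
import numpy as np
from xplique.example_based import SimilarExamples

# hypothetical toy case base: 100 images with their labels
cases = np.random.rand(100, 32, 32, 3).astype(np.float32)
labels = np.random.randint(0, 10, size=100)

explainer = SimilarExamples(
    cases_dataset=cases,
    labels_dataset=labels,
    projection=lambda inputs, targets=None: inputs,  # identity projection
    k=3,
    case_returns=["examples", "distances", "labels", "include_inputs"],
    batch_size=10,
    distance="euclidean",
)

outputs = explainer.explain(cases[:5])
examples = outputs["examples"]    # (5, k + 1, 32, 32, 3), inputs prepended
distances = outputs["distances"]  # (5, k)
```
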
From ead19ea762c3be35b71bb8b327b318b5807c6919 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:08:17 +0100 Subject: [PATCH 016/138] base projection: dataset projections in projections --- xplique/example_based/projections/base.py | 60 ++++++++++++++++++----- 1 file changed, 48 insertions(+), 12 deletions(-) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index debe261a..9581cc22 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -7,13 +7,13 @@ import tensorflow as tf import numpy as np -from ...commons import sanitize_inputs_targets +from ...commons import sanitize_inputs_targets, get_device from ...types import Callable, Union, Optional -class Projection(ABC): +class Projection(ABC): # TODO See if this should stay as abstract class or if we should remove CustomProjection """ - Base class used by `NaturalExampleBasedExplainer` to projet samples to a meaningfull space + Base class used by `NaturalExampleBasedExplainer` to project samples to a meaningful space for the model to explain. Projection have two parts a `space_projection` and `weights`, to apply a projection, @@ -39,14 +39,14 @@ def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray), targets: Union(tf.Tensor, np.ndarray) = None): ''' Example of function to get weights, - projected_inputs are the elements for which weights are comlputed. - targets are optionnal additionnal parameters for weights computation. + projected_inputs are the elements for which weights are computed. + targets are optional additional parameters for weights computation. ''' weights = ... # do some magic with inputs and targets, it should use the model. return weights ``` space_projection - Callable that take samples and return a Tensor in the projected sapce. + Callable that take samples and return a Tensor in the projected space. An example of projected space is the latent space of a model. See `LatentSpaceProjection` """ @@ -75,6 +75,9 @@ def __init__(self, get_weights: Callable = None, space_projection: Callable = No ) self.space_projection = space_projection + # set device + self.device = get_device() + def get_input_weights( self, inputs: Union[tf.Tensor, np.ndarray], @@ -83,7 +86,7 @@ def get_input_weights( """ Depending on the projection, we may not be able to visualize weights as they are after the space projection. In this case, this method should be overwritten, - as in `AttributionProjection` that applies an upsampling. + as in `AttributionProjection` that applies an up-sampling. Parameters ---------- @@ -98,7 +101,7 @@ def get_input_weights( ------- input_weights Tensor with the same dimension as `inputs` modulo the channels. - They are an upsampled version of the actual weights used in the projection. + They are an up-sampled version of the actual weights used in the projection. """ projected_inputs = self.space_projection(inputs) assert tf.reduce_all(tf.equal(projected_inputs, inputs)), ( @@ -137,10 +140,10 @@ def project( projected_samples The samples projected in the new space. 
""" - projected_inputs = self.space_projection(inputs) - weights = self.get_weights(projected_inputs, targets) - - return tf.multiply(weights, projected_inputs) + with tf.device(self.device): + projected_inputs = self.space_projection(inputs) + weights = self.get_weights(projected_inputs, targets) + return tf.multiply(weights, projected_inputs) def __call__( self, @@ -149,3 +152,36 @@ def __call__( ): """project alias""" return self.project(inputs, targets) + + def project_dataset( + self, + cases_dataset: tf.data.Dataset, + targets_dataset: Optional[tf.data.Dataset] = None, + ) -> Optional[tf.data.Dataset]: + """ + Apply the projection to a dataset through `Dataset.map` + + Parameters + ---------- + cases_dataset + Dataset of samples to be projected. + targets_dataset + Dataset of targets for the samples. + + Returns + ------- + projected_dataset + The projected dataset. + """ + # project dataset, note that projection is done at iteration time + if targets_dataset is None: + projected_cases_dataset = cases_dataset.map(self.project) + else: + # in case targets are provided, we zip the datasets and project them together + projected_cases_dataset = tf.data.Dataset.zip( + (cases_dataset, targets_dataset) + ).map( + lambda x, y: self.project(x, y) + ) + + return projected_cases_dataset From 48bc853ad0edb641bce6c18c888ac7ced74ff01c Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:09:42 +0100 Subject: [PATCH 017/138] latent space projection: dataset projections in projections --- .../example_based/projections/latent_space.py | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py index 04ce0304..3bfc1d9f 100644 --- a/xplique/example_based/projections/latent_space.py +++ b/xplique/example_based/projections/latent_space.py @@ -8,6 +8,7 @@ from ...types import Callable, Union from .base import Projection +from .commons import model_splitting class LatentSpaceProjection(Projection): @@ -31,18 +32,6 @@ class LatentSpaceProjection(Projection): """ def __init__(self, model: Callable, latent_layer: Union[str, int] = -1): - self.model = model - - # split the model if a latent_layer is provided - if latent_layer == "last_conv": - self.latent_layer = next( - layer for layer in model.layers[::-1] if hasattr(layer, "filters") - ) - else: - self.latent_layer = find_layer(model, latent_layer) - - latent_space_projection = tf.keras.Model( - model.input, self.latent_layer.output, name="features_extractor" - ) - - super().__init__(space_projection=latent_space_projection) + features_extractor, _ = model_splitting(model, latent_layer) + super().__init__(space_projection=features_extractor) + # TODO test if gpu is used for the projection From fcacfd1f263b654ac168470dca42eaecb53fb865 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:10:01 +0100 Subject: [PATCH 018/138] attribution projection: dataset projections in projections --- .../example_based/projections/attributions.py | 94 ++++++++++--------- 1 file changed, 52 insertions(+), 42 deletions(-) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index 7f9f624f..2ebf37c8 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -1,17 +1,18 @@ """ Attribution, a projection from example based module """ - +import warnings import tensorflow as tf import numpy as np 
+from xplique.types import Optional from ...attributions.base import BlackBoxExplainer from ...attributions import Saliency -from ...commons import find_layer from ...types import Callable, Union, Optional from .base import Projection +from .commons import model_splitting class AttributionProjection(Projection): @@ -19,13 +20,13 @@ class AttributionProjection(Projection): Projection build on an attribution function to provide local projections. This class is used as the projection of the `Cole` similar examples method. - Depending on the `latent_layer`, the model will be splited between + Depending on the `latent_layer`, the model will be splitted between the feature extractor and the predictor. The feature extractor will become the `space_projection()` method, then the predictor will be used to build the attribution method explain, and its `explain()` method will become the `get_weights()` method. - If no `latent_layer` is provided, the model is not splited, + If no `latent_layer` is provided, the model is not splitted, the `space_projection()` is the identity function, and the attributions (`get_weights()`) are compute on the whole model. @@ -42,7 +43,7 @@ class AttributionProjection(Projection): If an `int` is provided it will be interpreted as a layer index. If a `string` is provided it will look for the layer name. - The method as described in the paper apply the separation on the last convolutionnal layer. + The method as described in the paper apply the separation on the last convolutional layer. To do so, the `"last_conv"` parameter will extract it. Otherwise, `-1` could be used for the last layer before softmax. attribution_method @@ -60,53 +61,23 @@ def __init__( latent_layer: Optional[Union[str, int]] = None, **attribution_kwargs ): - self.model = model + self.method = method if latent_layer is None: # no split self.latent_layer = None - space_projection = lambda inputs: inputs - get_weights = method(model, **attribution_kwargs) + space_projection = None + self.predictor = model else: # split the model if a latent_layer is provided - if latent_layer == "last_conv": - self.latent_layer = next( - layer for layer in model.layers[::-1] if hasattr(layer, "filters") - ) - else: - self.latent_layer = find_layer(model, latent_layer) - - space_projection = tf.keras.Model( - model.input, self.latent_layer.output, name="features_extractor" - ) - self.predictor = tf.keras.Model( - self.latent_layer.output, model.output, name="predictor" - ) - get_weights = method(self.predictor, **attribution_kwargs) + space_projection, self.predictor = model_splitting(model, latent_layer) + + # compute attributions + get_weights = self.method(self.predictor, **attribution_kwargs) # set methods super().__init__(get_weights, space_projection) - # attribution methods output do not have channel - # we wrap get_weights to expend dimensions if needed - self.__wrap_get_weights_to_extend_channels(self.get_weights) - - def __wrap_get_weights_to_extend_channels(self, get_weights: Callable): - """ - Extend channel if miss match between inputs and weights - """ - - def wrapped_get_weights(inputs, targets): - weights = get_weights(inputs, targets) - weights = tf.cond( - pred=weights.shape == inputs.shape, - true_fn=lambda: weights, - false_fn=lambda: tf.expand_dims(weights, axis=-1), - ) - return weights - - self.get_weights = wrapped_get_weights - def get_input_weights( self, inputs: Union[tf.Tensor, np.ndarray], @@ -154,3 +125,42 @@ def get_input_weights( false_fn=resize_fn, ) return input_weights + + def 
project_dataset( + self, + cases_dataset: tf.data.Dataset, + targets_dataset: tf.data.Dataset, + ) -> tf.data.Dataset: + """ + Apply the projection to a dataset without `Dataset.map`. + Because attribution methods create a `tf.data.Dataset` for batching, + however doing so inside a `Dataset.map` is not recommended. + + Parameters + ---------- + cases_dataset + Dataset of samples to be projected. + targets_dataset + Dataset of targets for the samples. + + Returns + ------- + projected_dataset + The projected dataset. + """ + # TODO see if a warning is needed + + projected_cases_dataset = [] + batch_size = None + + # iteratively project the dataset + for inputs, targets in tf.data.Dataset.zip((cases_dataset, targets_dataset)): + if batch_size is None: + batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this + projected_cases_dataset.append(self.project(inputs, targets)) + + projected_cases_dataset = tf.concat(projected_cases_dataset, axis=0) + projected_cases_dataset = tf.data.Dataset.from_tensor_slices(projected_cases_dataset) + projected_cases_dataset = projected_cases_dataset.batch(batch_size) + + return projected_cases_dataset \ No newline at end of file From 4dbb6b6141cd903080266cc491038436dfc55138 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:10:39 +0100 Subject: [PATCH 019/138] example based: introduce hadamard projection --- xplique/example_based/projections/__init__.py | 2 +- xplique/example_based/projections/hadamard.py | 120 ++++++++++++++++++ 2 files changed, 121 insertions(+), 1 deletion(-) create mode 100644 xplique/example_based/projections/hadamard.py diff --git a/xplique/example_based/projections/__init__.py b/xplique/example_based/projections/__init__.py index d5d4cf90..4b33a895 100644 --- a/xplique/example_based/projections/__init__.py +++ b/xplique/example_based/projections/__init__.py @@ -4,5 +4,5 @@ from .attributions import AttributionProjection from .base import Projection -from .custom import CustomProjection +from .hadamard import HadamardProjection from .latent_space import LatentSpaceProjection diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py new file mode 100644 index 00000000..87234883 --- /dev/null +++ b/xplique/example_based/projections/hadamard.py @@ -0,0 +1,120 @@ +""" +Attribution, a projection from example based module +""" +import warnings + +import tensorflow as tf +import numpy as np +from xplique.types import Optional + +from ...commons import get_gradient_functions +from ...types import Callable, Union, Optional, OperatorSignature + +from .base import Projection +from .commons import model_splitting + + +class HadamardProjection(Projection): + """ + Projection build on an the latent space and the gradient. + This class is used as the projection of the `Cole` similar examples method. + + Depending on the `latent_layer`, the model will be splitted between + the feature extractor and the predictor. + The feature extractor will become the `space_projection()` method, then + the predictor will be used to build the attribution method explain, and + its `explain()` method will become the `get_weights()` method. + + If no `latent_layer` is provided, the model is not splitted, + the `space_projection()` is the identity function, and + the attributions (`get_weights()`) are compute on the whole model. + + Parameters + ---------- + model + The model from which we want to obtain explanations. 
+    latent_layer
+        Layer used to split the model, the first part will be used for projection and
+        the second to compute the attributions. By default, the model is not split.
+        For such split, the `model` should be a `tf.keras.Model`.
+
+        Layer to target for the outputs (e.g logits or after softmax).
+        If an `int` is provided it will be interpreted as a layer index.
+        If a `string` is provided it will look for the layer name.
+
+        The method as described in the paper applies the separation on the last convolutional layer.
+        To do so, the `"last_conv"` parameter will extract it.
+        Otherwise, `-1` could be used for the last layer before softmax.
+    operator
+        Operator to use to compute the explanation, if None, use standard predictions.
+    """
+
+    def __init__(
+        self,
+        model: Callable,
+        latent_layer: Optional[Union[str, int]] = None,
+        operator: Optional[OperatorSignature] = None,
+    ):
+        if latent_layer is None:
+            # no split
+            self.latent_layer = None
+            space_projection = None
+            self.predictor = model
+        else:
+            # split the model if a latent_layer is provided
+            space_projection, self.predictor = model_splitting(model, latent_layer)
+
+        # the weights are given by the gradient of the operator
+        gradients, _ = get_gradient_functions(self.predictor, operator)
+        get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets)  # TODO check usage of gpu
+
+        # set methods
+        super().__init__(get_weights, space_projection)
+
+    def get_input_weights(
+        self,
+        inputs: Union[tf.Tensor, np.ndarray],
+        targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
+    ):
+        """
+        For visualization purposes only, we may be interested in projecting weights
+        from the projected space back to the input space.
+        This is applied only if there is a difference in dimension.
+        We assume here that we are treating images and an upsampling is applied.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Additional parameter for `self.get_weights` function.
+
+        Returns
+        -------
+        input_weights
+            Tensor with the same dimension as `inputs` modulo the channels.
+            They are an upsampled version of the actual weights used in the projection.
+ """ + projected_inputs = self.space_projection(inputs) + weights = self.get_weights(projected_inputs, targets) + + # take mean over channels for images + channel_mean_fn = lambda: tf.reduce_mean(weights, axis=-1, keepdims=True) + weights = tf.cond( + pred=tf.shape(weights).shape[0] < 4, + true_fn=lambda: weights, + false_fn=channel_mean_fn, + ) + + # resizing + resize_fn = lambda: tf.image.resize( + weights, inputs.shape[1:-1], method="bicubic" + ) + input_weights = tf.cond( + pred=projected_inputs.shape == inputs.shape, + true_fn=lambda: weights, + false_fn=resize_fn, + ) + return input_weights From 6e8ddb1d9fbc693cd7669ac6119f54a236a1ac5f Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:13:32 +0100 Subject: [PATCH 020/138] base search method: remove projection from search --- xplique/example_based/search_methods/base.py | 32 ++------------------ 1 file changed, 2 insertions(+), 30 deletions(-) diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 13a05f6a..1c7c0f1b 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -11,8 +11,6 @@ from ...commons import sanitize_dataset -from ..projections.base import Projection - def _sanitize_returns(returns: Optional[Union[List[str], str]] = None, possibilities: List[str] = None, @@ -69,27 +67,8 @@ class BaseSearchMethod(ABC): cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. - targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. k The number of examples to retrieve. - projection - Projection or Callable that project samples from the input space to the search space. - The search space sould be a space where distance make sense for the model. - It should not be `None`, otherwise, - all examples could be computed only with the `search_method`. - - Example of Callable: - ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): - ''' - Example of projection, - inputs are the elements to project. - targets are optionnal parameters to orientated the projection. - ''' - projected_inputs = # do some magic on inputs, it should use the model. - return projected_inputs - ``` search_returns String or list of string with the elements to return in `self.find_examples()`. See `self.set_returns()` for detail. 
@@ -101,12 +80,11 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], k: int = 1, - projection: Union[Projection, Callable] = None, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, ): # pylint: disable=R0801 + # set batch size if hasattr(cases_dataset, "_batch_size"): self.batch_size = cases_dataset._batch_size @@ -114,19 +92,14 @@ def __init__( self.batch_size = batch_size self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size) - self.targets_dataset = sanitize_dataset(targets_dataset, self.batch_size) - if self.targets_dataset is None: - # The `find_examples()` method need to be able to iterate on `self.targets_dataset` - self.targets_dataset = [None] * self.cases_dataset.cardinality().numpy() self.set_k(k) self.set_returns(search_returns) - self.projection = projection def set_k(self, k: int): """ Change value of k with constructing a new `BaseSearchMethod`. - It is useful because the constructor can be computionnaly expensive. + It is useful because the constructor can be computationally expensive. Parameters ---------- @@ -170,7 +143,6 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]): ---------- inputs Tensor or Array. Input samples to be explained. - Assumed to have been already projected. Expected shape among (N, W), (N, T, W), (N, W, H, C). """ raise NotImplementedError() From 5abb298d500e0485fab7fa0373cab7d809a0459b Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:13:45 +0100 Subject: [PATCH 021/138] knn search method: remove projection from search --- xplique/example_based/search_methods/knn.py | 37 +++------------------ 1 file changed, 4 insertions(+), 33 deletions(-) diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index ed8d721b..8530f4a6 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -22,27 +22,8 @@ class KNN(BaseSearchMethod): cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. - targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. k The number of examples to retrieve. - projection - Projection or Callable that project samples from the input space to the search space. - The search space sould be a space where distance make sense for the model. - It should not be `None`, otherwise, - all examples could be computed only with the `search_method`. - - Example of Callable: - ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): - ''' - Example of projection, - inputs are the elements to project. - targets are optionnal parameters to orientated the projection. - ''' - projected_inputs = # do some magic on inputs, it should use the model. - return projected_inputs - ``` search_returns String or list of string with the elements to return in `self.find_examples()`. See `self.set_returns()` for detail. 
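With the projection gone from the search, `KNN` keeps only the `distance` argument: a string or p-norm forwarded to `tf.norm`, or a custom callable taking two tensors, as the hunk below shows. A short sketch with an assumed cosine distance, which is not one of the built-ins:

```
import numpy as np
import tensorflow as tf
from xplique.example_based.search_methods import KNN

def cosine_distance(x1, x2):
    # normalized dot product; `axis=-1` keeps it valid whether the
    # callable receives single samples or a batch of candidates
    x1 = tf.nn.l2_normalize(x1, axis=-1)
    x2 = tf.nn.l2_normalize(x2, axis=-1)
    return 1.0 - tf.reduce_sum(x1 * x2, axis=-1)

cases = np.random.rand(16, 10).astype(np.float32)
knn = KNN(cases_dataset=cases, k=2, distance=cosine_distance,
          search_returns=["indices", "distances"])
outputs = knn(np.random.rand(3, 10).astype(np.float32))
```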
@@ -59,15 +40,13 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], k: int = 1, - projection: Union[Projection, Callable] = None, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): # pylint: disable=R0801 super().__init__( - cases_dataset, targets_dataset, k, projection, search_returns, batch_size + cases_dataset, k, search_returns, batch_size ) if hasattr(distance, "__call__"): @@ -131,25 +110,17 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, t batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1)) # iterate on batches - for batch_index, (cases, targets) in enumerate( - zip(self.cases_dataset, self.targets_dataset) - ): - # project batch of dataset cases - if self.projection is not None: - projected_cases = self.projection.project(cases, targets) - else: - projected_cases = cases - + for batch_index, cases in enumerate(self.cases_dataset): # add new elements # (n, current_bs, 2) - indices = batch_indices[:, : tf.shape(projected_cases)[0]] + indices = batch_indices[:, : tf.shape(cases)[0]] new_indices = tf.stack( [tf.fill(indices.shape, tf.cast(batch_index, tf.int32)), indices], axis=-1 ) # compute distances # (n, current_bs) - distances = self.crossed_distances_fn(inputs, projected_cases) + distances = self.crossed_distances_fn(inputs, cases) # (n, k+curent_bs, 2) concatenated_indices = tf.concat([best_indices, new_indices], axis=1) From 92056f161c21c0d706fdb0d632e0da5684264e7a Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:14:58 +0100 Subject: [PATCH 022/138] cole: add hadamard product possibility --- xplique/example_based/cole.py | 54 +++++++++++++++++++++++------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py index ded8fbfd..3fdfc82f 100644 --- a/xplique/example_based/cole.py +++ b/xplique/example_based/cole.py @@ -6,13 +6,10 @@ import tensorflow as tf from ..attributions.base import BlackBoxExplainer -from ..attributions import Saliency from ..types import Callable, List, Optional, Union, Type from .similar_examples import SimilarExamples -from .projections import AttributionProjection -from .search_methods import KNN -from .search_methods import BaseSearchMethod +from .projections import AttributionProjection, HadamardProjection class Cole(SimilarExamples): @@ -31,19 +28,19 @@ class Cole(SimilarExamples): cases_dataset The dataset used to train the model, examples are extracted from the dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. labels_dataset Labels associated to the examples in the dataset. Indices should match with cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other dataset should match `cases_dataset`. 
- Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset Targets associated to the cases_dataset for dataset projection. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other dataset should match `cases_dataset`. - Becareful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. k The number of examples to retrieve. Default value is `1`. @@ -59,6 +56,8 @@ class Cole(SimilarExamples): batch_size Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + device + Device to use for the projection, if None, use the default device. latent_layer Layer used to split the model, the first part will be used for projection and the second to compute the attributions. By default, the model is not split. @@ -68,13 +67,13 @@ class Cole(SimilarExamples): If an `int` is provided it will be interpreted as a layer index. If a `string` is provided it will look for the layer name. - The method as described in the paper apply the separation on the last convolutionnal layer. + The method as described in the paper apply the separation on the last convolutional layer. To do so, the `"last_conv"` parameter will extract it. Otherwise, `-1` could be used for the last layer before softmax. attribution_method Class of the attribution method to use for projection. It should inherit from `xplique.attributions.base.BlackBoxExplainer`. - Ignored if a projection is given. + By default, it computes the gradient to make the Hadamard product in the latent space. attribution_kwargs Parameters to be passed at the construction of the `attribution_method`. 
""" @@ -89,20 +88,39 @@ def __init__( distance: Union[str, Callable] = "euclidean", case_returns: Optional[Union[List[str], str]] = "examples", batch_size: Optional[int] = 32, + device: Optional[str] = None, latent_layer: Optional[Union[str, int]] = None, - attribution_method: Type[BlackBoxExplainer] = Saliency, + attribution_method: Union[str, Type[BlackBoxExplainer]] = "gradient", **attribution_kwargs, ): - # buil attribution projection - projection = AttributionProjection( - model=model, - method=attribution_method, - latent_layer=latent_layer, - **attribution_kwargs, - ) - assert targets_dataset is not None + # build the corresponding projection + if isinstance(attribution_method, str) and attribution_method.lower() == "gradient": + + operator = attribution_kwargs.get("operator", None) + + projection = HadamardProjection( + model=model, + latent_layer=latent_layer, + operator=operator, + device=device, + ) + elif issubclass(attribution_method, BlackBoxExplainer): + # build attribution projection + projection = AttributionProjection( + model=model, + method=attribution_method, + latent_layer=latent_layer, + device=device, + **attribution_kwargs, + ) + else: + raise ValueError( + f"attribution_method should be 'gradient' or a subclass of BlackBoxExplainer," +\ + "not {attribution_method}" + ) + super().__init__( cases_dataset, labels_dataset, From 3f69e7b034add4f9b5dab98404aa734ac15cede3 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:16:55 +0100 Subject: [PATCH 023/138] projections: factorize model splitting --- xplique/example_based/projections/commons.py | 60 ++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 xplique/example_based/projections/commons.py diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py new file mode 100644 index 00000000..59dc7ee8 --- /dev/null +++ b/xplique/example_based/projections/commons.py @@ -0,0 +1,60 @@ +""" +Commons for projections +""" + +import tensorflow as tf + +from ...commons import find_layer +from ...types import Callable, Union, Optional, Tuple + + +def model_splitting(model: tf.keras.Model, + latent_layer: Union[str, int], + return_layer: bool = False, + ) -> Tuple[Callable, Callable, Optional[tf.keras.layers.Layer]]: + """ + Split the model into two parts, before and after the `latent_layer`. + The parts will respectively be called `features_extractor` and `predictor`. + + Parameters + ---------- + model + Model to be split. + latent_layer + Layer used to split the `model`. + + Layer to target for the outputs (e.g logits or after softmax). + If an `int` is provided it will be interpreted as a layer index. + If a `string` is provided it will look for the layer name. + + To separate after the last convolution, `"last_conv"` can be used. + Otherwise, `-1` could be used for the last layer before softmax. + return_layer + If True, return the latent layer found. + + Returns + ------- + features_extractor + Model used to project the inputs. + predictor + Model used to compute the attributions. + latent_layer + Layer used to split the `model`. 
+ """ + if latent_layer == "last_conv": + latent_layer = next( + layer for layer in model.layers[::-1] if hasattr(layer, "filters") + ) + else: + latent_layer = find_layer(model, latent_layer) + + features_extractor = tf.keras.Model( + model.input, latent_layer.output, name="features_extractor" + ) + predictor = tf.keras.Model( + latent_layer.output, model.output, name="predictor" + ) + + if return_layer: + return features_extractor, predictor, latent_layer + return features_extractor, predictor \ No newline at end of file From 5cdf1c132da0d29a88e35900ac5470ce76a80700 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 28 Feb 2024 11:54:01 +0100 Subject: [PATCH 024/138] example based projections: fuse custom projection with base class --- xplique/example_based/projections/base.py | 53 ++++++++---- xplique/example_based/projections/custom.py | 90 --------------------- 2 files changed, 38 insertions(+), 105 deletions(-) delete mode 100644 xplique/example_based/projections/custom.py diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 9581cc22..54192ed5 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -11,7 +11,7 @@ from ...types import Callable, Union, Optional -class Projection(ABC): # TODO See if this should stay as abstract class or if we should remove CustomProjection +class Projection(ABC): """ Base class used by `NaturalExampleBasedExplainer` to project samples to a meaningful space for the model to explain. @@ -30,13 +30,17 @@ class Projection(ABC): # TODO See if this should stay as abstract class or if w Parameters ---------- get_weights - Callable, a function that return the weights (Tensor) for a given input (Tensor). + Either a Tensor or a Callable. + - In the case of a Tensor, weights are applied in the projected space. + - In the case of a callable, a function is expected. + It should take inputs and targets as parameters and return the weights (Tensor). Weights should have the same shape as the input (possible difference on channels). + The inputs of `get_weights()` correspond to the projected inputs. Example of `get_weights()` function: ``` def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray), - targets: Union(tf.Tensor, np.ndarray) = None): + targets: Optional[Union[tf.Tensor, np.ndarray]] = None): ''' Example of function to get weights, projected_inputs are the elements for which weights are computed. @@ -48,35 +52,53 @@ def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray), space_projection Callable that take samples and return a Tensor in the projected space. An example of projected space is the latent space of a model. See `LatentSpaceProjection` + device + Device to use for the projection, if None, use the default device. """ - def __init__(self, get_weights: Callable = None, space_projection: Callable = None): + def __init__(self, + get_weights: Optional[Union[Callable, tf.Tensor, np.ndarray]] = None, + space_projection: Optional[Callable] = None, + device: Optional[str] = None): assert get_weights is not None or space_projection is not None, ( "At least one of `get_weights` and `space_projection`" + "should not be `None`." 
             )
 
-        # set get weights
+        # set get_weights
         if get_weights is None:
             # no weights
-            get_weights = lambda inputs, _: tf.ones(tf.shape(inputs))
-        if not hasattr(get_weights, "__call__"):
+            self.get_weights = lambda inputs, _: tf.ones(tf.shape(inputs))
+        elif isinstance(get_weights, (tf.Tensor, np.ndarray)):
+            # weights is a tensor
+            # convert numpy arrays so that `weights` is defined in both cases
+            if isinstance(get_weights, np.ndarray):
+                weights = tf.convert_to_tensor(get_weights, dtype=tf.float32)
+            else:
+                weights = get_weights
+
+            # define a function that returns the weights
+            def get_weights(inputs, _ = None):
+                nweights = tf.expand_dims(weights, axis=0)
+                return tf.repeat(nweights, tf.shape(inputs)[0], axis=0)
+            self.get_weights = get_weights
+        elif hasattr(get_weights, "__call__"):
+            # weights is a function
+            self.get_weights = get_weights
+        else:
             raise TypeError(
-                f"`get_weights` should be `Callable`, not a {type(get_weights)}"
+                f"`get_weights` should be `Callable` or a Tensor, not a {type(get_weights)}"
             )
-        self.get_weights = get_weights
-
+        # set space_projection
         if space_projection is None:
-            space_projection = lambda inputs: inputs
-        if not hasattr(space_projection, "__call__"):
+            self.space_projection = lambda inputs: inputs
+        elif hasattr(space_projection, "__call__"):
+            self.space_projection = space_projection
+        else:
             raise TypeError(
                 f"`space_projection` should be a `Callable`, not a {type(space_projection)}"
            )
-        self.space_projection = space_projection
 
         # set device
-        self.device = get_device()
+        self.device = get_device(device)
 
     def get_input_weights(
         self,
diff --git a/xplique/example_based/projections/custom.py b/xplique/example_based/projections/custom.py
deleted file mode 100644
index 966c6ada..00000000
--- a/xplique/example_based/projections/custom.py
+++ /dev/null
@@ -1,90 +0,0 @@
-"""
-Custom, a projection from example based module
-"""
-
-import tensorflow as tf
-import numpy as np
-
-from ...types import Callable, Union
-
-from .base import Projection
-
-
-class CustomProjection(Projection):
-    """
-    Base class used by `NaturalExampleBasedExplainer` to projet samples to a meaningfull space
-    for the model to explain.
-
-    Projection have two parts a `space_projection` and `weights`, to apply a projection,
-    the samples are first projected to a new space and then weighted.
-    Either the `space_projection` or the `weights` could be `None` but,
-    if both are, the projection is an identity function.
-
-    At least one of the two part should include the model in the computation
-    for distance between projected elements to make sense for the model.
-
-    Note that the cost of this projection should be limited
-    as it will be applied to all samples of the train dataset.
-
-    Parameters
-    ----------
-    weights
-        Either a Tensor or a Callable.
-        - In the case of a Tensor, weights are applied in the projected space
-        (after `space_projection`).
-        Hence weights should have the same shape as a `projected_input`.
-        - In the case of a Callable, the function should return the weights when called,
-        as a way to get the weights (a Tensor)
-        It is pertinent in the case on weights dependent on the inputs, i.e. local weighting.
- - Example of `get_weights()` function: - ``` - def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray), - targets: Union(tf.Tensor, np.ndarray) = None): - ''' - Example of function to get weights, - projected_inputs are the elements for which weights are comlputed. - targets are optionnal additionnal parameters for weights computation. - ''' - weights = ... # do some magic with inputs and targets, it should use the model. - return weights - ``` - space_projection - Callable that take samples and return a Tensor in the projected sapce. - An example of projected space is the latent space of a model. - In this case, the model should be splitted and the - """ - - def __init__( - self, - weights: Union[Callable, tf.Tensor, np.ndarray] = None, - space_projection: Callable = None, - ): - # Set weights or - if weights is None or hasattr(weights, "__call__"): - # weights is already a function or there is no weights - get_weights = weights - elif isinstance(weights, (tf.Tensor, np.ndarray)): - # weights is a tensor - if isinstance(weights, np.ndarray): - weights = tf.convert_to_tensor(weights, dtype=tf.float32) - - # define a function that returns the weights - def get_weights(inputs, _ = None): - nweights = tf.expand_dims(weights, axis=0) - return tf.repeat(nweights, tf.shape(inputs)[0], axis=0) - - else: - raise TypeError( - "`weights` should be a tensor or a `Callable`," - + f"not a {type(weights)}" - ) - - # Set space_projection - if space_projection is not None and not hasattr(space_projection, "__call__"): - raise TypeError( - "`space_projection` should be a `Callable`," - + f"not a {type(space_projection)}" - ) - - super().__init__(get_weights, space_projection) From 0c141bb6e64d751939ca7871afb5e633addaee4c Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:18:22 +0100 Subject: [PATCH 025/138] projections tests: adapt to changes and complete --- tests/example_based/test_projections.py | 133 +++++++++++++++++++ tests/example_based/test_split_projection.py | 85 ------------ 2 files changed, 133 insertions(+), 85 deletions(-) create mode 100644 tests/example_based/test_projections.py delete mode 100644 tests/example_based/test_split_projection.py diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py new file mode 100644 index 00000000..8fe8b28f --- /dev/null +++ b/tests/example_based/test_projections.py @@ -0,0 +1,133 @@ +import numpy as np +import tensorflow as tf +from tensorflow.keras.layers import ( + Dense, + Conv2D, + Activation, + Dropout, + Flatten, + MaxPooling2D, + Input, +) + +from xplique.attributions import Saliency +from xplique.example_based.projections import Projection, AttributionProjection, LatentSpaceProjection +from xplique.example_based.projections.commons import model_splitting +from ..utils import generate_data, almost_equal + +def get_setup(input_shape, nb_samples=10, nb_labels=2): + """ + Generate data and model for SimilarExamples + """ + # Data generation + x_train = tf.stack( + [i * tf.ones(input_shape, tf.float32) for i in range(nb_samples)] + ) + x_test = x_train[1:-1] + y_train = tf.one_hot(tf.range(len(x_train)) % nb_labels, nb_labels) + + return x_train, x_test, y_train + + +def _generate_model(input_shape=(32, 32, 3), output_shape=2): + model = tf.keras.Sequential() + model.add(Input(shape=input_shape)) + model.add(Conv2D(4, kernel_size=(2, 2), activation="relu", name="conv2d_1")) + model.add(Conv2D(4, kernel_size=(2, 2), activation="relu", name="conv2d_2")) + 
model.add(MaxPooling2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + model.add(Flatten()) + model.add(Dense(output_shape, name="dense")) + model.add(Activation("softmax", name="softmax")) + model.compile(loss="categorical_crossentropy", optimizer="sgd") + + return model + + +def test_model_splitting_latent_layer(): + """We should target the right layer using either int, string or default procedure""" + tf.keras.backend.clear_session() + + model = _generate_model() + + first_conv_layer = model.get_layer("conv2d_1") + last_conv_layer = model.get_layer("conv2d_2") + flatten_layer = model.get_layer("flatten") + + # last_conv should be recognized + _, _, latent_layer = model_splitting(model, latent_layer="last_conv", return_layer=True) + assert latent_layer == last_conv_layer + + # target the first conv layer + _, _, latent_layer = model_splitting(model, latent_layer=0, return_layer=True) + assert latent_layer == first_conv_layer + + # target a random flatten layer + _, _, latent_layer = model_splitting(model, latent_layer="flatten", return_layer=True) + assert latent_layer == flatten_layer + + +def test_simple_projection_mapping(): + """ + Test if a simple projection can be mapped. + """ + # Setup + input_shape = (7, 7, 3) + nb_samples = 10 + nb_labels = 2 + x_train, _, y_train = get_setup(input_shape, nb_samples=nb_samples, nb_labels=nb_labels) + + weights = tf.random.uniform((input_shape[0], input_shape[1], 1), minval=0, maxval=1) + + space_projection = lambda x, y=None: tf.nn.max_pool2d(x, ksize=3, strides=1, padding="SAME") + + projection = Projection(get_weights=weights, space_projection=space_projection) + + # Generate tf.data.Dataset from numpy + train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(3) + + # Apply the projection by mapping the dataset + projected_train_dataset = projection.project_dataset(train_dataset) + + +def test_latent_space_projection_mapping(): + """ + Test if the latent space projection can be mapped. + """ + # Setup + input_shape = (7, 7, 3) + nb_samples = 10 + nb_labels = 2 + x_train, _, y_train = get_setup(input_shape, nb_samples=nb_samples, nb_labels=nb_labels) + + model = _generate_model(input_shape=input_shape, output_shape=nb_labels) + + projection = LatentSpaceProjection(model, "last_conv") + + # Generate tf.data.Dataset from numpy + train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(3) + + # Apply the projection by mapping the dataset + projected_train_dataset = projection.project_dataset(train_dataset) + + +def test_attribution_projection_mapping(): + """ + Test if the attribution projection can be mapped. 
+ """ + # Setup + input_shape = (7, 7, 3) + nb_samples = 10 + nb_labels = 2 + x_train, _, y_train = get_setup(input_shape, nb_samples=nb_samples, nb_labels=nb_labels) + + model = _generate_model(input_shape=input_shape, output_shape=nb_labels) + + projection = AttributionProjection(model, method=Saliency, latent_layer="last_conv") + + # Generate tf.data.Dataset from numpy + train_dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(3) + targets_dataset = tf.data.Dataset.from_tensor_slices(y_train).batch(3) + + # Apply the projection by mapping the dataset + projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset) \ No newline at end of file diff --git a/tests/example_based/test_split_projection.py b/tests/example_based/test_split_projection.py deleted file mode 100644 index db3105d1..00000000 --- a/tests/example_based/test_split_projection.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import tensorflow as tf -from tensorflow.keras.layers import ( - Dense, - Conv2D, - Activation, - Dropout, - Flatten, - MaxPooling2D, - Input, -) - -from xplique.example_based.projections import AttributionProjection -from xplique.example_based.projections import LatentSpaceProjection -from ..utils import generate_data, almost_equal - - -def _generate_model(input_shape=(32, 32, 3), output_shape=10): - model = tf.keras.Sequential() - model.add(Input(shape=input_shape)) - model.add(Conv2D(4, kernel_size=(2, 2), activation="relu", name="conv2d_1")) - model.add(Conv2D(4, kernel_size=(2, 2), activation="relu", name="conv2d_2")) - model.add(MaxPooling2D(pool_size=(2, 2))) - model.add(Dropout(0.25)) - model.add(Flatten()) - model.add(Dense(output_shape, name="dense")) - model.add(Activation("softmax", name="softmax")) - model.compile(loss="categorical_crossentropy", optimizer="sgd") - - return model - - -def test_attribution_latent_layer(): - """We should target the right layer using either int, string or default procedure""" - tf.keras.backend.clear_session() - - model = _generate_model() - - first_conv_layer = model.get_layer("conv2d_1") - last_conv_layer = model.get_layer("conv2d_2") - flatten_layer = model.get_layer("flatten") - - # default should not include model splitting - projection_default = AttributionProjection(model) - assert projection_default.latent_layer is None - - # last_conv should be recognized - projection_default = AttributionProjection(model, latent_layer="last_conv") - assert projection_default.latent_layer == last_conv_layer - - # target the first conv layer - projection_default = AttributionProjection(model, latent_layer=0) - assert projection_default.latent_layer == first_conv_layer - - # target a random flatten layer - projection_default = AttributionProjection(model, latent_layer="flatten") - assert projection_default.latent_layer == flatten_layer - - -def test_latent_space_latent_layer(): - """We should target the right layer using either int, string or default procedure""" - tf.keras.backend.clear_session() - - model = _generate_model() - - first_conv_layer = model.get_layer("conv2d_1") - last_conv_layer = model.get_layer("conv2d_2") - flatten_layer = model.get_layer("flatten") - last_layer = model.get_layer("softmax") - - # default should not include model spliting - projection_default = LatentSpaceProjection(model) - assert projection_default.latent_layer == last_layer - - # last_conv should be recognized - projection_default = LatentSpaceProjection(model, latent_layer="last_conv") - assert projection_default.latent_layer == last_conv_layer 
- - # target the first conv layer - projection_default = LatentSpaceProjection(model, latent_layer=0) - assert projection_default.latent_layer == first_conv_layer - - # target a random flatten layer - projection_default = LatentSpaceProjection(model, latent_layer="flatten") - assert projection_default.latent_layer == flatten_layer From 56ddd00cc35cf6a6ccc9c5872dae69d9572a60b4 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:18:47 +0100 Subject: [PATCH 026/138] similar examples tests: adapt to changes --- tests/example_based/test_similar_examples.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py index 3e6c0401..2ec371d3 100644 --- a/tests/example_based/test_similar_examples.py +++ b/tests/example_based/test_similar_examples.py @@ -16,7 +16,7 @@ from xplique.types import Union from xplique.example_based import SimilarExamples -from xplique.example_based.projections import CustomProjection +from xplique.example_based.projections import Projection, LatentSpaceProjection from xplique.example_based.search_methods import KNN from tests.utils import almost_equal @@ -40,9 +40,9 @@ def test_similar_examples_input_datasets_management(): """ Test management of dataset init inputs """ - proj = CustomProjection(space_projection=lambda inputs, targets=None: inputs) + proj = Projection(space_projection=lambda inputs, targets=None: inputs) - tf_tensor = tf.reshape(tf.range(90), (10, 3, 3)) + tf_tensor = tf.reshape(tf.range(90, dtype=tf.float32), (10, 3, 3)) np_array = np.array(tf_tensor) tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) too_short_np_array = np_array[:3] @@ -140,7 +140,7 @@ def test_similar_examples_basic(): k = 3 x_train, x_test, _ = get_setup(input_shape) - identity_projection = CustomProjection( + identity_projection = Projection( space_projection=lambda inputs, targets=None: inputs ) @@ -184,7 +184,7 @@ def test_similar_examples_return_multiple_elements(): nb_samples_test = len(x_test) assert nb_samples_test + 2 == len(y_train) - identity_projection = CustomProjection( + identity_projection = Projection( space_projection=lambda inputs, targets=None: inputs ) @@ -266,7 +266,7 @@ def test_similar_examples_weighting(): noise = np.random.uniform(size=x_train.shape, low=-100, high=100) x_train = np.float32(weights * np.array(x_train) + (1 - weights) * noise) - weighting_function = CustomProjection(weights=weights) + weighting_function = Projection(get_weights=weights) method = SimilarExamples( cases_dataset=x_train, @@ -286,6 +286,9 @@ def test_similar_examples_weighting(): assert examples.shape == (nb_samples_test, k) + input_shape for i in range(nb_samples_test): + print(i) + print(examples[i, 0]) + print(x_train[i + 1]) # test examples: assert almost_equal(examples[i, 0], x_train[i + 1]) assert almost_equal(examples[i, 1], x_train[i + 2]) or almost_equal( From c44d256d5486664db178f21bcdc7b0793ab39dad Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Feb 2024 16:19:14 +0100 Subject: [PATCH 027/138] cole tests: adapt to changes and add hadamard --- tests/example_based/test_cole.py | 74 ++++++++++++++++++++++++++------ 1 file changed, 62 insertions(+), 12 deletions(-) diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index 9d8c63a0..a9dc1afe 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -7,18 +7,13 @@ sys.path.append(os.getcwd()) -from math import prod, 
sqrt - import numpy as np -import scipy import tensorflow as tf +from xplique.commons.operators_operations import gradients_predictions from xplique.attributions import Occlusion, Saliency - from xplique.example_based import Cole, SimilarExamples -from xplique.example_based.projections import CustomProjection -from xplique.example_based.search_methods import KNN -from xplique.types import Union +from xplique.example_based.projections import Projection from tests.utils import ( generate_data, @@ -38,11 +33,12 @@ def get_setup(input_shape, nb_samples=10, nb_labels=10): ) x_test = x_train[1:-1] y_train = tf.one_hot(tf.range(len(x_train)) % nb_labels, depth=nb_labels) + y_test = y_train[1:-1] # Model generation model = generate_model(input_shape, nb_labels) - return model, x_train, x_test, y_train + return model, x_train, x_test, y_train, y_test def test_cole_attribution(): @@ -81,8 +77,12 @@ def test_cole_attribution(): attribution_method=Saliency, ) - # Cole with attribution explain - projection = CustomProjection(weights=Saliency(model)) + # Cole with attribution explain batch gradient is overwritten for test purpose, do not copy! + explainer = Saliency(model) + explainer.batch_gradient = \ + lambda model, inputs, targets, batch_size:\ + explainer.gradient(model, inputs, targets) + projection = Projection(get_weights=explainer) euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z))) method_call = SimilarExamples( @@ -128,7 +128,57 @@ def test_cole_attribution(): ) -def test_cole_spliting(): +def test_cole_hadamard(): + """ + Test Cole with Hadamard projection. + It should be the same as a manual projection. + """ + # Setup + input_shape = (7, 7, 3) + nb_samples = 10 + nb_labels = 2 + k = 3 + model, x_train, x_test, y_train, y_test =\ + get_setup(input_shape, nb_samples=nb_samples, nb_labels=nb_labels) + + # Cole with Hadamard projection constructor + method_constructor = Cole( + cases_dataset=x_train, + targets_dataset=y_train, + k=k, + batch_size=7, + distance="euclidean", + model=model, + projection_method="gradient", + ) + + # Cole with Hadamard projection explain batch gradient is overwritten for test purpose, do not copy! + weights_extraction = lambda inputs, targets: gradients_predictions(model, inputs, targets) + projection = Projection(get_weights=weights_extraction) + + euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z))) + method_call = SimilarExamples( + cases_dataset=x_train, + targets_dataset=y_train, + k=k, + distance=euclidean_dist, + projection=projection, + ) + + # Generate explanation + examples_constructor = method_constructor.explain(x_test, y_test) + examples_call = method_call.explain(x_test, y_test) + + # Verifications + # Shape should be (n, k, h, w, c) + assert examples_constructor.shape == (len(x_test), k) + input_shape + assert examples_call.shape == (len(x_test), k) + input_shape + + # both methods should be the same + assert almost_equal(examples_constructor, examples_call) + + +def test_cole_splitting(): """ Test Cole with a `latent_layer` provided. It should split the model. 
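The Hadamard variant exercised above boils down to weighting the (possibly latent) features by the gradient of the prediction before measuring distances. A minimal self-contained sketch of that product, assuming one-hot `targets`; this illustrates the idea and is not the library's `HadamardProjection` code:

```
import tensorflow as tf

def hadamard_project(model, inputs, targets):
    # gradient of the predicted score with respect to the inputs...
    inputs = tf.convert_to_tensor(inputs, dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(inputs)
        scores = tf.reduce_sum(model(inputs) * targets, axis=-1)
    grads = tape.gradient(scores, inputs)
    # ...then the element-wise (Hadamard) product with the features themselves
    return grads * inputs
```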
@@ -175,4 +225,4 @@ def test_cole_spliting(): # test_cole_attribution() -# test_cole_spliting() +# test_cole_splitting() From 445fce18cbb20f80ebffa2c680595928fd179605 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 15 Feb 2024 15:03:50 +0100 Subject: [PATCH 028/138] tf operations: add get device for dataset mapping --- xplique/commons/__init__.py | 2 +- xplique/commons/tf_operations.py | 25 +++++++++++++++++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/xplique/commons/__init__.py b/xplique/commons/__init__.py index c5312a2e..6153c01a 100644 --- a/xplique/commons/__init__.py +++ b/xplique/commons/__init__.py @@ -5,7 +5,7 @@ from .data_conversion import tensor_sanitize, numpy_sanitize, sanitize_inputs_targets from .model_override import guided_relu_policy, deconv_relu_policy, override_relu_gradient, \ find_layer, open_relu_policy -from .tf_operations import repeat_labels, batch_tensor +from .tf_operations import repeat_labels, batch_tensor, get_device from .callable_operations import predictions_one_hot_callable from .operators_operations import (Tasks, get_operator, check_operator, operator_batching, get_inference_function, get_gradient_functions) diff --git a/xplique/commons/tf_operations.py b/xplique/commons/tf_operations.py index 1d6e5fae..3831b41f 100644 --- a/xplique/commons/tf_operations.py +++ b/xplique/commons/tf_operations.py @@ -54,3 +54,28 @@ def batch_tensor(tensors: Union[Tuple, tf.Tensor], dataset = dataset.batch(batch_size) return dataset + + +def get_device(device: Optional[str] = None) -> str: + """ + Gets the name of the device to use. If there are any available GPUs, it will use the first one + in the system, otherwise, it will use the CPU. + + Parameters + ---------- + device + A string specifying the device on which to run the computations. If None, it will search + for available GPUs, and if none are found, it will return the first CPU. + + Returns + ------- + device + A string with the name of the device on which to run the computations. 
+ """ + if device is not None: + return device + + physical_devices = tf.config.list_physical_devices('GPU') + if physical_devices is None or len(physical_devices) == 0: + return 'cpu:0' + return 'GPU:0' From 7cfa2f391ed0a5a78f885cc5862b470ae28816be Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Wed, 6 Mar 2024 17:56:49 +0100 Subject: [PATCH 029/138] feat: add a new KNN object and improve distance computation efficiency --- xplique/example_based/base_example_method.py | 2 +- .../example_based/search_methods/__init__.py | 4 +- xplique/example_based/search_methods/base.py | 16 +- xplique/example_based/search_methods/knn.py | 313 +++++++++++++++--- 4 files changed, 279 insertions(+), 56 deletions(-) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 2c4b99df..9e3facf2 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -290,7 +290,7 @@ def explain( projected_inputs = self.projection(inputs, targets) # look for closest elements to projected inputs - search_output = self.search_method(projected_inputs) + search_output = self.search_method(projected_inputs, targets) # manage returned elements return self.format_search_output(search_output, inputs, targets) diff --git a/xplique/example_based/search_methods/__init__.py b/xplique/example_based/search_methods/__init__.py index 228e1acd..010b7cb3 100644 --- a/xplique/example_based/search_methods/__init__.py +++ b/xplique/example_based/search_methods/__init__.py @@ -2,7 +2,7 @@ Search methods """ -from .base import BaseSearchMethod +from .base import BaseSearchMethod, ORDER # from .sklearn_knn import SklearnKNN -from .knn import KNN +from .knn import BaseKNN, KNN, FilterKNN diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 1c7c0f1b..303575c3 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -1,7 +1,7 @@ """ Base search method for example-based module """ - +from enum import Enum from abc import ABC, abstractmethod import tensorflow as tf @@ -11,6 +11,14 @@ from ...commons import sanitize_dataset +class ORDER(Enum): + """ + Enumeration for the two types of ordering for the sorting function. + ASCENDING puts the elements with the smallest value first. + DESCENDING puts the elements with the largest value first. + """ + ASCENDING = 1 + DESCENDING = 2 def _sanitize_returns(returns: Optional[Union[List[str], str]] = None, possibilities: List[str] = None, @@ -133,7 +141,7 @@ def set_returns(self, returns: Optional[Union[List[str], str]] = None): @abstractmethod - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]): + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): """ Search the samples to return as examples. Called by the explain methods. 
It may also return the indices corresponding to the samples,
@@ -147,6 +155,6 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]):
         """
         raise NotImplementedError()
 
-    def __call__(self, inputs: Union[tf.Tensor, np.ndarray]):
+    def __call__(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None):
         """find_samples alias"""
-        return self.find_examples(inputs)
+        return self.find_examples(inputs, targets)
diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py
index 8530f4a6..9b0f228b 100644
--- a/xplique/example_based/search_methods/knn.py
+++ b/xplique/example_based/search_methods/knn.py
@@ -1,18 +1,102 @@
 """
 KNN online search method in example-based module
 """
+import math
+from abc import abstractmethod
 
 import numpy as np
 import tensorflow as tf
 
-from ...commons import dataset_gather
+from ...commons import dataset_gather, sanitize_dataset
 from ...types import Callable, List, Union, Optional, Tuple
 
-from .base import BaseSearchMethod
-from ..projections import Projection
+from .base import BaseSearchMethod, ORDER
 
+class BaseKNN(BaseSearchMethod):
+    """
+    Base class for the KNN-based search methods. It gathers what the variants share:
+    the ordering of the neighbors (ascending or descending distances) and the
+    `find_examples()` logic, while the neighbors computation is left to `kneighbors()`.
+    """
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        k: int = 1,
+        search_returns: Optional[Union[List[str], str]] = None,
+        batch_size: Optional[int] = 32,
+        order: ORDER = ORDER.ASCENDING
+    ):
+        super().__init__(
+            cases_dataset, k, search_returns, batch_size
+        )
+
+        assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}"
+        self.order = order
+        # fill value used for yet-unseen neighbors, so that any real distance replaces it
+        self.fill_value = np.inf if self.order == ORDER.ASCENDING else -np.inf
+
+    @abstractmethod
+    def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> Tuple[tf.Tensor, tf.Tensor]:
+        """
+        Compute the k-nearest neighbors to each tensor of `inputs` in `self.cases_dataset`.
+        Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples on which knn are computed.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Tensor or Array. Targets associated with the inputs,
+            which subclasses may use to restrict the search.
+
+        Returns
+        -------
+        best_distances
+            Tensor of distances between the knn and the inputs with dimension (n, k).
+            Each of the n inputs is associated with its k nearest neighbors.
+        best_indices
+            Tensor of indices of the knn in `self.cases_dataset` with dimension (n, k, 2).
+            Here, n represents the number of inputs and k the number of corresponding examples.
+            The index of each element is encoded by two values,
+            the batch index and the index of the element in the batch.
+            Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`.
+        """
+        raise NotImplementedError

+    def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None):
+        """
+        Search the samples to return as examples. Called by the explain methods.
+        It may also return the indices corresponding to the samples,
+        based on `return_indices` value.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Assumed to have been already projected.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
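+        targets
+            Tensor or Array. Associated targets, forwarded to `kneighbors()`.
+
+        Returns
+        -------
+        return_dict
+            Dictionary with the elements among "examples", "indices" and
+            "distances" requested through `self.returns`.
+            If a single element is requested, it is returned directly.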
+        """
+        # compute neighbors
+        examples_distances, examples_indices = self.kneighbors(inputs, targets)
+
+        # Set values in return dict
+        return_dict = {}
+        if "examples" in self.returns:
+            return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices)
+        if "include_inputs" in self.returns:
+            inputs = tf.expand_dims(inputs, axis=1)
+            return_dict["examples"] = tf.concat(
+                [inputs, return_dict["examples"]], axis=1
+            )
+        if "indices" in self.returns:
+            return_dict["indices"] = examples_indices
+        if "distances" in self.returns:
+            return_dict["distances"] = examples_distances
+
+        # if a single element is requested, return it directly instead of a dict
+        if len(return_dict) == 1:
+            return list(return_dict.values())[0]
+        return return_dict
+
+class KNN(BaseKNN):
     """
     KNN method to search examples. The search is performed batch-wise over the
     cases dataset, entirely in TensorFlow.
@@ -36,7 +120,6 @@ class KNN(BaseSearchMethod):
     "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
     yielding the corresponding p-norm."
     """
-
     def __init__(
         self,
         cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
@@ -44,9 +127,10 @@ def __init__(
         search_returns: Optional[Union[List[str], str]] = None,
         batch_size: Optional[int] = 32,
         distance: Union[int, str, Callable] = "euclidean",
+        order: ORDER = ORDER.ASCENDING
    ):  # pylint: disable=R0801
        super().__init__(
-            cases_dataset, k, search_returns, batch_size
+            cases_dataset, k, search_returns, batch_size, order
        )
 
        if hasattr(distance, "__call__"):
@@ -54,27 +138,32 @@ def __init__(
        elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance(
            distance, int
        ):
-            self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance)
+            self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance, axis=-1)
        else:
            raise AttributeError(
                "The distance parameter is expected to be either a Callable or in"
-                + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ",
-                +f"but {distance} was received.",
+                + " ['fro', 'euclidean', 1, 2, np.inf] "
                +f"but {type(distance)} was received."
            )
 
-        self.distance_fn_over_all_x2 = lambda x1, x2: tf.map_fn(
-            fn=lambda x2: self.distance_fn(x1, x2),
-            elems=x2,
-        )
-
-        # Computes crossed distances between two tensors x1(shape=(n1, ...)) and x2(shape=(n2, ...))
-        # The result is a distance matrix of size (n1, n2)
-        self.crossed_distances_fn = lambda x1, x2: tf.vectorized_map(
-            fn=lambda a1: self.distance_fn_over_all_x2(a1, x2),
-            elems=x1
-        )
+    @tf.function
+    def _crossed_distances_fn(self, x1, x2):
+        # compute the (n, m) matrix of distances between x1 (n, ...) and x2 (m, ...)
+        n = x1.shape[0]
+        m = x2.shape[0]
+        x2 = tf.expand_dims(x2, axis=0)
+        x2 = tf.repeat(x2, n, axis=0)
+        # reshape for broadcasting
+        x1 = tf.reshape(x1, (n, 1, -1))
+        x2 = tf.reshape(x2, (n, m, -1))
+        def compute_distance(args):
+            a, b = args
+            return self.distance_fn(a, b)
+        args = (x1, x2)
+        # use vectorized_map to apply compute_distance element-wise
+        distances = tf.vectorized_map(compute_distance, args)
+        return distances
 
-    def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]:
+    def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Compute the k-nearest neighbors to each tensor of `inputs` in `self.cases_dataset`.
        Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches.
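+        The k best distances seen so far are updated after each batch, so the
+        whole cases dataset never needs to fit in memory at once.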
@@ -104,12 +193,13 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, t
         # (n, k, 2)
         best_indices = tf.Variable(tf.fill((nb_inputs, self.k, 2), -1))
         # (n, k)
-        best_distances = tf.Variable(tf.fill((nb_inputs, self.k), np.inf))
+        best_distances = tf.Variable(tf.fill((nb_inputs, self.k), self.fill_value))
         # (n, bs)
         batch_indices = tf.expand_dims(tf.range(self.batch_size, dtype=tf.int32), axis=0)
         batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1))
 
         # iterate on batches
+        # for batch_index, (cases, cases_targets) in enumerate(zip(self.cases_dataset, self.targets_dataset)):
         for batch_index, cases in enumerate(self.cases_dataset):
             # add new elements
             # (n, current_bs, 2)
@@ -120,7 +210,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, t
 
             # compute distances
             # (n, current_bs)
-            distances = self.crossed_distances_fn(inputs, cases)
+            distances = self._crossed_distances_fn(inputs, cases)
 
             # (n, k+current_bs, 2)
             concatenated_indices = tf.concat([best_indices, new_indices], axis=1)
@@ -130,7 +220,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, t
             # sort all
             # (n, k)
             sort_order = tf.argsort(
-                concatenated_distances, axis=1, direction="ASCENDING"
+                concatenated_distances, axis=1, direction=self.order.name.upper()
             )[:, : self.k]
 
             best_indices.assign(
@@ -142,37 +232,162 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, t
 
         return best_distances, best_indices
 
-    def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]):
+class FilterKNN(BaseKNN):
+    """
+    KNN method to search examples, with a filter on the pairs of inputs and cases
+    considered. As for `KNN`, the search is performed batch-wise in TensorFlow.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from the dataset.
+        For natural example-based methods it is the train dataset.
+    targets_dataset
+        Targets associated with the `cases_dataset`, provided to `filter_fn`.
+    k
+        The number of examples to retrieve.
+    search_returns
+        String or list of string with the elements to return in `self.find_examples()`.
+        See `self.set_returns()` for detail.
+    batch_size
+        Number of samples treated simultaneously.
+        It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`.
+    distance
+        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
+        Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) says:
+        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
+        yielding the corresponding p-norm."
+    order
+        Either `ORDER.ASCENDING` (closest cases first) or `ORDER.DESCENDING` (farthest first).
+    filter_fn
+        A Callable that takes as inputs the inputs, the cases, the targets of the inputs,
+        and the targets of the cases, and returns a boolean mask of shape (n, m),
+        where n is the number of inputs and m the number of cases.
+        This boolean mask selects the pairs of inputs and cases for which the distance is computed.
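+
+        A possible `filter_fn`, here restricting the search to cases sharing the
+        predicted label of the input (an illustrative sketch):
+
+        >>> def same_label_filter(inputs, cases, targets, cases_targets):
+        ...     input_labels = tf.argmax(targets, axis=-1)       # (n,)
+        ...     case_labels = tf.argmax(cases_targets, axis=-1)  # (m,)
+        ...     # (n, m) boolean mask, True where the labels match
+        ...     return tf.equal(tf.expand_dims(input_labels, axis=1), case_labels)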
+    """
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        k: int = 1,
+        filter_fn: Optional[Callable] = None,
+        search_returns: Optional[Union[List[str], str]] = None,
+        batch_size: Optional[int] = 32,
+        distance: Union[int, str, Callable] = "euclidean",
+        order: ORDER = ORDER.ASCENDING
+    ):  # pylint: disable=R0801
+        super().__init__(
+            cases_dataset, k, search_returns, batch_size, order
+        )
+
+        if hasattr(distance, "__call__"):
+            self.distance_fn = distance
+        elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance(
+            distance, int
+        ):
+            # masked-out pairs get the fill value so they cannot be selected
+            self.distance_fn = lambda x1, x2, m: tf.where(m, tf.norm(x1 - x2, ord=distance, axis=-1), self.fill_value)
+        else:
+            raise AttributeError(
+                "The distance parameter is expected to be either a Callable or in"
+                + " ['fro', 'euclidean', 1, 2, np.inf] "
                +f"but {type(distance)} was received."
+            )
+
+        # set targets_dataset
+        if targets_dataset is not None:
+            batch_size = min(batch_size, len(cases_dataset))
+            cardinality = math.ceil(len(cases_dataset) / batch_size)
+            self.targets_dataset = sanitize_dataset(
+                targets_dataset, batch_size, cardinality
+            )
+        else:
+            self.targets_dataset = [None]*len(cases_dataset)
+
+        # TODO: Assertion on the function signature
+        if filter_fn is None:
+            # default filter: all pairs of inputs and cases are considered
+            filter_fn = lambda x, z, y, t: tf.ones((tf.shape(x)[0], tf.shape(z)[0]), dtype=tf.bool)
+        self.filter_fn = filter_fn
+
+    @tf.function
+    def _crossed_distances_fn(self, x1, x2, mask):
+        # compute the (n, m) matrix of masked distances between x1 (n, ...) and x2 (m, ...)
+        n = x1.shape[0]
+        m = x2.shape[0]
+        x2 = tf.expand_dims(x2, axis=0)
+        x2 = tf.repeat(x2, n, axis=0)
+        # reshape for broadcasting
+        x1 = tf.reshape(x1, (n, 1, -1))
+        x2 = tf.reshape(x2, (n, m, -1))
+        def compute_distance(args):
+            a, b, mask = args
+            return self.distance_fn(a, b, mask)
+        args = (x1, x2, mask)
+        # use vectorized_map to apply compute_distance element-wise
+        distances = tf.vectorized_map(compute_distance, args)
+        return distances
+
+    def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> Tuple[tf.Tensor, tf.Tensor]:
        """
-        Search the samples to return as examples. Called by the explain methods.
-        It may also return the indices corresponding to the samples,
-        based on `return_indices` value.
+        Compute the k-nearest neighbors to each tensor of `inputs` in `self.cases_dataset`.
+        Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches.
 
        Parameters
        ----------
        inputs
-            Tensor or Array. Input samples to be explained.
-            Assumed to have been already projected.
+            Tensor or Array. Input samples on which knn are computed.
            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+            More information in the documentation.
+        targets
+            Tensor or Array. Targets of the inputs, passed together with the
+            targets of the cases to `self.filter_fn` to build the mask.
+
+        Returns
+        -------
+        best_distances
+            Tensor of distances between the knn and the inputs with dimension (n, k).
+            Each of the n inputs is associated with its k nearest neighbors.
+        best_indices
+            Tensor of indices of the knn in `self.cases_dataset` with dimension (n, k, 2).
+            Here, n represents the number of inputs and k the number of corresponding examples.
+            The index of each element is encoded by two values,
+            the batch index and the index of the element in the batch.
+            Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`.
""" - # compute neighbors - examples_distances, examples_indices = self.kneighbors(inputs) + nb_inputs = tf.shape(inputs)[0] - # Set values in return dict - return_dict = {} - if "examples" in self.returns: - return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices) - if "include_inputs" in self.returns: - inputs = tf.expand_dims(inputs, axis=1) - return_dict["examples"] = tf.concat( - [inputs, return_dict["examples"]], axis=1 - ) - if "indices" in self.returns: - return_dict["indices"] = examples_indices - if "distances" in self.returns: - return_dict["distances"] = examples_distances + # initialiaze + # (n, k, 2) + best_indices = tf.Variable(tf.fill((nb_inputs, self.k, 2), -1)) + # (n, k) + best_distances = tf.Variable(tf.fill((nb_inputs, self.k), self.fill_value)) + # (n, bs) + batch_indices = tf.expand_dims(tf.range(self.batch_size, dtype=tf.int32), axis=0) + batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1)) - # Return a dict only different variables are returned - if len(return_dict) == 1: - return list(return_dict.values())[0] - return return_dict + # iterate on batches + for batch_index, (cases, cases_targets) in enumerate(zip(self.cases_dataset, self.targets_dataset)): + # add new elements + # (n, current_bs, 2) + indices = batch_indices[:, : tf.shape(cases)[0]] + new_indices = tf.stack( + [tf.fill(indices.shape, tf.cast(batch_index, tf.int32)), indices], axis=-1 + ) + + # get filter masks + # (n, current_bs) + filter_mask = self.filter_fn(inputs, cases, targets, cases_targets) + + # compute distances + # (n, current_bs) + distances = self._crossed_distances_fn(inputs, cases, mask=filter_mask) + + # (n, k+curent_bs, 2) + concatenated_indices = tf.concat([best_indices, new_indices], axis=1) + # (n, k+curent_bs) + concatenated_distances = tf.concat([best_distances, distances], axis=1) + + # sort all + # (n, k) + sort_order = tf.argsort( + concatenated_distances, axis=1, direction=self.order.name.upper() + )[:, : self.k] + + best_indices.assign( + tf.gather(concatenated_indices, sort_order, axis=1, batch_dims=1) + ) + best_distances.assign( + tf.gather(concatenated_distances, sort_order, axis=1, batch_dims=1) + ) + + return best_distances, best_indices \ No newline at end of file From 52d1c6a7641379f636569b6bca0d2bce8be4da6a Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Wed, 6 Mar 2024 17:57:32 +0100 Subject: [PATCH 030/138] tests: add tests for KNNs --- tests/example_based/test_knn.py | 505 ++++++++++++++++++++++++++++++++ 1 file changed, 505 insertions(+) create mode 100644 tests/example_based/test_knn.py diff --git a/tests/example_based/test_knn.py b/tests/example_based/test_knn.py new file mode 100644 index 00000000..e1d43b08 --- /dev/null +++ b/tests/example_based/test_knn.py @@ -0,0 +1,505 @@ +""" +Test the different search methods. 
+""" +import pytest +import numpy as np +import tensorflow as tf + +from xplique.example_based.search_methods import BaseKNN, KNN, FilterKNN, ORDER + +def get_setup(input_shape, nb_samples=10, nb_labels=10): + """ + Generate data and model for SimilarExamples + """ + # Data generation + x_train = tf.stack( + [i * tf.ones(input_shape, tf.float32) for i in range(nb_samples)] + ) + x_test = x_train[1:-1] + y_train = tf.range(len(x_train), dtype=tf.float32) % nb_labels + + return x_train, x_test, y_train + +class MockKNN(BaseKNN): + """ + Mock KNN class for testing the find_examples method + """ + def kneighbors(self, inputs, targets): + """ + Define a mock kneighbors method for testing the find_examples method of + the base class. + """ + best_distances = tf.random.normal((inputs.shape[0], self.k), dtype=tf.float32) + best_indices= tf.random.uniform((inputs.shape[0], self.k, 2), maxval=self.k, dtype=tf.int32) + return best_distances, best_indices + +def same_target_filter(inputs, cases, targets, cases_targets): + """ + Filter function that returns a boolean mask with true when point-wise inputs and cases + have the same target. + """ + # get the labels predicted by the model + # (n, ) + predicted_labels = tf.argmax(targets, axis=-1) + + # for each input, if the target label is the same as the predicted label + # the mask as a True value and False otherwise + label_targets = tf.argmax(cases_targets, axis=-1) # (bs,) + mask = tf.equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) + return mask + +def test_base_init(): + """ + Test the initialization of the base KNN class (not the super). + Check if it raises the relevant errors when the input is invalid. + """ + base_knn = MockKNN( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]), + k=2, + search_returns='distances', + ) + assert base_knn.order == ORDER.ASCENDING + assert base_knn.fill_value == np.inf + + # Test with reverse order + order = ORDER.DESCENDING + base_knn = MockKNN( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]), + k=2, + search_returns='distances', + order=order + ) + assert base_knn.order == order + assert base_knn.fill_value == -np.inf + + # Test with invalid order + with pytest.raises(AssertionError): + base_knn = MockKNN( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]), + k=2, + search_returns='distances', + order='invalid' + ) + +def test_base_find_examples(): + """ + Test the find_examples method of the base KNN class. 
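+    It relies on `MockKNN`, whose `kneighbors` returns random distances and
+    indices with the expected shapes.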
+    """
+    returns = ["examples", "indices", "distances"]
+    mock_knn = MockKNN(
+        tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]], dtype=tf.float32),
+        k=2,
+        search_returns=returns,
+    )
+
+    inputs = tf.random.normal((5, 3), dtype=tf.float32)
+    return_dict = mock_knn.find_examples(inputs)
+    assert set(return_dict.keys()) == set(returns)
+    assert return_dict["examples"].shape == (5, 2, 3)
+    assert return_dict["indices"].shape == (5, 2, 2)
+    assert return_dict["distances"].shape == (5, 2)
+
+    returns = ["examples", "include_inputs"]
+    mock_knn = MockKNN(
+        tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]], dtype=tf.float32),
+        k=2,
+        search_returns=returns,
+    )
+    return_dict = mock_knn.find_examples(inputs)
+    assert return_dict.shape == (5, 3, 3)
+
+    mock_knn = MockKNN(
+        tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]], dtype=tf.float32),
+        k=2,
+    )
+    return_dict = mock_knn.find_examples(inputs)
+    assert return_dict.shape == (5, 2, 3)
+
+def test_knn_init():
+    """
+    Test the parts of the KNN initialization that are not handled by the super class.
+    """
+    cases_dataset = tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]], dtype=tf.float32)
+    x1 = tf.random.normal((1, 3), dtype=tf.float32)
+    x2 = tf.random.normal((3, 3), dtype=tf.float32)
+
+    # Test with distances that are compatible with tf.norm
+    distances = ["euclidean", 1, 2, np.inf, 5]
+    for distance in distances:
+        knn = KNN(
+            cases_dataset,
+            k=2,
+            search_returns='distances',
+            distance=distance,
+        )
+        assert tf.reduce_all(tf.equal(knn.distance_fn(x1, x2), tf.norm(x1 - x2, ord=distance, axis=-1)))
+
+    # Test with a custom distance function
+    def custom_distance(x1, x2):
+        return tf.reduce_sum(tf.abs(x1 - x2), axis=-1)
+    knn = KNN(
+        cases_dataset,
+        k=2,
+        search_returns='distances',
+        distance=custom_distance,
+    )
+    assert tf.reduce_all(tf.equal(knn.distance_fn(x1, x2), custom_distance(x1, x2)))
+
+    # Test with invalid distance
+    invalid_distances = [None, "invalid", 0.5]
+    for distance in invalid_distances:
+        with pytest.raises(AttributeError):
+            knn = KNN(
+                cases_dataset,
+                k=2,
+                search_returns='distances',
+                distance=distance,
+            )
+
+def test_knn_compute_distances():
+    """
+    Test the private method `_crossed_distances_fn` of the KNN class.
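+    It is expected to return the (n, m) matrix of pairwise distances between
+    the n inputs and the m cases of a batch.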
+ """ + # Test with input and cases being 1D + knn = KNN( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]), + k=2, + distance='euclidean', + order=ORDER.ASCENDING + ) + x1 = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=tf.float32) + x2 = tf.constant([[7.0, 8.0], [9.0, 10.0]], dtype=tf.float32) + + expected_distance = tf.constant( + [ + [np.sqrt(72), np.sqrt(128)], + [np.sqrt(32), np.sqrt(72)], + [np.sqrt(8), np.sqrt(32)] + ], dtype=tf.float32 + ) + + distances = knn._crossed_distances_fn(x1, x2) + assert distances.shape == (x1.shape[0], x2.shape[0]) + assert tf.reduce_all(tf.equal(distances, expected_distance)) + + # Test with higher dimensions + data = np.array([ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]] + ]) + + knn = KNN( + data, + k=2, + distance="euclidean", + order=ORDER.ASCENDING + ) + + x1 = tf.constant( + [ + [[1, 2, 3],[4, 5, 6],[7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]] + ], dtype=tf.float32 + ) + + x2 = tf.constant( + [ + [[28, 29, 30], [31, 32, 33], [34, 35, 36]], + [[37, 38, 39], [40, 41, 42], [43, 44, 45]], + ], dtype=tf.float32 + ) + + expected_distance = tf.constant( + [[np.sqrt(9)*27, np.sqrt(9)*36], + [np.sqrt(9)*18, np.sqrt(9)*27], + [np.sqrt(9)*9, np.sqrt(9)*18]], dtype=tf.float32) + + distances = knn._crossed_distances_fn(x1, x2) + assert distances.shape == (x1.shape[0], x2.shape[0]) + assert tf.reduce_all(tf.equal(distances, expected_distance)) + + +def test_knn_kneighbors(): + """ + Test the kneighbors method of the KNN class. + """ + # Test with input and cases being 1D + cases = tf.constant([[1.], [2.], [3.], [4.], [5.]], dtype=tf.float32) + inputs = tf.constant([[1.5], [2.5], [4.5]], dtype=tf.float32) + knn = KNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + ) + + distances, indices = knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + + # Test with reverse order + knn = KNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + order=ORDER.DESCENDING + ) + + distances, indices = knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[3.5, 2.5], [2.5, 1.5], [3.5, 2.5]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + + # Test with input and cases being 2D + cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) + inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) + knn = KNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + ) + + distances, indices = knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + + # Test with reverse order + knn = KNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + order=ORDER.DESCENDING + ) + + distances, indices = 
knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + expected_distances = tf.constant([[np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)]], dtype=tf.float32) + assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + +def test_filter_knn_compute_distances(): + """ + Test the private method _compute_distances_fn of the FilterKNN class. + """ + # Test in Low dimension + knn = FilterKNN( + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]), + k=2, + distance='euclidean', + order=ORDER.ASCENDING + ) + x1 = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], dtype=tf.float32) + x2 = tf.constant([[7.0, 8.0], [9.0, 10.0]], dtype=tf.float32) + expected_distance = tf.constant( + [ + [np.sqrt(72), np.sqrt(128)], + [np.sqrt(32), np.sqrt(72)], + [np.sqrt(8), np.sqrt(32)] + ], dtype=tf.float32 + ) + mask = tf.ones((x1.shape[0], x2.shape[0]), dtype=tf.bool) + distances = knn._crossed_distances_fn(x1, x2, mask) + assert distances.shape == (x1.shape[0], x2.shape[0]) + assert tf.reduce_all(tf.equal(distances, expected_distance)) + + mask = tf.constant([[True, False], [False, True], [True, True]], dtype=tf.bool) + expected_distance = tf.constant([[np.sqrt(72), np.inf], [np.inf, np.sqrt(72)], [np.sqrt(8), np.sqrt(32)]], dtype=tf.float32) + distances = knn._crossed_distances_fn(x1, x2, mask) + assert tf.reduce_all(tf.equal(distances, expected_distance)) + + # Test with higher dimensions + data = np.array([ + [[1, 2, 3], [4, 5, 6], [7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]] + ]) + + knn = FilterKNN( + data, + k=2, + distance="euclidean", + order=ORDER.ASCENDING + ) + + x1 = tf.constant( + [ + [[1, 2, 3],[4, 5, 6],[7, 8, 9]], + [[10, 11, 12], [13, 14, 15], [16, 17, 18]], + [[19, 20, 21], [22, 23, 24], [25, 26, 27]] + ], dtype=tf.float32 + ) + + x2 = tf.constant( + [ + [[28, 29, 30], [31, 32, 33], [34, 35, 36]], + [[37, 38, 39], [40, 41, 42], [43, 44, 45]], + ], dtype=tf.float32 + ) + + expected_distance = tf.constant( + [[np.sqrt(9)*27, np.sqrt(9)*36], + [np.sqrt(9)*18, np.sqrt(9)*27], + [np.sqrt(9)*9, np.sqrt(9)*18]], dtype=tf.float32) + + mask = tf.ones((x1.shape[0], x2.shape[0]), dtype=tf.bool) + distances = knn._crossed_distances_fn(x1, x2, mask) + assert distances.shape == (x1.shape[0], x2.shape[0]) + assert tf.reduce_all(tf.equal(distances, expected_distance)) + + mask = tf.constant([[True, False], [False, True], [True, True]], dtype=tf.bool) + expected_distance = tf.constant([[np.sqrt(9)*27, np.inf], [np.inf, np.sqrt(9)*27], [np.sqrt(9)*9, np.sqrt(9)*18]], dtype=tf.float32) + distances = knn._crossed_distances_fn(x1, x2, mask) + assert distances.shape == (x1.shape[0], x2.shape[0]) + assert tf.reduce_all(tf.equal(distances, expected_distance)) + +def test_filter_knn_kneighbors(): + """ + """ + # Test with input and cases being 1D + cases = tf.constant([[1.], [2.], [3.], [4.], [5.]], dtype=tf.float32) + inputs = tf.constant([[1.5], [2.5], [4.5]], dtype=tf.float32) + ## default filter and default order + knn = KNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + ) + + distances, indices = knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, 
tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + + cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + + ## add a filter that is not the default + knn = FilterKNN( + cases, + targets_dataset=cases_targets, + k=2, + batch_size=2, + distance="euclidean", + filter_fn=same_target_filter + ) + mask = same_target_filter(inputs, cases, targets, cases_targets) + print(mask) + distances, indices = knn.kneighbors(inputs, targets) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[0.5, 2.5], [0.5, 0.5], [0.5, 1.5]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [1, 1]],[[0, 1], [1, 0]],[[2, 0], [1, 0]]], dtype=tf.int32))) + + ## test with reverse order + knn = FilterKNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + order=ORDER.DESCENDING + ) + + distances, indices = knn.kneighbors(inputs, targets) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + expected_distances = tf.constant([[3.5, 2.5], [2.5, 1.5], [3.5, 2.5]], dtype=tf.float32) + assert tf.reduce_all(tf.equal(distances, expected_distances)) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + + ## add a filter that is not the default one and reverse order + knn = FilterKNN( + cases, + targets_dataset=cases_targets, + k=2, + batch_size=2, + distance="euclidean", + order=ORDER.DESCENDING, + filter_fn=same_target_filter + ) + + distances, indices = knn.kneighbors(inputs, targets) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[2.5, 0.5], [2.5, 0.5], [2.5, 1.5]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32))) + + # Test with input and cases being 2D + cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) + inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) + ## default filter and default order + knn = FilterKNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + ) + + distances, indices = knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + assert tf.reduce_all(tf.equal(distances, tf.constant([[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)]], dtype=tf.float32))) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + + cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + ## add a filter that is not the default + knn = FilterKNN( + cases, + targets_dataset=cases_targets, + k=2, + batch_size=2, + distance="euclidean", + filter_fn=same_target_filter + ) + + distances, indices = knn.kneighbors(inputs, targets) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + expected_distances = tf.constant([[np.sqrt(0.5), np.sqrt(2*2.5**2)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(2*1.5**2)],], dtype=tf.float32) + assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [1, 1]],[[0, 1], [1, 0]],[[2, 
0], [1, 0]]], dtype=tf.int32))) + + ## test with reverse order and default filter + knn = FilterKNN( + cases, + k=2, + batch_size=2, + distance="euclidean", + order=ORDER.DESCENDING + ) + + distances, indices = knn.kneighbors(inputs) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + expected_distances = tf.constant([[np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)]], dtype=tf.float32) + assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + + ## add a filter that is not the default one and reverse order + knn = FilterKNN( + cases, + targets_dataset=cases_targets, + k=2, + batch_size=2, + distance="euclidean", + order=ORDER.DESCENDING, + filter_fn=same_target_filter + ) + + distances, indices = knn.kneighbors(inputs, targets) + assert distances.shape == (3, 2) + assert indices.shape == (3, 2, 2) + expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32) + assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) + assert tf.reduce_all(tf.equal(indices, tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32))) From 939bbc8bf02a4aea5d9c1bed38bc9fc983bb2f40 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Thu, 7 Mar 2024 15:23:49 +0100 Subject: [PATCH 031/138] feat: develop the naive semi factual, update search methods for integration --- tests/example_based/test_contrastive.py | 53 ++++++++++++++ tests/example_based/test_knn.py | 4 +- xplique/example_based/__init__.py | 1 + xplique/example_based/base_example_method.py | 3 + xplique/example_based/contrastive_examples.py | 70 +++++++++++++++++++ xplique/example_based/search_methods/base.py | 8 +++ xplique/example_based/search_methods/knn.py | 25 +++---- 7 files changed, 145 insertions(+), 19 deletions(-) create mode 100644 tests/example_based/test_contrastive.py create mode 100644 xplique/example_based/contrastive_examples.py diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py new file mode 100644 index 00000000..40204492 --- /dev/null +++ b/tests/example_based/test_contrastive.py @@ -0,0 +1,53 @@ +""" +Tests for the contrastive methods. 
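+They are checked on a small toy dataset for which the expected masks, examples,
+distances and indices can be computed by hand.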
+"""
+import tensorflow as tf
+import numpy as np
+
+from xplique.example_based import NaiveSemiFactuals
+from xplique.example_based.projections import Projection
+
+def test_naive_semi_factuals():
+    """
+    Test the NaiveSemiFactuals method: the filtering mask and the returned
+    examples, distances and indices are compared to hand-computed values.
+    """
+    cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32)
+    cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32)
+
+    cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2)
+    cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2)
+    semi_factuals = NaiveSemiFactuals(cases_dataset, cases_targets_dataset, k=2, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2)
+
+    inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32)
+    targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32)
+
+    mask = semi_factuals.filter_fn(inputs, cases, targets, cases_targets)
+    assert mask.shape == (inputs.shape[0], cases.shape[0])
+
+    expected_mask = tf.constant([
+        [True, False, False, True, False],
+        [False, True, True, False, True],
+        [False, True, True, False, True]], dtype=tf.bool)
+    assert tf.reduce_all(tf.equal(mask, expected_mask))
+
+    return_dict = semi_factuals(inputs, targets)
+    assert set(return_dict.keys()) == set(["examples", "indices", "distances"])
+
+    examples = return_dict["examples"]
+    distances = return_dict["distances"]
+    indices = return_dict["indices"]
+
+    assert examples.shape == (3, 3, 2)  # (n, k+1, W)
+    assert distances.shape == (3, 2)  # (n, k)
+    assert indices.shape == (3, 2, 2)  # (n, k, 2)
+
+    expected_examples = tf.constant([
+        [[1.5, 2.5], [4., 5.], [1., 2.]],
+        [[2.5, 3.5], [5., 6.], [2., 3.]],
+        [[4.5, 5.5], [2., 3.], [3., 4.]]], dtype=tf.float32)
+    assert tf.reduce_all(tf.equal(examples, expected_examples))
+
+    expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32)
+    assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5)
+
+    expected_indices = tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32)
+    assert tf.reduce_all(tf.equal(indices, expected_indices))
diff --git a/tests/example_based/test_knn.py b/tests/example_based/test_knn.py
index e1d43b08..63d4d504 100644
--- a/tests/example_based/test_knn.py
+++ b/tests/example_based/test_knn.py
@@ -368,7 +368,7 @@ def test_filter_knn_kneighbors():
     cases = tf.constant([[1.], [2.], [3.], [4.], [5.]], dtype=tf.float32)
     inputs = tf.constant([[1.5], [2.5], [4.5]], dtype=tf.float32)
     ## default filter and default order
-    knn = KNN(
+    knn = FilterKNN(
         cases,
         k=2,
         batch_size=2,
         distance="euclidean",
     )
@@ -393,8 +393,6 @@ def test_filter_knn_kneighbors():
         distance="euclidean",
         filter_fn=same_target_filter
     )
-    mask = same_target_filter(inputs, cases, targets, cases_targets)
-    print(mask)
     distances, indices = knn.kneighbors(inputs, targets)
     assert distances.shape == (3, 2)
     assert indices.shape == (3, 2, 2)
diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py
index a958a62b..8f0ea443 100644
--- a/xplique/example_based/__init__.py
+++ b/xplique/example_based/__init__.py
@@ -4,3 +4,4 @@
 
 from .cole import Cole
 from .similar_examples import SimilarExamples
+from .contrastive_examples import NaiveSemiFactuals
diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py
index 9e3facf2..df8ac306 100644
--- a/xplique/example_based/base_example_method.py
+++ b/xplique/example_based/base_example_method.py
@@ -122,6 +122,7 @@ def __init__(
             cases_dataset=projected_cases_dataset,
             k=k,
             batch_size=batch_size,
+            targets_dataset=self.targets_dataset,
             **search_method_kwargs,
         )
 
@@ -381,6 +382,8 @@ def format_search_output(
         # )
 
         # add indices, distances, and labels
+        if "indices" in self.returns:
+            return_dict["indices"] = search_output["indices"]
         if "distances" in self.returns:
             return_dict["distances"] = search_output["distances"]
         if "labels" in self.returns:
diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py
new file mode 100644
index 00000000..365e8355
--- /dev/null
+++ b/xplique/example_based/contrastive_examples.py
@@ -0,0 +1,70 @@
+"""
+Implementation of both counterfactual and semi-factual methods for classification tasks.
+"""
+import numpy as np
+import tensorflow as tf
+
+from ..types import Callable, List, Optional, Union
+
+from .base_example_method import BaseExampleMethod
+from .search_methods import BaseSearchMethod, KNN, ORDER, FilterKNN
+from .projections import Projection
+
+class NaiveSemiFactuals(BaseExampleMethod):
+    """
+    A naive version of semi-factual search: for a given sample, it returns the
+    farthest cases that share the same predicted label.
+    """
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        case_returns: Union[List[str], str] = "examples",
+        batch_size: Optional[int] = 32,
+        distance: Union[int, str, Callable] = "euclidean",
+    ):
+        search_method = FilterKNN
+
+        if projection is None:
+            projection = Projection(space_projection=lambda inputs: inputs)
+
+        super().__init__(
+            cases_dataset=cases_dataset,
+            labels_dataset=labels_dataset,
+            targets_dataset=targets_dataset,
+            search_method=search_method,
+            k=k,
+            projection=projection,
+            case_returns=case_returns,
+            batch_size=batch_size,
+            distance=distance,
+            filter_fn=self.filter_fn,
+            order=ORDER.DESCENDING
+        )
+
+
+    def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor:
+        """
+        Filter function masking out the cases whose predicted label differs from
+        the one of the input: semi-factuals are searched among the cases sharing
+        the input's predicted label.
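+        For instance (an illustrative toy case), with targets [[0, 1]] and
+        cases_targets [[0, 1], [1, 0]], the returned mask is [[True, False]]:
+        only the first case shares the input's predicted label.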
+        """
+        # get the labels predicted by the model
+        # (n, )
+        predicted_labels = tf.argmax(targets, axis=-1)
+
+        # for each input, the mask is True for the cases whose label is the same
+        # as the predicted label and False otherwise
+        label_targets = tf.argmax(cases_targets, axis=-1)  # (bs,)
+        mask = tf.equal(tf.expand_dims(predicted_labels, axis=1), label_targets)  # (n, bs)
+        return mask
+
+class PredictedLabelAwareSemiFactuals():
+    def __init__(self) -> None:
+        raise NotImplementedError
+
+class NaiveCounterFactuals(BaseExampleMethod):
+    def __init__():
+        raise NotImplementedError
diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py
index 303575c3..a165688d 100644
--- a/xplique/example_based/search_methods/base.py
+++ b/xplique/example_based/search_methods/base.py
@@ -91,6 +91,7 @@ def __init__(
         k: int = 1,
         search_returns: Optional[Union[List[str], str]] = None,
         batch_size: Optional[int] = 32,
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
     ):  # pylint: disable=R0801
 
         # set batch size
@@ -104,6 +105,13 @@ def __init__(
         self.set_k(k)
         self.set_returns(search_returns)
 
+        # set targets_dataset
+        if targets_dataset is not None:
+            self.targets_dataset = sanitize_dataset(targets_dataset, self.batch_size)
+        else:
+            # make an iterable of None so that zipping with the cases dataset still works
+            self.targets_dataset = [None]*len(cases_dataset)
+
     def set_k(self, k: int):
         """
         Change value of k with constructing a new `BaseSearchMethod`.
diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py
index 9b0f228b..5291999a 100644
--- a/xplique/example_based/search_methods/knn.py
+++ b/xplique/example_based/search_methods/knn.py
@@ -21,10 +21,11 @@ def __init__(
         k: int = 1,
         search_returns: Optional[Union[List[str], str]] = None,
         batch_size: Optional[int] = 32,
-        order: ORDER = ORDER.ASCENDING
+        order: ORDER = ORDER.ASCENDING,
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
     ):
         super().__init__(
-            cases_dataset, k, search_returns, batch_size
+            cases_dataset, k, search_returns, batch_size, targets_dataset
         )
 
         assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}"
@@ -127,10 +128,11 @@ def __init__(
         search_returns: Optional[Union[List[str], str]] = None,
         batch_size: Optional[int] = 32,
         distance: Union[int, str, Callable] = "euclidean",
-        order: ORDER = ORDER.ASCENDING
+        order: ORDER = ORDER.ASCENDING,
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
     ):  # pylint: disable=R0801
         super().__init__(
-            cases_dataset, k, search_returns, batch_size, order
+            cases_dataset, k, search_returns, batch_size, order, targets_dataset
         )
 
         if hasattr(distance, "__call__"):
@@ -272,7 +274,7 @@ def __init__(
         order: ORDER = ORDER.ASCENDING
     ):  # pylint: disable=R0801
         super().__init__(
-            cases_dataset, k, search_returns, batch_size, order
+            cases_dataset, k, search_returns, batch_size, order, targets_dataset
         )
 
         if hasattr(distance, "__call__"):
@@ -288,16 +290,6 @@ def __init__(
             +f"but {type(distance)} was received."
) - # set targets_dataset - if targets_dataset is not None: - batch_size = min(batch_size, len(cases_dataset)) - cardinality = math.ceil(len(cases_dataset) / batch_size) - self.targets_dataset = sanitize_dataset( - targets_dataset, batch_size, cardinality - ) - else: - self.targets_dataset = [None]*len(cases_dataset) - # TODO: Assertion on the function signature if filter_fn is None: filter_fn = lambda x, z, y, t: tf.ones((tf.shape(x)[0], tf.shape(z)[0]), dtype=tf.bool) @@ -390,4 +382,5 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni tf.gather(concatenated_distances, sort_order, axis=1, batch_dims=1) ) - return best_distances, best_indices \ No newline at end of file + return best_distances, best_indices + \ No newline at end of file From 8d9f234f590dbd20fc917dcff77ba07173a3d66e Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Thu, 7 Mar 2024 17:23:13 +0100 Subject: [PATCH 032/138] feat: add a semi factuals method that is dedicated to one specific label --- tests/example_based/test_contrastive.py | 118 +++++++++++++++++- xplique/example_based/__init__.py | 2 +- xplique/example_based/contrastive_examples.py | 62 ++++++++- 3 files changed, 176 insertions(+), 6 deletions(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index 40204492..c87fd67b 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -1,11 +1,12 @@ """ Tests for the contrastive methods. """ +import pytest + import tensorflow as tf import numpy as np -from xplique.example_based import NaiveSemiFactuals -from xplique.example_based.projections import Projection +from xplique.example_based import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals def test_naive_semi_factuals(): """ @@ -51,3 +52,116 @@ def test_naive_semi_factuals(): expected_indices = tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32) assert tf.reduce_all(tf.equal(indices, expected_indices)) + +def test_labelaware_semifactuals(): + """ + """ + cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) + cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + + cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) + cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) + + inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) + targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + + semi_factuals = PredictedLabelAwareSemiFactuals(cases_dataset, cases_targets_dataset, target_label=0, k=2, batch_size=2, case_returns=["examples", "distances", "include_inputs"]) + # assert the filtering on the right label went right + + combined_dataset = tf.data.Dataset.zip((cases_dataset.unbatch(), cases_targets_dataset.unbatch())) + for elem, label in combined_dataset: + print(f"elem: {elem}, label: {label}") + print(f"lambda_fn: {tf.equal(tf.argmax(label, axis=-1),0)}") + combined_dataset = combined_dataset.filter(lambda x, y: tf.equal(tf.argmax(y, axis=-1),0)) + + for elem, label in combined_dataset: + print(f"elem: {elem}, label: {label}") + + filter_cases = semi_factuals.cases_dataset + filter_targets = semi_factuals.targets_dataset + + # for elem in filter_cases: + # print(elem) + # for elem in filter_targets: + # print(elem) + + expected_filter_cases = tf.constant([[2., 3.], [3., 4.], [5., 6.]], dtype=tf.float32) + expected_filter_targets = tf.constant([[1, 0], [1, 0], [1, 
0]], dtype=tf.float32) + + tensor_filter_cases = [] + for elem in filter_cases.unbatch(): + tensor_filter_cases.append(elem) + tensor_filter_cases = tf.stack(tensor_filter_cases) + assert tf.reduce_all(tf.equal(tensor_filter_cases, expected_filter_cases)) + + tensor_filter_targets = [] + for elem in filter_targets.unbatch(): + tensor_filter_targets.append(elem) + tensor_filter_targets = tf.stack(tensor_filter_targets) + assert tf.reduce_all(tf.equal(tensor_filter_targets, expected_filter_targets)) + + # check the call method + filter_inputs = tf.constant([[2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) + filter_targets = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) + + return_dict = semi_factuals(filter_inputs, filter_targets) + assert set(return_dict.keys()) == set(["examples", "distances"]) + + examples = return_dict["examples"] + distances = return_dict["distances"] + + assert examples.shape == (2, 3, 2) # (n_label0, k+1, W) + assert distances.shape == (2, 2) # (n_label0, k) + + expected_examples = tf.constant([ + [[2.5, 3.5], [5., 6.], [2., 3.]], + [[4.5, 5.5], [2., 3.], [3., 4.]]], dtype=tf.float32) + assert tf.reduce_all(tf.equal(examples, expected_examples)) + + expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32) + assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) + + + # check an error is raised when a target does not match the target label + with pytest.raises(AssertionError): + semi_factuals(inputs, targets) + + # same but with the other label + semi_factuals = PredictedLabelAwareSemiFactuals(cases_dataset, cases_targets_dataset, target_label=1, k=2, batch_size=2, case_returns=["examples", "distances", "include_inputs"]) + filter_cases = semi_factuals.cases_dataset + filter_targets = semi_factuals.targets_dataset + + expected_filter_cases = tf.constant([[1., 2.], [4., 5.]], dtype=tf.float32) + expected_filter_targets = tf.constant([[0, 1], [0, 1]], dtype=tf.float32) + + tensor_filter_cases = [] + for elem in filter_cases.unbatch(): + tensor_filter_cases.append(elem) + tensor_filter_cases = tf.stack(tensor_filter_cases) + assert tf.reduce_all(tf.equal(tensor_filter_cases, expected_filter_cases)) + + tensor_filter_targets = [] + for elem in filter_targets.unbatch(): + tensor_filter_targets.append(elem) + tensor_filter_targets = tf.stack(tensor_filter_targets) + assert tf.reduce_all(tf.equal(tensor_filter_targets, expected_filter_targets)) + + # check the call method + filter_inputs = tf.constant([[1.5, 2.5]], dtype=tf.float32) + filter_targets = tf.constant([[0, 1]], dtype=tf.float32) + + return_dict = semi_factuals(filter_inputs, filter_targets) + assert set(return_dict.keys()) == set(["examples", "distances"]) + + examples = return_dict["examples"] + distances = return_dict["distances"] + + assert examples.shape == (1, 3, 2) # (n_label1, k+1, W) + assert distances.shape == (1, 2) # (n_label1, k) + + expected_examples = tf.constant([ + [[1.5, 2.5], [4., 5.], [1., 2.]]], dtype=tf.float32) + assert tf.reduce_all(tf.equal(examples, expected_examples)) + + expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)]], dtype=tf.float32) + assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index 8f0ea443..75238093 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -4,4 +4,4 @@ from .cole import Cole from .similar_examples import SimilarExamples -from 
.contrastive_examples import NaiveSemiFactuals +from .contrastive_examples import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 365e8355..c3383e92 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -61,9 +61,65 @@ def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: mask = tf.equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) return mask -class PredictedLabelAwareSemiFactuals(): - def __init__(self) -> None: - raise NotImplementedError +class PredictedLabelAwareSemiFactuals(BaseExampleMethod): + """ + As we know semi-factuals should belong to the same class as the input, + we propose here a method that is dedicated to a specific label. + """ + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + target_label: int, + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = "euclidean", + ): + # filter the cases dataset and targets dataset to keep only the ones + # that have the target label + # TODO: improve this unbatch and batch + combined_dataset = tf.data.Dataset.zip((cases_dataset.unbatch(), targets_dataset.unbatch())) + combined_dataset = combined_dataset.filter(lambda x, y: tf.equal(tf.argmax(y, axis=-1),target_label)) + + # separate the cases and targets + cases_dataset = combined_dataset.map(lambda x, y: x).batch(batch_size) + targets_dataset = combined_dataset.map(lambda x, y: y).batch(batch_size) + + # delete the combined dataset + del combined_dataset + + if projection is None: + projection = Projection(space_projection=lambda inputs: inputs) + + search_method = KNN + + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + search_method=search_method, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + distance=distance, + order = ORDER.DESCENDING + ) + + self.target_label = target_label + + def __call__( + self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + ): + # assert targets are all the same as the target label + if targets is not None: + assert tf.reduce_all(tf.argmax(targets, axis=-1) == self.target_label), "All targets should be the same as the target label." 
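+        # once the targets are validated, delegate the search to the parent class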
+ return super().__call__(inputs, targets) class NaiveCounterFactuals(BaseExampleMethod): def __init__(): From f5f05a3df6b92d73e2b810e129f7d9d5667c0090 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Wed, 13 Mar 2024 11:28:29 +0100 Subject: [PATCH 033/138] feat: add a naive counter factuals methods and its test --- tests/example_based/test_contrastive.py | 65 ++++++++++++++----- xplique/example_based/__init__.py | 2 +- xplique/example_based/contrastive_examples.py | 50 +++++++++++++- 3 files changed, 98 insertions(+), 19 deletions(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index c87fd67b..bac1aaa2 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -6,7 +6,7 @@ import tensorflow as tf import numpy as np -from xplique.example_based import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals +from xplique.example_based import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals, NaiveCounterFactuals def test_naive_semi_factuals(): """ @@ -69,22 +69,11 @@ def test_labelaware_semifactuals(): # assert the filtering on the right label went right combined_dataset = tf.data.Dataset.zip((cases_dataset.unbatch(), cases_targets_dataset.unbatch())) - for elem, label in combined_dataset: - print(f"elem: {elem}, label: {label}") - print(f"lambda_fn: {tf.equal(tf.argmax(label, axis=-1),0)}") combined_dataset = combined_dataset.filter(lambda x, y: tf.equal(tf.argmax(y, axis=-1),0)) - for elem, label in combined_dataset: - print(f"elem: {elem}, label: {label}") - filter_cases = semi_factuals.cases_dataset filter_targets = semi_factuals.targets_dataset - # for elem in filter_cases: - # print(elem) - # for elem in filter_targets: - # print(elem) - expected_filter_cases = tf.constant([[2., 3.], [3., 4.], [5., 6.]], dtype=tf.float32) expected_filter_targets = tf.constant([[1, 0], [1, 0], [1, 0]], dtype=tf.float32) @@ -99,7 +88,7 @@ def test_labelaware_semifactuals(): tensor_filter_targets.append(elem) tensor_filter_targets = tf.stack(tensor_filter_targets) assert tf.reduce_all(tf.equal(tensor_filter_targets, expected_filter_targets)) - + # check the call method filter_inputs = tf.constant([[2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) filter_targets = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) @@ -121,11 +110,10 @@ def test_labelaware_semifactuals(): expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - # check an error is raised when a target does not match the target label with pytest.raises(AssertionError): semi_factuals(inputs, targets) - + # same but with the other label semi_factuals = PredictedLabelAwareSemiFactuals(cases_dataset, cases_targets_dataset, target_label=1, k=2, batch_size=2, case_returns=["examples", "distances", "include_inputs"]) filter_cases = semi_factuals.cases_dataset @@ -145,7 +133,7 @@ def test_labelaware_semifactuals(): tensor_filter_targets.append(elem) tensor_filter_targets = tf.stack(tensor_filter_targets) assert tf.reduce_all(tf.equal(tensor_filter_targets, expected_filter_targets)) - + # check the call method filter_inputs = tf.constant([[1.5, 2.5]], dtype=tf.float32) filter_targets = tf.constant([[0, 1]], dtype=tf.float32) @@ -165,3 +153,48 @@ def test_labelaware_semifactuals(): expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 
1e-5)
+
+def test_naive_counter_factuals():
+    """
+    Test the NaiveCounterFactuals method: the filtering mask and the returned
+    examples, distances and indices are compared to hand-computed values.
+    """
+    cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32)
+    cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32)
+
+    cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2)
+    cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2)
+    counter_factuals = NaiveCounterFactuals(cases_dataset, cases_targets_dataset, k=2, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2)
+
+    inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32)
+    targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32)
+
+    mask = counter_factuals.filter_fn(inputs, cases, targets, cases_targets)
+    assert mask.shape == (inputs.shape[0], cases.shape[0])
+
+    expected_mask = tf.constant([
+        [False, True, True, False, True],
+        [True, False, False, True, False],
+        [True, False, False, True, False]], dtype=tf.bool)
+    assert tf.reduce_all(tf.equal(mask, expected_mask))
+
+    return_dict = counter_factuals(inputs, targets)
+    assert set(return_dict.keys()) == set(["examples", "indices", "distances"])
+
+    examples = return_dict["examples"]
+    distances = return_dict["distances"]
+    indices = return_dict["indices"]
+
+    assert examples.shape == (3, 3, 2)  # (n, k+1, W)
+    assert distances.shape == (3, 2)  # (n, k)
+    assert indices.shape == (3, 2, 2)  # (n, k, 2)
+
+    expected_examples = tf.constant([
+        [[1.5, 2.5], [2., 3.], [3., 4.]],
+        [[2.5, 3.5], [1., 2.], [4., 5.]],
+        [[4.5, 5.5], [4., 5.], [1., 2.]]], dtype=tf.float32)
+    assert tf.reduce_all(tf.equal(examples, expected_examples))
+
+    expected_distances = tf.constant([[np.sqrt(2*0.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*1.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*0.5**2), np.sqrt(2*3.5**2)]], dtype=tf.float32)
+    assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5)
+
+    expected_indices = tf.constant([[[0, 1], [1, 0]],[[0, 0], [1, 1]],[[1, 1], [0, 0]]], dtype=tf.int32)
+    assert tf.reduce_all(tf.equal(indices, expected_indices))
diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py
index 75238093..89e08d1b 100644
--- a/xplique/example_based/__init__.py
+++ b/xplique/example_based/__init__.py
@@ -4,4 +4,4 @@
 
 from .cole import Cole
 from .similar_examples import SimilarExamples
-from .contrastive_examples import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals
+from .contrastive_examples import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals, NaiveCounterFactuals
diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py
index c3383e92..83b03b11 100644
--- a/xplique/example_based/contrastive_examples.py
+++ b/xplique/example_based/contrastive_examples.py
@@ -122,5 +122,51 @@ def __call__(
         return super().__call__(inputs, targets)
 
 class NaiveCounterFactuals(BaseExampleMethod):
-    def __init__():
-        raise NotImplementedError
+    """
+    A naive version of counterfactual search: for a given sample, it returns
+    the closest cases that are predicted with a different label.
+    """
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        case_returns: Union[List[str], str] = "examples",
+        batch_size: Optional[int] = 32,
+        distance: Union[int, str, Callable] = "euclidean",
+    ):
+        search_method = FilterKNN
+
+        if projection is None:
+            projection = Projection(space_projection=lambda inputs: inputs)
+
super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + search_method=search_method, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + distance=distance, + filter_fn=self.filter_fn, + order = ORDER.ASCENDING + ) + + + def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: + """ + Filter function to mask the cases for which the label is different from the predicted + label on the inputs. + """ + # get the labels predicted by the model + # (n, ) + predicted_labels = tf.argmax(targets, axis=-1) + + # for each input, if the target label is the same as the predicted label + # the mask as a True value and False otherwise + label_targets = tf.argmax(cases_targets, axis=-1) # (bs,) + mask = tf.not_equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) + return mask From 7b32df9830e99d6390f0e40f72f04f1944faa98d Mon Sep 17 00:00:00 2001 From: Mohamed Chafik Bakey Date: Thu, 21 Mar 2024 12:58:09 +0100 Subject: [PATCH 034/138] add Prototypes --- tests/example_based/test_prototypes.py | 248 +++++++++++ tests/utils.py | 49 +++ xplique/example_based/__init__.py | 1 + xplique/example_based/base_example_method.py | 1 + xplique/example_based/prototypes.py | 114 ++++++ .../example_based/search_methods/__init__.py | 4 + xplique/example_based/search_methods/base.py | 2 + .../search_methods/mmd_critic_search.py | 98 +++++ .../search_methods/proto_dash_search.py | 244 +++++++++++ .../search_methods/proto_greedy_search.py | 385 ++++++++++++++++++ .../search_methods/prototypes_search.py | 137 +++++++ 11 files changed, 1283 insertions(+) create mode 100644 tests/example_based/test_prototypes.py create mode 100644 xplique/example_based/prototypes.py create mode 100644 xplique/example_based/search_methods/mmd_critic_search.py create mode 100644 xplique/example_based/search_methods/proto_dash_search.py create mode 100644 xplique/example_based/search_methods/proto_greedy_search.py create mode 100644 xplique/example_based/search_methods/prototypes_search.py diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py new file mode 100644 index 00000000..39ba2425 --- /dev/null +++ b/tests/example_based/test_prototypes.py @@ -0,0 +1,248 @@ +""" +Test Prototypes +""" +import os +import sys + +sys.path.append(os.getcwd()) + +from math import prod, sqrt +import unittest +import time + +import numpy as np +from sklearn.metrics.pairwise import rbf_kernel +import tensorflow as tf + +from xplique.commons import sanitize_dataset, are_dataset_first_elems_equal +from xplique.types import Union + +from xplique.example_based import Prototypes +from xplique.example_based.projections import Projection, LatentSpaceProjection +from xplique.example_based.search_methods import ProtoGreedySearch, ProtoDashSearch, MMDCriticSearch + +from tests.utils import almost_equal, get_Gaussian_Data, load_data, plot + + +def test_proto_greedy_basic(): + """ + Test the SimilarExamples with an identity projection. 
+ """ + # Setup + k = 3 + nb_prototypes = 3 + gamma = 0.026 + x_train, y_train = get_Gaussian_Data(nb_samples_class=20) + # x_train, y_train = load_data('usps') + # x_test, y_test = load_data('usps.t') + # x_test = tf.random.shuffle(x_test) + # x_test = x_test[0:8] + + identity_projection = Projection( + space_projection=lambda inputs, targets=None: inputs + ) + + def custom_kernel_wrapper(gamma): + def custom_kernel(x,y=None): + return rbf_kernel(x,y,gamma) + return custom_kernel + + kernel_fn = custom_kernel_wrapper(gamma) + + kernel_type = "global" + + # Method initialization + method = Prototypes( + cases_dataset=x_train, + labels_dataset=y_train, + search_method=ProtoGreedySearch, + k=k, + projection=identity_projection, + batch_size=32, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + kernel_fn=kernel_fn, + ) + + # Generate explanation + prototype_indices, prototype_weights = method.get_prototypes() + + prototypes = tf.gather(x_train, prototype_indices) + prototype_labels = tf.gather(y_train, prototype_indices) + + # sort by label + prototype_labels_sorted = prototype_labels.numpy().argsort() + + prototypes = tf.gather(prototypes, prototype_labels_sorted) + prototype_indices = tf.gather(prototype_indices, prototype_labels_sorted) + prototype_labels = tf.gather(prototype_labels, prototype_labels_sorted) + prototype_weights = tf.gather(prototype_weights, prototype_labels_sorted) + + # Verifications + # Shape + assert prototype_indices.shape == (nb_prototypes,) + assert prototypes.shape == (nb_prototypes, x_train.shape[1]) + assert prototype_weights.shape == (nb_prototypes,) + + # at least 1 prototype per class is selected + assert tf.unique(prototype_labels)[0].shape == tf.unique(y_train)[0].shape + + # uniqueness test of prototypes + assert prototype_indices.shape == tf.unique(prototype_indices)[0].shape + + # Check if all indices are between 0 and x_train.shape[0]-1 + assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + + # # Visualize all prototypes + # plot(prototypes, prototype_weights, 'proto_greedy') + +def test_proto_dash_basic(): + """ + Test the SimilarExamples with an identity projection. 
+ """ + # Setup + k = 3 + nb_prototypes = 3 + gamma = 0.026 + x_train, y_train = get_Gaussian_Data(nb_samples_class=20) + # x_train, y_train = load_data('usps') + # x_test, y_test = load_data('usps.t') + # x_test = tf.random.shuffle(x_test) + # x_test = x_test[0:8] + + identity_projection = Projection( + space_projection=lambda inputs, targets=None: inputs + ) + + def custom_kernel_wrapper(gamma): + def custom_kernel(x,y=None): + return rbf_kernel(x,y,gamma) + return custom_kernel + + kernel_fn = custom_kernel_wrapper(gamma) + + kernel_type = "global" + + # Method initialization + method = Prototypes( + cases_dataset=x_train, + labels_dataset=y_train, + search_method=ProtoDashSearch, + k=k, + projection=identity_projection, + batch_size=32, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + kernel_fn=kernel_fn, + ) + + # Generate explanation + prototype_indices, prototype_weights = method.get_prototypes() + + prototypes = tf.gather(x_train, prototype_indices) + prototype_labels = tf.gather(y_train, prototype_indices) + + # sort by label + prototype_labels_sorted = prototype_labels.numpy().argsort() + + prototypes = tf.gather(prototypes, prototype_labels_sorted) + prototype_indices = tf.gather(prototype_indices, prototype_labels_sorted) + prototype_labels = tf.gather(prototype_labels, prototype_labels_sorted) + prototype_weights = tf.gather(prototype_weights, prototype_labels_sorted) + + # Verifications + # Shape + assert prototype_indices.shape == (nb_prototypes,) + assert prototypes.shape == (nb_prototypes, x_train.shape[1]) + assert prototype_weights.shape == (nb_prototypes,) + + # at least 1 prototype per class is selected + assert tf.unique(prototype_labels)[0].shape == tf.unique(y_train)[0].shape + + # uniqueness test of prototypes + assert prototype_indices.shape == tf.unique(prototype_indices)[0].shape + + # Check if all indices are between 0 and x_train.shape[0]-1 + assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + + # # Visualize all prototypes + # plot(prototypes, prototype_weights, 'proto_dash') + +def test_mmd_critic_basic(): + """ + Test the SimilarExamples with an identity projection. 
+ """ + # Setup + k = 3 + nb_prototypes = 3 + gamma = 0.026 + x_train, y_train = get_Gaussian_Data(nb_samples_class=20) + # x_train, y_train = load_data('usps') + # x_test, y_test = load_data('usps.t') + # x_test = tf.random.shuffle(x_test) + # x_test = x_test[0:8] + + identity_projection = Projection( + space_projection=lambda inputs, targets=None: inputs + ) + + def custom_kernel_wrapper(gamma): + def custom_kernel(x,y=None): + return rbf_kernel(x,y,gamma) + return custom_kernel + + kernel_fn = custom_kernel_wrapper(gamma) + + kernel_type = "global" + + # Method initialization + method = Prototypes( + cases_dataset=x_train, + labels_dataset=y_train, + search_method=MMDCriticSearch, + k=k, + projection=identity_projection, + batch_size=32, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + kernel_fn=kernel_fn, + ) + + # Generate explanation + prototype_indices, prototype_weights = method.get_prototypes() + + prototypes = tf.gather(x_train, prototype_indices) + prototype_labels = tf.gather(y_train, prototype_indices) + + # sort by label + prototype_labels_sorted = prototype_labels.numpy().argsort() + + prototypes = tf.gather(prototypes, prototype_labels_sorted) + prototype_indices = tf.gather(prototype_indices, prototype_labels_sorted) + prototype_labels = tf.gather(prototype_labels, prototype_labels_sorted) + prototype_weights = tf.gather(prototype_weights, prototype_labels_sorted) + + # Verifications + # Shape + assert prototype_indices.shape == (nb_prototypes,) + assert prototypes.shape == (nb_prototypes, x_train.shape[1]) + assert prototype_weights.shape == (nb_prototypes,) + + # at least 1 prototype per class is selected + assert tf.unique(prototype_labels)[0].shape == tf.unique(y_train)[0].shape + + # uniqueness test of prototypes + assert prototype_indices.shape == tf.unique(prototype_indices)[0].shape + + # Check if all indices are between 0 and x_train.shape[0]-1 + assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + + # # Visualize all prototypes + # plot(prototypes, prototype_weights, 'mmd_critic') + +test_proto_greedy_basic() +# test_proto_dash_basic() +# test_mmd_critic_basic() diff --git a/tests/utils.py b/tests/utils.py index 92d348e2..4219cd1a 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -1,7 +1,11 @@ import signal, time import numpy as np +import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression +from sklearn.datasets import load_svmlight_file +from pathlib import Path +from math import ceil import tensorflow as tf from tensorflow.keras.models import Sequential, Model from tensorflow.keras.layers import (Dense, Conv1D, Conv2D, Activation, GlobalAveragePooling1D, @@ -250,3 +254,48 @@ def download_file(identifier: str, for chunk in response.iter_content(chunk_size): if chunk: file.write(chunk) + +def get_Gaussian_Data(nb_samples_class=20): + tf.random.set_seed(42) + + sigma = 0.05 + mu = [10, 20, 30] + + X = tf.concat([tf.random.normal(shape=(nb_samples_class,1), mean=mu[i], stddev=sigma, dtype=tf.float32) for i in range(3)], axis=0) + y = tf.concat([tf.ones(shape=(nb_samples_class), dtype=tf.int32) * i for i in range(3)], axis=0) + + return(X, y) + +def load_data(fname): + data_dir = Path('/home/mohamed-chafik.bakey/MMD-critic/data') + X, y = load_svmlight_file(str(data_dir / fname)) + X = tf.constant(X.todense(), dtype=tf.float32) + y = tf.constant(np.array(y), dtype=tf.int64) + sort_indices = y.numpy().argsort() + X = tf.gather(X, sort_indices, axis=0) 
+ y = tf.gather(y, sort_indices) + y -= 1 + return X, y + +def plot(prototypes_sorted, prototype_weights_sorted, extension): + + output_dir = Path('tests/example_based/tmp') + k = prototypes_sorted.shape[0] + + # Visualize all prototypes + num_cols = 8 + num_rows = ceil(k / num_cols) + fig, axes = plt.subplots(num_rows, num_cols, figsize=(6, num_rows * 1.25)) + if prototype_weights_sorted is not None: + # Adjust the spacing between lines + plt.subplots_adjust(hspace=1) + for i, axis in enumerate(axes.ravel()): + if i >= k: + axis.axis('off') + continue + axis.imshow(prototypes_sorted[i].numpy().reshape(16, 16), cmap='gray') + if prototype_weights_sorted is not None: + axis.set_title("{:.2f}".format(prototype_weights_sorted[i].numpy())) + axis.axis('off') + # fig.suptitle(f'{k} Prototypes') + plt.savefig(output_dir / f'{k}_prototypes_{extension}.png') \ No newline at end of file diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index a958a62b..0cdb3d2f 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -4,3 +4,4 @@ from .cole import Cole from .similar_examples import SimilarExamples +from .prototypes import Prototypes diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 2c4b99df..31ea4f89 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -120,6 +120,7 @@ def __init__( # initiate search_method self.search_method = search_method( cases_dataset=projected_cases_dataset, + labels_dataset=labels_dataset, k=k, batch_size=batch_size, **search_method_kwargs, diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py new file mode 100644 index 00000000..9df3081f --- /dev/null +++ b/xplique/example_based/prototypes.py @@ -0,0 +1,114 @@ +""" +Base model for prototypes +""" + +import math + +import time + +import tensorflow as tf +import numpy as np + +from ..types import Callable, Dict, List, Optional, Type, Union + +from ..commons import sanitize_inputs_targets +from ..commons import sanitize_dataset, dataset_gather +from .search_methods import ProtoGreedySearch, PrototypesSearch +from .projections import Projection +from .base_example_method import BaseExampleMethod + +from .search_methods.base import _sanitize_returns + + +class Prototypes(BaseExampleMethod): + """ + Base class for prototypes. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. 
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
+        the case for your dataset, otherwise, examples will not make sense.
+    k
+        The number of examples to retrieve.
+    projection
+        Projection or Callable that projects samples from the input space to the search space.
+        The search space should be a space where distances make sense for the model.
+        It should not be `None`, otherwise,
+        all examples could be computed only with the `search_method`.
+
+        Example of Callable:
+        ```
+        def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None):
+            '''
+            Example of projection,
+            inputs are the elements to project.
+            targets are optional parameters to orient the projection.
+            '''
+            projected_inputs = # do some magic on inputs, it should use the model.
+            return projected_inputs
+        ```
+    case_returns
+        String or list of string with the elements to return in `self.explain()`.
+        See `self.set_returns()` for detail.
+    batch_size
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+    distance
+        Distance for the knn search method.
+        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
+        Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) says:
+        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
+        yielding the corresponding p-norm." We also added 'cosine'.
+    """
+
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        search_method: Type[PrototypesSearch] = ProtoGreedySearch,
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        case_returns: Union[List[str], str] = "examples",
+        batch_size: Optional[int] = 32,
+        **search_method_kwargs,
+    ):
+        super().__init__(
+            cases_dataset=cases_dataset,
+            labels_dataset=labels_dataset,
+            targets_dataset=targets_dataset,
+            search_method=search_method,
+            k=k,
+            projection=projection,
+            case_returns=case_returns,
+            batch_size=batch_size,
+            **search_method_kwargs,
+        )
+
+    def get_prototypes(self):
+        """
+        Return the prototypes computed by the search method.
+
+        Returns
+        -------
+        prototype_indices : Tensor
+            The indices of the prototypes.
+        prototype_weights : Tensor
+            The weights of the prototypes.
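+
+        Example (a minimal sketch; assumes `method` is an already-built
+        `Prototypes` instance and `x_train` its cases, as in the tests):
+        ```
+        prototype_indices, prototype_weights = method.get_prototypes()
+        prototypes = tf.gather(x_train, prototype_indices)
+        ```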
+ """ + return self.search_method.prototype_indices, self.search_method.prototype_weights + diff --git a/xplique/example_based/search_methods/__init__.py b/xplique/example_based/search_methods/__init__.py index 228e1acd..d54e85a4 100644 --- a/xplique/example_based/search_methods/__init__.py +++ b/xplique/example_based/search_methods/__init__.py @@ -6,3 +6,7 @@ # from .sklearn_knn import SklearnKNN from .knn import KNN +from .prototypes_search import PrototypesSearch +from .proto_greedy_search import ProtoGreedySearch +from .proto_dash_search import ProtoDashSearch +from .mmd_critic_search import MMDCriticSearch diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 1c7c0f1b..5dde1a9c 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -80,6 +80,7 @@ class BaseSearchMethod(ABC): def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, k: int = 1, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, @@ -92,6 +93,7 @@ def __init__( self.batch_size = batch_size self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size) + self.labels_dataset = sanitize_dataset(labels_dataset, self.batch_size) self.set_k(k) self.set_returns(search_returns) diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py new file mode 100644 index 00000000..cfe70941 --- /dev/null +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -0,0 +1,98 @@ +""" +MMDCritic search method in example-based module +""" + +import numpy as np +import tensorflow as tf + +from ...commons import dataset_gather +from ...types import Callable, List, Union, Optional, Tuple + +from .proto_greedy_search import ProtoGreedySearch +from ..projections import Projection + + +class MMDCriticSearch(ProtoGreedySearch): + """ + KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors`. + Basically a wrapper of `NearestNeighbors` to match the `BaseSearchMethod` API. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + For natural example-based methods it is the train dataset. + k + The number of examples to retrieve. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously. + It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + distance + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. + """ + + def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): + """ + Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. 
+ + Here, we have a special case of protogreedy where we give equal weights to all prototypes, + the objective here is simplified to speed up processing + + Find argmax_{c} F(S ∪ c) - F(S) + ≡ + Find argmax_{c} F(S ∪ c) + ≡ + Find argmax_{c} (sum1 - sum2) where: sum1 = (2 / n) * ∑[i=1 to n] κ(x_i, c) + sum2 = 1/(|S|+1) [2 * ∑[j=1 to |S|] * κ(x_j, c) + κ(c, c)] + + Parameters + ---------- + selection_indices : Tensor + Indices corresponding to the selected prototypes. + selection_cases : Tensor + Cases corresponding to the selected prototypes. + selection_labels : Tensor + Labels corresponding to the selected prototypes. + selection_weights : Tensor + Weights corresponding to the selected prototypes. + selection_selection_kernel : Tensor + Kernel matrix computed from the selected prototypes. + candidates_indices : Tensor + Indices corresponding to the candidate prototypes. + candidates_cases : Tensor + Cases corresponding to the candidate prototypes. + candidates_labels : Tensor + Labels corresponding to the candidate prototypes. + candidates_selection_kernel : Tensor + Kernel matrix between the candidates and the selected prototypes. + + Returns + ------- + objectives + Tensor that contains the computed objective values for each candidate. + objectives_weights + Tensor that contains the computed objective weights for each candidate. + """ + + nb_candidates = candidates_indices.shape[0] + nb_selection = selection_indices.shape[0] + + sum1 = 2 * tf.gather(self.col_means, candidates_indices) + + if nb_selection == 0: + sum2 = tf.abs(tf.gather(self.diag, candidates_indices)) + else: + temp = tf.transpose(candidates_selection_kernel, perm=[1, 0]) + sum2 = tf.reduce_sum(temp, axis=0) * 2 + tf.gather(self.diag, candidates_indices) + sum2 /= (nb_selection + 1) + + objectives = sum1 - sum2 + objectives_weights = tf.ones(shape=(nb_candidates, nb_selection+1), dtype=tf.float32) / tf.cast(nb_selection+1, dtype=tf.float32) + + return objectives, objectives_weights diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py new file mode 100644 index 00000000..e29d12b8 --- /dev/null +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -0,0 +1,244 @@ +""" +ProtoDash search method in example-based module +""" + +import numpy as np +from sklearn.metrics.pairwise import rbf_kernel +from scipy.optimize import minimize +import tensorflow as tf + +from ...commons import dataset_gather +from ...types import Callable, List, Union, Optional, Tuple + +from .proto_greedy_search import ProtoGreedySearch +from ..projections import Projection + +class Optimizer(): + """ + Class to solve the quadratic problem: + F(S) ≡ max_{w:supp(w)∈ S, w ≥ 0} l(w), + where l(w) = w^T * μ_p - 1/2 * w^T * K * w + + Parameters + ---------- + initial_weights : Tensor + Initial weight vector. + min_weight : float, optional + Lower bound on weight. Default is 0. + max_weight : float, optional + Upper bound on weight. Default is 10000. 
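+
+    Example (a hedged sketch; `u` and `K` stand for the mean-similarity
+    vector and kernel matrix, as tensors computed elsewhere in this module):
+    ```
+    opt = Optimizer(initial_weights=tf.zeros((3,)))
+    best_weights, best_objective = opt.optimize(u, K)
+    ```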
+ """ + + def __init__( + self, + initial_weights: Union[tf.Tensor, np.ndarray], + min_weight: float = 0, + max_weight: float = 10000 + ): + self.initial_weights = initial_weights + self.min_weight = min_weight + self.max_weight = max_weight + self.bounds = [(min_weight, max_weight)] * initial_weights.shape[0] + self.objective_fn = lambda w, u, K: - (w @ u - 0.5 * w @ K @ w) + + def optimize(self, u, K): + """ + Perform optimization to find the optimal values of the weight vector (w) + and the corresponding objective function value. + + Parameters + ---------- + u : Tensor + Mean similarity of each prototype. + K : Tensor + The kernel matrix. + + Returns + ------- + best_weights : Tensor + The optimal value of the weight vector (w). + best_objective : Tensor + The value of the objective function corresponding to the best_weights. + """ + + u = u.numpy() + K = K.numpy() + + result = minimize(self.objective_fn, self.initial_weights, args=(u, K), method='SLSQP', bounds=self.bounds, options={'disp': False}) + + # Get the best weights + best_weights = result.x + best_weights = tf.expand_dims(tf.convert_to_tensor(best_weights, dtype=tf.float32), axis=0) + + # Get the best objective + best_objective = -result.fun + best_objective = tf.expand_dims(tf.convert_to_tensor(best_objective, dtype=tf.float32), axis=0) + + assert tf.reduce_all(best_weights >= 0) + + return best_weights, best_objective + + +class ProtoDashSearch(ProtoGreedySearch): + """ + Protodash method for searching prototypes. + + References: + .. [#] `Karthik S. Gurumoorthy, Amit Dhurandhar, Guillermo Cecchi, + "ProtoDash: Fast Interpretable Prototype Selection" + `_ + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + For natural example-based methods it is the train dataset. + k + The number of examples to retrieve. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously. + It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + distance + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. + nb_prototypes : int + Number of prototypes to find. + find_prototypes_kwargs + Additional parameters passed to `find_prototypes` function. + """ + + def find_prototypes(self, nb_prototypes, kernel_type: str = 'local', kernel_fn: callable = rbf_kernel, use_optimizer: bool = False): + """ + Search for prototypes and their corresponding weights. + + Parameters + ---------- + nb_prototypes : int + Number of prototypes to find. + nb_prototypes : int + Number of prototypes to find. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function or kernel matrix, by default rbf_kernel. + use_optimizer : bool, optional + Flag indicating whether to use an optimizer for prototype selection, by default False. + + Returns + ------- + prototype_indices : Tensor + The indices of the selected prototypes. + prototype_weights : + The normalized weights of the selected prototypes. 
+ """ + + self.use_optimizer = use_optimizer + + return super().find_prototypes(nb_prototypes, kernel_type, kernel_fn) + + def update_selection_weights(self, selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective): + """ + Update the selection weights based on the given parameters. + Pursuant to Lemma IV.4: + If best_gradient ≤ 0, then + ζ(S∪{best_sample_index}) = ζ(S) and specifically, w_{best_sample_index} = 0. + Otherwise, the stationarity and complementary slackness KKT conditions + entails that w_{best_sample_index} = best_gradient / κ(best_sample_index, best_sample_index) + + Parameters + ---------- + selected_indices : Tensor + Indices corresponding to the selected prototypes. + selected_weights : Tensor + Weights corresponding to the selected prototypes. + selection_selection_kernel : Tensor + Kernel matrix computed from the selected prototypes. + best_indice : int + The index of the selected prototype with the highest objective function value. + best_weights : Tensor + The weights corresponding to the optimal solution of the objective function for each candidate. + best_objective : float + The computed objective function value. + + Returns + ------- + selection_weights : Tensor + Updated weights corresponding to the selected prototypes. + """ + + if best_objective <= 0: + selection_weights = tf.concat([selection_weights, [0]], axis=0) + else: + u = tf.expand_dims(tf.gather(self.col_means, selection_indices), axis=1) + K = selection_selection_kernel + + if self.use_optimizer: + initial_weights = tf.concat([selection_weights, [best_objective / tf.gather(self.diag, best_indice)]], axis=0) + opt = Optimizer(initial_weights) + selection_weights, _ = opt.optimize(u, K) + selection_weights = tf.squeeze(selection_weights, axis=0) + else: + # We added epsilon to the diagonal of K to ensure that K is invertible + K_inv = tf.linalg.inv(K + ProtoDashSearch.EPSILON * tf.eye(K.shape[-1])) + selection_weights = tf.linalg.matmul(K_inv, u) + selection_weights = tf.maximum(selection_weights, 0) + selection_weights = tf.squeeze(selection_weights, axis=1) + + return selection_weights + + def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): + """ + Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. + Calculate the gradient of l(w) = w^T * μ_p - 1/2 * w^T * K * w + w.r.t w, on the optimal weight point ζ^(S) + g = ∇l(ζ^(S)) = μ_p - K * ζ^(S) + g is computed for each candidate c + + Parameters + ---------- + selection_indices : Tensor + Indices corresponding to the selected prototypes. + selection_cases : Tensor + Cases corresponding to the selected prototypes. + selection_labels : Tensor + Labels corresponding to the selected prototypes. + selection_weights : Tensor + Weights corresponding to the selected prototypes. + selection_selection_kernel : Tensor + Kernel matrix computed from the selected prototypes. + candidates_indices : Tensor + Indices corresponding to the candidate prototypes. + candidates_cases : Tensor + Cases corresponding to the candidate prototypes. + candidates_labels : Tensor + Labels corresponding to the candidate prototypes. + candidates_selection_kernel : Tensor + Kernel matrix between the candidates and the selected prototypes. 
+ + Returns + ------- + objectives + Tensor that contains the computed objective values for each candidate. + objectives_weights + Tensor that contains the computed objective weights for each candidate. + """ + + u = tf.gather(self.col_means, candidates_indices) + + if selection_indices.shape[0] == 0: + # S = ∅ and ζ^(∅) = 0, g = ∇l(ζ^(∅)) = μ_p + objectives = u + else: + u = tf.expand_dims(u, axis=1) + K = candidates_selection_kernel + + objectives = u - tf.matmul(K, tf.expand_dims(selection_weights, axis=1)) + objectives = tf.squeeze(objectives, axis=1) + + return objectives, None diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py new file mode 100644 index 00000000..00128b5c --- /dev/null +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -0,0 +1,385 @@ +""" +ProtoGreedy search method in example-based module +""" + +import numpy as np +from sklearn.metrics.pairwise import rbf_kernel +import tensorflow as tf + +from ...commons import dataset_gather, sanitize_dataset +from ...types import Callable, List, Union, Optional, Tuple + +from .prototypes_search import PrototypesSearch +from ..projections import Projection + + +class ProtoGreedySearch(PrototypesSearch): + """ + ProtoGreedy method for searching prototypes. + + References: + .. [#] `Karthik S. Gurumoorthy, Amit Dhurandhar, Guillermo Cecchi, + "ProtoDash: Fast Interpretable Prototype Selection" + `_ + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + For natural example-based methods it is the train dataset. + k + The number of examples to retrieve. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously. + It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + distance + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. + nb_prototypes : int + Number of prototypes to find. + find_prototypes_kwargs + Additional parameters passed to `find_prototypes` function. + """ + + # Avoid zero division during procedure. (the value is not important, as if the denominator is + # zero, then the nominator will also be zero). + EPSILON = tf.constant(1e-6) + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + search_returns: Optional[Union[List[str], str]] = None, + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = "euclidean", + nb_prototypes: int = 1, + **find_prototypes_kwargs + ): # pylint: disable=R0801 + super().__init__( + cases_dataset, labels_dataset, k, search_returns, batch_size, distance, nb_prototypes, **find_prototypes_kwargs + ) + + def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): + """ + Compute the objective and its weights for each candidate. 
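+        For each candidate c, the kernel matrix K is extended to S ∪ {c},
+        the optimal non-negative weights are obtained in closed form as
+        w = max(K^{-1} * μ_p, 0), and the objective is evaluated as
+        F(S ∪ {c}) = w^T * μ_p - 1/2 * w^T * K * w,
+        where μ_p is the vector of kernel column means restricted to S ∪ {c}.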
+ + Parameters + ---------- + selection_indices : Tensor + Indices corresponding to the selected prototypes. + selection_cases : Tensor + Cases corresponding to the selected prototypes. + selection_labels : Tensor + Labels corresponding to the selected prototypes. + selection_weights : Tensor + Weights corresponding to the selected prototypes. + selection_selection_kernel : Tensor + Kernel matrix computed from the selected prototypes. + candidates_indices : Tensor + Indices corresponding to the candidate prototypes. + candidates_cases : Tensor + Cases corresponding to the candidate prototypes. + candidates_labels : Tensor + Labels corresponding to the candidate prototypes. + candidates_selection_kernel : Tensor + Kernel matrix between the candidates and the selected prototypes. + + Returns + ------- + objectives + Tensor that contains the computed objective values for each candidate. + objectives_weights + Tensor that contains the computed objective weights for each candidate. + """ + + nb_candidates = candidates_indices.shape[0] + nb_selection = selection_cases.shape[0] + + repeated_selection_indices = tf.tile(tf.expand_dims(selection_indices, 0), [nb_candidates, 1]) + repeated_selection_candidates_indices = tf.concat([repeated_selection_indices, tf.expand_dims(candidates_indices, 1)], axis=1) + u = tf.expand_dims(tf.gather(self.col_means, repeated_selection_candidates_indices), axis=2) + + if nb_selection == 0: + K = tf.expand_dims(tf.expand_dims(tf.gather(self.diag, candidates_indices), axis=-1), axis=-1) + else: + repeated_selection_selection_kernel = tf.tile(tf.expand_dims(selection_selection_kernel, 0), [nb_candidates, 1, 1]) + repeated_selection_selection_kernel = tf.pad(repeated_selection_selection_kernel, [[0, 0], [0, 1], [0, 1]]) + + candidates_diag = tf.expand_dims(tf.expand_dims(tf.gather(self.diag, candidates_indices), axis=-1), axis=-1) + candidates_diag = tf.pad(candidates_diag, [[0, 0], [nb_selection, 0], [nb_selection, 0]]) + + candidates_selection_kernel = tf.expand_dims(candidates_selection_kernel, axis=-1) + candidates_selection_kernel = tf.pad(candidates_selection_kernel, [[0, 0], [0, 1], [nb_selection, 0]]) + + K = repeated_selection_selection_kernel + candidates_diag + candidates_selection_kernel + tf.transpose(candidates_selection_kernel, [0, 2, 1]) + + # Compute the objective weights for each candidate in the batch + K_inv = tf.linalg.inv(K + ProtoGreedySearch.EPSILON * tf.eye(K.shape[-1])) + objectives_weights = tf.matmul(K_inv, u) + objectives_weights = tf.maximum(objectives_weights, 0) + + # Compute the objective for each candidate in the batch + objectives = tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), u) - 0.5 * tf.matmul(tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), K), objectives_weights) + objectives = tf.squeeze(objectives, axis=[1,2]) + + return objectives, objectives_weights + + def update_selection_weights(self, selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective): + """ + Update the selection weights based on the optimization results. + + Parameters + ---------- + selected_indices : Tensor + Indices corresponding to the selected prototypes. + selected_weights : Tensor + Weights corresponding to the selected prototypes. + selection_selection_kernel : Tensor + Kernel matrix computed from the selected prototypes. + best_indice : int + The index of the selected prototype with the highest objective function value. 
+        best_weights : Tensor
+            The weights corresponding to the optimal solution of the objective function for each candidate.
+        best_objective : float
+            The computed objective function value.
+
+        Returns
+        -------
+        selection_weights : Tensor
+            Updated weights corresponding to the selected prototypes.
+        """
+
+        selection_weights = best_weights
+
+        return selection_weights
+
+    def compute_kernel_attributes(self, kernel_type: str = 'local', kernel_fn: callable = rbf_kernel):
+        """
+        Compute the kernel-related attributes of the class: the wrapped
+        `kernel_fn`, and the column sums, column means, and diagonal of the
+        kernel matrix of the dataset.
+
+        Parameters
+        ----------
+        kernel_type : str, optional
+            The kernel type. It can be 'local' or 'global', by default 'local'.
+            When it is local, the distances are calculated only within the classes.
+        kernel_fn : Callable, optional
+            Kernel function or kernel matrix, by default rbf_kernel.
+        """
+        if kernel_type in ['local', 'global']:
+            self.kernel_type = kernel_type
+        else:
+            raise AttributeError(
+                "The kernel_type parameter is expected to be in ['local', 'global'] "
+                + f"but {kernel_type} was received."
+            )
+
+        if hasattr(kernel_fn, "__call__"):
+            def custom_kernel_fn(x1, x2, y1, y2):
+                if self.kernel_type == 'global':
+                    kernel_matrix = kernel_fn(x1, x2)
+                    if isinstance(kernel_matrix, np.ndarray):
+                        kernel_matrix = tf.convert_to_tensor(kernel_matrix)
+                else:
+                    # In the case of a local kernel, calculations are limited to within the class.
+                    # Across different classes, the kernel values are set to 0.
+                    kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32)
+                    y_intersect = np.intersect1d(y1, y2)
+                    for i in range(y_intersect.shape[0]):
+                        y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0]
+                        y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0]
+                        sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices))
+                        kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix
+                    kernel_matrix = tf.convert_to_tensor(kernel_matrix)
+                return kernel_matrix
+
+            self.kernel_fn = custom_kernel_fn
+        else:
+            raise AttributeError(
+                "The kernel_fn parameter is expected to be a Callable "
+                + f"but {kernel_fn} was received."
+            )
+
+        # TODO: for local explanation add the ability to compute distance_fn based on the kernel
+
+        # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset.
+        # We take advantage of the symmetry of this matrix to traverse only its lower triangle.
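+        # each pair of batches is visited once (row index >= column index):
+        # the block's column sums feed the current column batch, while its row
+        # sums are stored and added back when that row batch becomes the
+        # column batch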
+ col_sums = [] + diag = [] + row_sums = [] + + for batch_col_index, (batch_col_cases, batch_col_labels) in enumerate( + zip(self.cases_dataset, self.labels_dataset) + ): + batch_col_sums = tf.zeros((batch_col_cases.shape[0])) + + for batch_row_index, (batch_row_cases, batch_row_labels) in enumerate( + zip(self.cases_dataset, self.labels_dataset) + ): + if batch_row_index < batch_col_index: + continue + + batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases, batch_row_labels, batch_col_labels) + + batch_col_sums = batch_col_sums + tf.reduce_sum(batch_kernel, axis=0) + + if batch_col_index == batch_row_index: + if batch_col_index != 0: + batch_col_sums = batch_col_sums + row_sums[batch_row_index] + + diag.append(tf.linalg.diag_part(batch_kernel)) + + if batch_col_index == 0: + if batch_row_index == 0: + row_sums.append(None) + else: + row_sums.append(tf.reduce_sum(batch_kernel, axis=1)) + else: + row_sums[batch_row_index] += tf.reduce_sum(batch_kernel, axis=1) + + col_sums.append(batch_col_sums) + + self.col_sums = tf.concat(col_sums, axis=0) + self.n = self.col_sums.shape[0] + self.col_means = self.col_sums / self.n + self.diag = tf.concat(diag, axis=0) + self.nb_features = batch_col_cases.shape[1] + + def find_prototypes(self, nb_prototypes, kernel_type: str = 'local', kernel_fn: callable = rbf_kernel): + """ + Search for prototypes and their corresponding weights. + + Parameters + ---------- + nb_prototypes : int + Number of prototypes to find. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function or kernel matrix, by default rbf_kernel. + + Returns + ------- + prototype_indices : Tensor + The indices of the selected prototypes. + prototype_weights : + The normalized weights of the selected prototypes. + """ + + self.compute_kernel_attributes(kernel_type, kernel_fn) + + # Tensors to store selected indices and their corresponding cases, labels and weights. + selection_indices = tf.constant([], dtype=tf.int32) + selection_cases = tf.zeros((0, self.nb_features), dtype=tf.float32) + selection_labels = tf.constant([], dtype=tf.int32) + selection_weights = tf.constant([], dtype=tf.float32) + # Tensor to store the all_candidates-selection kernel of the previous iteration. + all_candidates_selection_kernel = tf.zeros((self.n, 0), dtype=tf.float32) + # Tensor to store the selection-selection kernel. + selection_selection_kernel = None + + k = 0 + while k < nb_prototypes: + + nb_selection = selection_cases.shape[0] + + # Tensor to store the all_candidates-last_selected kernel + if nb_selection !=0: + all_candidates_last_selected_kernel = tf.zeros((self.n), dtype=tf.float32) + + best_objective = None + best_indice = None + best_case = None + best_label = None + best_weights = None + + for batch_index, (cases, labels) in enumerate( + zip(self.cases_dataset, self.labels_dataset) + ): + batch_inside_indices = tf.range(cases.shape[0], dtype=tf.int32) + batch_indices = batch_index * self.batch_size + batch_inside_indices + + # Filter the batch to keep only candidate indices. 
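+                # (prototypes already selected are excluded via set difference)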
+ if nb_selection == 0: + candidates_indices = batch_indices + else: + candidates_indices = tf.convert_to_tensor(np.setdiff1d(batch_indices, selection_indices)) + + nb_candidates = candidates_indices.shape[0] + + if nb_candidates == 0: + continue + + candidates_inside_indices = candidates_indices % self.batch_size + candidates_cases = tf.gather(cases, candidates_inside_indices) + candidates_labels = tf.gather(labels, candidates_inside_indices) + + # Compute the candidates-selection kernel for the batch + if nb_selection == 0: + candidates_selection_kernel = None + else: + candidates_last_selected_kernel = self.kernel_fn(candidates_cases, selection_cases[-1:, :], candidates_labels, selection_labels[-1:]) + candidates_selection_kernel = tf.concat([tf.gather(all_candidates_selection_kernel, candidates_indices, axis=0), candidates_last_selected_kernel], axis=1) + all_candidates_last_selected_kernel = tf.tensor_scatter_nd_update(all_candidates_last_selected_kernel, tf.expand_dims(candidates_indices, axis=1), tf.squeeze(candidates_last_selected_kernel, axis=1)) + + # Compute the objectives for the batch + objectives, objectives_weights = self.compute_objectives(selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel) + + # Select the best objective in the batch + objectives_argmax = tf.argmax(objectives) + + if (best_objective is None) or (tf.gather(objectives, objectives_argmax) > best_objective): + best_objective = tf.gather(objectives, objectives_argmax) + best_indice = tf.squeeze(tf.gather(candidates_indices, objectives_argmax)) + best_case = tf.gather(candidates_cases, objectives_argmax) + best_label = tf.gather(candidates_labels, objectives_argmax) + if objectives_weights is not None: + best_weights = tf.squeeze(tf.gather(objectives_weights, objectives_argmax)) + + # Update the all_candidates-selection kernel + if nb_selection != 0: + all_candidates_selection_kernel = tf.concat([all_candidates_selection_kernel, tf.expand_dims(all_candidates_last_selected_kernel, axis=1)], axis=1) + + # Update the selection-selection kernel + if nb_selection == 0: + selection_selection_kernel = tf.gather(self.diag, [[best_indice]]) + else: + selection_selection_kernel = tf.pad(selection_selection_kernel, [[0, 1], [0, 1]]) + + best_candidate_selection_kernel = tf.gather(all_candidates_selection_kernel, [best_indice], axis=0) + best_candidate_selection_kernel = tf.pad(best_candidate_selection_kernel, [[nb_selection, 0], [0, 1]]) + + best_candidate_diag = tf.expand_dims(tf.gather(self.diag, [best_indice]), axis=-1) + best_candidate_diag = tf.pad(best_candidate_diag, [[nb_selection, 0], [nb_selection, 0]]) + + selection_selection_kernel = selection_selection_kernel + best_candidate_diag + best_candidate_selection_kernel + tf.transpose(best_candidate_selection_kernel) + + # Update selection indices, cases and labels + selection_indices = tf.concat([selection_indices, [best_indice]], axis=0) + selection_cases = tf.concat([selection_cases, [best_case]], axis=0) + selection_labels = tf.concat([selection_labels, [best_label]], axis=0) + + # Update selection weights + selection_weights = self.update_selection_weights(selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective) + + k += 1 + + prototype_indices = selection_indices + prototype_weights = selection_weights + + # Normalize the weights + prototype_weights = prototype_weights / 
tf.reduce_sum(prototype_weights) + + return prototype_indices, prototype_weights \ No newline at end of file diff --git a/xplique/example_based/search_methods/prototypes_search.py b/xplique/example_based/search_methods/prototypes_search.py new file mode 100644 index 00000000..6db01b3c --- /dev/null +++ b/xplique/example_based/search_methods/prototypes_search.py @@ -0,0 +1,137 @@ +""" +Prototypes search method in example-based module +""" + +from abc import ABC, abstractmethod +import numpy as np +from sklearn.metrics.pairwise import rbf_kernel +import tensorflow as tf + +from ...commons import dataset_gather +from ...types import Callable, List, Union, Optional, Tuple + +from .base import BaseSearchMethod +from ..projections import Projection + + +class PrototypesSearch(BaseSearchMethod): + """ + Prototypes search method to find prototypes and the examples closest to these prototypes. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + For natural example-based methods it is the train dataset. + k + The number of examples to retrieve. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously. + It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + distance + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. + nb_prototypes : int + Number of prototypes to find. + find_prototypes_kwargs + Additional parameters passed to `find_prototypes` function. + """ + + # Avoid zero division during procedure. (the value is not important, as if the denominator is + # zero, then the nominator will also be zero). + EPSILON = tf.constant(1e-6) + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + search_returns: Optional[Union[List[str], str]] = None, + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = "euclidean", + nb_prototypes: int = 1, + **find_prototypes_kwargs, + ): # pylint: disable=R0801 + super().__init__( + cases_dataset, labels_dataset, k, search_returns, batch_size + ) + + if hasattr(distance, "__call__"): + self.distance_fn = distance + elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( + distance, int + ): + self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance) + else: + raise AttributeError( + "The distance parameter is expected to be either a Callable or in" + + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ", + +f"but {distance} was received.", + ) + + self.prototype_indices, self.prototype_weights = self.find_prototypes(nb_prototypes, **find_prototypes_kwargs) + + @abstractmethod + def find_prototypes(self, nb_prototypes: int, **find_prototypes_kwargs): + """ + Search for prototypes and their corresponding weights. + + Parameters + ---------- + nb_prototypes : int + Number of prototypes to find. + + find_prototypes_kwargs + Additional parameters passed to `find_prototypes` function. + + Returns + ------- + prototype_indices : Tensor + The indices of the selected prototypes. 
+        prototype_weights :
+            The normalized weights of the selected prototypes.
+        """
+        raise NotImplementedError()
+
+    def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]):
+        """
+        Search the samples to return as examples. Called by the explain methods.
+        It may also return the indices corresponding to the samples,
+        based on `return_indices` value.
+
+        Parameters
+        ----------
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Assumed to have been already projected.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+        """
+        # TODO: Find examples: here we provide a local explanation.
+        # Find the nearest prototypes to inputs
+        # we use self.distance_fn and self.prototype_indices.
+        examples_indices = None
+        examples_distances = None
+
+        # Set values in return dict
+        return_dict = {}
+        if "examples" in self.returns:
+            return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices)
+            if "include_inputs" in self.returns:
+                inputs = tf.expand_dims(inputs, axis=1)
+                return_dict["examples"] = tf.concat(
+                    [inputs, return_dict["examples"]], axis=1
+                )
+        if "indices" in self.returns:
+            return_dict["indices"] = examples_indices
+        if "distances" in self.returns:
+            return_dict["distances"] = examples_distances
+
+        # Return a dict only when several variables are returned
+        if len(return_dict) == 1:
+            return list(return_dict.values())[0]
+        return return_dict
From a86fd55e3857d534e131d6fc821721cabde0c856 Mon Sep 17 00:00:00 2001
From: Mohamed Chafik Bakey
Date: Thu, 21 Mar 2024 13:13:00 +0100
Subject: [PATCH 035/138] add Prototypes fix up

---
 tests/example_based/test_prototypes.py        |  2 +-
 .../search_methods/proto_greedy_search.py     | 15 ---------------
 2 files changed, 1 insertion(+), 16 deletions(-)

diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py
index 39ba2425..68ae3e59 100644
--- a/tests/example_based/test_prototypes.py
+++ b/tests/example_based/test_prototypes.py
@@ -243,6 +243,6 @@ def custom_kernel(x,y=None):
 # # Visualize all prototypes
 # plot(prototypes, prototype_weights, 'mmd_critic')
 
-test_proto_greedy_basic()
+# test_proto_greedy_basic()
 # test_proto_dash_basic()
 # test_mmd_critic_basic()
diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py
index 00128b5c..a383b33a 100644
--- a/xplique/example_based/search_methods/proto_greedy_search.py
+++ b/xplique/example_based/search_methods/proto_greedy_search.py
@@ -50,21 +50,6 @@ class ProtoGreedySearch(PrototypesSearch):
     # zero, then the nominator will also be zero).
     EPSILON = tf.constant(1e-6)
 
-    def __init__(
-        self,
-        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
-        k: int = 1,
-        search_returns: Optional[Union[List[str], str]] = None,
-        batch_size: Optional[int] = 32,
-        distance: Union[int, str, Callable] = "euclidean",
-        nb_prototypes: int = 1,
-        **find_prototypes_kwargs
-    ):  # pylint: disable=R0801
-        super().__init__(
-            cases_dataset, labels_dataset, k, search_returns, batch_size, distance, nb_prototypes, **find_prototypes_kwargs
-        )
-
     def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel):
         """
         Compute the objective and its weights for each candidate.
From d70557330fb6b9e1f3bedbeb4cc9e9692fc75d9f Mon Sep 17 00:00:00 2001 From: Mohamed Chafik Bakey Date: Thu, 4 Apr 2024 12:48:02 +0200 Subject: [PATCH 036/138] add Prototypes fix up --- tests/example_based/test_prototypes.py | 50 +-- tests/utils.py | 28 +- xplique/example_based/__init__.py | 3 + xplique/example_based/base_example_method.py | 1 - xplique/example_based/mmd_critic.py | 100 ++++++ xplique/example_based/proto_dash.py | 100 ++++++ xplique/example_based/proto_greedy.py | 100 ++++++ xplique/example_based/prototypes.py | 154 +++++++++- .../example_based/search_methods/__init__.py | 1 - xplique/example_based/search_methods/base.py | 2 - .../search_methods/mmd_critic_search.py | 17 +- .../search_methods/proto_dash_search.py | 63 ++-- .../search_methods/proto_greedy_search.py | 289 +++++++++++------- .../search_methods/prototypes_search.py | 137 --------- 14 files changed, 728 insertions(+), 317 deletions(-) create mode 100644 xplique/example_based/mmd_critic.py create mode 100644 xplique/example_based/proto_dash.py create mode 100644 xplique/example_based/proto_greedy.py delete mode 100644 xplique/example_based/search_methods/prototypes_search.py diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index 68ae3e59..720c47ee 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -17,11 +17,10 @@ from xplique.commons import sanitize_dataset, are_dataset_first_elems_equal from xplique.types import Union -from xplique.example_based import Prototypes +from xplique.example_based import Prototypes, ProtoGreedy, ProtoDash, MMDCritic from xplique.example_based.projections import Projection, LatentSpaceProjection -from xplique.example_based.search_methods import ProtoGreedySearch, ProtoDashSearch, MMDCriticSearch -from tests.utils import almost_equal, get_Gaussian_Data, load_data, plot +from tests.utils import almost_equal, get_Gaussian_Data, load_data, plot, plot_local_explanation def test_proto_greedy_basic(): @@ -52,10 +51,9 @@ def custom_kernel(x,y=None): kernel_type = "global" # Method initialization - method = Prototypes( + method = ProtoGreedy( cases_dataset=x_train, labels_dataset=y_train, - search_method=ProtoGreedySearch, k=k, projection=identity_projection, batch_size=32, @@ -65,8 +63,8 @@ def custom_kernel(x,y=None): kernel_fn=kernel_fn, ) - # Generate explanation - prototype_indices, prototype_weights = method.get_prototypes() + # Generate global explanation + prototype_indices, prototype_weights = method.get_global_prototypes() prototypes = tf.gather(x_train, prototype_indices) prototype_labels = tf.gather(y_train, prototype_indices) @@ -97,6 +95,12 @@ def custom_kernel(x,y=None): # # Visualize all prototypes # plot(prototypes, prototype_weights, 'proto_greedy') + # # Generate local explanation + # examples = method.explain(x_test) + + # # Visualize local explanation + # plot_local_explanation(examples, x_test, 'proto_greedy') + def test_proto_dash_basic(): """ Test the SimilarExamples with an identity projection. 
@@ -125,10 +129,9 @@ def custom_kernel(x,y=None): kernel_type = "global" # Method initialization - method = Prototypes( + method = ProtoDash( cases_dataset=x_train, labels_dataset=y_train, - search_method=ProtoDashSearch, k=k, projection=identity_projection, batch_size=32, @@ -138,8 +141,8 @@ def custom_kernel(x,y=None): kernel_fn=kernel_fn, ) - # Generate explanation - prototype_indices, prototype_weights = method.get_prototypes() + # Generate global explanation + prototype_indices, prototype_weights = method.get_global_prototypes() prototypes = tf.gather(x_train, prototype_indices) prototype_labels = tf.gather(y_train, prototype_indices) @@ -170,6 +173,12 @@ def custom_kernel(x,y=None): # # Visualize all prototypes # plot(prototypes, prototype_weights, 'proto_dash') + # # Generate local explanation + # examples = method.explain(x_test) + + # # Visualize local explanation + # plot_local_explanation(examples, x_test, 'proto_dash') + def test_mmd_critic_basic(): """ Test the SimilarExamples with an identity projection. @@ -198,10 +207,9 @@ def custom_kernel(x,y=None): kernel_type = "global" # Method initialization - method = Prototypes( + method = MMDCritic( cases_dataset=x_train, labels_dataset=y_train, - search_method=MMDCriticSearch, k=k, projection=identity_projection, batch_size=32, @@ -211,8 +219,8 @@ def custom_kernel(x,y=None): kernel_fn=kernel_fn, ) - # Generate explanation - prototype_indices, prototype_weights = method.get_prototypes() + # Generate global explanation + prototype_indices, prototype_weights = method.get_global_prototypes() prototypes = tf.gather(x_train, prototype_indices) prototype_labels = tf.gather(y_train, prototype_indices) @@ -243,6 +251,12 @@ def custom_kernel(x,y=None): # # Visualize all prototypes # plot(prototypes, prototype_weights, 'mmd_critic') -# test_proto_greedy_basic() -# test_proto_dash_basic() -# test_mmd_critic_basic() + # # Generate local explanation + # examples = method.explain(x_test) + + # # Visualize local explanation + # plot_local_explanation(examples, x_test, 'mmd_critic') + +test_proto_greedy_basic() +test_proto_dash_basic() +test_mmd_critic_basic() diff --git a/tests/utils.py b/tests/utils.py index 4219cd1a..280d7a5f 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -298,4 +298,30 @@ def plot(prototypes_sorted, prototype_weights_sorted, extension): axis.set_title("{:.2f}".format(prototype_weights_sorted[i].numpy())) axis.axis('off') # fig.suptitle(f'{k} Prototypes') - plt.savefig(output_dir / f'{k}_prototypes_{extension}.png') \ No newline at end of file + plt.savefig(output_dir / f'{k}_prototypes_{extension}.png') + +def plot_local_explanation(examples, x_test, extension): + + output_dir = Path('tests/example_based/tmp') + k = examples.shape[1] + + # Visualize + num_cols = k+1 + num_rows = x_test.shape[0] + fig, axes = plt.subplots(num_rows, num_cols, figsize=(6, num_rows * 0.75)) + # Adjust the spacing between lines + plt.subplots_adjust(hspace=1) + axes[0,0].set_title("x_test") + for i in range(examples.shape[0]): + axes[i,0].imshow(x_test[i].numpy().reshape(16, 16), cmap='gray') + axes[i,0].axis('off') + for j in range(examples.shape[1]): + axe = axes[i,j+1] + axe.imshow(examples[i,j].numpy().reshape(16, 16), cmap='gray') + # axe.set_title("{:.2f}".format(prototype_distances[i,j])) + if i == 0: + axe.set_title("prototype_{}".format(j + 1)) + axe.axis('off') + + fig.suptitle(f'{k}-nearst prototypes') + plt.savefig(output_dir / f'{k}_nearest_prototypes_{extension}.png') \ No newline at end of file diff --git 
a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index 0cdb3d2f..a30a789a 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -5,3 +5,6 @@ from .cole import Cole from .similar_examples import SimilarExamples from .prototypes import Prototypes +from .proto_greedy import ProtoGreedy +from .proto_dash import ProtoDash +from .mmd_critic import MMDCritic diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 31ea4f89..2c4b99df 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -120,7 +120,6 @@ def __init__( # initiate search_method self.search_method = search_method( cases_dataset=projected_cases_dataset, - labels_dataset=labels_dataset, k=k, batch_size=batch_size, **search_method_kwargs, diff --git a/xplique/example_based/mmd_critic.py b/xplique/example_based/mmd_critic.py new file mode 100644 index 00000000..a2ccfb47 --- /dev/null +++ b/xplique/example_based/mmd_critic.py @@ -0,0 +1,100 @@ +""" +MMDCritic method for searching prototypes +""" + +import math + +import time + +import tensorflow as tf +import numpy as np + +from ..types import Callable, Dict, List, Optional, Type, Union + +from ..commons import sanitize_inputs_targets +from ..commons import sanitize_dataset, dataset_gather +from .search_methods import MMDCriticSearch +from .projections import Projection +from .prototypes import Prototypes + +from .search_methods.base import _sanitize_returns + + +class MMDCritic(Prototypes): + """ + MMDCritic method for searching prototypes. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve. + projection + Projection or Callable that project samples from the input space to the search space. + The search space should be a space where distance make sense for the model. + It should not be `None`, otherwise, + all examples could be computed only with the `search_method`. + + Example of Callable: + ``` + def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + ''' + Example of projection, + inputs are the elements to project. + targets are optional parameters to orientated the projection. 
+ ''' + projected_inputs = # do some magic on inputs, it should use the model. + return projected_inputs + ``` + case_returns + String or list of string with the elements to return in `self.explain()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously for projection and search. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + search_method_kwargs + Parameters to be passed at the construction of the `search_method`. + """ + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + **search_method_kwargs, + ): + # the only difference with parent is that the search method is always MMDCriticSearch + search_method = MMDCriticSearch + + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + search_method=search_method, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + **search_method_kwargs, + ) + diff --git a/xplique/example_based/proto_dash.py b/xplique/example_based/proto_dash.py new file mode 100644 index 00000000..475e138b --- /dev/null +++ b/xplique/example_based/proto_dash.py @@ -0,0 +1,100 @@ +""" +ProtoDash method for searching prototypes +""" + +import math + +import time + +import tensorflow as tf +import numpy as np + +from ..types import Callable, Dict, List, Optional, Type, Union + +from ..commons import sanitize_inputs_targets +from ..commons import sanitize_dataset, dataset_gather +from .search_methods import ProtoDashSearch +from .projections import Projection +from .prototypes import Prototypes + +from .search_methods.base import _sanitize_returns + + +class ProtoDash(Prototypes): + """ + ProtoDash method for searching prototypes. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve. + projection + Projection or Callable that project samples from the input space to the search space. 
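The `custom_projection` placeholder in these docstrings can be made concrete. A common choice is to project cases into a classifier's penultimate-layer space, which is what xplique's `LatentSpaceProjection` automates; below is a minimal hand-rolled sketch, assuming an arbitrary Keras model (the toy model here is untrained and purely illustrative):

```
import tensorflow as tf

# toy classifier; any trained Keras model would do
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(3, name="logits"),
])

# sub-model stopping at the penultimate layer
latent_model = tf.keras.Model(model.inputs, model.layers[-2].output)

def custom_projection(inputs, targets=None):
    # targets may orient the projection (e.g. attribution weighting); unused here
    return latent_model(inputs)

projected = custom_projection(tf.random.normal((5, 8)))  # shape (5, 16)
```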
+ The search space should be a space where distance make sense for the model. + It should not be `None`, otherwise, + all examples could be computed only with the `search_method`. + + Example of Callable: + ``` + def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + ''' + Example of projection, + inputs are the elements to project. + targets are optional parameters to orientated the projection. + ''' + projected_inputs = # do some magic on inputs, it should use the model. + return projected_inputs + ``` + case_returns + String or list of string with the elements to return in `self.explain()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously for projection and search. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + search_method_kwargs + Parameters to be passed at the construction of the `search_method`. + """ + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + **search_method_kwargs, + ): + # the only difference with parent is that the search method is always ProtoDashSearch + search_method = ProtoDashSearch + + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + search_method=search_method, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + **search_method_kwargs, + ) + diff --git a/xplique/example_based/proto_greedy.py b/xplique/example_based/proto_greedy.py new file mode 100644 index 00000000..2c43565b --- /dev/null +++ b/xplique/example_based/proto_greedy.py @@ -0,0 +1,100 @@ +""" +ProtoGreedy method for searching prototypes +""" + +import math + +import time + +import tensorflow as tf +import numpy as np + +from ..types import Callable, Dict, List, Optional, Type, Union + +from ..commons import sanitize_inputs_targets +from ..commons import sanitize_dataset, dataset_gather +from .search_methods import ProtoGreedySearch +from .projections import Projection +from .prototypes import Prototypes + +from .search_methods.base import _sanitize_returns + + +class ProtoGreedy(Prototypes): + """ + ProtoGreedy method for searching prototypes. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets associated to the cases_dataset for dataset projection. See `projection` for detail. 
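The `tf.data.Dataset` caveat repeated in these docstrings is easy to trip over: `shuffle()` reshuffles at every iteration by default, so indices computed by the search would drift away from the cases they point to. A sketch of a safe construction from in-memory tensors; the two-column `(cases, labels)` dataset can also be passed as-is, since the base class splits it with the same `map` calls:

```
import tensorflow as tf

x = tf.random.normal((100, 8))
y = tf.random.uniform((100,), maxval=3, dtype=tf.int32)

# batched, and shuffled only once: reshuffle_each_iteration=False keeps the
# order stable across the multiple passes the search methods make
ds = (tf.data.Dataset.from_tensor_slices((x, y))
      .shuffle(100, reshuffle_each_iteration=False)
      .batch(32))

# split into aligned single-column datasets, as the base class does internally
cases_dataset = ds.map(lambda inputs, labels: inputs)
labels_dataset = ds.map(lambda inputs, labels: labels)
```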
+ `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve. + projection + Projection or Callable that project samples from the input space to the search space. + The search space should be a space where distance make sense for the model. + It should not be `None`, otherwise, + all examples could be computed only with the `search_method`. + + Example of Callable: + ``` + def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + ''' + Example of projection, + inputs are the elements to project. + targets are optional parameters to orientated the projection. + ''' + projected_inputs = # do some magic on inputs, it should use the model. + return projected_inputs + ``` + case_returns + String or list of string with the elements to return in `self.explain()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously for projection and search. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + search_method_kwargs + Parameters to be passed at the construction of the `search_method`. + """ + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + **search_method_kwargs, + ): + # the only difference with parent is that the search method is always ProtoGreedySearch + search_method = ProtoGreedySearch + + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + search_method=search_method, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + **search_method_kwargs, + ) + diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 9df3081f..906b6bca 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -13,7 +13,7 @@ from ..commons import sanitize_inputs_targets from ..commons import sanitize_dataset, dataset_gather -from .search_methods import ProtoGreedySearch, PrototypesSearch +from .search_methods import ProtoGreedySearch from .projections import Projection from .base_example_method import BaseExampleMethod @@ -68,12 +68,8 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar batch_size Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - distance - Distance for the knn search method. - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + search_method_kwargs + Parameters to be passed at the construction of the `search_method`. 
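The three wrappers above differ from `Prototypes` only in pinning their search method, so adding a new prototype search follows the same recipe. A sketch of the pattern, where `MySearch` is a hypothetical `ProtoGreedySearch` subclass, not part of the library:

```
from xplique.example_based.prototypes import Prototypes
from xplique.example_based.search_methods import ProtoGreedySearch

class MySearch(ProtoGreedySearch):
    # hypothetical: override compute_objectives() / update_selection_weights() here
    pass

class MyPrototypes(Prototypes):
    def __init__(self, cases_dataset, labels_dataset=None, targets_dataset=None,
                 k=1, projection=None, case_returns="examples", batch_size=32,
                 **search_method_kwargs):
        # the only difference with the parent is the pinned search method
        super().__init__(
            cases_dataset=cases_dataset,
            labels_dataset=labels_dataset,
            targets_dataset=targets_dataset,
            search_method=MySearch,
            k=k,
            projection=projection,
            case_returns=case_returns,
            batch_size=batch_size,
            **search_method_kwargs,
        )
```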
""" def __init__( @@ -81,28 +77,58 @@ def __init__( cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - search_method: Type[PrototypesSearch] = ProtoGreedySearch, + search_method: Type[ProtoGreedySearch] = ProtoGreedySearch, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, **search_method_kwargs, ): - super().__init__( - cases_dataset=cases_dataset, + assert ( + projection is not None + ), "`BaseExampleMethod` without `projection` is a `BaseSearchMethod`." + + # set attributes + batch_size = self.__initialize_cases_dataset( + cases_dataset, labels_dataset, targets_dataset, batch_size + ) + + self.k = k + self.set_returns(case_returns) + + assert hasattr(projection, "__call__"), "projection should be a callable." + + # check projection type + if isinstance(projection, Projection): + self.projection = projection + elif hasattr(projection, "__call__"): + self.projection = Projection(get_weights=None, space_projection=projection) + else: + raise AttributeError( + "projection should be a `Projection` or a `Callable`, not a" + + f"{type(projection)}" + ) + + # project dataset + projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, + self.targets_dataset) + + # set `search_returns` if not provided and overwrite it otherwise + search_method_kwargs["search_returns"] = ["indices", "distances"] + + # initiate search_method + self.search_method = search_method( + cases_dataset=projected_cases_dataset, labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - search_method=search_method, k=k, - projection=projection, - case_returns=case_returns, batch_size=batch_size, **search_method_kwargs, ) - def get_prototypes(self): + def get_global_prototypes(self): """ - Return the prototypes computed by the search method. + Return all the prototypes computed by the search method, + which consist of a global explanation of the dataset. Returns: prototype_indices : Tensor @@ -111,4 +137,100 @@ def get_prototypes(self): prototype weights. """ return self.search_method.prototype_indices, self.search_method.prototype_weights + + def __initialize_cases_dataset( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], + targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], + batch_size: Optional[int], + ) -> int: + """ + Factorization of `__init__()` method for dataset related attributes. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + labels_dataset + Labels associated to the examples in the dataset. + Indices should match with cases_dataset. + targets_dataset + Targets associated to the cases_dataset for dataset projection. + See `projection` for detail. + batch_size + Number of sample treated simultaneously when using the datasets. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + + Returns + ------- + batch_size + Number of sample treated simultaneously when using the datasets. + Extracted from the datasets in case they are `tf.data.Dataset`. + Otherwise, the input value. 
+ """ + # at least one dataset provided + if isinstance(cases_dataset, tf.data.Dataset): + # set batch size (ignore provided argument) and cardinality + if isinstance(cases_dataset.element_spec, tuple): + batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy() + else: + batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() + + cardinality = cases_dataset.cardinality().numpy() + else: + # if case_dataset is not a `tf.data.Dataset`, then neither should the other. + assert not isinstance(labels_dataset, tf.data.Dataset) + assert not isinstance(targets_dataset, tf.data.Dataset) + # set batch size and cardinality + batch_size = min(batch_size, len(cases_dataset)) + cardinality = math.ceil(len(cases_dataset) / batch_size) + + # verify cardinality and create datasets from the tensors + self.cases_dataset = sanitize_dataset( + cases_dataset, batch_size, cardinality + ) + self.labels_dataset = sanitize_dataset( + labels_dataset, batch_size, cardinality + ) + self.targets_dataset = sanitize_dataset( + targets_dataset, batch_size, cardinality + ) + + # if the provided `cases_dataset` has several columns + if isinstance(self.cases_dataset.element_spec, tuple): + # switch case on the number of columns of `cases_dataset` + if len(self.cases_dataset.element_spec) == 2: + assert self.labels_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels." + + "Hence, `labels_dataset` should be empty." + ) + self.labels_dataset = self.cases_dataset.map(lambda x, y: y) + self.cases_dataset = self.cases_dataset.map(lambda x, y: x) + + elif len(self.cases_dataset.element_spec) == 3: + assert self.labels_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels." + + "Hence, `labels_dataset` should be empty." + ) + assert self.targets_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels." + + "Hence, `labels_dataset` should be empty." + ) + self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) + self.labels_dataset = self.cases_dataset.map(lambda x, y, t: y) + self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) + else: + raise AttributeError( + "`cases_dataset` cannot possess more than 3 columns," + + f"{len(self.cases_dataset.element_spec)} were detected." 
+ ) + + self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) + if self.labels_dataset is not None: + self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) + if self.targets_dataset is not None: + self.targets_dataset = self.targets_dataset.prefetch(tf.data.AUTOTUNE) + + return batch_size diff --git a/xplique/example_based/search_methods/__init__.py b/xplique/example_based/search_methods/__init__.py index d54e85a4..351e7ba9 100644 --- a/xplique/example_based/search_methods/__init__.py +++ b/xplique/example_based/search_methods/__init__.py @@ -6,7 +6,6 @@ # from .sklearn_knn import SklearnKNN from .knn import KNN -from .prototypes_search import PrototypesSearch from .proto_greedy_search import ProtoGreedySearch from .proto_dash_search import ProtoDashSearch from .mmd_critic_search import MMDCriticSearch diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 5dde1a9c..1c7c0f1b 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -80,7 +80,6 @@ class BaseSearchMethod(ABC): def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, k: int = 1, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, @@ -93,7 +92,6 @@ def __init__( self.batch_size = batch_size self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size) - self.labels_dataset = sanitize_dataset(labels_dataset, self.batch_size) self.set_k(k) self.set_returns(search_returns) diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py index cfe70941..87ad3188 100644 --- a/xplique/example_based/search_methods/mmd_critic_search.py +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -14,14 +14,20 @@ class MMDCriticSearch(ProtoGreedySearch): """ - KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors`. - Basically a wrapper of `NearestNeighbors` to match the `BaseSearchMethod` API. + MMDCritic method to search prototypes. + + References: + .. [#] `Been Kim, Rajiv Khanna, Oluwasanmi Koyejo, + "Examples are not enough, learn to criticize! criticism for interpretability" + `_ Parameters ---------- cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. k The number of examples to retrieve. search_returns @@ -35,6 +41,13 @@ class MMDCriticSearch(ProtoGreedySearch): Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number yielding the corresponding p-norm." We also added 'cosine'. + nb_prototypes : int + Number of prototypes to find. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function or kernel matrix, by default rbf_kernel. 
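MMD-critic (the Kim et al. reference above) selects prototypes whose weighted kernel mean matches the kernel mean of the whole dataset, i.e. it minimizes the squared maximum mean discrepancy. A standalone sketch of the quantity being optimized, with uniform weights as in the original formulation; the class itself accumulates this incrementally rather than building dense matrices:

```
import tensorflow as tf

def rbf(x, y, gamma=0.5):
    d2 = tf.reduce_sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return tf.exp(-gamma * d2)

def mmd_squared(dataset, prototypes, weights):
    # MMD^2 = mean(K_XX) - 2 * w^T mean_cols(K_XP) + w^T K_PP w
    k_xx = tf.reduce_mean(rbf(dataset, dataset))
    k_xp = tf.reduce_mean(rbf(dataset, prototypes), axis=0)
    k_pp = rbf(prototypes, prototypes)
    return (k_xx
            - 2.0 * tf.reduce_sum(weights * k_xp)
            + tf.reduce_sum(weights[:, None] * weights[None, :] * k_pp))

x = tf.random.normal((50, 2))
protos = x[:3]
w = tf.fill((3,), 1.0 / 3.0)   # uniform weights, as in the MMD-critic paper
print(mmd_squared(x, protos, w))
```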
""" def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index e29d12b8..44d6e3e9 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -93,6 +93,8 @@ class ProtoDashSearch(ProtoGreedySearch): cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. k The number of examples to retrieve. search_returns @@ -108,39 +110,42 @@ class ProtoDashSearch(ProtoGreedySearch): yielding the corresponding p-norm." We also added 'cosine'. nb_prototypes : int Number of prototypes to find. - find_prototypes_kwargs - Additional parameters passed to `find_prototypes` function. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function or kernel matrix, by default rbf_kernel. + use_optimizer : bool, optional + Flag indicating whether to use an optimizer for prototype selection, by default False. """ - def find_prototypes(self, nb_prototypes, kernel_type: str = 'local', kernel_fn: callable = rbf_kernel, use_optimizer: bool = False): - """ - Search for prototypes and their corresponding weights. - - Parameters - ---------- - nb_prototypes : int - Number of prototypes to find. - nb_prototypes : int - Number of prototypes to find. - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. - kernel_fn : Callable, optional - Kernel function or kernel matrix, by default rbf_kernel. - use_optimizer : bool, optional - Flag indicating whether to use an optimizer for prototype selection, by default False. - - Returns - ------- - prototype_indices : Tensor - The indices of the selected prototypes. - prototype_weights : - The normalized weights of the selected prototypes. 
- """ - + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + search_returns: Optional[Union[List[str], str]] = None, + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = None, + nb_prototypes: int = 1, + kernel_type: str = 'local', + kernel_fn: callable = rbf_kernel, + use_optimizer: bool = False, + ): # pylint: disable=R0801 + self.use_optimizer = use_optimizer - return super().find_prototypes(nb_prototypes, kernel_type, kernel_fn) + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + k=k, + search_returns=search_returns, + batch_size=batch_size, + distance=distance, + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + kernel_fn=kernel_fn + ) def update_selection_weights(self, selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective): """ diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index a383b33a..df3bd5a5 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -9,11 +9,12 @@ from ...commons import dataset_gather, sanitize_dataset from ...types import Callable, List, Union, Optional, Tuple -from .prototypes_search import PrototypesSearch +from .base import BaseSearchMethod +from .knn import KNN from ..projections import Projection -class ProtoGreedySearch(PrototypesSearch): +class ProtoGreedySearch(BaseSearchMethod): """ ProtoGreedy method for searching prototypes. @@ -27,6 +28,8 @@ class ProtoGreedySearch(PrototypesSearch): cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. k The number of examples to retrieve. search_returns @@ -42,14 +45,144 @@ class ProtoGreedySearch(PrototypesSearch): yielding the corresponding p-norm." We also added 'cosine'. nb_prototypes : int Number of prototypes to find. - find_prototypes_kwargs - Additional parameters passed to `find_prototypes` function. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function or kernel matrix, by default rbf_kernel. """ # Avoid zero division during procedure. (the value is not important, as if the denominator is # zero, then the nominator will also be zero). 
EPSILON = tf.constant(1e-6) + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + search_returns: Optional[Union[List[str], str]] = None, + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = None, + nb_prototypes: int = 1, + kernel_type: str = 'local', + kernel_fn: callable = rbf_kernel, + ): # pylint: disable=R0801 + super().__init__( + cases_dataset, k, search_returns, batch_size + ) + + self.labels_dataset = sanitize_dataset(labels_dataset, self.batch_size) + + if kernel_type in ['local', 'global']: + self.kernel_type = kernel_type + else: + raise AttributeError( + "The kernel_type parameter is expected to be in" + + " ['local', 'global'] ", + +f"but {kernel_type} was received.", + ) + + if hasattr(kernel_fn, "__call__"): + def custom_kernel_fn(x1, x2, y1=None, y2=None): + if self.kernel_type == 'global': + kernel_matrix = kernel_fn(x1,x2) + if isinstance(kernel_matrix, np.ndarray): + kernel_matrix = tf.convert_to_tensor(kernel_matrix) + else: + # In the case of a local kernel, calculations are limited to within the class. + # Across different classes, the kernel values are set to 0. + kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32) + y_intersect = np.intersect1d(y1, y2) + for i in range(y_intersect.shape[0]): + y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0] + y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] + sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) + kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix + kernel_matrix = tf.convert_to_tensor(kernel_matrix) + return kernel_matrix + + self.kernel_fn = custom_kernel_fn + else: + raise AttributeError( + "The kernel parameter is expected to be a Callable", + +f"but {kernel_fn} was received.", + ) + + # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset. + # We take advantage of the symmetry of this matrix to traverse only its lower triangle. 
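The loop below never materializes the full kernel matrix: it streams over pairs of batches, visiting only the lower triangle and mirroring each off-diagonal block into the running row sums. What it accumulates are the column sums and the diagonal of the kernel matrix, which a dense version would obtain as follows (a small-data sketch, useful as a mental model or a check of the batched logic):

```
import tensorflow as tf

def rbf(x, y, gamma=0.5):
    d2 = tf.reduce_sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return tf.exp(-gamma * d2)

x = tf.random.normal((7, 3))
k_matrix = rbf(x, x)                         # (n, n), symmetric

col_sums = tf.reduce_sum(k_matrix, axis=0)   # what the batched loop accumulates
diag = tf.linalg.diag_part(k_matrix)
col_means = col_sums / tf.cast(tf.shape(x)[0], tf.float32)
```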
+ col_sums = [] + diag = [] + row_sums = [] + + for batch_col_index, (batch_col_cases, batch_col_labels) in enumerate( + zip(self.cases_dataset, self.labels_dataset) + ): + batch_col_sums = tf.zeros((batch_col_cases.shape[0])) + + for batch_row_index, (batch_row_cases, batch_row_labels) in enumerate( + zip(self.cases_dataset, self.labels_dataset) + ): + if batch_row_index < batch_col_index: + continue + + batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases, batch_row_labels, batch_col_labels) + + batch_col_sums = batch_col_sums + tf.reduce_sum(batch_kernel, axis=0) + + if batch_col_index == batch_row_index: + if batch_col_index != 0: + batch_col_sums = batch_col_sums + row_sums[batch_row_index] + + diag.append(tf.linalg.diag_part(batch_kernel)) + + if batch_col_index == 0: + if batch_row_index == 0: + row_sums.append(None) + else: + row_sums.append(tf.reduce_sum(batch_kernel, axis=1)) + else: + row_sums[batch_row_index] += tf.reduce_sum(batch_kernel, axis=1) + + col_sums.append(batch_col_sums) + + self.col_sums = tf.concat(col_sums, axis=0) + self.n = self.col_sums.shape[0] + self.col_means = self.col_sums / self.n + self.diag = tf.concat(diag, axis=0) + self.nb_features = batch_col_cases.shape[1] + + # compute the prototypes in the latent space + self.prototype_indices, self.prototype_cases, self.prototype_labels, self.prototype_weights = self.find_prototypes(nb_prototypes) + + if distance is None: + def custom_distance(x1,x2): + x1 = tf.expand_dims(x1, axis=0) + x2 = tf.expand_dims(x2, axis=0) + distance = tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2)) + return distance + self.distance_fn = custom_distance + elif hasattr(distance, "__call__"): + self.distance_fn = distance + elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( + distance, int + ): + self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance) + else: + raise AttributeError( + "The distance parameter is expected to be either a Callable or in" + + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ", + +f"but {distance} was received.", + ) + + self.knn = KNN( + cases_dataset=self.prototype_cases, + k=k, + search_returns=search_returns, + batch_size=batch_size, + distance=self.distance_fn + ) + def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): """ Compute the objective and its weights for each candidate. @@ -144,104 +277,7 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti return selection_weights - def compute_kernel_attributes(self, kernel_type: str = 'local', kernel_fn: callable = rbf_kernel): - """ - Compute the attributes of the class that are related to the kernel. - - Parameters - ---------- - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. - kernel_fn : Callable, optional - Kernel function or kernel matrix, by default rbf_kernel. - - Returns - ------- - selection_weights : Tensor - Updated weights corresponding to the selected prototypes. 
- """ - if kernel_type in ['local', 'global']: - self.kernel_type = kernel_type - else: - raise AttributeError( - "The kernel_type parameter is expected to be in" - + " ['local', 'global'] ", - +f"but {kernel_type} was received.", - ) - - if hasattr(kernel_fn, "__call__"): - def custom_kernel_fn(x1, x2, y1, y2): - if self.kernel_type == 'global': - kernel_matrix = kernel_fn(x1,x2) - if isinstance(kernel_matrix, np.ndarray): - kernel_matrix = tf.convert_to_tensor(kernel_matrix) - else: - # In the case of a local kernel, calculations are limited to within the class. - # Across different classes, the kernel values are set to 0. - kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32) - y_intersect = np.intersect1d(y1, y2) - for i in range(y_intersect.shape[0]): - y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0] - y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] - sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) - kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix - kernel_matrix = tf.convert_to_tensor(kernel_matrix) - return kernel_matrix - - self.kernel_fn = custom_kernel_fn - else: - raise AttributeError( - "The kernel parameter is expected to be a Callable", - +f"but {kernel_fn} was received.", - ) - - # TODO: for local explanation add the ability to compute distance_fn based on the kernel - - # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset. - # We take advantage of the symmetry of this matrix to traverse only its lower triangle. - col_sums = [] - diag = [] - row_sums = [] - - for batch_col_index, (batch_col_cases, batch_col_labels) in enumerate( - zip(self.cases_dataset, self.labels_dataset) - ): - batch_col_sums = tf.zeros((batch_col_cases.shape[0])) - - for batch_row_index, (batch_row_cases, batch_row_labels) in enumerate( - zip(self.cases_dataset, self.labels_dataset) - ): - if batch_row_index < batch_col_index: - continue - - batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases, batch_row_labels, batch_col_labels) - - batch_col_sums = batch_col_sums + tf.reduce_sum(batch_kernel, axis=0) - - if batch_col_index == batch_row_index: - if batch_col_index != 0: - batch_col_sums = batch_col_sums + row_sums[batch_row_index] - - diag.append(tf.linalg.diag_part(batch_kernel)) - - if batch_col_index == 0: - if batch_row_index == 0: - row_sums.append(None) - else: - row_sums.append(tf.reduce_sum(batch_kernel, axis=1)) - else: - row_sums[batch_row_index] += tf.reduce_sum(batch_kernel, axis=1) - - col_sums.append(batch_col_sums) - - self.col_sums = tf.concat(col_sums, axis=0) - self.n = self.col_sums.shape[0] - self.col_means = self.col_sums / self.n - self.diag = tf.concat(diag, axis=0) - self.nb_features = batch_col_cases.shape[1] - - def find_prototypes(self, nb_prototypes, kernel_type: str = 'local', kernel_fn: callable = rbf_kernel): + def find_prototypes(self, nb_prototypes): """ Search for prototypes and their corresponding weights. @@ -249,22 +285,19 @@ def find_prototypes(self, nb_prototypes, kernel_type: str = 'local', kernel_fn: ---------- nb_prototypes : int Number of prototypes to find. - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. - kernel_fn : Callable, optional - Kernel function or kernel matrix, by default rbf_kernel. Returns ------- prototype_indices : Tensor The indices of the selected prototypes. 
+ prototype_cases : Tensor + The cases of the selected prototypes. + prototype_labels : Tensor + The labels of the selected prototypes. prototype_weights : The normalized weights of the selected prototypes. """ - self.compute_kernel_attributes(kernel_type, kernel_fn) - # Tensors to store selected indices and their corresponding cases, labels and weights. selection_indices = tf.constant([], dtype=tf.int32) selection_cases = tf.zeros((0, self.nb_features), dtype=tf.float32) @@ -362,9 +395,45 @@ def find_prototypes(self, nb_prototypes, kernel_type: str = 'local', kernel_fn: k += 1 prototype_indices = selection_indices + prototype_cases = selection_cases + prototype_labels = selection_labels prototype_weights = selection_weights # Normalize the weights prototype_weights = prototype_weights / tf.reduce_sum(prototype_weights) - return prototype_indices, prototype_weights \ No newline at end of file + return prototype_indices, prototype_cases, prototype_labels, prototype_weights + + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]): + """ + Search the samples to return as examples. Called by the explain methods. + It may also return the indices corresponding to the samples, + based on `return_indices` value. + + Parameters + ---------- + inputs + Tensor or Array. Input samples to be explained. + Assumed to have been already projected. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + """ + + # look for closest prototypes to projected inputs + knn_output = self.knn(inputs) + + # obtain closest prototypes indices with respect to the prototypes + indices_wrt_prototypes = knn_output["indices"] + + # convert to unique indices + indices_wrt_prototypes = indices_wrt_prototypes[:, :, 0] * self.batch_size + indices_wrt_prototypes[:, :, 1] + + # get prototypes indices with respect to the dataset + indices = tf.gather(self.prototype_indices, indices_wrt_prototypes) + + # convert back to batch-element indices + batch_indices, elem_indices = indices // self.batch_size, indices % self.batch_size + indices = tf.stack([batch_indices, elem_indices], axis=-1) + + knn_output["indices"] = indices + + return knn_output \ No newline at end of file diff --git a/xplique/example_based/search_methods/prototypes_search.py b/xplique/example_based/search_methods/prototypes_search.py deleted file mode 100644 index 6db01b3c..00000000 --- a/xplique/example_based/search_methods/prototypes_search.py +++ /dev/null @@ -1,137 +0,0 @@ -""" -Prototypes search method in example-based module -""" - -from abc import ABC, abstractmethod -import numpy as np -from sklearn.metrics.pairwise import rbf_kernel -import tensorflow as tf - -from ...commons import dataset_gather -from ...types import Callable, List, Union, Optional, Tuple - -from .base import BaseSearchMethod -from ..projections import Projection - - -class PrototypesSearch(BaseSearchMethod): - """ - Prototypes search method to find prototypes and the examples closest to these prototypes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - For natural example-based methods it is the train dataset. - k - The number of examples to retrieve. - search_returns - String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. - batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. 
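The `find_examples` hunk above converts between two index conventions: the KNN over the prototype tensor returns (batch, element) pairs relative to the prototypes, which are flattened, mapped through `prototype_indices`, and re-split into (batch, element) pairs relative to the case dataset. The arithmetic in isolation, with made-up numbers:

```
import tensorflow as tf

batch_size = 32
# (n, k, 2) pairs from the KNN over the prototypes: (batch index, index in batch)
knn_indices = tf.constant([[[0, 1], [0, 3]]])

# flatten to one index per prototype
flat = knn_indices[:, :, 0] * batch_size + knn_indices[:, :, 1]   # [[1, 3]]

# positions of the prototypes inside the full case dataset
prototype_indices = tf.constant([5, 40, 77, 130])
dataset_flat = tf.gather(prototype_indices, flat)                 # [[40, 130]]

# back to (batch, element) pairs, now relative to the case dataset
pairs = tf.stack([dataset_flat // batch_size, dataset_flat % batch_size], axis=-1)
# -> [[[1, 8], [4, 2]]]
```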
- distance - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. - nb_prototypes : int - Number of prototypes to find. - find_prototypes_kwargs - Additional parameters passed to `find_prototypes` function. - """ - - # Avoid zero division during procedure. (the value is not important, as if the denominator is - # zero, then the nominator will also be zero). - EPSILON = tf.constant(1e-6) - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - search_returns: Optional[Union[List[str], str]] = None, - batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = "euclidean", - nb_prototypes: int = 1, - **find_prototypes_kwargs, - ): # pylint: disable=R0801 - super().__init__( - cases_dataset, labels_dataset, k, search_returns, batch_size - ) - - if hasattr(distance, "__call__"): - self.distance_fn = distance - elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( - distance, int - ): - self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance) - else: - raise AttributeError( - "The distance parameter is expected to be either a Callable or in" - + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ", - +f"but {distance} was received.", - ) - - self.prototype_indices, self.prototype_weights = self.find_prototypes(nb_prototypes, **find_prototypes_kwargs) - - @abstractmethod - def find_prototypes(self, nb_prototypes: int, **find_prototypes_kwargs): - """ - Search for prototypes and their corresponding weights. - - Parameters - ---------- - nb_prototypes : int - Number of prototypes to find. - - find_prototypes_kwargs - Additional parameters passed to `find_prototypes` function. - - Returns - ------- - prototype_indices : Tensor - The indices of the selected prototypes. - prototype_weights : - The normalized weights of the selected prototypes. - """ - return NotImplementedError() - - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]): - """ - Search the samples to return as examples. Called by the explain methods. - It may also return the indices corresponding to the samples, - based on `return_indices` value. - - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Assumed to have been already projected. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - """ - # TODO: Find examples: here we provide a local explanation. - # Find the nearest prototypes to inputs - # we use self.distance_fn and self.prototype_indices. 
- examples_indices = None - examples_distances = None - - # Set values in return dict - return_dict = {} - if "examples" in self.returns: - return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices) - if "include_inputs" in self.returns: - inputs = tf.expand_dims(inputs, axis=1) - return_dict["examples"] = tf.concat( - [inputs, return_dict["examples"]], axis=1 - ) - if "indices" in self.returns: - return_dict["indices"] = examples_indices - if "distances" in self.returns: - return_dict["distances"] = examples_distances - - # Return a dict only different variables are returned - if len(return_dict) == 1: - return list(return_dict.values())[0] - return return_dict From 46494360b4132547679d4b931cbd0c8e18f6621f Mon Sep 17 00:00:00 2001 From: Mohamed Chafik Bakey Date: Tue, 30 Apr 2024 16:44:42 +0200 Subject: [PATCH 037/138] add Prototypes fix up --- tests/example_based/test_prototypes.py | 63 +++++--------- .../search_methods/mmd_critic_search.py | 13 ++- .../search_methods/proto_dash_search.py | 20 ++--- .../search_methods/proto_greedy_search.py | 85 ++++++++++++------- 4 files changed, 88 insertions(+), 93 deletions(-) diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index 720c47ee..8a31b24d 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -11,7 +11,6 @@ import time import numpy as np -from sklearn.metrics.pairwise import rbf_kernel import tensorflow as tf from xplique.commons import sanitize_dataset, are_dataset_first_elems_equal @@ -25,13 +24,14 @@ def test_proto_greedy_basic(): """ - Test the SimilarExamples with an identity projection. + Test the Prototypes with an identity projection. """ # Setup k = 3 nb_prototypes = 3 gamma = 0.026 x_train, y_train = get_Gaussian_Data(nb_samples_class=20) + x_test, y_test = get_Gaussian_Data(nb_samples_class=10) # x_train, y_train = load_data('usps') # x_test, y_test = load_data('usps.t') # x_test = tf.random.shuffle(x_test) @@ -41,13 +41,6 @@ def test_proto_greedy_basic(): space_projection=lambda inputs, targets=None: inputs ) - def custom_kernel_wrapper(gamma): - def custom_kernel(x,y=None): - return rbf_kernel(x,y,gamma) - return custom_kernel - - kernel_fn = custom_kernel_wrapper(gamma) - kernel_type = "global" # Method initialization @@ -57,10 +50,10 @@ def custom_kernel(x,y=None): k=k, projection=identity_projection, batch_size=32, - distance="euclidean", + distance=None, #"euclidean", nb_prototypes=nb_prototypes, kernel_type=kernel_type, - kernel_fn=kernel_fn, + gamma=gamma, ) # Generate global explanation @@ -92,24 +85,25 @@ def custom_kernel(x,y=None): # Check if all indices are between 0 and x_train.shape[0]-1 assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + # Generate local explanation + examples = method.explain(x_test) + # # Visualize all prototypes # plot(prototypes, prototype_weights, 'proto_greedy') - # # Generate local explanation - # examples = method.explain(x_test) - # # Visualize local explanation # plot_local_explanation(examples, x_test, 'proto_greedy') def test_proto_dash_basic(): """ - Test the SimilarExamples with an identity projection. + Test the Prototypes with an identity projection. 
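From this patch on, the tests pass `gamma` instead of a sklearn kernel, and `distance=None` makes the search fall back on the kernel-induced metric d(x1, x2) = sqrt(k(x1, x1) - 2 k(x1, x2) + k(x2, x2)). A sketch of that identity for an rbf kernel, where k(x, x) = 1 and the distance reduces to sqrt(2 - 2 k(x1, x2)):

```
import tensorflow as tf

def rbf(x1, x2, gamma=0.026):
    return tf.exp(-gamma * tf.reduce_sum((x1 - x2) ** 2))

x1 = tf.constant([0.0, 0.0])
x2 = tf.constant([1.0, 2.0])

# kernel-induced distance, as used when `distance=None`
d = tf.sqrt(rbf(x1, x1) - 2.0 * rbf(x1, x2) + rbf(x2, x2))

# for an rbf kernel this equals sqrt(2 - 2 * k(x1, x2))
d_check = tf.sqrt(2.0 - 2.0 * rbf(x1, x2))
print(d.numpy(), d_check.numpy())  # equal
```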
""" # Setup k = 3 nb_prototypes = 3 gamma = 0.026 x_train, y_train = get_Gaussian_Data(nb_samples_class=20) + x_test, y_test = get_Gaussian_Data(nb_samples_class=10) # x_train, y_train = load_data('usps') # x_test, y_test = load_data('usps.t') # x_test = tf.random.shuffle(x_test) @@ -119,13 +113,6 @@ def test_proto_dash_basic(): space_projection=lambda inputs, targets=None: inputs ) - def custom_kernel_wrapper(gamma): - def custom_kernel(x,y=None): - return rbf_kernel(x,y,gamma) - return custom_kernel - - kernel_fn = custom_kernel_wrapper(gamma) - kernel_type = "global" # Method initialization @@ -138,7 +125,7 @@ def custom_kernel(x,y=None): distance="euclidean", nb_prototypes=nb_prototypes, kernel_type=kernel_type, - kernel_fn=kernel_fn, + gamma=gamma, ) # Generate global explanation @@ -170,24 +157,25 @@ def custom_kernel(x,y=None): # Check if all indices are between 0 and x_train.shape[0]-1 assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + # Generate local explanation + examples = method.explain(x_test) + # # Visualize all prototypes # plot(prototypes, prototype_weights, 'proto_dash') - # # Generate local explanation - # examples = method.explain(x_test) - # # Visualize local explanation # plot_local_explanation(examples, x_test, 'proto_dash') def test_mmd_critic_basic(): """ - Test the SimilarExamples with an identity projection. + Test the Prototypes with an identity projection. """ # Setup k = 3 nb_prototypes = 3 gamma = 0.026 x_train, y_train = get_Gaussian_Data(nb_samples_class=20) + x_test, y_test = get_Gaussian_Data(nb_samples_class=10) # x_train, y_train = load_data('usps') # x_test, y_test = load_data('usps.t') # x_test = tf.random.shuffle(x_test) @@ -197,13 +185,6 @@ def test_mmd_critic_basic(): space_projection=lambda inputs, targets=None: inputs ) - def custom_kernel_wrapper(gamma): - def custom_kernel(x,y=None): - return rbf_kernel(x,y,gamma) - return custom_kernel - - kernel_fn = custom_kernel_wrapper(gamma) - kernel_type = "global" # Method initialization @@ -216,7 +197,7 @@ def custom_kernel(x,y=None): distance="euclidean", nb_prototypes=nb_prototypes, kernel_type=kernel_type, - kernel_fn=kernel_fn, + gamma=gamma, ) # Generate global explanation @@ -248,15 +229,15 @@ def custom_kernel(x,y=None): # Check if all indices are between 0 and x_train.shape[0]-1 assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + # Generate local explanation + examples = method.explain(x_test) + # # Visualize all prototypes # plot(prototypes, prototype_weights, 'mmd_critic') - # # Generate local explanation - # examples = method.explain(x_test) - # # Visualize local explanation # plot_local_explanation(examples, x_test, 'mmd_critic') -test_proto_greedy_basic() -test_proto_dash_basic() -test_mmd_critic_basic() +# test_proto_greedy_basic() +# test_proto_dash_basic() +# test_mmd_critic_basic() diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py index 87ad3188..7465fcfb 100644 --- a/xplique/example_based/search_methods/mmd_critic_search.py +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -47,10 +47,13 @@ class MMDCriticSearch(ProtoGreedySearch): The kernel type. It can be 'local' or 'global', by default 'local'. When it is local, the distances are calculated only within the classes. kernel_fn : Callable, optional - Kernel function or kernel matrix, by default rbf_kernel. 
+ Kernel function, by default the rbf kernel. + This function must only use TensorFlow operations. + gamma : float, optional + Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ - def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): + def compute_objectives(self, selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel): """ Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. @@ -70,18 +73,12 @@ def compute_objectives(self, selection_indices, selection_cases, selection_label Indices corresponding to the selected prototypes. selection_cases : Tensor Cases corresponding to the selected prototypes. - selection_labels : Tensor - Labels corresponding to the selected prototypes. selection_weights : Tensor Weights corresponding to the selected prototypes. selection_selection_kernel : Tensor Kernel matrix computed from the selected prototypes. candidates_indices : Tensor Indices corresponding to the candidate prototypes. - candidates_cases : Tensor - Cases corresponding to the candidate prototypes. - candidates_labels : Tensor - Labels corresponding to the candidate prototypes. candidates_selection_kernel : Tensor Kernel matrix between the candidates and the selected prototypes. diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index 44d6e3e9..5bb7b78b 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -3,7 +3,6 @@ """ import numpy as np -from sklearn.metrics.pairwise import rbf_kernel from scipy.optimize import minimize import tensorflow as tf @@ -114,7 +113,10 @@ class ProtoDashSearch(ProtoGreedySearch): The kernel type. It can be 'local' or 'global', by default 'local'. When it is local, the distances are calculated only within the classes. kernel_fn : Callable, optional - Kernel function or kernel matrix, by default rbf_kernel. + Kernel function, by default the rbf kernel. + This function must only use TensorFlow operations. + gamma : float, optional + Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. use_optimizer : bool, optional Flag indicating whether to use an optimizer for prototype selection, by default False. 
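ProtoDash, unlike ProtoGreedy, scores candidates by the gradient of l(w) = w^T * mu_p - 1/2 * w^T * K * w taken at the current weights (see the `compute_objectives` hunk that follows): a candidate's score is its mean kernel value against the data minus its kernel similarity to the already-selected prototypes. A dense sketch of that scoring step; the shapes and values are illustrative only:

```
import tensorflow as tf

n, m = 6, 2            # candidates, already-selected prototypes
mu_p = tf.random.uniform((n,))      # mean kernel value of each candidate vs. the data
k_cs = tf.random.uniform((n, m))    # kernel between candidates and the selection
w = tf.constant([0.4, 0.6])         # current weights of the selection

# gradient of l(w) with respect to a new candidate's (still zero) weight:
# d l / d w_c = mu_p[c] - K[c, selection] @ w
gradients = mu_p - tf.linalg.matvec(k_cs, w)

best = tf.argmax(gradients)  # ProtoDash picks the candidate with the largest gradient
```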
""" @@ -129,7 +131,8 @@ def __init__( distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, kernel_type: str = 'local', - kernel_fn: callable = rbf_kernel, + kernel_fn: callable = None, + gamma: float = None, use_optimizer: bool = False, ): # pylint: disable=R0801 @@ -144,7 +147,8 @@ def __init__( distance=distance, nb_prototypes=nb_prototypes, kernel_type=kernel_type, - kernel_fn=kernel_fn + kernel_fn=kernel_fn, + gamma=gamma ) def update_selection_weights(self, selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective): @@ -197,7 +201,7 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti return selection_weights - def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): + def compute_objectives(self, selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel): """ Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. Calculate the gradient of l(w) = w^T * μ_p - 1/2 * w^T * K * w @@ -211,18 +215,12 @@ def compute_objectives(self, selection_indices, selection_cases, selection_label Indices corresponding to the selected prototypes. selection_cases : Tensor Cases corresponding to the selected prototypes. - selection_labels : Tensor - Labels corresponding to the selected prototypes. selection_weights : Tensor Weights corresponding to the selected prototypes. selection_selection_kernel : Tensor Kernel matrix computed from the selected prototypes. candidates_indices : Tensor Indices corresponding to the candidate prototypes. - candidates_cases : Tensor - Cases corresponding to the candidate prototypes. - candidates_labels : Tensor - Labels corresponding to the candidate prototypes. candidates_selection_kernel : Tensor Kernel matrix between the candidates and the selected prototypes. diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index df3bd5a5..a86f610d 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -3,7 +3,6 @@ """ import numpy as np -from sklearn.metrics.pairwise import rbf_kernel import tensorflow as tf from ...commons import dataset_gather, sanitize_dataset @@ -49,7 +48,10 @@ class ProtoGreedySearch(BaseSearchMethod): The kernel type. It can be 'local' or 'global', by default 'local'. When it is local, the distances are calculated only within the classes. kernel_fn : Callable, optional - Kernel function or kernel matrix, by default rbf_kernel. + Kernel function, by default the rbf kernel. + This function must only use TensorFlow operations. + gamma : float, optional + Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ # Avoid zero division during procedure. 
(the value is not important, as if the denominator is @@ -66,7 +68,8 @@ def __init__( distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, kernel_type: str = 'local', - kernel_fn: callable = rbf_kernel, + kernel_fn: callable = None, + gamma: float = None ): # pylint: disable=R0801 super().__init__( cases_dataset, k, search_returns, batch_size @@ -82,7 +85,27 @@ def __init__( + " ['local', 'global'] ", +f"but {kernel_type} was received.", ) + + if kernel_fn is None: + # define rbf kernel function + def rbf_kernel(X, Y=None, gamma=None): + if Y is None: + Y = X + + if gamma is None: + gamma = 1.0 / tf.cast(tf.shape(X)[1], dtype=X.dtype) + + X = tf.expand_dims(X, axis=1) + Y = tf.expand_dims(Y, axis=0) + + pairwise_diff = X - Y + pairwise_sq_dist = tf.reduce_sum(tf.square(pairwise_diff), axis=-1) + kernel_matrix = tf.exp(-gamma * pairwise_sq_dist) + + return kernel_matrix + kernel_fn = lambda x, y: rbf_kernel(x,y,gamma) + if hasattr(kernel_fn, "__call__"): def custom_kernel_fn(x1, x2, y1=None, y2=None): if self.kernel_type == 'global': @@ -105,10 +128,32 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None): self.kernel_fn = custom_kernel_fn else: raise AttributeError( - "The kernel parameter is expected to be a Callable", + "The kernel_fn parameter is expected to be a Callable", +f"but {kernel_fn} was received.", - ) + ) + + if distance is None: + def kernel_induced_distance(x1,x2): + x1 = tf.expand_dims(x1, axis=0) + x2 = tf.expand_dims(x2, axis=0) + distance = tf.squeeze(tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2))) + return distance + + self.distance_fn = lambda x1, x2: kernel_induced_distance(x1,x2) + elif hasattr(distance, "__call__"): + self.distance_fn = distance + elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( + distance, int + ): + self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance) + else: + raise AttributeError( + "The distance parameter is expected to be either a Callable or in" + + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ", + +f"but {distance} was received.", + ) + # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset. # We take advantage of the symmetry of this matrix to traverse only its lower triangle. 
col_sums = [] @@ -155,26 +200,6 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None): # compute the prototypes in the latent space self.prototype_indices, self.prototype_cases, self.prototype_labels, self.prototype_weights = self.find_prototypes(nb_prototypes) - if distance is None: - def custom_distance(x1,x2): - x1 = tf.expand_dims(x1, axis=0) - x2 = tf.expand_dims(x2, axis=0) - distance = tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2)) - return distance - self.distance_fn = custom_distance - elif hasattr(distance, "__call__"): - self.distance_fn = distance - elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( - distance, int - ): - self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance) - else: - raise AttributeError( - "The distance parameter is expected to be either a Callable or in" - + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ", - +f"but {distance} was received.", - ) - self.knn = KNN( cases_dataset=self.prototype_cases, k=k, @@ -183,7 +208,7 @@ def custom_distance(x1,x2): distance=self.distance_fn ) - def compute_objectives(self, selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel): + def compute_objectives(self, selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel): """ Compute the objective and its weights for each candidate. @@ -193,18 +218,12 @@ def compute_objectives(self, selection_indices, selection_cases, selection_label Indices corresponding to the selected prototypes. selection_cases : Tensor Cases corresponding to the selected prototypes. - selection_labels : Tensor - Labels corresponding to the selected prototypes. selection_weights : Tensor Weights corresponding to the selected prototypes. selection_selection_kernel : Tensor Kernel matrix computed from the selected prototypes. candidates_indices : Tensor Indices corresponding to the candidate prototypes. - candidates_cases : Tensor - Cases corresponding to the candidate prototypes. - candidates_labels : Tensor - Labels corresponding to the candidate prototypes. candidates_selection_kernel : Tensor Kernel matrix between the candidates and the selected prototypes. 
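For reference, the objective l(w) = w^T * mu_p - 1/2 * w^T * K * w maximized by
`compute_objectives` admits a closed form: the gradient mu_p - K * w vanishes at
w* = K^{-1} * mu_p, which yields l(w*) = 1/2 * mu_p^T * K^{-1} * mu_p. A minimal
TensorFlow sketch of that computation (illustrative only, hypothetical names, not
part of the patch):

    import tensorflow as tf

    def closed_form_objective(kernel_matrix: tf.Tensor, mu_p: tf.Tensor):
        # kernel_matrix: (m, m) kernel between the selected prototypes, assumed invertible
        # mu_p: (m, 1) mean kernel vector of the selected prototypes w.r.t. the dataset
        weights = tf.linalg.solve(kernel_matrix, mu_p)     # w* = K^{-1} mu_p
        objective = 0.5 * tf.reduce_sum(mu_p * weights)    # l(w*) = 1/2 mu_p^T w*
        return weights, objective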
@@ -353,7 +372,7 @@ def find_prototypes(self, nb_prototypes): all_candidates_last_selected_kernel = tf.tensor_scatter_nd_update(all_candidates_last_selected_kernel, tf.expand_dims(candidates_indices, axis=1), tf.squeeze(candidates_last_selected_kernel, axis=1)) # Compute the objectives for the batch - objectives, objectives_weights = self.compute_objectives(selection_indices, selection_cases, selection_labels, selection_weights, selection_selection_kernel, candidates_indices, candidates_cases, candidates_labels, candidates_selection_kernel) + objectives, objectives_weights = self.compute_objectives(selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel) # Select the best objective in the batch objectives_argmax = tf.argmax(objectives) From a62fd8ea55f4938b69230f5fcc79cea4e8523540 Mon Sep 17 00:00:00 2001 From: Mohamed Chafik Bakey Date: Tue, 30 Apr 2024 17:01:55 +0200 Subject: [PATCH 038/138] add Prototypes fix up --- xplique/example_based/prototypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 906b6bca..29946c22 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -133,7 +133,7 @@ def get_global_prototypes(self): Returns: prototype_indices : Tensor prototype indices. - prototype_indices : Tensor + prototype_weights : Tensor prototype weights. """ return self.search_method.prototype_indices, self.search_method.prototype_weights From e839e24ff857cea4dc02a4f21759bffd1bd5acdc Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 15:18:33 +0200 Subject: [PATCH 039/138] feat: change the private initialize_cases_dataset method to a protected one --- xplique/example_based/base_example_method.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index df8ac306..9ce6b154 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -90,7 +90,7 @@ def __init__( ), "`BaseExampleMethod` without `projection` is a `BaseSearchMethod`." # set attributes - batch_size = self.__initialize_cases_dataset( + batch_size = self._initialize_cases_dataset( cases_dataset, labels_dataset, targets_dataset, batch_size ) @@ -126,7 +126,7 @@ def __init__( **search_method_kwargs, ) - def __initialize_cases_dataset( + def _initialize_cases_dataset( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], From b3ebe4a31c451a30bd387a735d19b336216a4f70 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 15:21:01 +0200 Subject: [PATCH 040/138] feat: change the fill value to np.inf when gathering elements of a dataset from indices, such that indices to -1, -1 create inf valued examples --- xplique/commons/tf_dataset_operations.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py index 69e750eb..f74f4ea2 100644 --- a/xplique/commons/tf_dataset_operations.py +++ b/xplique/commons/tf_dataset_operations.py @@ -206,9 +206,7 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: example = next(iter(dataset)) # (n, bs, ...) 
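    # Note (illustrative, not part of the patch): with the inf fill below, indices left
    # at (-1, -1) by a search method gather to inf-valued rows rather than silent zeros,
    # so callers can detect "no example found", e.g.:
    #     gathered = dataset_gather(dataset, tf.constant([[[-1, -1]]]))
    #     missing = tf.reduce_all(tf.math.is_inf(gathered), axis=-1)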
results = tf.Variable( - tf.zeros( - indices.shape[:-1] + example[0].shape, dtype=dataset.element_spec.dtype - ) + tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)), ) nb_results = product(indices.shape[:-1]) From 335dcb95406d472407574cc36b20f2c8f1321a44 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 15:25:27 +0200 Subject: [PATCH 041/138] feat: add the kleor search methods and their tests --- tests/example_based/test_kleor.py | 212 +++++++++++++++ .../example_based/search_methods/__init__.py | 2 +- xplique/example_based/search_methods/kleor.py | 241 ++++++++++++++++++ 3 files changed, 454 insertions(+), 1 deletion(-) create mode 100644 tests/example_based/test_kleor.py create mode 100644 xplique/example_based/search_methods/kleor.py diff --git a/tests/example_based/test_kleor.py b/tests/example_based/test_kleor.py new file mode 100644 index 00000000..f4965f8d --- /dev/null +++ b/tests/example_based/test_kleor.py @@ -0,0 +1,212 @@ +""" +Tests for the contrastive methods. +""" +import tensorflow as tf +import numpy as np + +from xplique.example_based.search_methods import KLEORSimMiss, KLEORGlobalSim + +def test_kleor_base_and_sim_miss(): + """ + Test suite for both the BaseKLEOR and KLEORSimMiss class. Indeed, the KLEORSimMiss class is a subclass of the + BaseKLEOR class with a very basic implementation of the only abstract method (identity function). + """ + # setup the tests + cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) + cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + + cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) + cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) + + inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) + targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + + # build the kleor object + kleor = KLEORSimMiss(cases_dataset, cases_targets_dataset, k=1, search_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2) + + # test the _filter_fn method + fake_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + fake_cases_targets = tf.constant([[0, 1], [1, 0], [0, 1], [1, 0], [1, 0]], dtype=tf.float32) + # the mask should be True when the targets are the same i.e we keep those cases + expected_mask = tf.constant([[True, False, True, False, False], + [False, True, False, True, True], + [False, True, False, True, True], + [True, False, True, False, False], + [False, True, False, True, True]], dtype=tf.bool) + mask = kleor._filter_fn(inputs, cases, fake_targets, fake_cases_targets) + assert tf.reduce_all(tf.equal(mask, expected_mask)) + + # test the _filter_fn_nun method, this time the mask should be True when the targets are different + expected_mask = tf.constant([[False, True, False, True, True], + [True, False, True, False, False], + [True, False, True, False, False], + [False, True, False, True, True], + [True, False, True, False, False]], dtype=tf.bool) + mask = kleor._filter_fn_nun(inputs, cases, fake_targets, fake_cases_targets) + assert tf.reduce_all(tf.equal(mask, expected_mask)) + + # test the _get_nuns method + nuns, nuns_distances = kleor._get_nuns(inputs, targets) + expected_nuns = tf.constant([ + [[2., 3.]], + [[1., 2.]], + [[4., 5.]]], dtype=tf.float32) + expected_nuns_distances = tf.constant([ + [np.sqrt(2*0.5**2)], + [np.sqrt(2*1.5**2)], + 
[np.sqrt(2*0.5**2)]], dtype=tf.float32)
+    assert tf.reduce_all(tf.equal(nuns, expected_nuns))
+    assert tf.reduce_all(tf.equal(nuns_distances, expected_nuns_distances))
+
+    # test the _initialize_search method
+    sf_indices, input_sf_distances, nun_sf_distances, batch_indices = kleor._initialize_search(inputs)
+    assert sf_indices.shape == (3, 1, 2) # (n, k, 2)
+    assert input_sf_distances.shape == (3, 1) # (n, k)
+    assert nun_sf_distances.shape == (3, 1) # (n, k)
+    assert batch_indices.shape == (3, 2) # (n, bs)
+    expected_sf_indices = tf.constant([[[-1, -1]],[[-1, -1]],[[-1, -1]]], dtype=tf.int32)
+    assert tf.reduce_all(tf.equal(sf_indices, expected_sf_indices))
+    assert tf.reduce_all(tf.math.is_inf(input_sf_distances))
+    assert tf.reduce_all(tf.math.is_inf(nun_sf_distances))
+    expected_batch_indices = tf.constant([[0, 1], [0, 1], [0, 1]], dtype=tf.int32)
+    assert tf.reduce_all(tf.equal(batch_indices, expected_batch_indices))
+
+    # test the kneighbors method
+    input_sf_distances, sf_indices, nuns = kleor.kneighbors(inputs, targets)
+
+    assert input_sf_distances.shape == (3, 1) # (n, k)
+    assert sf_indices.shape == (3, 1, 2) # (n, k, 2)
+    assert nuns.shape == (3, 1, 2) # (n, k, 2)
+
+    assert tf.reduce_all(tf.equal(nuns, expected_nuns))
+
+    expected_distances = tf.constant([[np.sqrt(2*0.5**2)], [np.sqrt(2*0.5**2)], [np.sqrt(2*1.5**2)]], dtype=tf.float32)
+    assert tf.reduce_all(tf.abs(input_sf_distances - expected_distances) < 1e-5)
+
+    expected_indices = tf.constant([[[0, 0]],[[0, 1]],[[1, 0]]], dtype=tf.int32)
+    assert tf.reduce_all(tf.equal(sf_indices, expected_indices))
+
+    # test the find_examples method
+    return_dict = kleor.find_examples(inputs, targets)
+    assert set(return_dict.keys()) == set(["examples", "indices", "distances", "nuns"])
+
+    examples = return_dict["examples"]
+    distances = return_dict["distances"]
+    indices = return_dict["indices"]
+    nuns = return_dict["nuns"]
+
+    assert tf.reduce_all(tf.equal(nuns, expected_nuns))
+    assert tf.reduce_all(tf.equal(expected_indices, indices))
+    assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5)
+
+    expected_examples = tf.constant([
+        [[1.5, 2.5], [1., 2.]],
+        [[2.5, 3.5], [2., 3.]],
+        [[4.5, 5.5], [3., 4.]]], dtype=tf.float32)
+    assert tf.reduce_all(tf.equal(examples, expected_examples))
+
+def test_kleor_global_sim():
+    """
+    Test suite for the KLEORGlobalSim class. Only the kneighbors and format_output methods are
+    impacted by the _additional_filtering method, so we test those three methods.
+ """ + # setup the tests + cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) + cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + + cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) + cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) + + inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) + targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + + # build the kleor object + kleor = KLEORGlobalSim(cases_dataset, cases_targets_dataset, k=1, search_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2) + + # test the _additionnal_filtering method + # (n, bs) + fake_nun_sf_distances = tf.constant([[1., 2.], [2., 3.], [3., 4.]]) + # (n, bs) + fake_input_sf_distances = tf.constant([[2., 1.], [3., 2.], [2., 5.]]) + # (n,1) + fake_nuns_input_distances = tf.constant([[3.], [1.], [4.]]) + # the expected filtering should be such that we keep the distance of a sf candidates + # when the input is closer to the sf than the nun, otherwise we set it to infinity + expected_nun_sf_distances = tf.constant([[1., 2.], [np.inf, np.inf], [3., np.inf]], dtype=tf.float32) + expected_input_sf_distances = tf.constant([[2., 1.], [np.inf, np.inf], [2., np.inf]], dtype=tf.float32) + + nun_sf_distances, input_sf_distances = kleor._additional_filtering(fake_nun_sf_distances, fake_input_sf_distances, fake_nuns_input_distances) + assert nun_sf_distances.shape == (3, 2) + assert input_sf_distances.shape == (3, 2) + + inf_mask_expected_nun_sf = tf.math.is_inf(expected_nun_sf_distances) + inf_mask_nun_sf = tf.math.is_inf(nun_sf_distances) + assert tf.reduce_all(tf.equal(inf_mask_expected_nun_sf, inf_mask_nun_sf)) + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_nun_sf, 0.0, nun_sf_distances) - tf.where(inf_mask_expected_nun_sf, 0.0, expected_nun_sf_distances) + ) < 1e-5) + + inf_mask_expected_input_sf = tf.math.is_inf(expected_input_sf_distances) + inf_mask_input_sf = tf.math.is_inf(input_sf_distances) + assert tf.reduce_all(tf.equal(inf_mask_expected_input_sf, inf_mask_input_sf)) + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_input_sf, 0.0, input_sf_distances) - tf.where(inf_mask_expected_input_sf, 0.0, expected_input_sf_distances) + ) < 1e-5) + + # test the kneighbors method + input_sf_distances, sf_indices, nuns = kleor.kneighbors(inputs, targets) + + expected_nuns = tf.constant([ + [[2., 3.]], + [[1., 2.]], + [[4., 5.]]], dtype=tf.float32) + assert tf.reduce_all(tf.equal(nuns, expected_nuns)) + + assert input_sf_distances.shape == (3, 1) # (n, k) + assert sf_indices.shape == (3, 1, 2) # (n, k, 2) + + expected_indices = tf.constant([[[-1, -1]],[[0, 1]],[[-1, -1]]], dtype=tf.int32) + assert tf.reduce_all(tf.equal(sf_indices, expected_indices)) + + expected_distances = tf.constant([[kleor.fill_value], [np.sqrt(2*0.5**2)], [kleor.fill_value]], dtype=tf.float32) + + # create masks for inf values + inf_mask_input = tf.math.is_inf(input_sf_distances) + inf_mask_expected = tf.math.is_inf(expected_distances) + assert tf.reduce_all(tf.equal(inf_mask_input, inf_mask_expected)) + + # compare finite values + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_input, 0.0, input_sf_distances) - tf.where(inf_mask_expected, 0.0, expected_distances) + ) < 1e-5) + + # test the find_examples + return_dict = kleor.find_examples(inputs, targets) + + indices = return_dict["indices"] + nuns = return_dict["nuns"] + distances = 
return_dict["distances"] + examples = return_dict["examples"] + + assert tf.reduce_all(tf.equal(nuns, expected_nuns)) + assert tf.reduce_all(tf.equal(expected_indices, indices)) + + # create masks for inf values + inf_mask_dist = tf.math.is_inf(distances) + assert tf.reduce_all(tf.equal(inf_mask_dist, inf_mask_expected)) + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_dist, 0.0, distances) - tf.where(inf_mask_expected, 0.0, expected_distances) + ) < 1e-5) + + expected_examples = tf.constant([ + [[1.5, 2.5], [np.inf, np.inf]], + [[2.5, 3.5], [2., 3.]], + [[4.5, 5.5], [np.inf, np.inf]]], dtype=tf.float32) + + # mask for inf values + inf_mask_examples = tf.math.is_inf(examples) + inf_mask_expected_examples = tf.math.is_inf(expected_examples) + assert tf.reduce_all(tf.equal(inf_mask_examples, inf_mask_expected_examples)) + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_examples, 0.0, examples) - tf.where(inf_mask_expected_examples, 0.0, expected_examples) + ) < 1e-5) diff --git a/xplique/example_based/search_methods/__init__.py b/xplique/example_based/search_methods/__init__.py index 010b7cb3..3c7897c5 100644 --- a/xplique/example_based/search_methods/__init__.py +++ b/xplique/example_based/search_methods/__init__.py @@ -4,5 +4,5 @@ from .base import BaseSearchMethod, ORDER -# from .sklearn_knn import SklearnKNN from .knn import BaseKNN, KNN, FilterKNN +from .kleor import KLEORSimMiss, KLEORGlobalSim diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py new file mode 100644 index 00000000..380d668a --- /dev/null +++ b/xplique/example_based/search_methods/kleor.py @@ -0,0 +1,241 @@ +""" +Define the KLEOR search method. +""" +from abc import abstractmethod, ABC + +import numpy as np +import tensorflow as tf + +from ...commons import dataset_gather +from ...types import Callable, List, Union, Optional, Tuple + +from .base import ORDER +from .knn import FilterKNN + +class BaseKLEOR(FilterKNN, ABC): + """ + Base class for the KLEOR search methods. + """ + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + k: int = 1, + search_returns: Optional[Union[List[str], str]] = None, + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = "euclidean", + ): # pylint: disable=R0801 + possibilities = ["examples", "indices", "distances", "include_inputs", "nuns"] + super().__init__( + cases_dataset = cases_dataset, + targets_dataset=targets_dataset, + k=k, + filter_fn=self._filter_fn, + search_returns=search_returns, + batch_size=batch_size, + distance=distance, + order=ORDER.ASCENDING, + possibilities=possibilities + ) + + self.search_nuns = FilterKNN( + cases_dataset=cases_dataset, + targets_dataset=targets_dataset, + k=1, + filter_fn=self._filter_fn_nun, + search_returns=["indices", "distances"], + batch_size=batch_size, + distance=distance, + order = ORDER.ASCENDING, + ) + + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): + """ + Search the samples to return as examples. Called by the explain methods. + It may also return the indices corresponding to the samples, + based on `return_indices` value. + + Parameters + ---------- + inputs + Tensor or Array. Input samples to be explained. + Assumed to have been already projected. + Expected shape among (N, W), (N, T, W), (N, W, H, C). 
+ """ + # compute neighbors + examples_distances, examples_indices, nuns = self.kneighbors(inputs, targets) + + # Set values in return dict + return_dict = {} + if "examples" in self.returns: + return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices) + # replace examples for which indices is -1, -1 by an inf value + # mask = tf.reduce_all(tf.equal(examples_indices, -1), axis=-1) + # return_dict["examples"] = tf.where( + # tf.expand_dims(mask, axis=-1), + # tf.fill(return_dict["examples"].shape, tf.constant(np.inf, dtype=tf.float32)), + # return_dict["examples"], + # ) + if "include_inputs" in self.returns: + inputs = tf.expand_dims(inputs, axis=1) + return_dict["examples"] = tf.concat( + [inputs, return_dict["examples"]], axis=1 + ) + if "nuns" in self.returns: + return_dict["nuns"] = nuns + if "indices" in self.returns: + return_dict["indices"] = examples_indices + if "distances" in self.returns: + return_dict["distances"] = examples_distances + + # Return a dict only different variables are returned + if len(return_dict) == 1: + return list(return_dict.values())[0] + return return_dict + + def _filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: + """ + """ + # get the labels predicted by the model + # (n, ) + predicted_labels = tf.argmax(targets, axis=-1) + label_targets = tf.argmax(cases_targets, axis=-1) + # for each input, if the target label is the same as the cases label + # the mask as a True value and False otherwise + mask = tf.equal(tf.expand_dims(predicted_labels, axis=1), label_targets) + return mask + + def _filter_fn_nun(self, _, __, targets, cases_targets) -> tf.Tensor: + """ + Filter function to mask the cases for which the label is different from the predicted + label on the inputs. + """ + # get the labels predicted by the model + # (n, ) + predicted_labels = tf.argmax(targets, axis=-1) + label_targets = tf.argmax(cases_targets, axis=-1) # (bs,) + # for each input, if the target label is the same as the predicted label + # the mask as a False value and True otherwise + mask = tf.not_equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) + return mask + + def _get_nuns(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: + """ + """ + nuns_dict = self.search_nuns(inputs, targets) + nuns_indices, nuns_distances = nuns_dict["indices"], nuns_dict["distances"] + nuns = dataset_gather(self.cases_dataset, nuns_indices) + return nuns, nuns_distances + + def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: + """ + """ + # get the Nearest Unlike Neighbors and their distance to the related input + nuns, nuns_input_distances = self._get_nuns(inputs, targets) + + # initialize the search for the KLEOR semi-factual methods + sf_indices, input_sf_distances, nun_sf_distances, batch_indices = self._initialize_search(inputs) + + # iterate on batches + for batch_index, (cases, cases_targets) in enumerate(zip(self.cases_dataset, self.targets_dataset)): + # add new elements + # (n, current_bs, 2) + indices = batch_indices[:, : tf.shape(cases)[0]] + new_indices = tf.stack( + [tf.fill(indices.shape, tf.cast(batch_index, tf.int32)), indices], axis=-1 + ) + + # get filter masks + # (n, current_bs) + filter_mask = self.filter_fn(inputs, cases, targets, cases_targets) + + # compute distances + # (n, current_bs) + b_nun_sf_distances = self._crossed_distances_fn(nuns, cases, mask=filter_mask) + b_input_sf_distances 
= self._crossed_distances_fn(inputs, cases, mask=filter_mask)
+
+            # additional filtering
+            b_nun_sf_distances, b_input_sf_distances = self._additional_filtering(
+                b_nun_sf_distances, b_input_sf_distances, nuns_input_distances
+            )
+            # concatenate distances and indices
+            # (n, k+current_bs, 2)
+            concatenated_indices = tf.concat([sf_indices, new_indices], axis=1)
+            # (n, k+current_bs)
+            concatenated_nun_sf_distances = tf.concat([nun_sf_distances, b_nun_sf_distances], axis=1)
+            concatenated_input_sf_distances = tf.concat([input_sf_distances, b_input_sf_distances], axis=1)
+
+            # sort according to the smallest distances between sf and nun
+            # (n, k)
+            sort_order = tf.argsort(
+                concatenated_nun_sf_distances, axis=1, direction=self.order.name.upper()
+            )[:, : self.k]
+
+            sf_indices.assign(
+                tf.gather(concatenated_indices, sort_order, axis=1, batch_dims=1)
+            )
+            nun_sf_distances.assign(
+                tf.gather(concatenated_nun_sf_distances, sort_order, axis=1, batch_dims=1)
+            )
+            input_sf_distances.assign(
+                tf.gather(concatenated_input_sf_distances, sort_order, axis=1, batch_dims=1)
+            )
+
+        return input_sf_distances, sf_indices, nuns
+
+    def _initialize_search(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Variable, tf.Variable, tf.Variable, tf.Tensor]:
+        """
+        Initialize the search for the KLEOR semi-factual methods.
+        """
+        nb_inputs = tf.shape(inputs)[0]
+
+        # sf_indices shape (n, k, 2)
+        sf_indices = tf.Variable(tf.fill((nb_inputs, self.k, 2), -1))
+        # (n, k)
+        input_sf_distances = tf.Variable(tf.fill((nb_inputs, self.k), self.fill_value))
+        nun_sf_distances = tf.Variable(tf.fill((nb_inputs, self.k), self.fill_value))
+        # (n, bs)
+        batch_indices = tf.expand_dims(tf.range(self.batch_size, dtype=tf.int32), axis=0)
+        batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1))
+        return sf_indices, input_sf_distances, nun_sf_distances, batch_indices
+
+    @abstractmethod
+    def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]:
+        """
+        Additional filtering to apply to the distances.
+        """
+        raise NotImplementedError
+
+class KLEORSimMiss(BaseKLEOR):
+    """
+    KLEOR Sim-Miss search method: the semi-factual returned for an input is the case,
+    among those sharing the input's predicted label, that is closest to the input's
+    Nearest Unlike Neighbor (NUN).
+
+    Parameters
+    ----------
+    cases_dataset
+        Dataset of cases.
+    targets_dataset
+        Dataset of targets. Should be a one-hot encoding of the predicted class.
+    """
+    def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple:
+        return nun_sf_distances, input_sf_distances
+
+class KLEORGlobalSim(BaseKLEOR):
+    """
+    KLEOR Global-Sim search method: same as Sim-Miss, except that candidate cases are
+    additionally required to be closer to the input than the input's NUN is.
+
+    Parameters
+    ----------
+    cases_dataset
+        Dataset of cases.
+    targets_dataset
+        Dataset of targets. Should be a one-hot encoding of the predicted class.
+    """
+    def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple:
+        # filter out non-acceptable cases, i.e.
cases for which the distance to the input is greater + # than the distance between the input and its nun + # (n, current_bs) + mask = tf.less(input_sf_distances, nuns_input_distances) + nun_sf_distances = tf.where(mask, nun_sf_distances, self.fill_value) + input_sf_distances = tf.where(mask, input_sf_distances, self.fill_value) + return nun_sf_distances, input_sf_distances From fe5cc44b5e00ad0e8a92e1340575a8a39b61af84 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 15:28:28 +0200 Subject: [PATCH 042/138] feat: add the possibilities as an initialisation args --- xplique/example_based/search_methods/base.py | 8 +++++--- xplique/example_based/search_methods/knn.py | 12 ++++++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index a165688d..a7bf4e02 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -92,6 +92,7 @@ def __init__( search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + possibilities: Optional[List[str]] = None, ): # pylint: disable=R0801 # set batch size @@ -103,7 +104,7 @@ def __init__( self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size) self.set_k(k) - self.set_returns(search_returns) + self.set_returns(search_returns, possibilities) # set targets_dataset if targets_dataset is not None: @@ -125,7 +126,7 @@ def set_k(self, k: int): assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" self.k = k - def set_returns(self, returns: Optional[Union[List[str], str]] = None): + def set_returns(self, returns: Optional[Union[List[str], str]] = None, possibilities: Optional[List[str]] = None): """ Set `self.returns` used to define returned elements in `self.find_examples()`. @@ -143,7 +144,8 @@ def set_returns(self, returns: Optional[Union[List[str], str]] = None): - 'include_inputs' specify if inputs should be included in the returned elements. Note that it changes the number of returned elements from k to k+1. 
""" - possibilities = ["examples", "indices", "distances", "include_inputs"] + if possibilities is None: + possibilities = ["examples", "indices", "distances", "include_inputs"] default = "examples" self.returns = _sanitize_returns(returns, possibilities, default) diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index 5291999a..e53833cd 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -23,9 +23,10 @@ def __init__( batch_size: Optional[int] = 32, order: ORDER = ORDER.ASCENDING, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + possibilities: Optional[List[str]] = None, ): super().__init__( - cases_dataset, k, search_returns, batch_size, targets_dataset + cases_dataset, k, search_returns, batch_size, targets_dataset, possibilities ) assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}" @@ -130,9 +131,10 @@ def __init__( distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + possibilities: Optional[List[str]] = None, ): # pylint: disable=R0801 super().__init__( - cases_dataset, k, search_returns, batch_size, order, targets_dataset + cases_dataset, k, search_returns, batch_size, order, targets_dataset, possibilities ) if hasattr(distance, "__call__"): @@ -236,6 +238,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf class FilterKNN(BaseKNN): """ + TODO: Change the class description KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors`. Basically a wrapper of `NearestNeighbors` to match the `BaseSearchMethod` API. @@ -271,10 +274,11 @@ def __init__( search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", - order: ORDER = ORDER.ASCENDING + order: ORDER = ORDER.ASCENDING, + possibilities: Optional[List[str]] = None, ): # pylint: disable=R0801 super().__init__( - cases_dataset, k, search_returns, batch_size, order, targets_dataset + cases_dataset, k, search_returns, batch_size, order, targets_dataset, possibilities ) if hasattr(distance, "__call__"): From 67b78047891014286be24b71f994e7cea08e60b1 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 15:29:18 +0200 Subject: [PATCH 043/138] feat: add the KLEOR example based method and its tests --- tests/example_based/test_contrastive.py | 292 ++++++++++++------ xplique/example_based/contrastive_examples.py | 206 ++++++++---- 2 files changed, 344 insertions(+), 154 deletions(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index bac1aaa2..82a47d60 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -1,14 +1,12 @@ """ Tests for the contrastive methods. 
""" -import pytest - import tensorflow as tf import numpy as np -from xplique.example_based import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals, NaiveCounterFactuals +from xplique.example_based import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEOR -def test_naive_semi_factuals(): +def test_naive_counter_factuals(): """ """ cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) @@ -16,21 +14,21 @@ def test_naive_semi_factuals(): cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) - semi_factuals = NaiveSemiFactuals(cases_dataset, cases_targets_dataset, k=2, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) + counter_factuals = NaiveCounterFactuals(cases_dataset, cases_targets_dataset, k=2, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) - mask = semi_factuals.filter_fn(inputs, cases, targets, cases_targets) + mask = counter_factuals.filter_fn(inputs, cases, targets, cases_targets) assert mask.shape == (inputs.shape[0], cases.shape[0]) expected_mask = tf.constant([ - [True, False, False, True, False], [False, True, True, False, True], - [False, True, True, False, True]], dtype=tf.bool) + [True, False, False, True, False], + [True, False, False, True, False]], dtype=tf.bool) assert tf.reduce_all(tf.equal(mask, expected_mask)) - return_dict = semi_factuals(inputs, targets) + return_dict = counter_factuals(inputs, targets) assert set(return_dict.keys()) == set(["examples", "indices", "distances"]) examples = return_dict["examples"] @@ -42,159 +40,259 @@ def test_naive_semi_factuals(): assert indices.shape == (3, 2, 2) # (n, k, 2) expected_examples = tf.constant([ - [[1.5, 2.5], [4., 5.], [1., 2.]], - [[2.5, 3.5], [5., 6.], [2., 3.]], - [[4.5, 5.5], [2., 3.], [3., 4.]]], dtype=tf.float32) + [[1.5, 2.5], [2., 3.], [3., 4.]], + [[2.5, 3.5], [1., 2.], [4., 5.]], + [[4.5, 5.5], [4., 5.], [1., 2.]]], dtype=tf.float32) assert tf.reduce_all(tf.equal(examples, expected_examples)) - expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32) + expected_distances = tf.constant([[np.sqrt(2*0.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*1.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*0.5**2), np.sqrt(2*3.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - expected_indices = tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32) + expected_indices = tf.constant([[[0, 1], [1, 0]],[[0, 0], [1, 1]],[[1, 1], [0, 0]]], dtype=tf.int32) assert tf.reduce_all(tf.equal(indices, expected_indices)) -def test_labelaware_semifactuals(): +def test_label_aware_cf(): """ + Test suite for the LabelAwareCounterFactuals class """ + # Same tests as the previous one but with the LabelAwareCounterFactuals class + # thus we only needs to use cf_targets = 1 - targets of the previous tests cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) + 
counter_factuals = LabelAwareCounterFactuals(cases_dataset, cases_targets_dataset, k=1, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) - targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) - - semi_factuals = PredictedLabelAwareSemiFactuals(cases_dataset, cases_targets_dataset, target_label=0, k=2, batch_size=2, case_returns=["examples", "distances", "include_inputs"]) - # assert the filtering on the right label went right - - combined_dataset = tf.data.Dataset.zip((cases_dataset.unbatch(), cases_targets_dataset.unbatch())) - combined_dataset = combined_dataset.filter(lambda x, y: tf.equal(tf.argmax(y, axis=-1),0)) - - filter_cases = semi_factuals.cases_dataset - filter_targets = semi_factuals.targets_dataset - - expected_filter_cases = tf.constant([[2., 3.], [3., 4.], [5., 6.]], dtype=tf.float32) - expected_filter_targets = tf.constant([[1, 0], [1, 0], [1, 0]], dtype=tf.float32) + cf_targets = tf.constant([[1, 0], [0, 1], [0, 1]], dtype=tf.float32) - tensor_filter_cases = [] - for elem in filter_cases.unbatch(): - tensor_filter_cases.append(elem) - tensor_filter_cases = tf.stack(tensor_filter_cases) - assert tf.reduce_all(tf.equal(tensor_filter_cases, expected_filter_cases)) - - tensor_filter_targets = [] - for elem in filter_targets.unbatch(): - tensor_filter_targets.append(elem) - tensor_filter_targets = tf.stack(tensor_filter_targets) - assert tf.reduce_all(tf.equal(tensor_filter_targets, expected_filter_targets)) + mask = counter_factuals.filter_fn(inputs, cases, cf_targets, cases_targets) + assert mask.shape == (inputs.shape[0], cases.shape[0]) - # check the call method - filter_inputs = tf.constant([[2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) - filter_targets = tf.constant([[1, 0], [1, 0]], dtype=tf.float32) + expected_mask = tf.constant([ + [False, True, True, False, True], + [True, False, False, True, False], + [True, False, False, True, False]], dtype=tf.bool) + assert tf.reduce_all(tf.equal(mask, expected_mask)) - return_dict = semi_factuals(filter_inputs, filter_targets) - assert set(return_dict.keys()) == set(["examples", "distances"]) + return_dict = counter_factuals(inputs, cf_targets) + assert set(return_dict.keys()) == set(["examples", "indices", "distances"]) examples = return_dict["examples"] distances = return_dict["distances"] + indices = return_dict["indices"] - assert examples.shape == (2, 3, 2) # (n_label0, k+1, W) - assert distances.shape == (2, 2) # (n_label0, k) + assert examples.shape == (3, 2, 2) # (n, k+1, W) + assert distances.shape == (3, 1) # (n, k) + assert indices.shape == (3, 1, 2) # (n, k, 2) expected_examples = tf.constant([ - [[2.5, 3.5], [5., 6.], [2., 3.]], - [[4.5, 5.5], [2., 3.], [3., 4.]]], dtype=tf.float32) + [[1.5, 2.5], [2., 3.]], + [[2.5, 3.5], [1., 2.]], + [[4.5, 5.5], [4., 5.]]], dtype=tf.float32) assert tf.reduce_all(tf.equal(examples, expected_examples)) - expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32) + expected_distances = tf.constant([[np.sqrt(2*0.5**2)], [np.sqrt(2*1.5**2)], [np.sqrt(2*0.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - # check an error is raised when a target does not match the target label - with pytest.raises(AssertionError): - semi_factuals(inputs, targets) + expected_indices = tf.constant([[[0, 1]],[[0, 0]],[[1, 1]]], dtype=tf.int32) + assert 
tf.reduce_all(tf.equal(indices, expected_indices)) - # same but with the other label - semi_factuals = PredictedLabelAwareSemiFactuals(cases_dataset, cases_targets_dataset, target_label=1, k=2, batch_size=2, case_returns=["examples", "distances", "include_inputs"]) - filter_cases = semi_factuals.cases_dataset - filter_targets = semi_factuals.targets_dataset + # Now let's dive when multiple classes are available in 1D + cases = tf.constant([[1.], [2.], [3.], [4.], [5.], [6.], [7.], [8.], [9.], [10.]], dtype=tf.float32) + cases_targets = tf.constant([[0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 0], [1, 0, 0], [0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=tf.float32) - expected_filter_cases = tf.constant([[1., 2.], [4., 5.]], dtype=tf.float32) - expected_filter_targets = tf.constant([[0, 1], [0, 1]], dtype=tf.float32) + cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) + cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) - tensor_filter_cases = [] - for elem in filter_cases.unbatch(): - tensor_filter_cases.append(elem) - tensor_filter_cases = tf.stack(tensor_filter_cases) - assert tf.reduce_all(tf.equal(tensor_filter_cases, expected_filter_cases)) + counter_factuals = LabelAwareCounterFactuals(cases_dataset, cases_targets_dataset, k=1, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) - tensor_filter_targets = [] - for elem in filter_targets.unbatch(): - tensor_filter_targets.append(elem) - tensor_filter_targets = tf.stack(tensor_filter_targets) - assert tf.reduce_all(tf.equal(tensor_filter_targets, expected_filter_targets)) + inputs = tf.constant([[1.5], [2.5], [4.5], [6.5], [8.5]], dtype=tf.float32) + cf_targets = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]], dtype=tf.float32) - # check the call method - filter_inputs = tf.constant([[1.5, 2.5]], dtype=tf.float32) - filter_targets = tf.constant([[0, 1]], dtype=tf.float32) + mask = counter_factuals.filter_fn(inputs, cases, cf_targets, cases_targets) + assert mask.shape == (inputs.shape[0], cases.shape[0]) - return_dict = semi_factuals(filter_inputs, filter_targets) - assert set(return_dict.keys()) == set(["examples", "distances"]) + expected_mask = tf.constant([ + [False, True, False, True, True, False, False, False, False, True], + [True, False, False, False, False, False, True, False, True, False], + [False, False, True, False, False, True, False, True, False, False], + [False, False, True, False, False, True, False, True, False, False], + [True, False, False, False, False, False, True, False, True, False]], dtype=tf.bool) + assert tf.reduce_all(tf.equal(mask, expected_mask)) + + return_dict = counter_factuals(inputs, cf_targets) + assert set(return_dict.keys()) == set(["examples", "indices", "distances"]) examples = return_dict["examples"] distances = return_dict["distances"] + indices = return_dict["indices"] - assert examples.shape == (1, 3, 2) # (n_label1, k+1, W) - assert distances.shape == (1, 2) # (n_label1, k) + assert examples.shape == (5, 2, 1) # (n, k+1, W) + assert distances.shape == (5, 1) # (n, k) + assert indices.shape == (5, 1, 2) # (n, k, 2) expected_examples = tf.constant([ - [[1.5, 2.5], [4., 5.], [1., 2.]]], dtype=tf.float32) + [[1.5], [2.]], + [[2.5], [1.]], + [[4.5], [3.]], + [[6.5], [6.]], + [[8.5], [9.]]], dtype=tf.float32) assert tf.reduce_all(tf.equal(examples, expected_examples)) - expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)]], dtype=tf.float32) + expected_distances = 
tf.constant([[np.sqrt(0.5**2)], [np.sqrt(1.5**2)], [np.sqrt(1.5**2)], [np.sqrt(0.5**2)], [np.sqrt(0.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) -def test_naive_counter_factuals(): + expected_indices = tf.constant([[[0, 1]],[[0, 0]],[[1, 0]],[[2, 1]],[[4, 0]]], dtype=tf.int32) + assert tf.reduce_all(tf.equal(indices, expected_indices)) + +def test_kleor(): """ + Test suite for the Kleor class """ + # setup the tests cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) - counter_factuals = NaiveCounterFactuals(cases_dataset, cases_targets_dataset, k=2, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) - mask = counter_factuals.filter_fn(inputs, cases, targets, cases_targets) - assert mask.shape == (inputs.shape[0], cases.shape[0]) - - expected_mask = tf.constant([ - [False, True, True, False, True], - [True, False, False, True, False], - [True, False, False, True, False]], dtype=tf.bool) - assert tf.reduce_all(tf.equal(mask, expected_mask)) + # start when strategy is sim_miss + kleor_sim_miss = KLEOR( + cases_dataset, + cases_targets_dataset, + k=1, + case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], + batch_size=2, + strategy="sim_miss" + ) - return_dict = counter_factuals(inputs, targets) - assert set(return_dict.keys()) == set(["examples", "indices", "distances"]) + return_dict = kleor_sim_miss(inputs, targets) + assert set(return_dict.keys()) == set(["examples", "indices", "distances", "nuns"]) examples = return_dict["examples"] distances = return_dict["distances"] indices = return_dict["indices"] + nuns = return_dict["nuns"] - assert examples.shape == (3, 3, 2) # (n, k+1, W) - assert distances.shape == (3, 2) # (n, k) - assert indices.shape == (3, 2, 2) # (n, k, 2) + expected_nuns = tf.constant([ + [[2., 3.]], + [[1., 2.]], + [[4., 5.]]], dtype=tf.float32) + assert tf.reduce_all(tf.equal(nuns, expected_nuns)) + + assert examples.shape == (3, 2, 2) # (n, k+1, W) + assert distances.shape == (3, 1) # (n, k) + assert indices.shape == (3, 1, 2) # (n, k, 2) expected_examples = tf.constant([ - [[1.5, 2.5], [2., 3.], [3., 4.]], - [[2.5, 3.5], [1., 2.], [4., 5.]], - [[4.5, 5.5], [4., 5.], [1., 2.]]], dtype=tf.float32) + [[1.5, 2.5], [1., 2.]], + [[2.5, 3.5], [2., 3.]], + [[4.5, 5.5], [3., 4.]]], dtype=tf.float32) assert tf.reduce_all(tf.equal(examples, expected_examples)) - expected_distances = tf.constant([[np.sqrt(2*0.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*1.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*0.5**2), np.sqrt(2*3.5**2)]], dtype=tf.float32) + expected_distances = tf.constant([[np.sqrt(2*0.5**2)], [np.sqrt(2*0.5**2)], [np.sqrt(2*1.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - expected_indices = tf.constant([[[0, 1], [1, 0]],[[0, 0], [1, 1]],[[1, 1], [0, 0]]], dtype=tf.int32) + expected_indices = tf.constant([[[0, 0]],[[0, 1]],[[1, 0]]], dtype=tf.int32) + assert tf.reduce_all(tf.equal(indices, expected_indices)) + + # now strategy is global_sim + kleor_global_sim = KLEOR( + cases_dataset, + cases_targets_dataset, + k=1, + 
case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], + batch_size=2, + strategy="global_sim" + ) + + return_dict = kleor_global_sim(inputs, targets) + assert set(return_dict.keys()) == set(["examples", "indices", "distances", "nuns"]) + + nuns = return_dict["nuns"] + assert tf.reduce_all(tf.equal(nuns, expected_nuns)) + + examples = return_dict["examples"] + distances = return_dict["distances"] + indices = return_dict["indices"] + + assert examples.shape == (3, 2, 2) # (n, k+1, W) + assert distances.shape == (3, 1) # (n, k) + assert indices.shape == (3, 1, 2) # (n, k, 2) + + expected_indices = tf.constant([[[-1, -1]],[[0, 1]],[[-1, -1]]], dtype=tf.int32) assert tf.reduce_all(tf.equal(indices, expected_indices)) + + expected_distances = tf.constant([[np.inf], [np.sqrt(2*0.5**2)], [np.inf]], dtype=tf.float32) + # create masks for inf values + inf_mask_dist = tf.math.is_inf(distances) + inf_mask_expected_distances = tf.math.is_inf(expected_distances) + assert tf.reduce_all(tf.equal(inf_mask_dist, inf_mask_expected_distances)) + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_dist, 0.0, distances) - tf.where(inf_mask_expected_distances, 0.0, expected_distances) + ) < 1e-5) + + expected_examples = tf.constant([ + [[1.5, 2.5], [np.inf, np.inf]], + [[2.5, 3.5], [2., 3.]], + [[4.5, 5.5], [np.inf, np.inf]]], dtype=tf.float32) + # mask for inf values + inf_mask_examples = tf.math.is_inf(examples) + inf_mask_expected_examples = tf.math.is_inf(expected_examples) + assert tf.reduce_all(tf.equal(inf_mask_examples, inf_mask_expected_examples)) + assert tf.reduce_all( + tf.abs(tf.where(inf_mask_examples, 0.0, examples) - tf.where(inf_mask_expected_examples, 0.0, expected_examples) + ) < 1e-5) + +# def test_kleor_global_sim(): +# """ +# Test suite for the KleorSimMiss class +# """ +# cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) +# cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) + +# cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) +# cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) +# semi_factuals = KLEOR( +# cases_dataset, +# cases_targets_dataset, +# k=1, +# case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], +# batch_size=2, +# strategy="global_sim" +# ) + +# return_dict = semi_factuals(inputs, targets) +# assert set(return_dict.keys()) == set(["examples", "indices", "distances", "nuns"]) + +# examples = return_dict["examples"] +# distances = return_dict["distances"] +# indices = return_dict["indices"] +# nuns = return_dict["nuns"] + +# expected_nuns = tf.constant([ +# [[2., 3.]], +# [[1., 2.]], +# [[4., 5.]]], dtype=tf.float32) +# assert tf.reduce_all(tf.equal(nuns, expected_nuns)) + +# assert examples.shape == (3, 2, 2) # (n, k+1, W) +# assert distances.shape == (3, 1) # (n, k) +# assert indices.shape == (3, 1, 2) # (n, k, 2) + +# expected_examples = tf.constant([ +# [[1.5, 2.5], [1., 2.]], +# [[2.5, 3.5], [2., 3.]], +# [[4.5, 5.5], [3., 4.]]], dtype=tf.float32) +# assert tf.reduce_all(tf.equal(examples, expected_examples)) + +# expected_distances = tf.constant([[np.sqrt(2*0.5**2)], [np.sqrt(2*0.5**2)], [np.sqrt(2*1.5**2)]], dtype=tf.float32) +# assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) + +# expected_indices = tf.constant([[[0, 0]],[[0, 1]],[[1, 0]]], dtype=tf.int32) +# assert tf.reduce_all(tf.equal(indices, expected_indices)) diff --git a/xplique/example_based/contrastive_examples.py 
b/xplique/example_based/contrastive_examples.py
index 83b03b11..0b996f89 100644
--- a/xplique/example_based/contrastive_examples.py
+++ b/xplique/example_based/contrastive_examples.py
@@ -1,19 +1,25 @@
 """
 Implementation of both counterfactuals and semi factuals methods for classification tasks.
+
+Semi-factual methods guided by a counterfactual, to be implemented (I think): KLEOR, at least Sim-Miss and Global-Sim
+Semi-factual methods free of counterfactuals, to be implemented: MDN (but it has to be adapted), Local-Region Model??
 """
 import numpy as np
 import tensorflow as tf
 
-from ..types import Callable, List, Optional, Union
+from ..types import Callable, List, Optional, Union, Dict
+from ..commons import sanitize_inputs_targets
 from .base_example_method import BaseExampleMethod
 
-from .search_methods import BaseSearchMethod, KNN, ORDER, FilterKNN
+from .search_methods import ORDER, FilterKNN, KLEORSimMiss, KLEORGlobalSim
 from .projections import Projection
 
-class NaiveSemiFactuals(BaseExampleMethod):
+from .search_methods.base import _sanitize_returns
+
+class NaiveCounterFactuals(BaseExampleMethod):
     """
-    Define a naive version of semi factuals search. That for a given sample
-    it will return the farthest sample which have the same label.
+    This class searches for counterfactuals: for a given input, it returns the closest samples that do not
+    share the input's label. It is a naive approach, as it follows a greedy strategy.
     """
     def __init__(
         self,
@@ -42,7 +48,7 @@ def __init__(
             batch_size=batch_size,
             distance=distance,
             filter_fn=self.filter_fn,
-            order = ORDER.DESCENDING
+            order = ORDER.ASCENDING
         )
 
 
@@ -58,19 +64,18 @@ def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor:
         # for each input, if the target label is the same as the predicted label
         # the mask as a True value and False otherwise
         label_targets = tf.argmax(cases_targets, axis=-1) # (bs,)
-        mask = tf.equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs)
+        mask = tf.not_equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs)
         return mask
 
-class PredictedLabelAwareSemiFactuals(BaseExampleMethod):
+class LabelAwareCounterFactuals(BaseExampleMethod):
     """
-    As we know semi-factuals should belong to the same class as the input,
-    we propose here a method that is dedicated to a specific label.
+    This method searches for counterfactuals with a specific label. This label should be provided by the user
+    through the `cf_targets` argument of the `explain()` method.
""" def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - target_label: int, labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, k: int = 1, projection: Union[Projection, Callable] = None, @@ -78,23 +83,11 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): - # filter the cases dataset and targets dataset to keep only the ones - # that have the target label - # TODO: improve this unbatch and batch - combined_dataset = tf.data.Dataset.zip((cases_dataset.unbatch(), targets_dataset.unbatch())) - combined_dataset = combined_dataset.filter(lambda x, y: tf.equal(tf.argmax(y, axis=-1),target_label)) - - # separate the cases and targets - cases_dataset = combined_dataset.map(lambda x, y: x).batch(batch_size) - targets_dataset = combined_dataset.map(lambda x, y: y).batch(batch_size) - - # delete the combined dataset - del combined_dataset + search_method = FilterKNN if projection is None: projection = Projection(space_projection=lambda inputs: inputs) - - search_method = KNN + # TODO: add a warning here if it is a custom projection that requires using targets as it might mismatch with the explain super().__init__( cases_dataset=cases_dataset, @@ -106,24 +99,60 @@ def __init__( case_returns=case_returns, batch_size=batch_size, distance=distance, - order = ORDER.DESCENDING + filter_fn=self.filter_fn, + order = ORDER.ASCENDING ) - self.target_label = target_label + def filter_fn(self, _, __, cf_targets, cases_targets) -> tf.Tensor: + """ + Filter function to mask the cases for which the label is different from the label(s) expected for the + counterfactuals. + + Parameters + ---------- + cf_targets + TODO + cases_targets + TODO + """ + mask = tf.matmul(cf_targets, cases_targets, transpose_b=True) #(n, bs) + # TODO: I think some retracing are done here + mask = tf.cast(mask, dtype=tf.bool) + return mask - def __call__( + @sanitize_inputs_targets + def explain( self, inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + cf_targets: Union[tf.Tensor, np.ndarray], ): - # assert targets are all the same as the target label - if targets is not None: - assert tf.reduce_all(tf.argmax(targets, axis=-1) == self.target_label), "All targets should be the same as the target label." - return super().__call__(inputs, targets) + """ + Compute examples to explain the inputs. + It project inputs with `self.projection` in the search space + and find examples with `self.search_method`. -class NaiveCounterFactuals(BaseExampleMethod): + Parameters + ---------- + inputs + Tensor or Array. Input samples to be explained. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + More information in the documentation. + cf_targets + TODO: change the description here + + Returns + ------- + return_dict + Dictionary with listed elements in `self.returns`. + If only one element is present it returns the element. + The elements that can be returned are: + examples, weights, distances, indices, and labels. 
+ """ + # TODO make an assert on the cf_targets + return super().explain(inputs, cf_targets) + +class KLEOR(BaseExampleMethod): """ - """ def __init__( self, @@ -135,38 +164,101 @@ def __init__( case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", + strategy: str = "sim_miss", ): - search_method = FilterKNN + + self.k = k + self.set_returns(case_returns) + + if strategy == "global_sim": + search_method = KLEORGlobalSim + elif strategy == "sim_miss": + search_method = KLEORSimMiss + else: + raise ValueError("strategy should be either 'global_sim' or 'sim_miss'.") if projection is None: projection = Projection(space_projection=lambda inputs: inputs) - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - search_method=search_method, + # set attributes + batch_size = super()._initialize_cases_dataset( + cases_dataset, labels_dataset, targets_dataset, batch_size + ) + + assert hasattr(projection, "__call__"), "projection should be a callable." + + # check projection type + if isinstance(projection, Projection): + self.projection = projection + elif hasattr(projection, "__call__"): + self.projection = Projection(get_weights=None, space_projection=projection) + else: + raise AttributeError( + "projection should be a `Projection` or a `Callable`, not a" + + f"{type(projection)}" + ) + + # project dataset + projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, + self.targets_dataset) + + # set `search_returns` if not provided and overwrite it otherwise + if isinstance(case_returns, list) and ("nuns" in case_returns): + search_method_returns = ["indices", "distances", "nuns"] + else: + search_method_returns = ["indices", "distances"] + + # initiate search_method + self.search_method = search_method( + cases_dataset=projected_cases_dataset, + targets_dataset=self.targets_dataset, k=k, - projection=projection, - case_returns=case_returns, + search_returns=search_method_returns, batch_size=batch_size, distance=distance, - filter_fn=self.filter_fn, - order = ORDER.ASCENDING ) - - def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: + def set_returns(self, returns: Union[List[str], str]): """ - Filter function to mask the cases for which the label is different from the predicted - label on the inputs. + Set `self.returns` used to define returned elements in `self.explain()`. + + Parameters + ---------- + returns + Most elements are useful in `xplique.plots.plot_examples()`. + `returns` can be set to 'all' for all possible elements to be returned. + - 'examples' correspond to the expected examples, + the inputs may be included in first position. (n, k(+1), ...) + - 'weights' the weights in the input space used in the projection. + They are associated to the input and the examples. (n, k(+1), ...) + - 'distances' the distances between the inputs and the corresponding examples. + They are associated to the examples. (n, k, ...) + - 'labels' if provided through `dataset_labels`, + they are the labels associated with the examples. (n, k, ...) + - 'include_inputs' specify if inputs should be included in the returned elements. + Note that it changes the number of returned elements from k to k+1. 
""" - # get the labels predicted by the model - # (n, ) - predicted_labels = tf.argmax(targets, axis=-1) + possibilities = ["examples", "weights", "distances", "labels", "include_inputs", "nuns"] + default = "examples" + self.returns = _sanitize_returns(returns, possibilities, default) - # for each input, if the target label is the same as the predicted label - # the mask as a True value and False otherwise - label_targets = tf.argmax(cases_targets, axis=-1) # (bs,) - mask = tf.not_equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) - return mask + def format_search_output( + self, + search_output: Dict[str, tf.Tensor], + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + ): + """ + """ + return_dict = super().format_search_output(search_output, inputs, targets) + if "nuns" in self.returns: + if isinstance(return_dict, dict): + return_dict["nuns"] = search_output["nuns"] + else: + # find the other only key + other_key = [k for k in self.returns if k != "nuns"][0] + return_dict = { + other_key: return_dict, + "nuns": search_output["nuns"] + } + return return_dict From c6be204c930ae25af17bdca9b9906a280554ed19 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 15:30:07 +0200 Subject: [PATCH 044/138] feat: add the kleor method in the package init --- xplique/example_based/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index 89e08d1b..5e23b9d6 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -4,4 +4,4 @@ from .cole import Cole from .similar_examples import SimilarExamples -from .contrastive_examples import NaiveSemiFactuals, PredictedLabelAwareSemiFactuals, NaiveCounterFactuals +from .contrastive_examples import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEOR From aa9a50001846d35506478e350bde3d205cd60839 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Tue, 21 May 2024 17:45:23 +0200 Subject: [PATCH 045/138] fix: change the set_returns and set_k methods to properties with a setter for easier factorization. 
Some changes to improve factorization and update of tests accordingly --- tests/example_based/test_knn.py | 4 +- tests/example_based/test_similar_examples.py | 15 ++-- xplique/example_based/base_example_method.py | 90 ++++++++++--------- xplique/example_based/contrastive_examples.py | 88 +++++------------- xplique/example_based/search_methods/base.py | 47 +++++----- xplique/example_based/search_methods/kleor.py | 31 ++----- xplique/example_based/search_methods/knn.py | 24 ++--- 7 files changed, 120 insertions(+), 179 deletions(-) diff --git a/tests/example_based/test_knn.py b/tests/example_based/test_knn.py index 63d4d504..61740a0e 100644 --- a/tests/example_based/test_knn.py +++ b/tests/example_based/test_knn.py @@ -106,14 +106,14 @@ def test_base_find_examples(): search_returns = returns, ) return_dict = mock_knn.find_examples(inputs) - assert return_dict.shape == (5, 3, 3) + assert return_dict["examples"].shape == (5, 3, 3) mock_knn = MockKNN( tf.constant([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.], [10., 11., 12.]], dtype=tf.float32), k = 2, ) return_dict = mock_knn.find_examples(inputs) - assert return_dict.shape == (5, 2, 3) + assert return_dict["examples"].shape == (5, 2, 3) def test_knn_init(): """ diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py index 2ec371d3..db4af594 100644 --- a/tests/example_based/test_similar_examples.py +++ b/tests/example_based/test_similar_examples.py @@ -12,12 +12,10 @@ import numpy as np import tensorflow as tf -from xplique.commons import sanitize_dataset, are_dataset_first_elems_equal -from xplique.types import Union +from xplique.commons import are_dataset_first_elems_equal from xplique.example_based import SimilarExamples -from xplique.example_based.projections import Projection, LatentSpaceProjection -from xplique.example_based.search_methods import KNN +from xplique.example_based.projections import Projection from tests.utils import almost_equal @@ -154,7 +152,7 @@ def test_similar_examples_basic(): ) # Generate explanation - examples = method.explain(x_test) + examples = method.explain(x_test)["examples"] # Verifications # Shape should be (n, k, h, w, c) @@ -198,9 +196,8 @@ def test_similar_examples_return_multiple_elements(): distance="euclidean", ) - method.set_returns("all") - - method.set_k(k) + method.returns = "all" + method.k = k # Generate explanation method_output = method.explain(x_test) @@ -278,7 +275,7 @@ def test_similar_examples_weighting(): ) # Generate explanation - examples = method.explain(x_test) + examples = method.explain(x_test)["examples"] # Verifications # Shape should be (n, k, h, w, c) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 9ce6b154..f45bdc91 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -72,6 +72,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar search_method_kwargs Parameters to be passed at the construction of the `search_method`. """ + _returns_possibilities = ["examples", "weights", "distances", "labels", "include_inputs"] def __init__( self, @@ -94,9 +95,7 @@ def __init__( cases_dataset, labels_dataset, targets_dataset, batch_size ) - self.k = k - self.set_returns(case_returns) - + self._search_returns = ["indices", "distances"] assert hasattr(projection, "__call__"), "projection should be a callable." 
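        # a bare callable is also accepted here and wrapped into a `Projection`
        # just below; a minimal sketch, with `feature_extractor` a hypothetical
        # latent-space model:
        #   projection = lambda inputs: feature_extractor(inputs)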
# check projection type @@ -115,7 +114,7 @@ def __init__( self.targets_dataset) # set `search_returns` if not provided and overwrite it otherwise - search_method_kwargs["search_returns"] = ["indices", "distances"] + search_method_kwargs["search_returns"] = self._search_returns # initiate search_method self.search_method = search_method( @@ -125,6 +124,49 @@ def __init__( targets_dataset=self.targets_dataset, **search_method_kwargs, ) + self.k = k + self.returns = case_returns + + @property + def k(self) -> int: + """Getter for the k parameter.""" + return self._k + + @k.setter + def k(self, k: int): + """Setter for the k parameter.""" + assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" + self._k = k + self.search_method.k = k + + @property + def returns(self) -> Union[List[str], str]: + """Getter for the returns parameter.""" + return self._returns + + @returns.setter + def returns(self, returns: Union[List[str], str]): + """ + Setter for the returns parameter used to define returned elements in `self.explain()`. + + Parameters + ---------- + returns + Most elements are useful in `xplique.plots.plot_examples()`. + `returns` can be set to 'all' for all possible elements to be returned. + - 'examples' correspond to the expected examples, + the inputs may be included in first position. (n, k(+1), ...) + - 'weights' the weights in the input space used in the projection. + They are associated to the input and the examples. (n, k(+1), ...) + - 'distances' the distances between the inputs and the corresponding examples. + They are associated to the examples. (n, k, ...) + - 'labels' if provided through `dataset_labels`, + they are the labels associated with the examples. (n, k, ...) + - 'include_inputs' specify if inputs should be included in the returned elements. + Note that it changes the number of returned elements from k to k+1. + """ + default = "examples" + self._returns = _sanitize_returns(returns, self._returns_possibilities, default) def _initialize_cases_dataset( self, @@ -222,43 +264,6 @@ def _initialize_cases_dataset( return batch_size - def set_k(self, k: int): - """ - Setter for the k parameter. - - Parameters - ---------- - k - Number of examples to return, it should be a positive integer. - """ - assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" - self.k = k - self.search_method.set_k(k) - - def set_returns(self, returns: Union[List[str], str]): - """ - Set `self.returns` used to define returned elements in `self.explain()`. - - Parameters - ---------- - returns - Most elements are useful in `xplique.plots.plot_examples()`. - `returns` can be set to 'all' for all possible elements to be returned. - - 'examples' correspond to the expected examples, - the inputs may be included in first position. (n, k(+1), ...) - - 'weights' the weights in the input space used in the projection. - They are associated to the input and the examples. (n, k(+1), ...) - - 'distances' the distances between the inputs and the corresponding examples. - They are associated to the examples. (n, k, ...) - - 'labels' if provided through `dataset_labels`, - they are the labels associated with the examples. (n, k, ...) - - 'include_inputs' specify if inputs should be included in the returned elements. - Note that it changes the number of returned elements from k to k+1. 
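+
+            A minimal sketch of use, mirroring the tests updated above:
+            ```
+            explainer.returns = "all"  # or a list such as ["examples", "distances"]
+            explainer.k = 3            # the setter forwards the value to the search method
+            ```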
- """ - possibilities = ["examples", "weights", "distances", "labels", "include_inputs"] - default = "examples" - self.returns = _sanitize_returns(returns, possibilities, default) - @sanitize_inputs_targets def explain( self, @@ -392,7 +397,4 @@ def format_search_output( ), "The method cannot return labels without a label dataset." return_dict["labels"] = examples_labels - # return a dict only different variables are returned - if len(return_dict) == 1: - return list(return_dict.values())[0] return return_dict diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 0b996f89..0c584ae0 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -154,6 +154,8 @@ def explain( class KLEOR(BaseExampleMethod): """ """ + _returns_possibilities = ["examples", "weights", "distances", "labels", "include_inputs", "nuns"] + def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -166,9 +168,6 @@ def __init__( distance: Union[int, str, Callable] = "euclidean", strategy: str = "sim_miss", ): - - self.k = k - self.set_returns(case_returns) if strategy == "global_sim": search_method = KLEORGlobalSim @@ -180,67 +179,34 @@ def __init__( if projection is None: projection = Projection(space_projection=lambda inputs: inputs) - # set attributes - batch_size = super()._initialize_cases_dataset( - cases_dataset, labels_dataset, targets_dataset, batch_size - ) - - assert hasattr(projection, "__call__"), "projection should be a callable." - - # check projection type - if isinstance(projection, Projection): - self.projection = projection - elif hasattr(projection, "__call__"): - self.projection = Projection(get_weights=None, space_projection=projection) - else: - raise AttributeError( - "projection should be a `Projection` or a `Callable`, not a" - + f"{type(projection)}" - ) - - # project dataset - projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, - self.targets_dataset) - - # set `search_returns` if not provided and overwrite it otherwise - if isinstance(case_returns, list) and ("nuns" in case_returns): - search_method_returns = ["indices", "distances", "nuns"] - else: - search_method_returns = ["indices", "distances"] - - # initiate search_method - self.search_method = search_method( - cases_dataset=projected_cases_dataset, - targets_dataset=self.targets_dataset, + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + search_method=search_method, k=k, - search_returns=search_method_returns, + projection=projection, + case_returns=case_returns, batch_size=batch_size, distance=distance, ) - def set_returns(self, returns: Union[List[str], str]): - """ - Set `self.returns` used to define returned elements in `self.explain()`. + @property + def returns(self) -> Union[List[str], str]: + """Getter for the returns parameter.""" + return self._returns - Parameters - ---------- - returns - Most elements are useful in `xplique.plots.plot_examples()`. - `returns` can be set to 'all' for all possible elements to be returned. - - 'examples' correspond to the expected examples, - the inputs may be included in first position. (n, k(+1), ...) - - 'weights' the weights in the input space used in the projection. - They are associated to the input and the examples. (n, k(+1), ...) - - 'distances' the distances between the inputs and the corresponding examples. - They are associated to the examples. 
(n, k, ...) - - 'labels' if provided through `dataset_labels`, - they are the labels associated with the examples. (n, k, ...) - - 'include_inputs' specify if inputs should be included in the returned elements. - Note that it changes the number of returned elements from k to k+1. + @returns.setter + def returns(self, returns: Union[List[str], str]): + """ """ - possibilities = ["examples", "weights", "distances", "labels", "include_inputs", "nuns"] default = "examples" - self.returns = _sanitize_returns(returns, possibilities, default) + self._returns = _sanitize_returns(returns, self._returns_possibilities, default) + if isinstance(self._returns, list) and ("nuns" in self._returns): + self._search_returns = ["indices", "distances", "nuns"] + else: + self._search_returns = ["indices", "distances"] + self.search_method.returns = self._search_returns def format_search_output( self, @@ -252,13 +218,5 @@ def format_search_output( """ return_dict = super().format_search_output(search_output, inputs, targets) if "nuns" in self.returns: - if isinstance(return_dict, dict): - return_dict["nuns"] = search_output["nuns"] - else: - # find the other only key - other_key = [k for k in self.returns if k != "nuns"][0] - return_dict = { - other_key: return_dict, - "nuns": search_output["nuns"] - } + return_dict["nuns"] = search_output["nuns"] return return_dict diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index a7bf4e02..018f3ad4 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -7,7 +7,7 @@ import tensorflow as tf import numpy as np -from ...types import Callable, Union, Optional, List +from ...types import Union, Optional, List from ...commons import sanitize_dataset @@ -84,6 +84,7 @@ class BaseSearchMethod(ABC): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. """ + _returns_possibilities = ["examples", "indices", "distances", "include_inputs"] def __init__( self, @@ -92,7 +93,6 @@ def __init__( search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - possibilities: Optional[List[str]] = None, ): # pylint: disable=R0801 # set batch size @@ -103,8 +103,8 @@ def __init__( self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size) - self.set_k(k) - self.set_returns(search_returns, possibilities) + self.k = k + self.returns = search_returns # set targets_dataset if targets_dataset is not None: @@ -113,22 +113,26 @@ def __init__( # make an iterable of None self.targets_dataset = [None]*len(cases_dataset) - def set_k(self, k: int): - """ - Change value of k with constructing a new `BaseSearchMethod`. - It is useful because the constructor can be computationally expensive. + @property + def k(self) -> int: + """Getter for the k parameter.""" + return self._k - Parameters - ---------- - k - The number of examples to retrieve. 
- """ + @k.setter + def k(self, k: int): + """Setter for the k parameter.""" assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" - self.k = k + self._k = k - def set_returns(self, returns: Optional[Union[List[str], str]] = None, possibilities: Optional[List[str]] = None): + @property + def returns(self) -> Union[List[str], str]: + """Getter for the returns parameter.""" + return self._returns + + @returns.setter + def returns(self, returns: Union[List[str], str]): """ - Set `self.returns` used to define returned elements in `self.find_examples()`. + Setter for the returns parameter used to define returned elements in `self.explain()`. Parameters ---------- @@ -137,18 +141,17 @@ def set_returns(self, returns: Optional[Union[List[str], str]] = None, possibili `returns` can be set to 'all' for all possible elements to be returned. - 'examples' correspond to the expected examples, the inputs may be included in first position. (n, k(+1), ...) - - 'indices' the indices of the examples in the `search_set`. - Used to retrieve the original example and labels. (n, k, ...) + - 'weights' the weights in the input space used in the projection. + They are associated to the input and the examples. (n, k(+1), ...) - 'distances' the distances between the inputs and the corresponding examples. They are associated to the examples. (n, k, ...) + - 'labels' if provided through `dataset_labels`, + they are the labels associated with the examples. (n, k, ...) - 'include_inputs' specify if inputs should be included in the returned elements. Note that it changes the number of returned elements from k to k+1. """ - if possibilities is None: - possibilities = ["examples", "indices", "distances", "include_inputs"] default = "examples" - self.returns = _sanitize_returns(returns, possibilities, default) - + self._returns = _sanitize_returns(returns, self._returns_possibilities, default) @abstractmethod def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index 380d668a..c62c14cd 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -25,7 +25,6 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): # pylint: disable=R0801 - possibilities = ["examples", "indices", "distances", "include_inputs", "nuns"] super().__init__( cases_dataset = cases_dataset, targets_dataset=targets_dataset, @@ -35,7 +34,6 @@ def __init__( batch_size=batch_size, distance=distance, order=ORDER.ASCENDING, - possibilities=possibilities ) self.search_nuns = FilterKNN( @@ -65,32 +63,13 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ # compute neighbors examples_distances, examples_indices, nuns = self.kneighbors(inputs, targets) - # Set values in return dict - return_dict = {} - if "examples" in self.returns: - return_dict["examples"] = dataset_gather(self.cases_dataset, examples_indices) - # replace examples for which indices is -1, -1 by an inf value - # mask = tf.reduce_all(tf.equal(examples_indices, -1), axis=-1) - # return_dict["examples"] = tf.where( - # tf.expand_dims(mask, axis=-1), - # tf.fill(return_dict["examples"].shape, tf.constant(np.inf, dtype=tf.float32)), - # return_dict["examples"], - # ) - if "include_inputs" in self.returns: - inputs = tf.expand_dims(inputs, axis=1) - return_dict["examples"] = 
tf.concat( - [inputs, return_dict["examples"]], axis=1 - ) + # build return dict + return_dict = self._build_return_dict(inputs, examples_distances, examples_indices) + + # add the nuns if needed if "nuns" in self.returns: return_dict["nuns"] = nuns - if "indices" in self.returns: - return_dict["indices"] = examples_indices - if "distances" in self.returns: - return_dict["distances"] = examples_distances - - # Return a dict only different variables are returned - if len(return_dict) == 1: - return list(return_dict.values())[0] + return return_dict def _filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index e53833cd..be686b45 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -1,13 +1,12 @@ """ KNN online search method in example-based module """ -import math from abc import abstractmethod import numpy as np import tensorflow as tf -from ...commons import dataset_gather, sanitize_dataset +from ...commons import dataset_gather from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod, ORDER @@ -23,10 +22,9 @@ def __init__( batch_size: Optional[int] = 32, order: ORDER = ORDER.ASCENDING, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - possibilities: Optional[List[str]] = None, ): super().__init__( - cases_dataset, k, search_returns, batch_size, targets_dataset, possibilities + cases_dataset, k, search_returns, batch_size, targets_dataset ) assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}" @@ -79,6 +77,15 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ # compute neighbors examples_distances, examples_indices = self.kneighbors(inputs, targets) + # build the return dict + return_dict = self._build_return_dict(inputs, examples_distances, examples_indices) + + return return_dict + + def _build_return_dict(self, inputs, examples_distances, examples_indices): + """ + TODO: Change the description + """ # Set values in return dict return_dict = {} if "examples" in self.returns: @@ -93,9 +100,6 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ if "distances" in self.returns: return_dict["distances"] = examples_distances - # Return a dict only different variables are returned - if len(return_dict) == 1: - return list(return_dict.values())[0] return return_dict class KNN(BaseKNN): @@ -131,10 +135,9 @@ def __init__( distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - possibilities: Optional[List[str]] = None, ): # pylint: disable=R0801 super().__init__( - cases_dataset, k, search_returns, batch_size, order, targets_dataset, possibilities + cases_dataset, k, search_returns, batch_size, order, targets_dataset ) if hasattr(distance, "__call__"): @@ -275,10 +278,9 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, - possibilities: Optional[List[str]] = None, ): # pylint: disable=R0801 super().__init__( - cases_dataset, k, search_returns, batch_size, order, targets_dataset, possibilities + cases_dataset, k, search_returns, batch_size, order, targets_dataset ) if hasattr(distance, "__call__"): From 9d5062e21c03758be62ddafe703d260d0fd317bb Mon Sep 17 00:00:00 2001 
From: Antonin POCHE Date: Wed, 22 May 2024 17:32:08 +0200 Subject: [PATCH 046/138] example based: merge and solve part of problems and refacto --- tests/example_based/test_cole.py | 10 +- xplique/example_based/__init__.py | 5 +- xplique/example_based/base_example_method.py | 36 ++- xplique/example_based/cole.py | 20 +- xplique/example_based/contrastive_examples.py | 65 ++++- xplique/example_based/mmd_critic.py | 100 ------- xplique/example_based/proto_dash.py | 100 ------- xplique/example_based/proto_greedy.py | 100 ------- xplique/example_based/prototypes.py | 268 +++++++++--------- xplique/example_based/search_methods/base.py | 8 - xplique/example_based/search_methods/kleor.py | 4 +- xplique/example_based/search_methods/knn.py | 30 +- .../search_methods/proto_greedy_search.py | 6 +- xplique/example_based/similar_examples.py | 21 +- 14 files changed, 259 insertions(+), 514 deletions(-) delete mode 100644 xplique/example_based/mmd_critic.py delete mode 100644 xplique/example_based/proto_dash.py delete mode 100644 xplique/example_based/proto_greedy.py diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index a9dc1afe..ba71d5d3 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -104,9 +104,9 @@ def test_cole_attribution(): ) # Generate explanation - examples_constructor = method_constructor.explain(x_test, y_test) - examples_call = method_call.explain(x_test, y_test) - examples_different_distance = method_different_distance(x_test, y_test) + examples_constructor = method_constructor.explain(x_test, y_test)["examples"] + examples_call = method_call.explain(x_test, y_test)["examples"] + examples_different_distance = method_different_distance(x_test, y_test)["examples"] # Verifications # Shape should be (n, k, h, w, c) @@ -166,8 +166,8 @@ def test_cole_hadamard(): ) # Generate explanation - examples_constructor = method_constructor.explain(x_test, y_test) - examples_call = method_call.explain(x_test, y_test) + examples_constructor = method_constructor.explain(x_test, y_test)["examples"] + examples_call = method_call.explain(x_test, y_test)["examples"] # Verifications # Shape should be (n, k, h, w, c) diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index 7a174e91..e1d70b05 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -4,8 +4,5 @@ from .cole import Cole from .similar_examples import SimilarExamples -from .prototypes import Prototypes -from .proto_greedy import ProtoGreedy -from .proto_dash import ProtoDash -from .mmd_critic import MMDCritic +from .prototypes import Prototypes, ProtoGreedy, ProtoDash, MMDCritic from .contrastive_examples import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEOR diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index f45bdc91..0f4c1dfa 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -2,6 +2,8 @@ Base model for example-based """ +from abc import ABC, abstractmethod + import math import tensorflow as tf @@ -17,7 +19,7 @@ from .search_methods.base import _sanitize_returns -class BaseExampleMethod: +class BaseExampleMethod(ABC): """ Base class for natural example-based methods explaining models, they project the cases_dataset into a pertinent space for the with a `Projection`, @@ -79,19 +81,17 @@ def __init__( cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: 
Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - search_method: Type[BaseSearchMethod] = KNN, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, - **search_method_kwargs, ): assert ( projection is not None ), "`BaseExampleMethod` without `projection` is a `BaseSearchMethod`." # set attributes - batch_size = self._initialize_cases_dataset( + self.batch_size = self._initialize_cases_dataset( cases_dataset, labels_dataset, targets_dataset, batch_size ) @@ -110,22 +110,16 @@ def __init__( ) # project dataset - projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, - self.targets_dataset) - - # set `search_returns` if not provided and overwrite it otherwise - search_method_kwargs["search_returns"] = self._search_returns - - # initiate search_method - self.search_method = search_method( - cases_dataset=projected_cases_dataset, - k=k, - batch_size=batch_size, - targets_dataset=self.targets_dataset, - **search_method_kwargs, - ) + self.projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, + self.targets_dataset) + self.k = k self.returns = case_returns + + @property + @abstractmethod + def search_method_class(self) -> Type[BaseSearchMethod]: + raise NotImplementedError @property def k(self) -> int: @@ -137,7 +131,11 @@ def k(self, k: int): """Setter for the k parameter.""" assert isinstance(k, int) and k >= 1, f"k should be an int >= 1 and not {k}" self._k = k - self.search_method.k = k + + try: + self.search_method.k = k + except AttributeError: + pass @property def returns(self) -> Union[List[str], str]: diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py index 3fdfc82f..ca203b12 100644 --- a/xplique/example_based/cole.py +++ b/xplique/example_based/cole.py @@ -88,7 +88,7 @@ def __init__( distance: Union[str, Callable] = "euclidean", case_returns: Optional[Union[List[str], str]] = "examples", batch_size: Optional[int] = 32, - device: Optional[str] = None, + # device: Optional[str] = None, latent_layer: Optional[Union[str, int]] = None, attribution_method: Union[str, Type[BlackBoxExplainer]] = "gradient", **attribution_kwargs, @@ -104,7 +104,7 @@ def __init__( model=model, latent_layer=latent_layer, operator=operator, - device=device, + # device=device, ) elif issubclass(attribution_method, BlackBoxExplainer): # build attribution projection @@ -112,7 +112,7 @@ def __init__( model=model, method=attribution_method, latent_layer=latent_layer, - device=device, + # device=device, **attribution_kwargs, ) else: @@ -122,12 +122,12 @@ def __init__( ) super().__init__( - cases_dataset, - labels_dataset, - targets_dataset, - k, - projection, - case_returns, - batch_size, + cases_dataset=cases_dataset, + targets_dataset=targets_dataset, + labels_dataset=labels_dataset, + projection=projection, + k=k, + case_returns=case_returns, + batch_size=batch_size, distance=distance, ) diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 0c584ae0..05b1d9ad 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -32,7 +32,6 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): - search_method = FilterKNN if projection is None: projection = Projection(space_projection=lambda inputs: inputs) @@ -41,15 
+40,29 @@ def __init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, targets_dataset=targets_dataset, - search_method=search_method, k=k, projection=projection, case_returns=case_returns, batch_size=batch_size, + ) + + self.distance = distance + self.order = ORDER.ASCENDING + + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + targets_dataset=self.targets_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, distance=distance, filter_fn=self.filter_fn, - order = ORDER.ASCENDING + order=self.order ) + + @property + def search_method_class(self): + return FilterKNN def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: @@ -83,8 +96,6 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): - search_method = FilterKNN - if projection is None: projection = Projection(space_projection=lambda inputs: inputs) # TODO: add a warning here if it is a custom projection that requires using targets as it might mismatch with the explain @@ -93,15 +104,30 @@ def __init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, targets_dataset=targets_dataset, - search_method=search_method, k=k, projection=projection, case_returns=case_returns, batch_size=batch_size, + ) + + self.distance = distance + self.order = ORDER.ASCENDING + + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + targets_dataset=self.targets_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, distance=distance, filter_fn=self.filter_fn, - order = ORDER.ASCENDING + order=self.order ) + + @property + def search_method_class(self): + return FilterKNN + def filter_fn(self, _, __, cf_targets, cases_targets) -> tf.Tensor: """ @@ -183,13 +209,30 @@ def __init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, targets_dataset=targets_dataset, - search_method=search_method, k=k, projection=projection, case_returns=case_returns, batch_size=batch_size, + ) + + self.distance = distance + self.order = ORDER.ASCENDING + + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + targets_dataset=self.targets_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, distance=distance, + filter_fn=self.filter_fn, + order=self.order ) + + @property + def search_method_class(self): + return FilterKNN + @property def returns(self) -> Union[List[str], str]: @@ -206,7 +249,11 @@ def returns(self, returns: Union[List[str], str]): self._search_returns = ["indices", "distances", "nuns"] else: self._search_returns = ["indices", "distances"] - self.search_method.returns = self._search_returns + + try: + self.search_method.returns = self._search_returns + except AttributeError: + pass def format_search_output( self, diff --git a/xplique/example_based/mmd_critic.py b/xplique/example_based/mmd_critic.py deleted file mode 100644 index a2ccfb47..00000000 --- a/xplique/example_based/mmd_critic.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -MMDCritic method for searching prototypes -""" - -import math - -import time - -import tensorflow as tf -import numpy as np - -from ..types import Callable, Dict, List, Optional, Type, Union - -from ..commons import sanitize_inputs_targets -from ..commons import sanitize_dataset, dataset_gather -from .search_methods import MMDCriticSearch -from .projections import Projection -from .prototypes import Prototypes - -from .search_methods.base import 
_sanitize_returns - - -class MMDCritic(Prototypes): - """ - MMDCritic method for searching prototypes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - The number of examples to retrieve. - projection - Projection or Callable that project samples from the input space to the search space. - The search space should be a space where distance make sense for the model. - It should not be `None`, otherwise, - all examples could be computed only with the `search_method`. - - Example of Callable: - ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): - ''' - Example of projection, - inputs are the elements to project. - targets are optional parameters to orientated the projection. - ''' - projected_inputs = # do some magic on inputs, it should use the model. - return projected_inputs - ``` - case_returns - String or list of string with the elements to return in `self.explain()`. - See `self.set_returns()` for detail. - batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - search_method_kwargs - Parameters to be passed at the construction of the `search_method`. 
- """ - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - **search_method_kwargs, - ): - # the only difference with parent is that the search method is always MMDCriticSearch - search_method = MMDCriticSearch - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - search_method=search_method, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - **search_method_kwargs, - ) - diff --git a/xplique/example_based/proto_dash.py b/xplique/example_based/proto_dash.py deleted file mode 100644 index 475e138b..00000000 --- a/xplique/example_based/proto_dash.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -ProtoDash method for searching prototypes -""" - -import math - -import time - -import tensorflow as tf -import numpy as np - -from ..types import Callable, Dict, List, Optional, Type, Union - -from ..commons import sanitize_inputs_targets -from ..commons import sanitize_dataset, dataset_gather -from .search_methods import ProtoDashSearch -from .projections import Projection -from .prototypes import Prototypes - -from .search_methods.base import _sanitize_returns - - -class ProtoDash(Prototypes): - """ - ProtoDash method for searching prototypes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - The number of examples to retrieve. - projection - Projection or Callable that project samples from the input space to the search space. - The search space should be a space where distance make sense for the model. - It should not be `None`, otherwise, - all examples could be computed only with the `search_method`. - - Example of Callable: - ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): - ''' - Example of projection, - inputs are the elements to project. - targets are optional parameters to orientated the projection. - ''' - projected_inputs = # do some magic on inputs, it should use the model. 
- return projected_inputs - ``` - case_returns - String or list of string with the elements to return in `self.explain()`. - See `self.set_returns()` for detail. - batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - search_method_kwargs - Parameters to be passed at the construction of the `search_method`. - """ - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - **search_method_kwargs, - ): - # the only difference with parent is that the search method is always ProtoDashSearch - search_method = ProtoDashSearch - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - search_method=search_method, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - **search_method_kwargs, - ) - diff --git a/xplique/example_based/proto_greedy.py b/xplique/example_based/proto_greedy.py deleted file mode 100644 index 2c43565b..00000000 --- a/xplique/example_based/proto_greedy.py +++ /dev/null @@ -1,100 +0,0 @@ -""" -ProtoGreedy method for searching prototypes -""" - -import math - -import time - -import tensorflow as tf -import numpy as np - -from ..types import Callable, Dict, List, Optional, Type, Union - -from ..commons import sanitize_inputs_targets -from ..commons import sanitize_dataset, dataset_gather -from .search_methods import ProtoGreedySearch -from .projections import Projection -from .prototypes import Prototypes - -from .search_methods.base import _sanitize_returns - - -class ProtoGreedy(Prototypes): - """ - ProtoGreedy method for searching prototypes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - The number of examples to retrieve. - projection - Projection or Callable that project samples from the input space to the search space. - The search space should be a space where distance make sense for the model. 
- It should not be `None`, otherwise, - all examples could be computed only with the `search_method`. - - Example of Callable: - ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): - ''' - Example of projection, - inputs are the elements to project. - targets are optional parameters to orientated the projection. - ''' - projected_inputs = # do some magic on inputs, it should use the model. - return projected_inputs - ``` - case_returns - String or list of string with the elements to return in `self.explain()`. - See `self.set_returns()` for detail. - batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - search_method_kwargs - Parameters to be passed at the construction of the `search_method`. - """ - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - **search_method_kwargs, - ): - # the only difference with parent is that the search method is always ProtoGreedySearch - search_method = ProtoGreedySearch - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - search_method=search_method, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - **search_method_kwargs, - ) - diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 29946c22..16f68243 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -2,25 +2,19 @@ Base model for prototypes """ -import math - -import time +from abc import ABC, abstractmethod import tensorflow as tf import numpy as np from ..types import Callable, Dict, List, Optional, Type, Union -from ..commons import sanitize_inputs_targets -from ..commons import sanitize_dataset, dataset_gather -from .search_methods import ProtoGreedySearch +from .search_methods import BaseSearchMethod, ProtoGreedySearch, MMDCriticSearch, ProtoDashSearch from .projections import Projection from .base_example_method import BaseExampleMethod -from .search_methods.base import _sanitize_returns - -class Prototypes(BaseExampleMethod): +class Prototypes(BaseExampleMethod, ABC): """ Base class for prototypes. @@ -76,54 +70,51 @@ def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - search_method: Type[ProtoGreedySearch] = ProtoGreedySearch, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, - **search_method_kwargs, - ): - assert ( - projection is not None - ), "`BaseExampleMethod` without `projection` is a `BaseSearchMethod`." 
- - # set attributes - batch_size = self.__initialize_cases_dataset( - cases_dataset, labels_dataset, targets_dataset, batch_size + distance: Union[int, str, Callable] = None, + nb_prototypes: int = 1, + kernel_type: str = 'local', + kernel_fn: callable = None, + gamma: float = None + ): + # set common example-based parameters + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, ) - self.k = k - self.set_returns(case_returns) - - assert hasattr(projection, "__call__"), "projection should be a callable." - - # check projection type - if isinstance(projection, Projection): - self.projection = projection - elif hasattr(projection, "__call__"): - self.projection = Projection(get_weights=None, space_projection=projection) - else: - raise AttributeError( - "projection should be a `Projection` or a `Callable`, not a" - + f"{type(projection)}" - ) - - # project dataset - projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, - self.targets_dataset) - - # set `search_returns` if not provided and overwrite it otherwise - search_method_kwargs["search_returns"] = ["indices", "distances"] + # set prototypes parameters + self.distance = distance + self.nb_prototypes = nb_prototypes + self.kernel_type = kernel_type + self.kernel_fn = kernel_fn + self.gamma = gamma # initiate search_method - self.search_method = search_method( - cases_dataset=projected_cases_dataset, - labels_dataset=labels_dataset, - k=k, - batch_size=batch_size, - **search_method_kwargs, + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + labels_dataset=self.labels_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, + distance=self.distance, + nb_prototypes=self.nb_prototypes, + kernel_type=self.kernel_type, + kernel_fn=self.kernel_fn, + gamma=self.gamma ) + + @property + @abstractmethod + def search_method_class(self) -> Type[ProtoGreedySearch]: + raise NotImplementedError def get_global_prototypes(self): """ @@ -137,100 +128,95 @@ def get_global_prototypes(self): prototype weights. """ return self.search_method.prototype_indices, self.search_method.prototype_weights - - def __initialize_cases_dataset( + + +class ProtoGreedy(Prototypes): + @property + def search_method_class(self) -> Type[ProtoGreedySearch]: + return ProtoGreedySearch + + +class MMDCritic(Prototypes): + @property + def search_method_class(self) -> Type[ProtoGreedySearch]: + return MMDCriticSearch + + +class ProtoDash(Prototypes): + """ + Protodash method for searching prototypes. + + References: + .. [#] `Karthik S. Gurumoorthy, Amit Dhurandhar, Guillermo Cecchi, + "ProtoDash: Fast Interpretable Prototype Selection" + `_ + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from the dataset. + For natural example-based methods it is the train dataset. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + k + The number of examples to retrieve. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + See `self.set_returns()` for detail. + batch_size + Number of sample treated simultaneously. + It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + distance + Either a Callable, or a value supported by `tf.norm` `ord` parameter. 
+ Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. + nb_prototypes : int + Number of prototypes to find. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function, by default the rbf kernel. + This function must only use TensorFlow operations. + gamma : float, optional + Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. + use_optimizer : bool, optional + Flag indicating whether to use an optimizer for prototype selection, by default False. + """ + + def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], - batch_size: Optional[int], - ) -> int: - """ - Factorization of `__init__()` method for dataset related attributes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - labels_dataset - Labels associated to the examples in the dataset. - Indices should match with cases_dataset. - targets_dataset - Targets associated to the cases_dataset for dataset projection. - See `projection` for detail. - batch_size - Number of sample treated simultaneously when using the datasets. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - - Returns - ------- - batch_size - Number of sample treated simultaneously when using the datasets. - Extracted from the datasets in case they are `tf.data.Dataset`. - Otherwise, the input value. - """ - # at least one dataset provided - if isinstance(cases_dataset, tf.data.Dataset): - # set batch size (ignore provided argument) and cardinality - if isinstance(cases_dataset.element_spec, tuple): - batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy() - else: - batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() - - cardinality = cases_dataset.cardinality().numpy() - else: - # if case_dataset is not a `tf.data.Dataset`, then neither should the other. 
- assert not isinstance(labels_dataset, tf.data.Dataset) - assert not isinstance(targets_dataset, tf.data.Dataset) - # set batch size and cardinality - batch_size = min(batch_size, len(cases_dataset)) - cardinality = math.ceil(len(cases_dataset) / batch_size) - - # verify cardinality and create datasets from the tensors - self.cases_dataset = sanitize_dataset( - cases_dataset, batch_size, cardinality - ) - self.labels_dataset = sanitize_dataset( - labels_dataset, batch_size, cardinality - ) - self.targets_dataset = sanitize_dataset( - targets_dataset, batch_size, cardinality + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = None, + nb_prototypes: int = 1, + kernel_type: str = 'local', + kernel_fn: callable = None, + gamma: float = None, + use_optimizer: bool = False, + ): # pylint: disable=R0801 + self.use_optimizer = use_optimizer + + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + distance=distance, + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + kernel_fn=kernel_fn, + gamma=gamma ) - # if the provided `cases_dataset` has several columns - if isinstance(self.cases_dataset.element_spec, tuple): - # switch case on the number of columns of `cases_dataset` - if len(self.cases_dataset.element_spec) == 2: - assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." - + "Hence, `labels_dataset` should be empty." - ) - self.labels_dataset = self.cases_dataset.map(lambda x, y: y) - self.cases_dataset = self.cases_dataset.map(lambda x, y: x) - - elif len(self.cases_dataset.element_spec) == 3: - assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." - + "Hence, `labels_dataset` should be empty." - ) - assert self.targets_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." - + "Hence, `labels_dataset` should be empty." - ) - self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) - self.labels_dataset = self.cases_dataset.map(lambda x, y, t: y) - self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) - else: - raise AttributeError( - "`cases_dataset` cannot possess more than 3 columns," - + f"{len(self.cases_dataset.element_spec)} were detected." 
- ) - - self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) - if self.labels_dataset is not None: - self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) - if self.targets_dataset is not None: - self.targets_dataset = self.targets_dataset.prefetch(tf.data.AUTOTUNE) - - return batch_size - + @property + def search_method_class(self) -> Type[ProtoGreedySearch]: + return ProtoDashSearch + diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 018f3ad4..60db96af 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -92,7 +92,6 @@ def __init__( k: int = 1, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, ): # pylint: disable=R0801 # set batch size @@ -106,13 +105,6 @@ def __init__( self.k = k self.returns = search_returns - # set targets_dataset - if targets_dataset is not None: - self.targets_dataset = sanitize_dataset(targets_dataset, self.batch_size) - else: - # make an iterable of None - self.targets_dataset = [None]*len(cases_dataset) - @property def k(self) -> int: """Getter for the k parameter.""" diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index c62c14cd..315a25af 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -29,22 +29,22 @@ def __init__( cases_dataset = cases_dataset, targets_dataset=targets_dataset, k=k, - filter_fn=self._filter_fn, search_returns=search_returns, batch_size=batch_size, distance=distance, order=ORDER.ASCENDING, + filter_fn=self._filter_fn, ) self.search_nuns = FilterKNN( cases_dataset=cases_dataset, targets_dataset=targets_dataset, k=1, - filter_fn=self._filter_fn_nun, search_returns=["indices", "distances"], batch_size=batch_size, distance=distance, order = ORDER.ASCENDING, + filter_fn=self._filter_fn_nun, ) def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index be686b45..c1fbe4db 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -6,7 +6,7 @@ import numpy as np import tensorflow as tf -from ...commons import dataset_gather +from ...commons import dataset_gather, sanitize_dataset from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod, ORDER @@ -21,10 +21,12 @@ def __init__( search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, order: ORDER = ORDER.ASCENDING, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, ): super().__init__( - cases_dataset, k, search_returns, batch_size, targets_dataset + cases_dataset=cases_dataset, + k=k, + search_returns=search_returns, + batch_size=batch_size, ) assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}" @@ -134,10 +136,13 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, ): # pylint: disable=R0801 super().__init__( - cases_dataset, k, search_returns, batch_size, order, targets_dataset + 
cases_dataset=cases_dataset, + k=k, + search_returns=search_returns, + batch_size=batch_size, + order=order, ) if hasattr(distance, "__call__"): @@ -273,14 +278,18 @@ def __init__( cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, k: int = 1, - filter_fn: Optional[Callable] = None, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, + filter_fn: Optional[Callable] = None, ): # pylint: disable=R0801 super().__init__( - cases_dataset, k, search_returns, batch_size, order, targets_dataset + cases_dataset=cases_dataset, + k=k, + search_returns=search_returns, + batch_size=batch_size, + order=order, ) if hasattr(distance, "__call__"): @@ -301,6 +310,13 @@ def __init__( filter_fn = lambda x, z, y, t: tf.ones((tf.shape(x)[0], tf.shape(z)[0]), dtype=tf.bool) self.filter_fn = filter_fn + # set targets_dataset + if targets_dataset is not None: + self.targets_dataset = sanitize_dataset(targets_dataset, self.batch_size) + else: + # make an iterable of None + self.targets_dataset = [None]*len(cases_dataset) + @tf.function def _crossed_distances_fn(self, x1, x2, mask): n = x1.shape[0] diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index a86f610d..4ed79899 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -146,7 +146,7 @@ def kernel_induced_distance(x1,x2): elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( distance, int ): - self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance) + self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance, axis=-1) else: raise AttributeError( "The distance parameter is expected to be either a Callable or in" @@ -423,7 +423,7 @@ def find_prototypes(self, nb_prototypes): return prototype_indices, prototype_cases, prototype_labels, prototype_weights - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]): + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): """ Search the samples to return as examples. Called by the explain methods. 
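An aside on the `filter_fn` that FilterKNN now owns above: it receives `(inputs, cases, targets, cases_targets)` and must return a boolean mask of shape `(n, bs)`, like the all-ones default. A hedged sketch of a custom filter (the name and logic are illustrative, not part of the patch):

```python
import tensorflow as tf

# Illustrative custom `filter_fn` with the same signature as the default above.
# Shapes: targets (n, nb_classes), cases_targets (bs, nb_classes); the returned
# boolean mask is (n, bs) and keeps only cases of a different predicted class.
def different_class_filter(inputs, cases, targets, cases_targets):
    predicted = tf.argmax(targets, axis=-1)            # (n,)
    cases_classes = tf.argmax(cases_targets, axis=-1)  # (bs,)
    return tf.not_equal(tf.expand_dims(predicted, axis=1), cases_classes)
```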
It may also return the indices corresponding to the samples, @@ -438,7 +438,7 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray]): """ # look for closest prototypes to projected inputs - knn_output = self.knn(inputs) + knn_output = self.knn(inputs, _) # obtain closest prototypes indices with respect to the prototypes indices_wrt_prototypes = knn_output["indices"] diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index 2a9634d3..e8836167 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -84,18 +84,27 @@ def __init__( case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", - ): - # the only difference with parent is that the search method is always KNN - search_method = KNN - + ): super().__init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, targets_dataset=targets_dataset, - search_method=search_method, k=k, projection=projection, case_returns=case_returns, batch_size=batch_size, - distance=distance ) + + self.distance = distance + + # initiate search_method + self.search_method = self.search_method_class( + cases_dataset=self.projected_cases_dataset, + search_returns=self._search_returns, + k=self.k, + batch_size=self.batch_size, + ) + + @property + def search_method_class(self) -> Type[BaseSearchMethod]: + return KNN From 026c29e737b11104aebc915ab172e0c4642093d0 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Wed, 22 May 2024 18:21:53 +0200 Subject: [PATCH 047/138] fix: change the fill value depending on the dataset type --- xplique/commons/tf_dataset_operations.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py index f74f4ea2..83c81fa0 100644 --- a/xplique/commons/tf_dataset_operations.py +++ b/xplique/commons/tf_dataset_operations.py @@ -205,9 +205,14 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: example = next(iter(dataset)) # (n, bs, ...) 
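Context for the hunk that follows: `tf.fill` with `np.inf` only works for floating-point element types, so integer datasets need a finite padding sentinel. A small sketch of the dtype dispatch, using the same dtype list as the patch:

```python
import numpy as np
import tensorflow as tf

# Sketch of the dtype-dependent fill value introduced below: integer dtypes
# cannot represent np.inf, so -1 is used as the padding sentinel instead.
def fill_value_for(dtype):
    if dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']:
        return tf.constant(-1, dtype=dtype)
    return tf.constant(np.inf, dtype=dtype)

print(fill_value_for(tf.int32))    # tf.Tensor(-1, shape=(), dtype=int32)
print(fill_value_for(tf.float32))  # tf.Tensor(inf, shape=(), dtype=float32)
```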
- results = tf.Variable( - tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)), - ) + if dataset.element_spec.dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']: + results = tf.Variable( + tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(-1, dtype=dataset.element_spec.dtype)), + ) + else: + results = tf.Variable( + tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)), + ) nb_results = product(indices.shape[:-1]) current_nb_results = 0 From 31bb02240a1ef48d320ce52f39de255cc8a40e5f Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Wed, 22 May 2024 18:22:34 +0200 Subject: [PATCH 048/138] fix: update kleor search and example base methods to fit the new interface --- tests/example_based/test_contrastive.py | 8 +- tests/example_based/test_kleor.py | 12 +- xplique/example_based/__init__.py | 2 +- xplique/example_based/contrastive_examples.py | 131 +++++++++++++----- .../example_based/search_methods/__init__.py | 2 +- xplique/example_based/search_methods/kleor.py | 20 ++- 6 files changed, 123 insertions(+), 52 deletions(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index 82a47d60..af5cdc6f 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -4,7 +4,7 @@ import tensorflow as tf import numpy as np -from xplique.example_based import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEOR +from xplique.example_based import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss def test_naive_counter_factuals(): """ @@ -162,13 +162,12 @@ def test_kleor(): targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) # start when strategy is sim_miss - kleor_sim_miss = KLEOR( + kleor_sim_miss = KLEORSimMiss( cases_dataset, cases_targets_dataset, k=1, case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2, - strategy="sim_miss" ) return_dict = kleor_sim_miss(inputs, targets) @@ -202,13 +201,12 @@ def test_kleor(): assert tf.reduce_all(tf.equal(indices, expected_indices)) # now strategy is global_sim - kleor_global_sim = KLEOR( + kleor_global_sim = KLEORGlobalSim( cases_dataset, cases_targets_dataset, k=1, case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2, - strategy="global_sim" ) return_dict = kleor_global_sim(inputs, targets) diff --git a/tests/example_based/test_kleor.py b/tests/example_based/test_kleor.py index f4965f8d..fec68950 100644 --- a/tests/example_based/test_kleor.py +++ b/tests/example_based/test_kleor.py @@ -4,7 +4,7 @@ import tensorflow as tf import numpy as np -from xplique.example_based.search_methods import KLEORSimMiss, KLEORGlobalSim +from xplique.example_based.search_methods import KLEORSimMissSearch, KLEORGlobalSimSearch def test_kleor_base_and_sim_miss(): """ @@ -22,7 +22,7 @@ def test_kleor_base_and_sim_miss(): targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) # build the kleor object - kleor = KLEORSimMiss(cases_dataset, cases_targets_dataset, k=1, search_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2) + kleor = KLEORSimMissSearch(cases_dataset, cases_targets_dataset, k=1, search_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2) # test the _filter_fn method fake_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) @@ -46,7 +46,7 @@ def 
test_kleor_base_and_sim_miss(): assert tf.reduce_all(tf.equal(mask, expected_mask)) # test the _get_nuns method - nuns, nuns_distances = kleor._get_nuns(inputs, targets) + nuns, _, nuns_distances = kleor._get_nuns(inputs, targets) expected_nuns = tf.constant([ [[2., 3.]], [[1., 2.]], @@ -72,7 +72,7 @@ def test_kleor_base_and_sim_miss(): assert tf.reduce_all(tf.equal(batch_indices, expected_batch_indices)) # test the kneighbors method - input_sf_distances, sf_indices, nuns = kleor.kneighbors(inputs, targets) + input_sf_distances, sf_indices, nuns, _, __ = kleor.kneighbors(inputs, targets) assert input_sf_distances.shape == (3, 1) # (n, k) assert sf_indices.shape == (3, 1, 2) # (n, k, 2) @@ -121,7 +121,7 @@ def test_kleor_global_sim(): targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) # build the kleor object - kleor = KLEORGlobalSim(cases_dataset, cases_targets_dataset, k=1, search_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2) + kleor = KLEORGlobalSimSearch(cases_dataset, cases_targets_dataset, k=1, search_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2) # test the _additionnal_filtering method # (n, bs) @@ -154,7 +154,7 @@ def test_kleor_global_sim(): ) < 1e-5) # test the kneighbors method - input_sf_distances, sf_indices, nuns = kleor.kneighbors(inputs, targets) + input_sf_distances, sf_indices, nuns, _, __ = kleor.kneighbors(inputs, targets) expected_nuns = tf.constant([ [[2., 3.]], diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index e1d70b05..3de46d18 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -5,4 +5,4 @@ from .cole import Cole from .similar_examples import SimilarExamples from .prototypes import Prototypes, ProtoGreedy, ProtoDash, MMDCritic -from .contrastive_examples import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEOR +from .contrastive_examples import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 05b1d9ad..5477b450 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -8,10 +8,10 @@ import tensorflow as tf from ..types import Callable, List, Optional, Union, Dict -from ..commons import sanitize_inputs_targets +from ..commons import sanitize_inputs_targets, dataset_gather from .base_example_method import BaseExampleMethod -from .search_methods import ORDER, FilterKNN, KLEORSimMiss, KLEORGlobalSim +from .search_methods import ORDER, FilterKNN, KLEORSimMissSearch, KLEORGlobalSimSearch from .projections import Projection from .search_methods.base import _sanitize_returns @@ -177,10 +177,12 @@ def explain( # TODO make an assert on the cf_targets return super().explain(inputs, cf_targets) -class KLEOR(BaseExampleMethod): +class KLEORBase(BaseExampleMethod): """ """ - _returns_possibilities = ["examples", "weights", "distances", "labels", "include_inputs", "nuns"] + _returns_possibilities = [ + "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns" + ] def __init__( self, @@ -192,16 +194,8 @@ def __init__( case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", - strategy: str = "sim_miss", ): - if strategy == "global_sim": - search_method = KLEORGlobalSim - elif strategy == 
"sim_miss": - search_method = KLEORSimMiss - else: - raise ValueError("strategy should be either 'global_sim' or 'sim_miss'.") - if projection is None: projection = Projection(space_projection=lambda inputs: inputs) @@ -218,22 +212,6 @@ def __init__( self.distance = distance self.order = ORDER.ASCENDING - self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, - targets_dataset=self.targets_dataset, - k=self.k, - search_returns=self._search_returns, - batch_size=self.batch_size, - distance=distance, - filter_fn=self.filter_fn, - order=self.order - ) - - @property - def search_method_class(self): - return FilterKNN - - @property def returns(self) -> Union[List[str], str]: """Getter for the returns parameter.""" @@ -245,10 +223,15 @@ def returns(self, returns: Union[List[str], str]): """ default = "examples" self._returns = _sanitize_returns(returns, self._returns_possibilities, default) + self._search_returns = ["indices", "distances"] + if isinstance(self._returns, list) and ("nuns" in self._returns): - self._search_returns = ["indices", "distances", "nuns"] - else: - self._search_returns = ["indices", "distances"] + self._search_returns.append("nuns_indices") + elif isinstance(self._returns, list) and ("nuns_indices" in self._returns): + self._search_returns.append("nuns_indices") + + if isinstance(self._returns, list) and ("dist_to_nuns" in self._returns): + self._search_returns.append("dist_to_nuns") try: self.search_method.returns = self._search_returns @@ -265,5 +248,89 @@ def format_search_output( """ return_dict = super().format_search_output(search_output, inputs, targets) if "nuns" in self.returns: - return_dict["nuns"] = search_output["nuns"] + return_dict["nuns"] = dataset_gather(self.cases_dataset, search_output["nuns_indices"]) + if "nuns_indices" in self.returns: + return_dict["nuns_indices"] = search_output["nuns_indices"] + if "dist_to_nuns" in self.returns: + return_dict["dist_to_nuns"] = search_output["dist_to_nuns"] return return_dict + +class KLEORGlobalSim(KLEORBase): + """ + """ + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = "euclidean", + ): + + super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + distance=distance, + ) + + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + targets_dataset=self.targets_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, + distance=self.distance, + ) + + @property + def search_method_class(self): + return KLEORGlobalSimSearch + +class KLEORSimMiss(KLEORBase): + """ + """ + + def __init__( + self, + cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], + labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + k: int = 1, + projection: Union[Projection, Callable] = None, + case_returns: Union[List[str], str] = "examples", + batch_size: Optional[int] = 32, + distance: Union[int, str, Callable] = "euclidean", + ): + + 
super().__init__( + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, + k=k, + projection=projection, + case_returns=case_returns, + batch_size=batch_size, + distance=distance, + ) + + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + targets_dataset=self.targets_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, + distance=self.distance, + ) + + @property + def search_method_class(self): + return KLEORSimMissSearch diff --git a/xplique/example_based/search_methods/__init__.py b/xplique/example_based/search_methods/__init__.py index 998a3025..24a2e14c 100644 --- a/xplique/example_based/search_methods/__init__.py +++ b/xplique/example_based/search_methods/__init__.py @@ -9,4 +9,4 @@ from .proto_dash_search import ProtoDashSearch from .mmd_critic_search import MMDCriticSearch from .knn import BaseKNN, KNN, FilterKNN -from .kleor import KLEORSimMiss, KLEORGlobalSim +from .kleor import KLEORSimMissSearch, KLEORGlobalSimSearch diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index 315a25af..bb057bf3 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -12,7 +12,7 @@ from .base import ORDER from .knn import FilterKNN -class BaseKLEOR(FilterKNN, ABC): +class BaseKLEORSearch(FilterKNN, ABC): """ Base class for the KLEOR search methods. """ @@ -61,7 +61,7 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ Expected shape among (N, W), (N, T, W), (N, W, H, C). """ # compute neighbors - examples_distances, examples_indices, nuns = self.kneighbors(inputs, targets) + examples_distances, examples_indices, nuns, nuns_indices, nuns_sf_distances = self.kneighbors(inputs, targets) # build return dict return_dict = self._build_return_dict(inputs, examples_distances, examples_indices) @@ -70,6 +70,12 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ if "nuns" in self.returns: return_dict["nuns"] = nuns + if "dist_to_nuns" in self.returns: + return_dict["dist_to_nuns"] = nuns_sf_distances + + if "nuns_indices" in self.returns: + return_dict["nuns_indices"] = nuns_indices + return return_dict def _filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: @@ -104,13 +110,13 @@ def _get_nuns(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tens nuns_dict = self.search_nuns(inputs, targets) nuns_indices, nuns_distances = nuns_dict["indices"], nuns_dict["distances"] nuns = dataset_gather(self.cases_dataset, nuns_indices) - return nuns, nuns_distances + return nuns, nuns_indices, nuns_distances def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: """ """ # get the Nearest Unlike Neighbors and their distance to the related input - nuns, nuns_input_distances = self._get_nuns(inputs, targets) + nuns, nuns_indices, nuns_input_distances = self._get_nuns(inputs, targets) # initialize the search for the KLEOR semi-factual methods sf_indices, input_sf_distances, nun_sf_distances, batch_indices = self._initialize_search(inputs) @@ -160,7 +166,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Ten tf.gather(concatenated_input_sf_distances, sort_order, axis=1, batch_dims=1) ) - return input_sf_distances, sf_indices, nuns + return input_sf_distances, sf_indices, nuns, nuns_indices, nun_sf_distances def 
_initialize_search(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Variable, tf.Variable, tf.Variable, tf.Tensor]: """ @@ -185,7 +191,7 @@ def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: """ raise NotImplementedError -class KLEORSimMiss(BaseKLEOR): +class KLEORSimMissSearch(BaseKLEORSearch): """ KLEOR search method. @@ -199,7 +205,7 @@ class KLEORSimMiss(BaseKLEOR): def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple: return nun_sf_distances, input_sf_distances -class KLEORGlobalSim(BaseKLEOR): +class KLEORGlobalSimSearch(BaseKLEORSearch): """ KLEOR search method. From 19d7cb3888b062c0178cb306799b011aae55c2b8 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 23 May 2024 14:59:46 +0200 Subject: [PATCH 049/138] example based: cole tests pass --- tests/example_based/test_cole.py | 10 +-- xplique/example_based/contrastive_examples.py | 82 +++---------------- xplique/example_based/projections/base.py | 2 + xplique/example_based/similar_examples.py | 4 +- 4 files changed, 22 insertions(+), 76 deletions(-) diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index ba71d5d3..e96abbb7 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -48,7 +48,7 @@ def test_cole_attribution(): Test that the distance has an impact. """ # Setup - nb_samples = 20 + nb_samples = 50 input_shape = (5, 5) nb_labels = 10 k = 3 @@ -84,7 +84,7 @@ def test_cole_attribution(): explainer.gradient(model, inputs, targets) projection = Projection(get_weights=explainer) - euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z))) + euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z), axis=-1)) method_call = SimilarExamples( cases_dataset=x_train, targets_dataset=y_train, @@ -121,9 +121,9 @@ def test_cole_attribution(): assert not almost_equal(examples_constructor, examples_different_distance) # check weights are equal to the attribution directly on the input - method_constructor.set_returns(["weights", "include_inputs"]) + method_constructor.returns = ["weights", "include_inputs"] assert almost_equal( - method_constructor.explain(x_test, y_test)[:, 0], + method_constructor.explain(x_test, y_test)["weights"][:, 0], Saliency(model)(x_test, y_test), ) @@ -156,7 +156,7 @@ def test_cole_hadamard(): weights_extraction = lambda inputs, targets: gradients_predictions(model, inputs, targets) projection = Projection(get_weights=weights_extraction) - euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z))) + euclidean_dist = lambda x, z: tf.sqrt(tf.reduce_sum(tf.square(x - z), axis=-1)) method_call = SimilarExamples( cases_dataset=x_train, targets_dataset=y_train, diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 5477b450..fd6b6204 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -177,6 +177,7 @@ def explain( # TODO make an assert on the cf_targets return super().explain(inputs, cf_targets) + class KLEORBase(BaseExampleMethod): """ """ @@ -212,6 +213,15 @@ def __init__( self.distance = distance self.order = ORDER.ASCENDING + self.search_method = self.search_method_class( + cases_dataset=self.cases_dataset, + targets_dataset=self.targets_dataset, + k=self.k, + search_returns=self._search_returns, + batch_size=self.batch_size, + distance=self.distance, + ) + @property def returns(self) 
-> Union[List[str], str]: """Getter for the returns parameter.""" @@ -255,82 +265,14 @@ def format_search_output( return_dict["dist_to_nuns"] = search_output["dist_to_nuns"] return return_dict -class KLEORGlobalSim(KLEORBase): - """ - """ - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = "euclidean", - ): - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - distance=distance, - ) - - self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, - targets_dataset=self.targets_dataset, - k=self.k, - search_returns=self._search_returns, - batch_size=self.batch_size, - distance=self.distance, - ) +class KLEORGlobalSim(KLEORBase): @property def search_method_class(self): return KLEORGlobalSimSearch -class KLEORSimMiss(KLEORBase): - """ - """ - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = "euclidean", - ): - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - distance=distance, - ) - - self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, - targets_dataset=self.targets_dataset, - k=self.k, - search_returns=self._search_returns, - batch_size=self.batch_size, - distance=self.distance, - ) +class KLEORSimMiss(KLEORBase): @property def search_method_class(self): return KLEORSimMissSearch diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 54192ed5..5efb3d27 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -73,6 +73,8 @@ def __init__(self, # weights is a tensor if isinstance(get_weights, np.ndarray): weights = tf.convert_to_tensor(get_weights, dtype=tf.float32) + else: + weights = get_weights # define a function that returns the weights def get_weights(inputs, _ = None): diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index e8836167..75756074 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -11,7 +11,7 @@ from ..commons import sanitize_inputs_targets from ..commons import sanitize_dataset, dataset_gather -from .search_methods import KNN, BaseSearchMethod +from .search_methods import KNN, BaseSearchMethod, ORDER from .projections import Projection from .base_example_method import BaseExampleMethod @@ -103,6 +103,8 @@ def __init__( search_returns=self._search_returns, k=self.k, batch_size=self.batch_size, + distance=self.distance, + order=ORDER.ASCENDING, ) @property From 
122f8d56f543e09cf29a03626bf2f80728df6ad2 Mon Sep 17 00:00:00 2001 From: Lucas Hervier Date: Wed, 29 May 2024 17:54:10 +0200 Subject: [PATCH 050/138] docs: refactoring of the documentation for the new interfaces --- tests/example_based/test_contrastive.py | 89 +++--- xplique/example_based/base_example_method.py | 91 +++--- xplique/example_based/cole.py | 31 +- xplique/example_based/contrastive_examples.py | 291 +++++++++++++++--- xplique/example_based/search_methods/base.py | 31 +- xplique/example_based/search_methods/kleor.py | 110 ++++++- xplique/example_based/search_methods/knn.py | 120 +++++++- xplique/example_based/similar_examples.py | 33 +- 8 files changed, 574 insertions(+), 222 deletions(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index af5cdc6f..eab75ca7 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -5,20 +5,33 @@ import numpy as np from xplique.example_based import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss +from xplique.example_based.projections import Projection def test_naive_counter_factuals(): """ """ + # setup the tests cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) - counter_factuals = NaiveCounterFactuals(cases_dataset, cases_targets_dataset, k=2, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + projection = Projection(space_projection=lambda inputs: inputs) + + # build the NaiveCounterFactuals object + counter_factuals = NaiveCounterFactuals( + cases_dataset, + cases_targets_dataset, + k=2, + projection=projection, + case_returns=["examples", "indices", "distances", "include_inputs"], + batch_size=2 + ) + mask = counter_factuals.filter_fn(inputs, cases, targets, cases_targets) assert mask.shape == (inputs.shape[0], cases.shape[0]) @@ -62,11 +75,22 @@ def test_label_aware_cf(): cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) - counter_factuals = LabelAwareCounterFactuals(cases_dataset, cases_targets_dataset, k=1, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) cf_targets = tf.constant([[1, 0], [0, 1], [0, 1]], dtype=tf.float32) + projection = Projection(space_projection=lambda inputs: inputs) + + # build the LabelAwareCounterFactuals object + counter_factuals = LabelAwareCounterFactuals( + cases_dataset, + cases_targets_dataset, + k=1, + projection=projection, + case_returns=["examples", "indices", "distances", "include_inputs"], + batch_size=2 + ) + mask = counter_factuals.filter_fn(inputs, cases, cf_targets, cases_targets) assert mask.shape == (inputs.shape[0], cases.shape[0]) @@ -106,7 +130,14 @@ def test_label_aware_cf(): cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) - counter_factuals = LabelAwareCounterFactuals(cases_dataset, cases_targets_dataset, k=1, 
case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2) + counter_factuals = LabelAwareCounterFactuals( + cases_dataset, + cases_targets_dataset, + k=1, + projection=projection, + case_returns=["examples", "indices", "distances", "include_inputs"], + batch_size=2 + ) inputs = tf.constant([[1.5], [2.5], [4.5], [6.5], [8.5]], dtype=tf.float32) cf_targets = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]], dtype=tf.float32) @@ -161,11 +192,14 @@ def test_kleor(): inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + projection = Projection(space_projection=lambda inputs: inputs) + # start when strategy is sim_miss kleor_sim_miss = KLEORSimMiss( cases_dataset, cases_targets_dataset, k=1, + projection=projection, case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2, ) @@ -205,6 +239,7 @@ def test_kleor(): cases_dataset, cases_targets_dataset, k=1, + projection=projection, case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], batch_size=2, ) @@ -246,51 +281,3 @@ def test_kleor(): assert tf.reduce_all( tf.abs(tf.where(inf_mask_examples, 0.0, examples) - tf.where(inf_mask_expected_examples, 0.0, expected_examples) ) < 1e-5) - -# def test_kleor_global_sim(): -# """ -# Test suite for the KleorSimMiss class -# """ -# cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) -# cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) - -# cases_dataset = tf.data.Dataset.from_tensor_slices(cases).batch(2) -# cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) -# semi_factuals = KLEOR( -# cases_dataset, -# cases_targets_dataset, -# k=1, -# case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], -# batch_size=2, -# strategy="global_sim" -# ) - -# return_dict = semi_factuals(inputs, targets) -# assert set(return_dict.keys()) == set(["examples", "indices", "distances", "nuns"]) - -# examples = return_dict["examples"] -# distances = return_dict["distances"] -# indices = return_dict["indices"] -# nuns = return_dict["nuns"] - -# expected_nuns = tf.constant([ -# [[2., 3.]], -# [[1., 2.]], -# [[4., 5.]]], dtype=tf.float32) -# assert tf.reduce_all(tf.equal(nuns, expected_nuns)) - -# assert examples.shape == (3, 2, 2) # (n, k+1, W) -# assert distances.shape == (3, 1) # (n, k) -# assert indices.shape == (3, 1, 2) # (n, k, 2) - -# expected_examples = tf.constant([ -# [[1.5, 2.5], [1., 2.]], -# [[2.5, 3.5], [2., 3.]], -# [[4.5, 5.5], [3., 4.]]], dtype=tf.float32) -# assert tf.reduce_all(tf.equal(examples, expected_examples)) - -# expected_distances = tf.constant([[np.sqrt(2*0.5**2)], [np.sqrt(2*0.5**2)], [np.sqrt(2*1.5**2)]], dtype=tf.float32) -# assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - -# expected_indices = tf.constant([[[0, 0]],[[0, 1]],[[1, 0]]], dtype=tf.int32) -# assert tf.reduce_all(tf.equal(indices, expected_indices)) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 0f4c1dfa..02cc1f4e 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -13,7 +13,7 @@ from ..commons import sanitize_inputs_targets from ..commons import sanitize_dataset, dataset_gather -from .search_methods import KNN, BaseSearchMethod +from .search_methods import BaseSearchMethod from 
.projections import Projection from .search_methods.base import _sanitize_returns @@ -21,38 +21,39 @@ class BaseExampleMethod(ABC): """ - Base class for natural example-based methods explaining models, - they project the cases_dataset into a pertinent space for the with a `Projection`, - then they call the `BaseSearchMethod` on it. + Base class for natural example-based methods explaining classification models. + An example-based method is a method that explains a model's predictions by providing examples from the cases_dataset + (usually the training dataset). The examples are selected with the help of a search method that performs a search in + the search space. The search space is defined with the help of a projection function that projects the cases_dataset + and the (inputs, targets) to explain into a space where the search method is relevant. Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. + The dataset used to train the model, examples are extracted from this dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. labels_dataset Labels associated to the examples in the dataset. Indices should match with cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. + Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's + predictions. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. + Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. - search_method - An algorithm to search the examples in the projected space. k - The number of examples to retrieve. + The number of examples to retrieve per input. projection Projection or Callable that project samples from the input space to the search space. - The search space should be a space where distance make sense for the model. - It should not be `None`, otherwise, - all examples could be computed only with the `search_method`. + The search space should be a space where distances are relevant for the model. + It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in + searching the input space, you should use a `BaseSearchMethod` instead. Example of Callable: ``` @@ -67,12 +68,10 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar ``` case_returns String or list of string with the elements to return in `self.explain()`. - See `self.set_returns()` for detail. + See the returns property for details. 
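To make the `projection` contract described above concrete, here is a hedged sketch that uses a model's penultimate layer as the search space; the model and shapes are placeholders, not part of the library:

```python
import tensorflow as tf

# Hypothetical classifier; any Keras model would do.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation="relu", input_shape=(8,)),
    tf.keras.layers.Dense(3),
])
# Use the penultimate-layer activations as the search space.
latent_model = tf.keras.Model(model.inputs, model.layers[-2].output)

def custom_projection(inputs, targets=None):
    # distances between queries and cases are computed on these features
    return latent_model(inputs)
```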
batch_size Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - search_method_kwargs - Parameters to be passed at the construction of the `search_method`. """ _returns_possibilities = ["examples", "weights", "distances", "labels", "include_inputs"] @@ -88,7 +87,7 @@ def __init__( ): assert ( projection is not None - ), "`BaseExampleMethod` without `projection` is a `BaseSearchMethod`." + ), "`BaseExampleMethod` without Projection method should be a `BaseSearchMethod`." # set attributes self.batch_size = self._initialize_cases_dataset( @@ -96,9 +95,9 @@ def __init__( ) self._search_returns = ["indices", "distances"] - assert hasattr(projection, "__call__"), "projection should be a callable." - # check projection type + # check projection + assert hasattr(projection, "__call__"), "projection should be a callable." if isinstance(projection, Projection): self.projection = projection elif hasattr(projection, "__call__"): @@ -112,13 +111,17 @@ def __init__( # project dataset self.projected_cases_dataset = self.projection.project_dataset(self.cases_dataset, self.targets_dataset) - + + # set properties self.k = k self.returns = case_returns @property @abstractmethod def search_method_class(self) -> Type[BaseSearchMethod]: + """ + When inheriting from `BaseExampleMethod`, one should define the search method class to use. + """ raise NotImplementedError @property @@ -179,13 +182,13 @@ def _initialize_cases_dataset( Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. + The dataset used to train the model, examples are extracted from this dataset. labels_dataset - Labels associated to the examples in the dataset. + Labels associated to the examples in the cases_dataset. Indices should match with cases_dataset. targets_dataset Targets associated to the cases_dataset for dataset projection. - See `projection` for detail. + See `projection` for details. batch_size Number of sample treated simultaneously when using the datasets. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). @@ -254,6 +257,7 @@ def _initialize_cases_dataset( + f"{len(self.cases_dataset.element_spec)} were detected." ) + # prefetch datasets self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) if self.labels_dataset is not None: self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) @@ -269,9 +273,9 @@ def explain( targets: Optional[Union[tf.Tensor, np.ndarray]] = None, ): """ - Compute examples to explain the inputs. - It project inputs with `self.projection` in the search space - and find examples with `self.search_method`. + Return the relevant examples to explain the (inputs, targets). + It projects inputs with `self.projection` in the search space + and find examples with the `self.search_method`. Parameters ---------- @@ -280,20 +284,19 @@ def explain( Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. targets - Tensor or Array passed to the projection function. + Targets associated to the cases_dataset for dataset projection. + See `projection` for details. Returns ------- return_dict Dictionary with listed elements in `self.returns`. - If only one element is present it returns the element. - The elements that can be returned are: - examples, weights, distances, indices, and labels. 
+            The elements that can be returned are defined with the `_returns_possibilities` static attribute of the class.
         """
-        # project inputs
+        # project inputs into the search space
         projected_inputs = self.projection(inputs, targets)
 
-        # look for closest elements to projected inputs
+        # look for relevant elements in the search space
         search_output = self.search_method(projected_inputs, targets)
 
         # manage returned elements
@@ -304,7 +307,7 @@ def __call__(
         inputs: Union[tf.Tensor, np.ndarray],
         targets: Optional[Union[tf.Tensor, np.ndarray]] = None,
     ):
-        """explain alias"""
+        """explain() alias"""
         return self.explain(inputs, targets)
 
     def format_search_output(
@@ -323,23 +326,20 @@ def format_search_output(
         inputs
             Tensor or Array. Input samples to be explained.
             Expected shape among (N, W), (N, T, W), (N, W, H, C).
-            More information in the documentation.
         targets
-            Tensor or Array passed to the projection function.
-            Here it is used by the explain function of attribution methods.
-            Refer to the corresponding method documentation for more detail.
-            Note that the default method is `Saliency`.
+            Targets associated to the cases_dataset for dataset projection.
+            See `projection` for details.
 
         Returns
         -------
         return_dict
             Dictionary with listed elements in `self.returns`.
-            If only one element is present it returns the element.
-            The elements that can be returned are:
-            examples, weights, distances, indices, and labels.
+            The elements that can be returned are defined with the `_returns_possibilities` static attribute of the class.
         """
+        # initialize return dictionary
         return_dict = {}
 
+        # gather examples, labels, and targets from the example's indices of the search output
         examples = dataset_gather(self.cases_dataset, search_output["indices"])
         examples_labels = dataset_gather(self.labels_dataset, search_output["indices"])
         examples_targets = dataset_gather(
@@ -377,13 +377,6 @@
 
         return_dict["weights"] = tf.stack(weights, axis=0)
 
-        # optimization test TODO
-        # return_dict["weights"] = tf.vectorized_map(
-        #     fn=lambda x: self.projection.get_input_weights(x[0], x[1]),
-        #     elems=(examples, examples_targets),
-        #     # fn_output_signature=tf.float32,
-        # )
-
         # add indices, distances, and labels
         if "indices" in self.returns:
             return_dict["indices"] = search_output["indices"]
diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py
index ca203b12..296bcea2 100644
--- a/xplique/example_based/cole.py
+++ b/xplique/example_based/cole.py
@@ -1,7 +1,6 @@
 """
 Implementation of Cole method a simlilar examples method from example based module
 """
-
 import numpy as np
 import tensorflow as tf
 
@@ -14,10 +13,10 @@ class Cole(SimilarExamples):
     """
-    Cole is a similar examples methods that gives the most similar examples to a query.
-    Cole use the model to build a search space so that distances are meaningful for the model.
-    It uses attribution methods to weights inputs.
-    Those attributions may be computed in the latent space for complex data types like images.
+    Cole is a similar examples method that gives the most similar examples to a query in some specific projection space.
+    Cole uses the model (to be explained) to build a search space so that distances are meaningful for the model.
+    It uses attribution methods to weight inputs.
+    Those attributions may be computed in the latent space for high-dimensional data like images.
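Stepping out of the diff for a moment: with the refactored interface above, `explain()` returns a dictionary keyed by the chosen `case_returns`. A minimal end-to-end sketch with an identity projection (toy data; the shapes in the comments are the expected ones):

```python
import tensorflow as tf
from xplique.example_based import SimilarExamples
from xplique.example_based.projections import Projection

# Toy data; an identity projection keeps the sketch model-free.
cases = tf.random.normal((100, 8))
explainer = SimilarExamples(
    cases_dataset=cases,
    k=3,
    projection=Projection(space_projection=lambda inputs: inputs),
    case_returns=["examples", "distances"],
    batch_size=16,
)
outputs = explainer.explain(tf.random.normal((5, 8)))
# outputs["examples"]: (5, 3, 8), i.e. (n, k, W) retrieved cases
# outputs["distances"]: (5, 3), i.e. (n, k) distances in the projected space
```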
It is an implementation of a method proposed by Kenny et Keane in 2019, Twin-Systems to Explain Artificial Neural Networks using Case-Based Reasoning: @@ -26,24 +25,25 @@ class Cole(SimilarExamples): Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. + The dataset used to train the model, examples are extracted from this dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. labels_dataset Labels associated to the examples in the dataset. Indices should match with cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. + Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's + predictions. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. + Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. k - The number of examples to retrieve. Default value is `1`. + The number of examples to retrieve per input. distance Either a Callable, or a value supported by `tf.norm` `ord` parameter. Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: @@ -51,13 +51,10 @@ class Cole(SimilarExamples): yielding the corresponding p-norm." case_returns String or list of string with the elements to return in `self.explain()`. - See `self.set_returns()` from parent class `SimilarExamples` for detail. - By default, the `explain()` method will only return the examples. + See the base class returns property for details. batch_size Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - device - Device to use for the projection, if None, use the default device. latent_layer Layer used to split the model, the first part will be used for projection and the second to compute the attributions. By default, the model is not split. @@ -75,9 +72,8 @@ class Cole(SimilarExamples): It should inherit from `xplique.attributions.base.BlackBoxExplainer`. By default, it computes the gradient to make the Hadamard product in the latent space. attribution_kwargs - Parameters to be passed at the construction of the `attribution_method`. + Parameters to be passed for the construction of the `attribution_method`. 
""" - def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -88,7 +84,6 @@ def __init__( distance: Union[str, Callable] = "euclidean", case_returns: Optional[Union[List[str], str]] = "examples", batch_size: Optional[int] = 32, - # device: Optional[str] = None, latent_layer: Optional[Union[str, int]] = None, attribution_method: Union[str, Type[BlackBoxExplainer]] = "gradient", **attribution_kwargs, @@ -104,7 +99,6 @@ def __init__( model=model, latent_layer=latent_layer, operator=operator, - # device=device, ) elif issubclass(attribution_method, BlackBoxExplainer): # build attribution projection @@ -112,7 +106,6 @@ def __init__( model=model, method=attribution_method, latent_layer=latent_layer, - # device=device, **attribution_kwargs, ) else: diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index fd6b6204..caf4a3fe 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -1,9 +1,8 @@ """ Implementation of both counterfactuals and semi factuals methods for classification tasks. - -SM CF guided to be implemented (I think): KLEOR at least Sim-Miss and Global-Sim -SM CF free to be implemented: MDN but has to be adapated, Local-Region Model?? """ +import warnings + import numpy as np import tensorflow as tf @@ -18,8 +17,60 @@ class NaiveCounterFactuals(BaseExampleMethod): """ - This class allows to search for counterfactuals by searching for the closest sample that do not have the same label. + This class allows to search for counterfactuals by searching for the closest sample to a query in a projection space + that do not have the same model's prediction. It is a naive approach as it follows a greedy approach. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from this dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve per input. + projection + Projection or Callable that project samples from the input space to the search space. + The search space should be a space where distances are relevant for the model. + It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in + searching the input space, you should use a `BaseSearchMethod` instead. 
+ + Example of Callable: + ``` + def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + ''' + Example of projection, + inputs are the elements to project. + targets are optional parameters to orientated the projection. + ''' + projected_inputs = # do some magic on inputs, it should use the model. + return projected_inputs + ``` + case_returns + String or list of string with the elements to return in `self.explain()`. + See the base class returns property for more details. + batch_size + Number of sample treated simultaneously for projection and search. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + distance + Distance for the FilterKNN search method. + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. """ def __init__( self, @@ -32,10 +83,6 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): - - if projection is None: - projection = Projection(space_projection=lambda inputs: inputs) - super().__init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, @@ -45,10 +92,12 @@ def __init__( case_returns=case_returns, batch_size=batch_size, ) - + + # set distance function and order for the search method self.distance = distance self.order = ORDER.ASCENDING + # initiate search_method self.search_method = self.search_method_class( cases_dataset=self.cases_dataset, targets_dataset=self.targets_dataset, @@ -59,16 +108,20 @@ def __init__( filter_fn=self.filter_fn, order=self.order ) - + @property def search_method_class(self): + """ + This property defines the search method class to use for the search. In this case, it is the FilterKNN that + is an efficient KNN search method ignoring non-acceptable cases, thus not considering them in the search. + """ return FilterKNN def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: """ - Filter function to mask the cases for which the label is different from the predicted - label on the inputs. + Filter function to mask the cases for which the model's prediction is different from the model's prediction + on the inputs. """ # get the labels predicted by the model # (n, ) @@ -82,8 +135,59 @@ def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: class LabelAwareCounterFactuals(BaseExampleMethod): """ - This method will search the counterfactuals with a specific label. This label should be provided by the user in the - cf_labels_dataset args. + This method will search the counterfactuals of a query within an expected class. This class should be provided with + the query when calling the explain method. + + Parameters + ---------- + cases_dataset + The dataset used to train the model, examples are extracted from this dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. 
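The NaiveCounterFactuals class defined above can be exercised exactly like in the tests earlier in this patch; an identity-projection sketch with toy values:

```python
import tensorflow as tf
from xplique.example_based import NaiveCounterFactuals
from xplique.example_based.projections import Projection

cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]])
targets = tf.constant([[0., 1.], [1., 0.], [1., 0.], [0., 1.], [1., 0.]])

counter_factuals = NaiveCounterFactuals(
    tf.data.Dataset.from_tensor_slices(cases).batch(2),
    tf.data.Dataset.from_tensor_slices(targets).batch(2),
    k=2,
    projection=Projection(space_projection=lambda inputs: inputs),
    case_returns=["examples", "distances"],
    batch_size=2,
)
# retrieves, for each query, the two closest cases predicted differently
outputs = counter_factuals(tf.constant([[1.5, 2.5]]), tf.constant([[0., 1.]]))
```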
+ Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve per input. + projection + Projection or Callable that project samples from the input space to the search space. + The search space should be a space where distances are relevant for the model. + It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in + searching the input space, you should use a `BaseSearchMethod` instead. + + Example of Callable: + ``` + def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + ''' + Example of projection, + inputs are the elements to project. + targets are optional parameters to orientated the projection. + ''' + projected_inputs = # do some magic on inputs, it should use the model. + return projected_inputs + ``` + case_returns + String or list of string with the elements to return in `self.explain()`. + See the base class returns property for more details. + batch_size + Number of sample treated simultaneously for projection and search. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + distance + Distance for the FilterKNN search method. + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. """ def __init__( self, @@ -96,9 +200,6 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", ): - if projection is None: - projection = Projection(space_projection=lambda inputs: inputs) - # TODO: add a warning here if it is a custom projection that requires using targets as it might mismatch with the explain super().__init__( cases_dataset=cases_dataset, @@ -109,10 +210,18 @@ def __init__( case_returns=case_returns, batch_size=batch_size, ) - + + # raise a warning to specify that target in the explain method is not the same as the target used for + # the target dataset + warnings.warn("If your projection method requires the target, be aware that when using the explain method," + " the target provided is the class within one should search for the counterfactual.\nThus," + " it is possible that the projection of the query is going wrong.") + + # set distance function and order for the search method self.distance = distance self.order = ORDER.ASCENDING + # initiate search_method self.search_method = self.search_method_class( cases_dataset=self.cases_dataset, targets_dataset=self.targets_dataset, @@ -126,20 +235,24 @@ def __init__( @property def search_method_class(self): + """ + This property defines the search method class to use for the search. 
In this case, it is FilterKNN,
+        an efficient KNN search method that ignores non-acceptable cases, leaving them out of the search.
+        """
         return FilterKNN
 
     def filter_fn(self, _, __, cf_targets, cases_targets) -> tf.Tensor:
         """
-        Filter function to mask the cases for which the label is different from the label(s) expected for the
+        Filter function to mask the cases for which the target is different from the target(s) expected for the
         counterfactuals.
 
         Parameters
         ----------
         cf_targets
-            TODO
+            The one-hot encoding of the target class for the counterfactuals.
         cases_targets
-            TODO
+            The one-hot encoding of the target class for the cases.
         """
         mask = tf.matmul(cf_targets, cases_targets, transpose_b=True) #(n, bs)
         # TODO: I think some retracing are done here
@@ -153,9 +266,9 @@ def explain(
         cf_targets: Union[tf.Tensor, np.ndarray],
     ):
         """
-        Compute examples to explain the inputs.
-        It project inputs with `self.projection` in the search space
-        and find examples with `self.search_method`.
+        Return the relevant CF examples to explain the inputs.
+        The CF examples are searched within cases for which the target is the one provided in `cf_targets`.
+        It projects inputs with `self.projection` in the search space and finds examples with the `self.search_method`.
 
         Parameters
         ----------
@@ -164,22 +277,80 @@ def explain(
             Expected shape among (N, W), (N, T, W), (N, W, H, C).
             More information in the documentation.
         cf_targets
-            TODO: change the description here
+            Tensor or Array. One-hot encoding of the target class for the counterfactuals.
 
         Returns
         -------
         return_dict
             Dictionary with listed elements in `self.returns`.
-            If only one element is present it returns the element.
-            The elements that can be returned are:
-            examples, weights, distances, indices, and labels.
+            The elements that can be returned are defined with the `_returns_possibilities` static attribute of the class.
         """
-        # TODO make an assert on the cf_targets
         return super().explain(inputs, cf_targets)
 
 
 class KLEORBase(BaseExampleMethod):
     """
+    Base class for KLEOR methods. KLEOR methods search for Semi-Factual examples. In those methods, one should first
+    retrieve the Nearest Unlike Neighbor (NUN), which is the closest example to the query that has a different
+    prediction than the query. Then, the method searches for the K-Nearest Neighbors (KNN) of the NUN that have the
+    same prediction as the query.
+
+    All the searches are done in a projection space where distances are relevant for the model. The projection space is
+    defined by the `projection` method.
+
+    Depending on the KLEOR method, additional conditions are added to the search. See the specific KLEOR methods for
+    more details.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        `tf.data.Dataset` are assumed to be batched as tensorflow provides no method to verify it.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
+        the case for your dataset, otherwise, examples will not make sense.
+    targets_dataset
+        Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset.
+        `tf.data.Dataset` are assumed to be batched as tensorflow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
+        the case for your dataset, otherwise, examples will not make sense.
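The `tf.matmul` mask used by `filter_fn` above can be checked in isolation; a small sketch with illustrative values (the KLEOR parameter list continues below):

```python
import tensorflow as tf

# cf_targets: desired CF class per input (n, nb_classes);
# cases_targets: class of each candidate case (bs, nb_classes).
cf_targets = tf.constant([[1., 0.], [0., 1.]])
cases_targets = tf.constant([[0., 1.], [1., 0.], [1., 0.]])

# mask[i, j] is True iff case j belongs to the class requested for input i
mask = tf.cast(tf.matmul(cf_targets, cases_targets, transpose_b=True), tf.bool)
print(mask)  # [[False  True  True], [ True False False]]
```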
+ labels_dataset + Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve per input. + projection + Projection or Callable that project samples from the input space to the search space. + The search space should be a space where distances are relevant for the model. + It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in + searching the input space, you should use a `BaseSearchMethod` instead. + + Example of Callable: + ``` + def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + ''' + Example of projection, + inputs are the elements to project. + targets are optional parameters to orientated the projection. + ''' + projected_inputs = # do some magic on inputs, it should use the model. + return projected_inputs + ``` + case_returns + String or list of string with the elements to return in `self.explain()`. + See the base class returns property for more details. + batch_size + Number of sample treated simultaneously for projection and search. + Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + distance + Distance for the FilterKNN search method. + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. """ _returns_possibilities = [ "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns" @@ -197,9 +368,6 @@ def __init__( distance: Union[int, str, Callable] = "euclidean", ): - if projection is None: - projection = Projection(space_projection=lambda inputs: inputs) - super().__init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, @@ -209,10 +377,12 @@ def __init__( case_returns=case_returns, batch_size=batch_size, ) - + + # set distance function and order for the search method self.distance = distance self.order = ORDER.ASCENDING + # initiate search_method self.search_method = self.search_method_class( cases_dataset=self.cases_dataset, targets_dataset=self.targets_dataset, @@ -224,12 +394,14 @@ def __init__( @property def returns(self) -> Union[List[str], str]: - """Getter for the returns parameter.""" + """Override the Base class returns' parameter.""" return self._returns @returns.setter def returns(self, returns: Union[List[str], str]): """ + Set the returns parameter. The returns parameter is a string or a list of string with the elements to return + in `self.explain()`. The elements that can be returned are defined with _returns_possibilities static attribute """ default = "examples" self._returns = _sanitize_returns(returns, self._returns_possibilities, default) @@ -255,6 +427,24 @@ def format_search_output( targets: Optional[Union[tf.Tensor, np.ndarray]] = None, ): """ + Format the output of the `search_method` to match the expected returns in `self.returns`. 
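+        In addition to the elements handled by the base class, KLEOR methods can add the
+        "nuns", "nuns_indices", and "dist_to_nuns" entries to the dictionary when they are
+        requested through `self.returns`.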
+ + Parameters + ---------- + search_output + Dictionary with the required outputs from the `search_method`. + inputs + Tensor or Array. Input samples to be explained. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + targets + Targets associated to the cases_dataset for dataset projection. + See `projection` for details. + + Returns + ------- + return_dict + Dictionary with listed elements in `self.returns`. + The elements that can be returned are defined with _returns_possibilities static attribute of the class. """ return_dict = super().format_search_output(search_output, inputs, targets) if "nuns" in self.returns: @@ -266,13 +456,38 @@ def format_search_output( return return_dict -class KLEORGlobalSim(KLEORBase): +class KLEORSimMiss(KLEORBase): + """ + The KLEORSimMiss method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of + the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the + method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. + + The search is done in a projection space where distances are relevant for the model. The projection space is defined + by the `projection` method. + """ @property def search_method_class(self): - return KLEORGlobalSimSearch + """ + This property defines the search method class to use for the search. In this case, it is the KLEORSimMissSearch. + """ + return KLEORSimMissSearch +class KLEORGlobalSim(KLEORBase): + """ + The KLEORGlobalSim method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of + the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the + method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. -class KLEORSimMiss(KLEORBase): + In addition, for a SF candidate to be considered, the SF should be closer to the query than the NUN in the + projection space (i.e. the SF should be 'between' the input and its NUN). This condition is added to the search. + + The search is done in a projection space where distances are relevant for the model. The projection space is defined + by the `projection` method. + """ @property def search_method_class(self): - return KLEORSimMissSearch + """ + This property defines the search method class to use for the search. In this case, it is the + KLEORGlobalSimSearch. + """ + return KLEORGlobalSimSearch diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 60db96af..dc06bca9 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -8,7 +8,6 @@ import numpy as np from ...types import Union, Optional, List - from ...commons import sanitize_dataset class ORDER(Enum): @@ -24,7 +23,6 @@ def _sanitize_returns(returns: Optional[Union[List[str], str]] = None, possibilities: List[str] = None, default: Union[List[str], str] = None): """ - Factorization of `set_returns` for `BaseSearchMethod` and `SimilarExamples`. It cleans the `returns` parameter. Results is either a sublist of possibilities or a value among possibilities. @@ -66,20 +64,22 @@ def _sanitize_returns(returns: Optional[Union[List[str], str]] = None, class BaseSearchMethod(ABC): """ - Base class used by `NaturalExampleBasedExplainer` search examples in - a meaningful space for the model. It can also be used alone but will not provided - model explanations. 
+ Base class for the example-based search methods. This class is abstract. It should be inherited by + the search methods that are used to find examples in a dataset. It also defines the interface for the + search methods. Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - For natural example-based methods it is the train dataset. + The dataset containing the examples to search in. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. k The number of examples to retrieve. search_returns String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. + It should be a subset of `self._returns_possibilities`. batch_size Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. @@ -133,8 +133,6 @@ def returns(self, returns: Union[List[str], str]): `returns` can be set to 'all' for all possible elements to be returned. - 'examples' correspond to the expected examples, the inputs may be included in first position. (n, k(+1), ...) - - 'weights' the weights in the input space used in the projection. - They are associated to the input and the examples. (n, k(+1), ...) - 'distances' the distances between the inputs and the corresponding examples. They are associated to the examples. (n, k, ...) - 'labels' if provided through `dataset_labels`, @@ -146,7 +144,7 @@ def returns(self, returns: Union[List[str], str]): self._returns = _sanitize_returns(returns, self._returns_possibilities, default) @abstractmethod - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: """ Search the samples to return as examples. Called by the explain methods. It may also return the indices corresponding to the samples, @@ -157,9 +155,16 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ inputs Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). + targets + Tensor or Array. Target of the samples to be explained. + + Returns + ------- + return_dict + Dictionary containing the elements to return which are specified in `self.returns`. """ raise NotImplementedError() - def __call__(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): - """find_samples alias""" + def __call__(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: + """find_samples() alias""" return self.find_examples(inputs, targets) diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index bb057bf3..08baa293 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -14,7 +14,39 @@ class BaseKLEORSearch(FilterKNN, ABC): """ - Base class for the KLEOR search methods. + Base class for the KLEOR search methods. In those methods, one should first retrieve the Nearest Unlike Neighbor + (NUN) which is the closest example to the query that has a different prediction than the query. 
+ Then, the method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. + + Depending on the KLEOR method some additional condition for the search are added. See the specific KLEOR method for + more details. + + Parameters + ---------- + cases_dataset + The dataset used to search the examples. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve per input. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + It should be a subset of `self._returns_possibilities`. + batch_size + Number of sample treated simultaneously. + distance + Distance function to use to measure similarity. + Either a Callable, or a value supported by `tf.norm` `ord` parameter. + Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. """ def __init__( self, @@ -36,6 +68,7 @@ def __init__( filter_fn=self._filter_fn, ) + # search method for the Nearest Unlike Neighbors self.search_nuns = FilterKNN( cases_dataset=cases_dataset, targets_dataset=targets_dataset, @@ -47,7 +80,7 @@ def __init__( filter_fn=self._filter_fn_nun, ) - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: """ Search the samples to return as examples. Called by the explain methods. It may also return the indices corresponding to the samples, @@ -59,6 +92,13 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ Tensor or Array. Input samples to be explained. Assumed to have been already projected. Expected shape among (N, W), (N, T, W), (N, W, H, C). + targets + Tensor or Array. Target of the samples to be explained. + + Returns + ------- + return_dict + Dictionary containing the elements to return which are specified in `self.returns`. """ # compute neighbors examples_distances, examples_indices, nuns, nuns_indices, nuns_sf_distances = self.kneighbors(inputs, targets) @@ -80,6 +120,7 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ def _filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: """ + Filter function to mask the cases for which the prediction is the same as the predicted label on the inputs. 
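+        For instance (illustrative values): with `targets = [[0., 1.]]` and
+        `cases_targets = [[0., 1.], [1., 0.]]`, the returned mask is `[[True, False]]`,
+        keeping only the cases predicted as the same class as the input.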
""" # get the labels predicted by the model # (n, ) @@ -106,6 +147,7 @@ def _filter_fn_nun(self, _, __, targets, cases_targets) -> tf.Tensor: def _get_nuns(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: """ + Get the Nearest Unlike Neighbors and their distance to the related input. """ nuns_dict = self.search_nuns(inputs, targets) nuns_indices, nuns_distances = nuns_dict["indices"], nuns_dict["distances"] @@ -114,6 +156,41 @@ def _get_nuns(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tens def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: """ + Compute the k SF to each tensor of `inputs` in `self.cases_dataset`. + Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. + + Parameters + ---------- + inputs + Tensor or Array. Input samples on which knn are computed. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + More information in the documentation. + targets + Tensor or Array. Target of the samples to be explained. + + Returns + ------- + input_sf_distances + Tensor of distances between the SFs and the inputs with dimension (n, k). + The n inputs times their k-SF. + sf_indices + Tensor of indices of the SFs in `self.cases_dataset` with dimension (n, k, 2). + Where, n represent the number of inputs and k the number of corresponding SFs. + The index of each element is encoded by two values, + the batch index and the index of the element in the batch. + Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`. + nuns + Tensor of Nearest Unlike Neighbors with dimension (n, 1, ...). + The n inputs times their NUN. + nuns_indices + Tensor of indices of the NUN in `self.cases_dataset` with dimension (n, 1, 2). + Where, n represent the number of inputs. + The index of each element is encoded by two values, + the batch index and the index of the element in the batch. + Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`. + nun_sf_distances + Tensor of distances between the SFs and the NUN with dimension (n, k). + The n NUNs times the k-SF. """ # get the Nearest Unlike Neighbors and their distance to the related input nuns, nuns_indices, nuns_input_distances = self._get_nuns(inputs, targets) @@ -193,30 +270,29 @@ def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: class KLEORSimMissSearch(BaseKLEORSearch): """ - KLEOR search method. - - Parameters - ---------- - cases_dataset - Dataset of cases. - targets_dataset - Dataset of targets. Should be a one-hot encoded of the predicted class + The KLEORSimMiss method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of + the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the + method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. """ def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple: + """ + No additional filtering for the KLEORSimMiss method. + """ return nun_sf_distances, input_sf_distances class KLEORGlobalSimSearch(BaseKLEORSearch): """ - KLEOR search method. + The KLEORGlobalSim method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of + the query. 
The NUN is the closest example to the query that has a different prediction than the query. Then, the + method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. - Parameters - ---------- - cases_dataset - Dataset of cases. - targets_dataset - Dataset of targets. Should be a one-hot encoded of the predicted class + In addition, for a SF candidate to be considered, the SF should be closer to the query than the NUN + (i.e. the SF should be 'between' the input and its NUN). This condition is added to the search. """ def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple: + """ + Filter the distances to keep only the SF that are 'between' the input and its NUN. + """ # filter non acceptable cases, i.e. cases for which the distance to the input is greater # than the distance between the input and its nun # (n, current_bs) diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index c1fbe4db..70102a24 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -13,6 +13,27 @@ class BaseKNN(BaseSearchMethod): """ + Base class for the KNN search methods. It is an abstract class that should be inherited by a specific KNN method. + + Parameters + ---------- + cases_dataset + The dataset containing the examples to search in. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + k + The number of examples to retrieve. + search_returns + String or list of string with the elements to return in `self.find_examples()`. + It should be a subset of `self._returns_possibilities`. + batch_size + Number of sample treated simultaneously. + It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + order + The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. + ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are + the best. """ def __init__( self, @@ -28,7 +49,7 @@ def __init__( search_returns=search_returns, batch_size=batch_size, ) - + # set order assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}" self.order = order # fill value @@ -47,7 +68,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. targets - Tensor or Array. Target samples to be explained. + Tensor or Array. Target of the samples to be explained. Returns ------- @@ -63,7 +84,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni """ raise NotImplementedError - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None): + def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: """ Search the samples to return as examples. Called by the explain methods. It may also return the indices corresponding to the samples, @@ -75,6 +96,13 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ Tensor or Array. Input samples to be explained. 
Assumed to have been already projected. Expected shape among (N, W), (N, T, W), (N, W, H, C). + targets + Tensor or Array. Target of the samples to be explained. + + Returns + ------- + return_dict + Dictionary containing the elements to return which are specified in `self.returns`. """ # compute neighbors examples_distances, examples_indices = self.kneighbors(inputs, targets) @@ -84,9 +112,10 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ return return_dict - def _build_return_dict(self, inputs, examples_distances, examples_indices): + def _build_return_dict(self, inputs, examples_distances, examples_indices) -> dict: """ - TODO: Change the description + Build the return dict based on the `self.returns` values. It builds the return dict with the value in the + subset of ['examples', 'include_inputs', 'indices', 'distances'] which is commonly shared. """ # Set values in return dict return_dict = {} @@ -107,22 +136,29 @@ def _build_return_dict(self, inputs, examples_distances, examples_indices): class KNN(BaseKNN): """ KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors`. - Basically a wrapper of `NearestNeighbors` to match the `BaseSearchMethod` API. + The kneighbors method is implemented in a batched way to handle large datasets. Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - For natural example-based methods it is the train dataset. + The dataset containing the examples to search in. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. k The number of examples to retrieve. search_returns String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. + It should be a subset of `self._returns_possibilities`. batch_size Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + order + The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. + ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are + the best. distance + Distance function to use to measure similarity. Either a Callable, or a value supported by `tf.norm` `ord` parameter. Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number @@ -145,6 +181,7 @@ def __init__( order=order, ) + # set distance function if hasattr(distance, "__call__"): self.distance_fn = distance elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( @@ -159,7 +196,23 @@ def __init__( ) @tf.function - def _crossed_distances_fn(self, x1, x2): + def _crossed_distances_fn(self, x1, x2) -> tf.Tensor: + """ + Element-wise distance computation between two tensors. + It has been vectorized to handle batches of inputs and cases. + + Parameters + ---------- + x1 + Tensor. Input samples of shape (n, ...). + x2 + Tensor. Cases samples of shape (m, ...). + + Returns + ------- + distances + Tensor of distances between the inputs and the cases with dimension (n, m). 
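+
+        For instance (illustrative shapes): with `x1` of shape (3, 10) and `x2` of shape
+        (5, 10), the result has shape (3, 5), where entry (i, j) is the distance between
+        `x1[i]` and `x2[j]` under `self.distance_fn`.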
+ """ n = x1.shape[0] m = x2.shape[0] x2 = tf.expand_dims(x2, axis=0) @@ -186,6 +239,8 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf Tensor or Array. Input samples on which knn are computed. Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. + targets + Tensor or Array. Target of the samples to be explained. Returns ------- @@ -246,24 +301,33 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf class FilterKNN(BaseKNN): """ - TODO: Change the class description KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors`. - Basically a wrapper of `NearestNeighbors` to match the `BaseSearchMethod` API. + The kneighbors method is implemented in a batched way to handle large datasets. + In addition, a filter function is used to select the elements to compute the distances, thus reducing the + computational cost of the distance computation (worth if the computation of the filter is low and the matrix + of distances is sparse). Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - For natural example-based methods it is the train dataset. + The dataset containing the examples to search in. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. k The number of examples to retrieve. search_returns String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. + It should be a subset of `self._returns_possibilities`. batch_size Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + order + The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. + ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are + the best. distance + Distance function to use to measure similarity. Either a Callable, or a value supported by `tf.norm` `ord` parameter. Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number @@ -291,7 +355,8 @@ def __init__( batch_size=batch_size, order=order, ) - + + # set distance function if hasattr(distance, "__call__"): self.distance_fn = distance elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( @@ -319,6 +384,24 @@ def __init__( @tf.function def _crossed_distances_fn(self, x1, x2, mask): + """ + Element-wise distance computation between two tensors with a mask. + It has been vectorized to handle batches of inputs and cases. + + Parameters + ---------- + x1 + Tensor. Input samples of shape (n, ...). + x2 + Tensor. Cases samples of shape (m, ...). + mask + Tensor. Boolean mask of shape (n, m). It is used to filter the elements for which the distance is computed. + + Returns + ------- + distances + Tensor of distances between the inputs and the cases with dimension (n, m). + """ n = x1.shape[0] m = x2.shape[0] x2 = tf.expand_dims(x2, axis=0) @@ -338,6 +421,9 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni """ Compute the k-neareast neighbors to each tensor of `inputs` in `self.cases_dataset`. 
Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. + In addition, a filter function is used to select the elements to compute the distances, thus reducing the + computational cost of the distance computation (worth if the computation of the filter is low and the matrix + of distances is sparse). Parameters ---------- @@ -345,6 +431,8 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni Tensor or Array. Input samples on which knn are computed. Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. + targets + Tensor or Array. Target of the samples to be explained. Returns ------- diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index 75756074..ea16f261 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -1,53 +1,48 @@ """ Base model for example-based """ - -import math - import tensorflow as tf import numpy as np -from ..types import Callable, Dict, List, Optional, Type, Union +from ..types import Callable, List, Optional, Type, Union -from ..commons import sanitize_inputs_targets -from ..commons import sanitize_dataset, dataset_gather from .search_methods import KNN, BaseSearchMethod, ORDER from .projections import Projection from .base_example_method import BaseExampleMethod -from .search_methods.base import _sanitize_returns - class SimilarExamples(BaseExampleMethod): """ - Base class for similar examples. + Class for similar example-based method. This class allows to search the k Nearest Neighbor of an input in the + projected space (defined by the projection method) using the distance defined by the distance method provided. Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. + The dataset used to train the model, examples are extracted from this dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. labels_dataset Labels associated to the examples in the dataset. Indices should match with cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. + Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's + predictions. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. + Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. k - The number of examples to retrieve. + The number of examples to retrieve per input. 
projection
        Projection or Callable that project samples from the input space to the search space.
-        The search space should be a space where distance make sense for the model.
-        It should not be `None`, otherwise,
-        all examples could be computed only with the `search_method`.
+        The search space should be a space where distances are relevant for the model.
+        It should not be `None`; otherwise, the model is not involved, thus not explained. If you are interested in
+        searching the input space, you should use a `BaseSearchMethod` instead.

        Example of Callable:
        ```
@@ -62,7 +57,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
        ```
    case_returns
        String or list of string with the elements to return in `self.explain()`.
-        See `self.set_returns()` for detail.
+        See the base class returns property for more details.
    batch_size
        Number of sample treated simultaneously for projection and search.
        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
@@ -73,7 +68,6 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
        yielding the corresponding p-norm." We also added 'cosine'.
    """
-
    def __init__(
        self,
        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
@@ -95,6 +89,7 @@ def __init__(
            batch_size=batch_size,
        )

+        # set distance function
        self.distance = distance

        # initiate search_method

From 7a665256de7f1f42168e0ebb351373c38c35b9cf Mon Sep 17 00:00:00 2001
From: Mohamed Chafik Bakey
Date: Wed, 3 Jul 2024 17:30:35 +0200
Subject: [PATCH 051/138] add the documentation for the prototypes search methods

---
 .../mmd_critic_search.md | 3 +
 .../proto_dash_search.md | 3 +
 .../proto_greedy_search.md | 3 +
 .../prototypes_search_methods/prototypes.md | 69 +++++++++++++++++++
 .../search_methods/search_methods.md | 0
 mkdocs.yml | 7 ++
 6 files changed, 85 insertions(+)
 create mode 100644 docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md
 create mode 100644 docs/api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md
 create mode 100644 docs/api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md
 create mode 100644 docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md
 create mode 100644 docs/api/example_based/search_methods/search_methods.md

diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md b/docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md
new file mode 100644
index 00000000..cb85d17c
--- /dev/null
+++ b/docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md
@@ -0,0 +1,3 @@
+# MMDCriticSearch
+
+MMDCriticSearch ([Kim et al., 2016](https://proceedings.neurips.cc/paper/2016/hash/5680522b8e2bb01943234bce7bf84534-Abstract.html))
\ No newline at end of file
diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md b/docs/api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md
new file mode 100644
index 00000000..b54dec50
--- /dev/null
+++ b/docs/api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md
@@ -0,0 +1,3 @@
+# ProtoDashSearch
+
+ProtoDashSearch ([Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212))
\ No newline at end of file
diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md
b/docs/api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md
new file mode 100644
index 00000000..9213caa1
--- /dev/null
+++ b/docs/api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md
@@ -0,0 +1,3 @@
+# ProtoGreedySearch
+
+ProtoGreedySearch ([Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212))
\ No newline at end of file
diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md b/docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md
new file mode 100644
index 00000000..e617985e
--- /dev/null
+++ b/docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md
@@ -0,0 +1,69 @@
+# Prototypes
+
+Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class.
+
+Three classes of prototype-based methods are found in the literature ([Poché et al., 2023](https://hal.science/hal-04117520/document)): Prototypes for Data-Centric Interpretability, Prototypes for Post-hoc Interpretability and Prototype-Based Models Interpretable by Design. This library focuses on the first two classes.
+
+## Prototypes for Data-Centric Interpretability
+In this class, prototypes are selected without relying on the model and provide an overview of
+the dataset. In this library, they are implemented as [search methods](./algorithms/search_methods/).
+
+Xplique includes the following prototype search methods:
+
+| Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* |
+|:-------------------------------------- | :----------------------: | :---------------: | :---------------------: |
+| [ProtoGreedySearch](../proto_greedy_search/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+| [ProtoDashSearch](../proto_dash_search/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+| [MMDCriticSearch](../mmd_critic_search/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+
+*: Before using a PyTorch model, it is highly recommended to read the [dedicated documentation](../pytorch/)
+
+### What is MMD?
+The commonality among these three methods is their use of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes. MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows:
+
+$$
+\begin{align*}
+\text{MMD}(P, Q) &= \left\| \mathbb{E}_{X \sim P}[\varphi(X)] - \mathbb{E}_{Y \sim Q}[\varphi(Y)] \right\|_\mathcal{H}
+\end{align*}
+$$
+
+where $\varphi(\cdot)$ is a mapping function of the data points. If we want to consider all orders of moments of the distributions, the mapping vectors $\varphi(X)$ and $\varphi(Y)$ will be infinite-dimensional. Thus, we cannot calculate them directly.
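+For intuition, note that even the one-dimensional Gaussian kernel corresponds to an infinite-dimensional feature map (a standard identity, added here for illustration), since its Taylor expansion gives
+
+$$
+k(x, y) = e^{-\frac{(x - y)^2}{2\sigma^2}} = \sum_{n=0}^{\infty} \left( \frac{x^n \, e^{-\frac{x^2}{2\sigma^2}}}{\sigma^n \sqrt{n!}} \right) \left( \frac{y^n \, e^{-\frac{y^2}{2\sigma^2}}}{\sigma^n \sqrt{n!}} \right),
+$$
+
+with one feature per moment order $n$.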
However, if we have a kernel that gives the same result as the inner product of these two mappings in Hilbert space ($k(x, y) = \langle \varphi(x), \varphi(y) \rangle_\mathcal{H}$), then $\text{MMD}^2$ can be computed using only the kernel and without explicitly using $\varphi(X)$ and $\varphi(Y)$ (this is called the kernel trick):
+
+$$
+\begin{align*}
+\text{MMD}^2(P, Q) &= \langle \mathbb{E}_{X \sim P}[\varphi(X)], \mathbb{E}_{X' \sim P}[\varphi(X')] \rangle_\mathcal{H} + \langle \mathbb{E}_{Y \sim Q}[\varphi(Y)], \mathbb{E}_{Y' \sim Q}[\varphi(Y')] \rangle_\mathcal{H} \\
+&\quad - 2\langle \mathbb{E}_{X \sim P}[\varphi(X)], \mathbb{E}_{Y \sim Q}[\varphi(Y)] \rangle_\mathcal{H} \\
+&= \mathbb{E}_{X, X' \sim P}[k(X, X')] + \mathbb{E}_{Y, Y' \sim Q}[k(Y, Y')] - 2\mathbb{E}_{X \sim P, Y \sim Q}[k(X, Y)]
+\end{align*}
+$$
+
+### How to choose the kernel?
+The choice of the kernel for selecting prototypes depends on the specific problem and the characteristics of your data. Several kernels can be used, including:
+
+- Gaussian
+- Laplace
+- Polynomial
+- Linear...
+
+If we consider any exponential kernel (Gaussian kernel, Laplace, ...), we automatically consider all the moments of the distributions, as the Taylor expansion of the exponential considers infinite-order moments. It is better to use a non-linear kernel to capture non-linear relationships in your data. If the problem is linear, it is better to choose a linear kernel such as the dot product kernel, since it is computationally efficient and often requires fewer hyperparameters to tune.
+
+For the MMD-critic method, the kernel must satisfy a condition ensuring the submodularity of the set function (the Gaussian kernel respects this constraint). In contrast, for ProtoDash and ProtoGreedy, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity.
+
+### Default kernel
+The default kernel is the Gaussian kernel. It assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data exhibits complex, non-linear structure. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned.
+
+## Prototypes for Post-hoc Interpretability
+
+Data-Centric methods such as ProtoGreedy, ProtoDash and MMD-critic can be used in either the output or the latent space of the model. In these cases, [projection methods](./algorithms/projections/) are used to transfer the data from the input space to the latent/output spaces.
+
+## Architecture of the code
+
+The Data-Centric prototype methods are implemented as `search_methods`. A search method can have a `projection` attribute that projects samples to a space where distances between samples make sense for the model. The `search_method` then finds the prototypes by looking in the projected space.
+
+The class `ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.
+
+Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from the `ProtoGreedySearch` class.
+
+The class `MMDCriticSearch` differs from `ProtoGreedySearch` by assigning equal weights to the selected prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedySearch`, for each new candidate, we calculate the best weights for the current selection of prototypes.
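+To make the weighted objective concrete, below is a minimal, self-contained sketch of the weighted $\text{MMD}^2$ that such a selection optimizes, using a Gaussian kernel. It is our own illustration with hypothetical names (`gaussian_kernel`, `weighted_mmd2`), not the Xplique API:
+
+```
+import tensorflow as tf
+
+def gaussian_kernel(x, y, sigma=1.0):
+    # pairwise k(x, y) = exp(-||x - y||^2 / (2 * sigma^2)); (n, d) x (m, d) -> (n, m)
+    sq_dists = tf.reduce_sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
+    return tf.exp(-sq_dists / (2.0 * sigma ** 2))
+
+def weighted_mmd2(dataset, prototypes, weights, sigma=1.0):
+    # MMD^2 between the empirical distribution of `dataset` and the weighted
+    # distribution carried by `prototypes`; `weights` are assumed to sum to 1
+    n = tf.cast(tf.shape(dataset)[0], tf.float32)
+    k_xx = tf.reduce_mean(gaussian_kernel(dataset, dataset, sigma))
+    k_zz = tf.einsum("i,ij,j->", weights, gaussian_kernel(prototypes, prototypes, sigma), weights)
+    k_xz = tf.einsum("ij,j->", gaussian_kernel(dataset, prototypes, sigma), weights) / n
+    return k_xx + k_zz - 2.0 * k_xz
+```
+
+The smaller this quantity, the better the weighted prototype set mimics the dataset distribution.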
However, in `MMDCriticSearch`, the `compute_objective` method assigns the same weight to all elements in the selection. + +The class `ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is different: it maximizes a tight lower bound on $l(w)$ instead of maximizing $l(w)$, as done in `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides the `compute_objective` method to calculate an objective based on the gradient of $l(w)$. It also overrides the `update_selection` method to select the best weights of the selection based on the gradient of the best candidate. \ No newline at end of file diff --git a/docs/api/example_based/search_methods/search_methods.md b/docs/api/example_based/search_methods/search_methods.md new file mode 100644 index 00000000..e69de29b diff --git a/mkdocs.yml b/mkdocs.yml index 6b20f7f5..eb975edc 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -42,6 +42,13 @@ nav: - Cav: api/concepts/cav.md - Tcav: api/concepts/tcav.md - Craft: api/concepts/craft.md + - Example based: + - Search Methods: + - Prototypes Search Methods: + - Prototypes: api/example_based/search_methods/prototypes_search_methods/prototypes.md + - ProtoGreedySearch: api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md + - ProtoDashSearch: api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md + - MMDCriticSearch: api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md - Feature visualization: - Modern Feature Visualization (MaCo): api/feature_viz/maco.md - Feature visualization: api/feature_viz/feature_viz.md From c8047b0e5b4fb1e6f134875f1651083cafeba360 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 8 Jul 2024 15:14:37 +0200 Subject: [PATCH 052/138] prototypes: hotfix --- .gitignore | 1 + xplique/example_based/prototypes.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index fec8e55f..84161dc4 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,7 @@ coverage.xml .pytest_cache/ cover/ *test*.sh +tests/concepts/checkpoints/ # Environments .env diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 16f68243..6d8c8c34 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -70,6 +70,7 @@ def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", @@ -84,6 +85,7 @@ def __init__( super().__init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, + targets_dataset=targets_dataset, k=k, projection=projection, case_returns=case_returns, From 693fa42a4013fe9c6f4a361b1c738f3a110d6961 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 8 Jul 2024 15:38:19 +0200 Subject: [PATCH 053/138] prototypes: hotfix --- xplique/example_based/prototypes.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 6d8c8c34..cd14766b 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -157,9 +157,21 @@ class ProtoDash(Prototypes): ---------- cases_dataset The dataset used to train the model, examples 
are extracted from the dataset. - For natural example-based methods it is the train dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. labels_dataset Labels associated to the examples in the dataset. Indices should match with cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. + targets_dataset + Targets associated to the cases_dataset for dataset projection. See `projection` for detail. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other dataset should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. k The number of examples to retrieve. search_returns @@ -191,6 +203,7 @@ def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", @@ -206,7 +219,8 @@ def __init__( super().__init__( cases_dataset=cases_dataset, - labels_dataset=labels_dataset, + labels_dataset=labels_dataset, + targets_dataset=targets_dataset, k=k, projection=projection, case_returns=case_returns, From d1e8031fd93183c3ac5257886b84f6a978e27571 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 8 Jul 2024 17:33:06 +0200 Subject: [PATCH 054/138] example-based: make tests pass --- tests/example_based/test_kleor.py | 2 +- tests/example_based/test_knn.py | 58 +++++++++++--------- tests/example_based/test_projections.py | 2 +- xplique/example_based/projections/commons.py | 19 ++++++- 4 files changed, 52 insertions(+), 29 deletions(-) diff --git a/tests/example_based/test_kleor.py b/tests/example_based/test_kleor.py index fec68950..cd2cd333 100644 --- a/tests/example_based/test_kleor.py +++ b/tests/example_based/test_kleor.py @@ -56,7 +56,7 @@ def test_kleor_base_and_sim_miss(): [np.sqrt(2*1.5**2)], [np.sqrt(2*0.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.equal(nuns, expected_nuns)) - assert tf.reduce_all(tf.equal(nuns_distances, expected_nuns_distances)) + assert tf.reduce_all(tf.abs(nuns_distances - expected_nuns_distances) < 1e-5) # test the _initialize_search method sf_indices, input_sf_distances, nun_sf_distances, batch_indices = kleor._initialize_search(inputs) diff --git a/tests/example_based/test_knn.py b/tests/example_based/test_knn.py index 61740a0e..4a9df427 100644 --- a/tests/example_based/test_knn.py +++ b/tests/example_based/test_knn.py @@ -5,6 +5,8 @@ import numpy as np import tensorflow as tf +from ..utils import almost_equal + from xplique.example_based.search_methods import BaseKNN, KNN, FilterKNN, ORDER def get_setup(input_shape, nb_samples=10, nb_labels=10): @@ -180,7 +182,7 @@ def test_knn_compute_distances(): distances = knn._crossed_distances_fn(x1, x2) assert distances.shape == (x1.shape[0], 
x2.shape[0]) - assert tf.reduce_all(tf.equal(distances, expected_distance)) + assert almost_equal(distances, expected_distance, epsilon=1e-5) # Test with higher dimensions data = np.array([ @@ -217,7 +219,7 @@ def test_knn_compute_distances(): distances = knn._crossed_distances_fn(x1, x2) assert distances.shape == (x1.shape[0], x2.shape[0]) - assert tf.reduce_all(tf.equal(distances, expected_distance)) + assert almost_equal(distances, expected_distance) def test_knn_kneighbors(): @@ -237,8 +239,8 @@ def test_knn_kneighbors(): distances, indices = knn.kneighbors(inputs) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32)) # Test with reverse order knn = KNN( @@ -252,8 +254,8 @@ def test_knn_kneighbors(): distances, indices = knn.kneighbors(inputs) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[3.5, 2.5], [2.5, 1.5], [3.5, 2.5]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[3.5, 2.5], [2.5, 1.5], [3.5, 2.5]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32)) # Test with input and cases being 2D cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) @@ -268,8 +270,8 @@ def test_knn_kneighbors(): distances, indices = knn.kneighbors(inputs) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32)) # Test with reverse order knn = KNN( @@ -285,7 +287,7 @@ def test_knn_kneighbors(): assert indices.shape == (3, 2, 2) expected_distances = tf.constant([[np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + assert almost_equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32)) def test_filter_knn_compute_distances(): """ @@ -310,12 +312,14 @@ def test_filter_knn_compute_distances(): mask = tf.ones((x1.shape[0], x2.shape[0]), dtype=tf.bool) distances = knn._crossed_distances_fn(x1, x2, mask) assert distances.shape == (x1.shape[0], x2.shape[0]) - assert tf.reduce_all(tf.equal(distances, expected_distance)) + assert 
almost_equal(distances, expected_distance, epsilon=1e-5) mask = tf.constant([[True, False], [False, True], [True, True]], dtype=tf.bool) expected_distance = tf.constant([[np.sqrt(72), np.inf], [np.inf, np.sqrt(72)], [np.sqrt(8), np.sqrt(32)]], dtype=tf.float32) distances = knn._crossed_distances_fn(x1, x2, mask) - assert tf.reduce_all(tf.equal(distances, expected_distance)) + assert np.allclose(distances, expected_distance, equal_nan=True) + assert np.array_equal(distances == np.inf, expected_distance == np.inf) + assert np.array_equal(distances == -np.inf, expected_distance == -np.inf) # Test with higher dimensions data = np.array([ @@ -353,13 +357,15 @@ def test_filter_knn_compute_distances(): mask = tf.ones((x1.shape[0], x2.shape[0]), dtype=tf.bool) distances = knn._crossed_distances_fn(x1, x2, mask) assert distances.shape == (x1.shape[0], x2.shape[0]) - assert tf.reduce_all(tf.equal(distances, expected_distance)) + assert almost_equal(distances, expected_distance) mask = tf.constant([[True, False], [False, True], [True, True]], dtype=tf.bool) expected_distance = tf.constant([[np.sqrt(9)*27, np.inf], [np.inf, np.sqrt(9)*27], [np.sqrt(9)*9, np.sqrt(9)*18]], dtype=tf.float32) distances = knn._crossed_distances_fn(x1, x2, mask) assert distances.shape == (x1.shape[0], x2.shape[0]) - assert tf.reduce_all(tf.equal(distances, expected_distance)) + assert np.allclose(distances, expected_distance, equal_nan=True) + assert np.array_equal(distances == np.inf, expected_distance == np.inf) + assert np.array_equal(distances == -np.inf, expected_distance == -np.inf) def test_filter_knn_kneighbors(): """ @@ -378,8 +384,8 @@ def test_filter_knn_kneighbors(): distances, indices = knn.kneighbors(inputs) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[0.5, 0.5], [0.5, 0.5], [0.5, 0.5]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32)) cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) @@ -396,8 +402,8 @@ def test_filter_knn_kneighbors(): distances, indices = knn.kneighbors(inputs, targets) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[0.5, 2.5], [0.5, 0.5], [0.5, 1.5]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [1, 1]],[[0, 1], [1, 0]],[[2, 0], [1, 0]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[0.5, 2.5], [0.5, 0.5], [0.5, 1.5]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[0, 0], [1, 1]],[[0, 1], [1, 0]],[[2, 0], [1, 0]]], dtype=tf.int32)) ## test with reverse order knn = FilterKNN( @@ -412,8 +418,8 @@ def test_filter_knn_kneighbors(): assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) expected_distances = tf.constant([[3.5, 2.5], [2.5, 1.5], [3.5, 2.5]], dtype=tf.float32) - assert tf.reduce_all(tf.equal(distances, expected_distances)) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + assert almost_equal(distances, expected_distances) + assert almost_equal(indices, 
tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32)) ## add a filter that is not the default one and reverse order knn = FilterKNN( @@ -429,8 +435,8 @@ def test_filter_knn_kneighbors(): distances, indices = knn.kneighbors(inputs, targets) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[2.5, 0.5], [2.5, 0.5], [2.5, 1.5]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[2.5, 0.5], [2.5, 0.5], [2.5, 1.5]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32)) # Test with input and cases being 2D cases = tf.constant([[1., 2.], [2., 3.], [3., 4.], [4., 5.], [5., 6.]], dtype=tf.float32) @@ -446,8 +452,8 @@ def test_filter_knn_kneighbors(): distances, indices = knn.kneighbors(inputs) assert distances.shape == (3, 2) assert indices.shape == (3, 2, 2) - assert tf.reduce_all(tf.equal(distances, tf.constant([[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)]], dtype=tf.float32))) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32))) + assert almost_equal(distances, tf.constant([[np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(0.5)]], dtype=tf.float32)) + assert almost_equal(indices, tf.constant([[[0, 0], [0, 1]],[[0, 1], [1, 0]],[[1, 1], [2, 0]]], dtype=tf.int32)) cases_targets = tf.constant([[0, 1], [1, 0], [1, 0], [0, 1], [1, 0]], dtype=tf.float32) targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) @@ -466,7 +472,7 @@ def test_filter_knn_kneighbors(): assert indices.shape == (3, 2, 2) expected_distances = tf.constant([[np.sqrt(0.5), np.sqrt(2*2.5**2)], [np.sqrt(0.5), np.sqrt(0.5)], [np.sqrt(0.5), np.sqrt(2*1.5**2)],], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[0, 0], [1, 1]],[[0, 1], [1, 0]],[[2, 0], [1, 0]]], dtype=tf.int32))) + assert almost_equal(indices, tf.constant([[[0, 0], [1, 1]],[[0, 1], [1, 0]],[[2, 0], [1, 0]]], dtype=tf.int32)) ## test with reverse order and default filter knn = FilterKNN( @@ -482,7 +488,7 @@ def test_filter_knn_kneighbors(): assert indices.shape == (3, 2, 2) expected_distances = tf.constant([[np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)], [np.sqrt(2*3.5**2), np.sqrt(2*2.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32))) + assert almost_equal(indices, tf.constant([[[2, 0], [1, 1]],[[2, 0], [0, 0]],[[0, 0], [0, 1]]], dtype=tf.int32)) ## add a filter that is not the default one and reverse order knn = FilterKNN( @@ -500,4 +506,4 @@ def test_filter_knn_kneighbors(): assert indices.shape == (3, 2, 2) expected_distances = tf.constant([[np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(0.5)], [np.sqrt(2*2.5**2), np.sqrt(2*1.5**2)]], dtype=tf.float32) assert tf.reduce_all(tf.abs(distances - expected_distances) < 1e-5) - assert tf.reduce_all(tf.equal(indices, tf.constant([[[1, 1], [0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32))) + assert almost_equal(indices, tf.constant([[[1, 1], 
[0, 0]],[[2, 0], [0, 1]],[[0, 1], [1, 0]]], dtype=tf.int32)) diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py index 8fe8b28f..f624b68a 100644 --- a/tests/example_based/test_projections.py +++ b/tests/example_based/test_projections.py @@ -13,7 +13,7 @@ from xplique.attributions import Saliency from xplique.example_based.projections import Projection, AttributionProjection, LatentSpaceProjection from xplique.example_based.projections.commons import model_splitting -from ..utils import generate_data, almost_equal + def get_setup(input_shape, nb_samples=10, nb_labels=2): """ diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index 59dc7ee8..c8747592 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -51,8 +51,25 @@ def model_splitting(model: tf.keras.Model, features_extractor = tf.keras.Model( model.input, latent_layer.output, name="features_extractor" ) + # predictor = tf.keras.Model( + # latent_layer.output, model.output, name="predictor" + # ) + second_input = tf.keras.Input(shape=latent_layer.output_shape[1:]) + + # Reconstruct the second part of the model + x = second_input + layer_found = False + for layer in model.layers: + if layer_found: + x = layer(x) + if layer == latent_layer: + layer_found = True + + # Create the second part of the model (predictor) predictor = tf.keras.Model( - latent_layer.output, model.output, name="predictor" + inputs=second_input, + outputs=x, + name="predictor" ) if return_layer: From 84fdb6852820b2330c121da12ad4edb71817d172 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 8 Jul 2024 18:20:32 +0200 Subject: [PATCH 055/138] prototypes: support non-identity projections --- xplique/example_based/prototypes.py | 2 +- .../example_based/search_methods/proto_greedy_search.py | 9 ++++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index cd14766b..5742190d 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -101,7 +101,7 @@ def __init__( # initiate search_method self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, + cases_dataset=self.projected_cases_dataset, labels_dataset=self.labels_dataset, k=self.k, search_returns=self._search_returns, diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 4ed79899..39ed7159 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -10,7 +10,7 @@ from .base import BaseSearchMethod from .knn import KNN -from ..projections import Projection +# from ..projections import Projection class ProtoGreedySearch(BaseSearchMethod): @@ -163,6 +163,13 @@ def kernel_induced_distance(x1,x2): for batch_col_index, (batch_col_cases, batch_col_labels) in enumerate( zip(self.cases_dataset, self.labels_dataset) ): + # elements should be tabular data + assert len(batch_col_cases.shape) == 2,\ + "Expected prototypes' searches expects 2D data, (nb_samples, nb_features),"+\ + f"but got {batch_col_cases.shape}"+\ + "Please verify your projection if you provided a custom one."+\ + "If you use a splitted model, make sure the output of the first part of the model is flattened." 
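The assert introduced just above means that whatever the projection feeds to the prototype search must already be tabular, of shape (nb_samples, nb_features). As a minimal illustrative sketch (not part of the patch), a custom `Projection` (the same class the test files instantiate) can flatten higher-rank activations before the search; `flat_projection` is a hypothetical helper name:

```python
import tensorflow as tf
from xplique.example_based.projections import Projection

def flat_projection(inputs, targets=None):
    # collapse everything but the batch axis, e.g. (n, h, w, c) -> (n, h*w*c)
    return tf.reshape(inputs, [tf.shape(inputs)[0], -1])

projection = Projection(space_projection=flat_projection)
```

Any callable with the `(inputs, targets=None)` signature works here, as long as its output is two-dimensional.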
+ batch_col_sums = tf.zeros((batch_col_cases.shape[0])) for batch_row_index, (batch_row_cases, batch_row_labels) in enumerate( From a7a9cc462ae11dad00ee247fa611edd179d484ff Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Tue, 9 Jul 2024 09:36:58 +0200 Subject: [PATCH 056/138] example-based: hotfix --- xplique/example_based/search_methods/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index dc06bca9..77dd768b 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -96,7 +96,7 @@ def __init__( # set batch size if hasattr(cases_dataset, "_batch_size"): - self.batch_size = cases_dataset._batch_size + self.batch_size = tf.cast(cases_dataset._batch_size, tf.int32) else: self.batch_size = batch_size From 30e6a56f620f7b5909abaf3c8b5de0a670ab5ceb Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Tue, 9 Jul 2024 17:54:19 +0200 Subject: [PATCH 057/138] prototypes: enhance tests and adapt code --- tests/example_based/test_prototypes.py | 452 +++++++++++------- tests/utils.py | 17 +- xplique/commons/tf_dataset_operations.py | 13 +- xplique/example_based/prototypes.py | 166 +++---- .../search_methods/proto_dash_search.py | 12 +- .../search_methods/proto_greedy_search.py | 123 +++-- 6 files changed, 426 insertions(+), 357 deletions(-) diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index 8a31b24d..b9fd43f8 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -19,225 +19,317 @@ from xplique.example_based import Prototypes, ProtoGreedy, ProtoDash, MMDCritic from xplique.example_based.projections import Projection, LatentSpaceProjection -from tests.utils import almost_equal, get_Gaussian_Data, load_data, plot, plot_local_explanation +from tests.utils import almost_equal, get_gaussian_data, load_data, plot, plot_local_explanation -def test_proto_greedy_basic(): +def test_prototypes_global_explanations_basic(): """ - Test the Prototypes with an identity projection. + Test prototypes shapes and uniqueness. 
""" # Setup k = 3 - nb_prototypes = 3 + nb_prototypes = 5 + nb_classes = 3 + gamma = 0.026 - x_train, y_train = get_Gaussian_Data(nb_samples_class=20) - x_test, y_test = get_Gaussian_Data(nb_samples_class=10) - # x_train, y_train = load_data('usps') - # x_test, y_test = load_data('usps.t') - # x_test = tf.random.shuffle(x_test) - # x_test = x_test[0:8] + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) + x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10) identity_projection = Projection( space_projection=lambda inputs, targets=None: inputs ) - kernel_type = "global" - - # Method initialization - method = ProtoGreedy( - cases_dataset=x_train, - labels_dataset=y_train, - k=k, - projection=identity_projection, - batch_size=32, - distance=None, #"euclidean", - nb_prototypes=nb_prototypes, - kernel_type=kernel_type, - gamma=gamma, - ) - - # Generate global explanation - prototype_indices, prototype_weights = method.get_global_prototypes() - - prototypes = tf.gather(x_train, prototype_indices) - prototype_labels = tf.gather(y_train, prototype_indices) - - # sort by label - prototype_labels_sorted = prototype_labels.numpy().argsort() - - prototypes = tf.gather(prototypes, prototype_labels_sorted) - prototype_indices = tf.gather(prototype_indices, prototype_labels_sorted) - prototype_labels = tf.gather(prototype_labels, prototype_labels_sorted) - prototype_weights = tf.gather(prototype_weights, prototype_labels_sorted) - - # Verifications - # Shape - assert prototype_indices.shape == (nb_prototypes,) - assert prototypes.shape == (nb_prototypes, x_train.shape[1]) - assert prototype_weights.shape == (nb_prototypes,) - - # at least 1 prototype per class is selected - assert tf.unique(prototype_labels)[0].shape == tf.unique(y_train)[0].shape - - # uniqueness test of prototypes - assert prototype_indices.shape == tf.unique(prototype_indices)[0].shape - - # Check if all indices are between 0 and x_train.shape[0]-1 - assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) - - # Generate local explanation - examples = method.explain(x_test) - - # # Visualize all prototypes - # plot(prototypes, prototype_weights, 'proto_greedy') - - # # Visualize local explanation - # plot_local_explanation(examples, x_test, 'proto_greedy') - -def test_proto_dash_basic(): + for kernel_type in ["local", "global"]: + for method_class in [ProtoGreedy, ProtoDash, MMDCritic]: + # compute general prototypes + method = method_class( + cases_dataset=x_train, + labels_dataset=y_train, + k=k, + projection=identity_projection, + batch_size=8, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + gamma=gamma, + ) + # extract prototypes + prototypes_dict = method.get_global_prototypes() + prototypes = prototypes_dict["prototypes"] + prototypes_indices = prototypes_dict["prototypes_indices"] + prototypes_labels = prototypes_dict["prototypes_labels"] + prototypes_weights = prototypes_dict["prototypes_weights"] + + # check shapes + assert prototypes.shape == (nb_prototypes,) + x_train.shape[1:] + assert prototypes_indices.shape == (nb_prototypes,) + assert prototypes_labels.shape == (nb_prototypes,) + assert prototypes_weights.shape == (nb_prototypes,) + + # check uniqueness + assert len(prototypes_indices) == len(tf.unique(prototypes_indices)[0]) + + # for each prototype + for i in range(nb_prototypes): + # check prototypes are in the dataset and correspond to the index + assert 
tf.reduce_all(tf.equal(prototypes[i], x_train[prototypes_indices[i]])) + + # same for labels + assert tf.reduce_all(tf.equal(prototypes_labels[i], y_train[prototypes_indices[i]])) + + # check indices are in the dataset + assert prototypes_indices[i] >= 0 and prototypes_indices[i] < x_train.shape[0] + + +def test_prototypes_local_explanations_basic(): """ - Test the Prototypes with an identity projection. + Test prototypes local explanations. """ # Setup k = 3 - nb_prototypes = 3 + nb_prototypes = 5 + nb_classes = 3 + batch_size = 8 + gamma = 0.026 - x_train, y_train = get_Gaussian_Data(nb_samples_class=20) - x_test, y_test = get_Gaussian_Data(nb_samples_class=10) - # x_train, y_train = load_data('usps') - # x_test, y_test = load_data('usps.t') - # x_test = tf.random.shuffle(x_test) - # x_test = x_test[0:8] + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) + x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10) identity_projection = Projection( space_projection=lambda inputs, targets=None: inputs ) - kernel_type = "global" - - # Method initialization - method = ProtoDash( - cases_dataset=x_train, - labels_dataset=y_train, - k=k, - projection=identity_projection, - batch_size=32, - distance="euclidean", - nb_prototypes=nb_prototypes, - kernel_type=kernel_type, - gamma=gamma, - ) - - # Generate global explanation - prototype_indices, prototype_weights = method.get_global_prototypes() - - prototypes = tf.gather(x_train, prototype_indices) - prototype_labels = tf.gather(y_train, prototype_indices) - - # sort by label - prototype_labels_sorted = prototype_labels.numpy().argsort() - - prototypes = tf.gather(prototypes, prototype_labels_sorted) - prototype_indices = tf.gather(prototype_indices, prototype_labels_sorted) - prototype_labels = tf.gather(prototype_labels, prototype_labels_sorted) - prototype_weights = tf.gather(prototype_weights, prototype_labels_sorted) - - # Verifications - # Shape - assert prototype_indices.shape == (nb_prototypes,) - assert prototypes.shape == (nb_prototypes, x_train.shape[1]) - assert prototype_weights.shape == (nb_prototypes,) - - # at least 1 prototype per class is selected - assert tf.unique(prototype_labels)[0].shape == tf.unique(y_train)[0].shape - - # uniqueness test of prototypes - assert prototype_indices.shape == tf.unique(prototype_indices)[0].shape - - # Check if all indices are between 0 and x_train.shape[0]-1 - assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) - - # Generate local explanation - examples = method.explain(x_test) - - # # Visualize all prototypes - # plot(prototypes, prototype_weights, 'proto_dash') - - # # Visualize local explanation - # plot_local_explanation(examples, x_test, 'proto_dash') - -def test_mmd_critic_basic(): + for kernel_type in ["local", "global"]: + for method_class in [ProtoGreedy, ProtoDash, MMDCritic]: + # compute general prototypes + method = method_class( + cases_dataset=x_train, + labels_dataset=y_train, + k=k, + projection=identity_projection, + case_returns=["examples", "distances", "labels", "indices"], + batch_size=batch_size, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + gamma=gamma, + ) + # extract prototypes + prototypes_dict = method.get_global_prototypes() + prototypes = prototypes_dict["prototypes"] + prototypes_indices = prototypes_dict["prototypes_indices"] + prototypes_labels = prototypes_dict["prototypes_labels"] + prototypes_weights = 
prototypes_dict["prototypes_weights"] + + # compute local explanations + outputs = method.explain(x_test) + examples = outputs["examples"] + distances = outputs["distances"] + labels = outputs["labels"] + indices = outputs["indices"] + + # check shapes + assert examples.shape == (x_test.shape[0], k) + x_train.shape[1:] + assert distances.shape == (x_test.shape[0], k) + assert labels.shape == (x_test.shape[0], k) + assert indices.shape == (x_test.shape[0], k, 2) + + # for each sample + for i in range(x_test.shape[0]): + # check first closest prototype label is the same as the sample label + assert tf.reduce_all(tf.equal(labels[i, 0], y_test[i])) + + for j in range(k): + # check indices in prototypes' indices + index = indices[i, j, 0] * batch_size + indices[i, j, 1] + assert index in prototypes_indices + + # check examples are in prototypes + assert tf.reduce_all(tf.equal(prototypes[prototypes_indices == index], examples[i, j])) + + # check indices are in the dataset + assert tf.reduce_all(tf.equal(x_train[index], examples[i, j])) + + # check distances + assert almost_equal(distances[i, j], tf.norm(x_test[i] - x_train[index]), epsilon=1e-5) + + # check labels + assert tf.reduce_all(tf.equal(labels[i, j], y_train[index])) + + +def test_prototypes_global_sanity_checks_1(): """ - Test the Prototypes with an identity projection. + Test prototypes global explanations sanity checks. + + Check 1: For n separated gaussians, for n requested prototypes, there should be 1 prototype per gaussian. """ + # Setup k = 3 nb_prototypes = 3 + gamma = 0.026 - x_train, y_train = get_Gaussian_Data(nb_samples_class=20) - x_test, y_test = get_Gaussian_Data(nb_samples_class=10) - # x_train, y_train = load_data('usps') - # x_test, y_test = load_data('usps.t') - # x_test = tf.random.shuffle(x_test) - # x_test = x_test[0:8] + x_train, y_train = get_gaussian_data(nb_classes=nb_prototypes, nb_samples_class=20) identity_projection = Projection( space_projection=lambda inputs, targets=None: inputs ) - kernel_type = "global" - - # Method initialization - method = MMDCritic( - cases_dataset=x_train, - labels_dataset=y_train, - k=k, - projection=identity_projection, - batch_size=32, - distance="euclidean", - nb_prototypes=nb_prototypes, - kernel_type=kernel_type, - gamma=gamma, - ) - - # Generate global explanation - prototype_indices, prototype_weights = method.get_global_prototypes() - - prototypes = tf.gather(x_train, prototype_indices) - prototype_labels = tf.gather(y_train, prototype_indices) - - # sort by label - prototype_labels_sorted = prototype_labels.numpy().argsort() + for kernel_type in ["local", "global"]: + for method_class in [ProtoGreedy, ProtoDash, MMDCritic]: + # compute general prototypes + method = method_class( + cases_dataset=x_train, + labels_dataset=y_train, + k=k, + projection=identity_projection, + batch_size=8, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + gamma=gamma, + ) + # extract prototypes + prototypes_dict = method.get_global_prototypes() + prototypes = prototypes_dict["prototypes"] + prototypes_indices = prototypes_dict["prototypes_indices"] + prototypes_labels = prototypes_dict["prototypes_labels"] + prototypes_weights = prototypes_dict["prototypes_weights"] + + # check 1 + assert len(tf.unique(prototypes_labels)[0]) == nb_prototypes + + +def test_prototypes_global_sanity_checks_2(): + """ + Test prototypes global explanations sanity checks. 
- prototypes = tf.gather(prototypes, prototype_labels_sorted) - prototype_indices = tf.gather(prototype_indices, prototype_labels_sorted) - prototype_labels = tf.gather(prototype_labels, prototype_labels_sorted) - prototype_weights = tf.gather(prototype_weights, prototype_labels_sorted) + Check 2: With local kernel_type, if there are more requested prototypes than classes, there should be at least 1 prototype per class. + """ + + # Setup + k = 3 + nb_prototypes = 5 + nb_classes = 3 - # Verifications - # Shape - assert prototype_indices.shape == (nb_prototypes,) - assert prototypes.shape == (nb_prototypes, x_train.shape[1]) - assert prototype_weights.shape == (nb_prototypes,) + gamma = 0.026 + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) - # at least 1 prototype per class is selected - assert tf.unique(prototype_labels)[0].shape == tf.unique(y_train)[0].shape + # randomize y_train + y_train = tf.random.shuffle(y_train) - # uniqueness test of prototypes - assert prototype_indices.shape == tf.unique(prototype_indices)[0].shape + identity_projection = Projection( + space_projection=lambda inputs, targets=None: inputs + ) - # Check if all indices are between 0 and x_train.shape[0]-1 - assert tf.reduce_all(tf.math.logical_and(prototype_indices >= 0, prototype_indices <= x_train.shape[0]-1)) + for method_class in [ProtoGreedy, ProtoDash, MMDCritic]: + # compute general prototypes + method = method_class( + cases_dataset=x_train, + labels_dataset=y_train, + k=k, + projection=identity_projection, + batch_size=8, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type="local", + gamma=gamma, + ) + # extract prototypes + prototypes_dict = method.get_global_prototypes() + prototypes = prototypes_dict["prototypes"] + prototypes_indices = prototypes_dict["prototypes_indices"] + prototypes_labels = prototypes_dict["prototypes_labels"] + prototypes_weights = prototypes_dict["prototypes_weights"] + + # check 2 + assert len(tf.unique(prototypes_labels)[0]) == nb_classes + + +def test_prototypes_local_explanations_with_projection(): + """ + Test prototypes local explanations with a projection. 
+ """ + # Setup + k = 3 + nb_prototypes = 5 + nb_classes = 3 + batch_size = 8 - # Generate local explanation - examples = method.explain(x_test) + gamma = 0.026 + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) + x_train_bis, _ = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) + x_train = tf.concat([x_train, x_train_bis], axis=1) # make a dataset with two dimensions - # # Visualize all prototypes - # plot(prototypes, prototype_weights, 'mmd_critic') + x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10) - # # Visualize local explanation - # plot_local_explanation(examples, x_test, 'mmd_critic') + projection = Projection( + space_projection=lambda inputs, targets=None: tf.reduce_mean(inputs, axis=1, keepdims=True) + ) -# test_proto_greedy_basic() -# test_proto_dash_basic() -# test_mmd_critic_basic() + for kernel_type in ["local", "global"]: + for method_class in [ProtoGreedy, ProtoDash, MMDCritic]: + # compute general prototypes + method = method_class( + cases_dataset=x_train, + labels_dataset=y_train, + k=k, + projection=projection, + case_returns=["examples", "distances", "labels", "indices"], + batch_size=batch_size, + distance="euclidean", + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, + gamma=gamma, + ) + # extract prototypes + prototypes_dict = method.get_global_prototypes() + prototypes = prototypes_dict["prototypes"] + prototypes_indices = prototypes_dict["prototypes_indices"] + prototypes_labels = prototypes_dict["prototypes_labels"] + prototypes_weights = prototypes_dict["prototypes_weights"] + + # check shapes + assert prototypes.shape == (nb_prototypes,) + x_train.shape[1:] + assert prototypes_indices.shape == (nb_prototypes,) + assert prototypes_labels.shape == (nb_prototypes,) + assert prototypes_weights.shape == (nb_prototypes,) + + # compute local explanations + outputs = method.explain(x_test) + examples = outputs["examples"] + distances = outputs["distances"] + labels = outputs["labels"] + indices = outputs["indices"] + + # check shapes + assert examples.shape == (x_test.shape[0], k) + x_train.shape[1:] + assert distances.shape == (x_test.shape[0], k) + assert labels.shape == (x_test.shape[0], k) + assert indices.shape == (x_test.shape[0], k, 2) + + # for each sample + for i in range(x_test.shape[0]): + # check first closest prototype label is the same as the sample label + assert tf.reduce_all(tf.equal(labels[i, 0], y_test[i])) + + for j in range(k): + # check indices in prototypes' indices + index = indices[i, j, 0] * batch_size + indices[i, j, 1] + assert index in prototypes_indices + + # check examples are in prototypes + assert tf.reduce_all(tf.equal(prototypes[prototypes_indices == index], examples[i, j])) + + # check indices are in the dataset + assert tf.reduce_all(tf.equal(x_train[index], examples[i, j])) + + # check labels + assert tf.reduce_all(tf.equal(labels[i, j], y_train[index])) + + # check distances + assert almost_equal( + distances[i, j], + tf.norm(tf.reduce_mean(x_train[index]) - tf.reduce_mean(x_test[i])), + epsilon=1e-5 + ) diff --git a/tests/utils.py b/tests/utils.py index 280d7a5f..000b7f01 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -255,14 +255,21 @@ def download_file(identifier: str, if chunk: file.write(chunk) -def get_Gaussian_Data(nb_samples_class=20): +def get_gaussian_data(nb_classes=3, nb_samples_class=20): tf.random.set_seed(42) - sigma = 0.05 - mu = [10, 20, 30] + sigma = 1 + mu = [10 * (id + 1) for id in range(nb_classes)] - X = 
tf.concat([tf.random.normal(shape=(nb_samples_class,1), mean=mu[i], stddev=sigma, dtype=tf.float32) for i in range(3)], axis=0)
-    y = tf.concat([tf.ones(shape=(nb_samples_class), dtype=tf.int32) * i for i in range(3)], axis=0)
+    X = tf.concat([
+        tf.random.normal(shape=(nb_samples_class,1), mean=mu[i], stddev=sigma, dtype=tf.float32)
+        for i in range(nb_classes)
+    ], axis=0)
+
+    y = tf.concat([
+        tf.ones(shape=(nb_samples_class), dtype=tf.int32) * i
+        for i in range(nb_classes)
+    ], axis=0)
 
     return(X, y)
diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py
index 83c81fa0..ea010933 100644
--- a/xplique/commons/tf_dataset_operations.py
+++ b/xplique/commons/tf_dataset_operations.py
@@ -197,11 +197,20 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor:
     Returns
     -------
     results
-
-    indices should be (n, k, 2)
+        A tensor with the extracted elements from the `dataset`.
+        The shape of the tensor is (n, k, ...), where ... is the shape of the elements in the `dataset`.
     """
     if dataset is None:
         return None
+
+    if len(indices.shape) != 3 or indices.shape[-1] != 2:
+        raise ValueError(
+            "Indices should have dimensions (n, k, 2), "
+            + "where n represent the number of inputs and k the number of corresponding examples. "
+            + "The index of each element is encoded by two values, "
+            + "the batch index and the index of the element in the batch. "
+            + f"Received {indices.shape}."
+        )
 
     example = next(iter(dataset))
     # (n, bs, ...)
diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py
index 5742190d..a4819952 100644
--- a/xplique/example_based/prototypes.py
+++ b/xplique/example_based/prototypes.py
@@ -9,6 +9,8 @@
 from ..types import Callable, Dict, List, Optional, Type, Union
 
+from ..commons.tf_dataset_operations import dataset_gather
+
 from .search_methods import BaseSearchMethod, ProtoGreedySearch, MMDCriticSearch, ProtoDashSearch
 from .projections import Projection
 from .base_example_method import BaseExampleMethod
@@ -38,11 +40,13 @@ class Prototypes(BaseExampleMethod, ABC):
         Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
         the case for your dataset, otherwise, examples will not make sense.
     k
-        The number of examples to retrieve.
+        For decision explanations, the number of closest prototypes to return. Used in `explain`.
+        Default is 1, which means that only the closest prototype is returned.
     projection
         Projection or Callable that project samples from the input space to the search space.
         The search space should be a space where distance make sense for the model.
-        It should not be `None`, otherwise,
+        The output of the projection should be a two dimensional tensor. (nb_samples, nb_features).
+        `projection` should not be `None`, otherwise,
         all examples could be computed only with the `search_method`.
 
         Example of Callable:
@@ -61,9 +65,23 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
         See `self.set_returns()` for detail.
     batch_size
         Number of sample treated simultaneously for projection and search.
-        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
-    search_method_kwargs
-        Parameters to be passed at the construction of the `search_method`.
+        Ignored if `tf.data.Dataset` are provided (these are supposed to be batched).
+    distance
+        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
+ Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: + "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number + yielding the corresponding p-norm." We also added 'cosine'. + nb_prototypes : int + For general explanations, the number of prototypes to select. + If `class_wise` is True, it will correspond to the number of prototypes per class. + kernel_type : str, optional + The kernel type. It can be 'local' or 'global', by default 'local'. + When it is local, the distances are calculated only within the classes. + kernel_fn : Callable, optional + Kernel function, by default the rbf kernel. + This function must only use TensorFlow operations. + gamma : float, optional + Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ def __init__( @@ -77,7 +95,7 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, - kernel_type: str = 'local', + kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None ): @@ -118,18 +136,47 @@ def __init__( def search_method_class(self) -> Type[ProtoGreedySearch]: raise NotImplementedError - def get_global_prototypes(self): + def get_global_prototypes(self) -> Dict[str, tf.Tensor]: """ - Return all the prototypes computed by the search method, - which consist of a global explanation of the dataset. - - Returns: - prototype_indices : Tensor - prototype indices. - prototype_weights : Tensor - prototype weights. + Provide the global prototypes computed at the initialization. + Prototypes and their labels are extracted from the indices. + The weights of the prototypes and their indices are also returned. + + Returns + ------- + prototypes_dict : Dict[str, tf.Tensor] + A dictionary with the following + - 'prototypes': The prototypes found by the method. + - 'prototype_labels': The labels of the prototypes. + - 'prototype_weights': The weights of the prototypes. + - 'prototype_indices': The indices of the prototypes. """ - return self.search_method.prototype_indices, self.search_method.prototype_weights + # (nb_prototypes,) + indices = self.search_method.prototypes_indices + batch_indices = indices // self.batch_size + elem_indices = indices % self.batch_size + + # (nb_prototypes, 2) + batch_elem_indices = tf.stack([batch_indices, elem_indices], axis=1) + + # (1, nb_prototypes, 2) + batch_elem_indices = tf.expand_dims(batch_elem_indices, axis=0) + + # (nb_prototypes, ...) + prototypes = dataset_gather(self.cases_dataset, batch_elem_indices)[0] + + # (nb_prototypes,) + labels = dataset_gather(self.labels_dataset, batch_elem_indices)[0] + + # (nb_prototypes,) + weights = self.search_method.prototypes_weights + + return { + "prototypes": prototypes, + "prototypes_labels": labels, + "prototypes_weights": weights, + "prototypes_indices": indices, + } class ProtoGreedy(Prototypes): @@ -145,93 +192,6 @@ def search_method_class(self) -> Type[ProtoGreedySearch]: class ProtoDash(Prototypes): - """ - Protodash method for searching prototypes. - - References: - .. [#] `Karthik S. Gurumoorthy, Amit Dhurandhar, Guillermo Cecchi, - "ProtoDash: Fast Interpretable Prototype Selection" - `_ - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. 
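A side note on the index bookkeeping used by `get_global_prototypes` above: prototype indices are flat positions in the cases dataset, while `dataset_gather` expects (batch index, element index) pairs. A minimal sketch of that round-trip, with made-up values:

```python
import tensorflow as tf

bs = 8  # batch size of the cases dataset
flat_indices = tf.constant([3, 11, 17])  # indices with respect to the whole dataset

batch_indices = flat_indices // bs  # which batch each element lives in -> [0, 1, 2]
elem_indices = flat_indices % bs    # position inside that batch        -> [3, 3, 1]

# (nb_prototypes, 2) pairs, expanded to (1, nb_prototypes, 2) for `dataset_gather`
batch_elem_indices = tf.expand_dims(tf.stack([batch_indices, elem_indices], axis=1), axis=0)

# the reverse mapping, as used when k-nn results are mapped back to the dataset
recovered = batch_elem_indices[0, :, 0] * bs + batch_elem_indices[0, :, 1]  # -> [3, 11, 17]
```

This decomposition with `//` and `%` is only valid because the batch size is constant across the dataset.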
- Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - The number of examples to retrieve. - search_returns - String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. - batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. - distance - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. - nb_prototypes : int - Number of prototypes to find. - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. - kernel_fn : Callable, optional - Kernel function, by default the rbf kernel. - This function must only use TensorFlow operations. - gamma : float, optional - Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. - use_optimizer : bool, optional - Flag indicating whether to use an optimizer for prototype selection, by default False. 
- """ - - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = None, - nb_prototypes: int = 1, - kernel_type: str = 'local', - kernel_fn: callable = None, - gamma: float = None, - use_optimizer: bool = False, - ): # pylint: disable=R0801 - self.use_optimizer = use_optimizer - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - distance=distance, - nb_prototypes=nb_prototypes, - kernel_type=kernel_type, - kernel_fn=kernel_fn, - gamma=gamma - ) - @property def search_method_class(self) -> Type[ProtoGreedySearch]: return ProtoDashSearch diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index 5bb7b78b..cbe78b40 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -117,8 +117,10 @@ class ProtoDashSearch(ProtoGreedySearch): This function must only use TensorFlow operations. gamma : float, optional Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. - use_optimizer : bool, optional - Flag indicating whether to use an optimizer for prototype selection, by default False. + exact_selection_weights_update : bool, optional + Wether to use an exact method to update selection weights, by default False. + Exact method is based on a scipy optimization, + while the other is based on a tensorflow inverse operation. 
""" def __init__( @@ -133,10 +135,10 @@ def __init__( kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None, - use_optimizer: bool = False, + exact_selection_weights_update: bool = False, ): # pylint: disable=R0801 - self.use_optimizer = use_optimizer + self.exact_selection_weights_update = exact_selection_weights_update super().__init__( cases_dataset=cases_dataset, @@ -187,7 +189,7 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti u = tf.expand_dims(tf.gather(self.col_means, selection_indices), axis=1) K = selection_selection_kernel - if self.use_optimizer: + if self.exact_selection_weights_update: initial_weights = tf.concat([selection_weights, [best_objective / tf.gather(self.diag, best_indice)]], axis=0) opt = Optimizer(initial_weights) selection_weights, _ = opt.optimize(u, K) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 39ed7159..5000e4d4 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -13,6 +13,23 @@ # from ..projections import Projection +def rbf_kernel(X, Y=None, gamma=None): + if Y is None: + Y = X + + if gamma is None: + gamma = 1.0 / tf.cast(tf.shape(X)[1], dtype=X.dtype) + + X = tf.expand_dims(X, axis=1) + Y = tf.expand_dims(Y, axis=0) + + pairwise_diff = X - Y + pairwise_sq_dist = tf.reduce_sum(tf.square(pairwise_diff), axis=-1) + kernel_matrix = tf.exp(-gamma * pairwise_sq_dist) + + return kernel_matrix + + class ProtoGreedySearch(BaseSearchMethod): """ ProtoGreedy method for searching prototypes. @@ -77,75 +94,57 @@ def __init__( self.labels_dataset = sanitize_dataset(labels_dataset, self.batch_size) - if kernel_type in ['local', 'global']: - self.kernel_type = kernel_type - else: + if kernel_type not in ['local', 'global']: raise AttributeError( "The kernel_type parameter is expected to be in" + " ['local', 'global'] ", +f"but {kernel_type} was received.", ) + + self.kernel_type = kernel_type + # set default kernel function (rbf_kernel) or raise error if kernel_fn is not callable if kernel_fn is None: # define rbf kernel function - def rbf_kernel(X, Y=None, gamma=None): - if Y is None: - Y = X - - if gamma is None: - gamma = 1.0 / tf.cast(tf.shape(X)[1], dtype=X.dtype) - - X = tf.expand_dims(X, axis=1) - Y = tf.expand_dims(Y, axis=0) - - pairwise_diff = X - Y - pairwise_sq_dist = tf.reduce_sum(tf.square(pairwise_diff), axis=-1) - kernel_matrix = tf.exp(-gamma * pairwise_sq_dist) - - return kernel_matrix - kernel_fn = lambda x, y: rbf_kernel(x,y,gamma) - - if hasattr(kernel_fn, "__call__"): - def custom_kernel_fn(x1, x2, y1=None, y2=None): - if self.kernel_type == 'global': - kernel_matrix = kernel_fn(x1,x2) - if isinstance(kernel_matrix, np.ndarray): - kernel_matrix = tf.convert_to_tensor(kernel_matrix) - else: - # In the case of a local kernel, calculations are limited to within the class. - # Across different classes, the kernel values are set to 0. 
- kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32) - y_intersect = np.intersect1d(y1, y2) - for i in range(y_intersect.shape[0]): - y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0] - y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] - sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) - kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix - kernel_matrix = tf.convert_to_tensor(kernel_matrix) - return kernel_matrix - - self.kernel_fn = custom_kernel_fn - else: + elif not hasattr(kernel_fn, "__call__"): raise AttributeError( "The kernel_fn parameter is expected to be a Callable", +f"but {kernel_fn} was received.", ) + # define custom kernel function depending on the kernel type + def custom_kernel_fn(x1, x2, y1=None, y2=None): + if self.kernel_type == 'global': + kernel_matrix = kernel_fn(x1,x2) + if isinstance(kernel_matrix, np.ndarray): + kernel_matrix = tf.convert_to_tensor(kernel_matrix) + else: + # In the case of a local kernel, calculations are limited to within the class. + # Across different classes, the kernel values are set to 0. + kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32) + y_intersect = np.intersect1d(y1, y2) + for i in range(y_intersect.shape[0]): + y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0] + y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] + sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) + kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix + kernel_matrix = tf.convert_to_tensor(kernel_matrix) + return kernel_matrix + + self.kernel_fn = custom_kernel_fn + + if distance is None: - def kernel_induced_distance(x1,x2): + def kernel_induced_distance(x1, x2): x1 = tf.expand_dims(x1, axis=0) x2 = tf.expand_dims(x2, axis=0) distance = tf.squeeze(tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2))) return distance - - self.distance_fn = lambda x1, x2: kernel_induced_distance(x1,x2) - + self.distance_fn = kernel_induced_distance elif hasattr(distance, "__call__"): self.distance_fn = distance - elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( - distance, int - ): + elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance(distance, int): self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance, axis=-1) else: raise AttributeError( @@ -165,7 +164,7 @@ def kernel_induced_distance(x1,x2): ): # elements should be tabular data assert len(batch_col_cases.shape) == 2,\ - "Expected prototypes' searches expects 2D data, (nb_samples, nb_features),"+\ + "Prototypes' searches expects 2D data, (nb_samples, nb_features),"+\ f"but got {batch_col_cases.shape}"+\ "Please verify your projection if you provided a custom one."+\ "If you use a splitted model, make sure the output of the first part of the model is flattened." 
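The `kernel_induced_distance` fallback defined above relies on the standard identity d(x, y) = sqrt(k(x, x) - 2 k(x, y) + k(y, y)), i.e. the distance between the images of x and y in the kernel feature space. A minimal numeric sketch, assuming an RBF kernel (for which k(x, x) = 1):

```python
import tensorflow as tf

def rbf(x, y, gamma=1.0):
    # scalar RBF kernel between two single samples
    return tf.exp(-gamma * tf.reduce_sum(tf.square(x - y)))

x = tf.constant([1.0, 2.0])
y = tf.constant([1.5, 2.5])

d_xy = tf.sqrt(rbf(x, x) - 2.0 * rbf(x, y) + rbf(y, y))  # ~0.887, positive for distinct points
d_xx = tf.sqrt(rbf(x, x) - 2.0 * rbf(x, x) + rbf(x, x))  # exactly 0: self-distance vanishes
```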
@@ -205,10 +204,10 @@ def kernel_induced_distance(x1,x2): self.nb_features = batch_col_cases.shape[1] # compute the prototypes in the latent space - self.prototype_indices, self.prototype_cases, self.prototype_labels, self.prototype_weights = self.find_prototypes(nb_prototypes) + self.prototypes_indices, self.prototypes, self.prototypes_labels, self.prototypes_weights = self.find_prototypes(nb_prototypes) self.knn = KNN( - cases_dataset=self.prototype_cases, + cases_dataset=self.prototypes, k=k, search_returns=search_returns, batch_size=batch_size, @@ -314,13 +313,13 @@ def find_prototypes(self, nb_prototypes): Returns ------- - prototype_indices : Tensor + prototypes_indices : Tensor The indices of the selected prototypes. - prototype_cases : Tensor + prototypes : Tensor The cases of the selected prototypes. - prototype_labels : Tensor + prototypes_labels : Tensor The labels of the selected prototypes. - prototype_weights : + prototypes_weights : The normalized weights of the selected prototypes. """ @@ -420,15 +419,15 @@ def find_prototypes(self, nb_prototypes): k += 1 - prototype_indices = selection_indices - prototype_cases = selection_cases - prototype_labels = selection_labels - prototype_weights = selection_weights + prototypes_indices = selection_indices + prototypes = selection_cases + prototypes_labels = selection_labels + prototypes_weights = selection_weights # Normalize the weights - prototype_weights = prototype_weights / tf.reduce_sum(prototype_weights) + prototypes_weights = prototypes_weights / tf.reduce_sum(prototypes_weights) - return prototype_indices, prototype_cases, prototype_labels, prototype_weights + return prototypes_indices, prototypes, prototypes_labels, prototypes_weights def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): """ @@ -454,7 +453,7 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): indices_wrt_prototypes = indices_wrt_prototypes[:, :, 0] * self.batch_size + indices_wrt_prototypes[:, :, 1] # get prototypes indices with respect to the dataset - indices = tf.gather(self.prototype_indices, indices_wrt_prototypes) + indices = tf.gather(self.prototypes_indices, indices_wrt_prototypes) # convert back to batch-element indices batch_indices, elem_indices = indices // self.batch_size, indices % self.batch_size From e0731785639daa08f4c974f5858e441a447fc757 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 10 Jul 2024 17:08:04 +0200 Subject: [PATCH 058/138] tf dataset operations: make them on cpu --- xplique/commons/tf_dataset_operations.py | 108 ++++++++++++++++++++--- 1 file changed, 98 insertions(+), 10 deletions(-) diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py index ea010933..783b7e57 100644 --- a/xplique/commons/tf_dataset_operations.py +++ b/xplique/commons/tf_dataset_operations.py @@ -156,10 +156,101 @@ def sanitize_dataset( return dataset +# def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: +# """ +# Imitation of `tf.gather` for `tf.data.Dataset`, +# it extract elements from `dataset` at the given indices. +# We could see it as returning the `indices` tensor +# where each index was replaced by the corresponding element in `dataset`. +# The aim is to use it in the `example_based` module to extract examples form the cases dataset. +# Hence, `indices` expect dimensions of (n, k, 2), +# where n represent the number of inputs and k the number of corresponding examples. 
+# Here indices for each element are encoded by two values, +# the batch index and the index of the element in the batch. + +# Example of application +# ``` +# >>> dataset = tf.data.Dataset.from_tensor_slices( +# ... tf.reshape(tf.range(20), (-1, 2, 2)) +# ... ).batch(3) # shape=(None, 2, 2) +# >>> indices = tf.constant([[[0, 0]], [[1, 0]]]) # shape=(2, 1, 2) +# >>> dataset_gather(dataset, indices) +# +# ``` + +# Parameters +# ---------- +# dataset +# Tensorflow dataset to verify or tensor to transform in `tf.data.Dataset` and verify. +# indices +# Tensor of indices of elements to extract from the `dataset`. +# `indices` should be of dimensions (n, k, 2), +# this is to match the format of indices in the `example_based` module. +# Indeed, n represent the number of inputs and k the number of corresponding examples. +# The index of each element is encoded by two values, +# the batch index and the index of the element in the batch. + +# Returns +# ------- +# results +# A tensor with the extracted elements from the `dataset`. +# The shape of the tensor is (n, k, ...), where ... is the shape of the elements in the `dataset`. +# """ +# if dataset is None: +# return None + +# if len(indices.shape) != 3 or indices.shape[-1] != 2: +# raise ValueError( +# "Indices should have dimensions (n, k, 2), " +# + "where n represent the number of inputs and k the number of corresponding examples. " +# + "The index of each element is encoded by two values, " +# + "the batch index and the index of the element in the batch. " +# + f"Received {indices.shape}." +# ) + +# example = next(iter(dataset)) +# # (n, bs, ...) +# with tf.device('/CPU:0'): +# if dataset.element_spec.dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']: +# results = tf.Variable( +# tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(-1, dtype=dataset.element_spec.dtype)), +# ) +# else: +# results = tf.Variable( +# tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)), +# ) + +# nb_results = product(indices.shape[:-1]) +# current_nb_results = 0 + +# for i, batch in enumerate(dataset): +# # check if the batch is interesting +# if not tf.reduce_any(indices[..., 0] == i): +# continue + +# # extract pertinent elements +# pertinent_indices_location = tf.where(indices[..., 0] == i) +# samples_index = tf.gather_nd(indices[..., 1], pertinent_indices_location) +# samples = tf.gather(batch, samples_index) + +# # put them at the right place in results +# for location, sample in zip(pertinent_indices_location, samples): +# results[location[0], location[1]].assign(sample) +# current_nb_results += 1 + +# # test if results are filled to break the loop +# if current_nb_results == nb_results: +# break +# return results + def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: """ Imitation of `tf.gather` for `tf.data.Dataset`, - it extract elements from `dataset` at the given indices. + it extracts elements from `dataset` at the given indices. We could see it as returning the `indices` tensor where each index was replaced by the corresponding element in `dataset`. The aim is to use it in the `example_based` module to extract examples form the cases dataset. @@ -175,7 +266,7 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: ... ).batch(3) # shape=(None, 2, 2) >>> indices = tf.constant([[[0, 0]], [[1, 0]]]) # shape=(2, 1, 2) >>> dataset_gather(dataset, indices) - tf.Tensor: ) example = next(iter(dataset)) - # (n, bs, ...) 
+ if dataset.element_spec.dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']: - results = tf.Variable( - tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(-1, dtype=dataset.element_spec.dtype)), - ) + results = tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(-1, dtype=dataset.element_spec.dtype)) else: - results = tf.Variable( - tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)), - ) + results = tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)) nb_results = product(indices.shape[:-1]) current_nb_results = 0 @@ -238,10 +325,11 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: # put them at the right place in results for location, sample in zip(pertinent_indices_location, samples): - results[location[0], location[1]].assign(sample) + results = tf.tensor_scatter_nd_update(results, [location], [sample]) current_nb_results += 1 # test if results are filled to break the loop if current_nb_results == nb_results: break + return results From 1e1b0c7aa00b2beab4dae9dd6d6b8897ba561a9a Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 10 Jul 2024 17:08:41 +0200 Subject: [PATCH 059/138] test prototypes: remove absurd tests --- tests/example_based/test_prototypes.py | 69 +++----------------------- 1 file changed, 7 insertions(+), 62 deletions(-) diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index b9fd43f8..fe1ae962 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -6,20 +6,12 @@ sys.path.append(os.getcwd()) -from math import prod, sqrt -import unittest -import time - -import numpy as np import tensorflow as tf -from xplique.commons import sanitize_dataset, are_dataset_first_elems_equal -from xplique.types import Union - from xplique.example_based import Prototypes, ProtoGreedy, ProtoDash, MMDCritic from xplique.example_based.projections import Projection, LatentSpaceProjection -from tests.utils import almost_equal, get_gaussian_data, load_data, plot, plot_local_explanation +from tests.utils import almost_equal, get_gaussian_data, generate_model def test_prototypes_global_explanations_basic(): @@ -30,10 +22,9 @@ def test_prototypes_global_explanations_basic(): k = 3 nb_prototypes = 5 nb_classes = 3 - gamma = 0.026 + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) - x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10) identity_projection = Projection( space_projection=lambda inputs, targets=None: inputs @@ -90,8 +81,8 @@ def test_prototypes_local_explanations_basic(): nb_prototypes = 5 nb_classes = 3 batch_size = 8 - gamma = 0.026 + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10) @@ -157,18 +148,18 @@ def test_prototypes_local_explanations_basic(): assert tf.reduce_all(tf.equal(labels[i, j], y_train[index])) -def test_prototypes_global_sanity_checks_1(): +def test_prototypes_global_sanity_check(): """ Test prototypes global explanations sanity checks. - Check 1: For n separated gaussians, for n requested prototypes, there should be 1 prototype per gaussian. + Check: For n separated gaussians, for n requested prototypes, there should be 1 prototype per gaussian. 
""" # Setup k = 3 nb_prototypes = 3 - gamma = 0.026 + x_train, y_train = get_gaussian_data(nb_classes=nb_prototypes, nb_samples_class=20) identity_projection = Projection( @@ -198,52 +189,6 @@ def test_prototypes_global_sanity_checks_1(): # check 1 assert len(tf.unique(prototypes_labels)[0]) == nb_prototypes - - -def test_prototypes_global_sanity_checks_2(): - """ - Test prototypes global explanations sanity checks. - - Check 2: With local kernel_type, if there are more requested prototypes than classes, there should be at least 1 prototype per class. - """ - - # Setup - k = 3 - nb_prototypes = 5 - nb_classes = 3 - - gamma = 0.026 - x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) - - # randomize y_train - y_train = tf.random.shuffle(y_train) - - identity_projection = Projection( - space_projection=lambda inputs, targets=None: inputs - ) - - for method_class in [ProtoGreedy, ProtoDash, MMDCritic]: - # compute general prototypes - method = method_class( - cases_dataset=x_train, - labels_dataset=y_train, - k=k, - projection=identity_projection, - batch_size=8, - distance="euclidean", - nb_prototypes=nb_prototypes, - kernel_type="local", - gamma=gamma, - ) - # extract prototypes - prototypes_dict = method.get_global_prototypes() - prototypes = prototypes_dict["prototypes"] - prototypes_indices = prototypes_dict["prototypes_indices"] - prototypes_labels = prototypes_dict["prototypes_labels"] - prototypes_weights = prototypes_dict["prototypes_weights"] - - # check 2 - assert len(tf.unique(prototypes_labels)[0]) == nb_classes def test_prototypes_local_explanations_with_projection(): @@ -255,8 +200,8 @@ def test_prototypes_local_explanations_with_projection(): nb_prototypes = 5 nb_classes = 3 batch_size = 8 - gamma = 0.026 + x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) x_train_bis, _ = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20) x_train = tf.concat([x_train, x_train_bis], axis=1) # make a dataset with two dimensions From edabcda999cfd704ef81244bda8ecbc1cbd562a8 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 10 Jul 2024 17:09:13 +0200 Subject: [PATCH 060/138] example based: linting --- xplique/example_based/base_example_method.py | 20 ++++++++++++------- .../search_methods/proto_greedy_search.py | 7 +++---- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 02cc1f4e..3fabe7c1 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -210,9 +210,15 @@ def _initialize_cases_dataset( cardinality = cases_dataset.cardinality().numpy() else: - # if case_dataset is not a `tf.data.Dataset`, then neither should the other. - assert not isinstance(labels_dataset, tf.data.Dataset) - assert not isinstance(targets_dataset, tf.data.Dataset) + # if cases_dataset is not a `tf.data.Dataset`, then neither should the other. + assert not isinstance(labels_dataset, tf.data.Dataset), ( + "if the cases_dataset is not a `tf.data.Dataset`, " + + "then neither should the labels_dataset." + ) + assert not isinstance(targets_dataset, tf.data.Dataset), ( + "if the cases_dataset is not a `tf.data.Dataset`, " + + "then neither should the targets_dataset." 
+ ) # set batch size and cardinality batch_size = min(batch_size, len(cases_dataset)) cardinality = math.ceil(len(cases_dataset) / batch_size) @@ -233,7 +239,7 @@ def _initialize_cases_dataset( # switch case on the number of columns of `cases_dataset` if len(self.cases_dataset.element_spec) == 2: assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." + "The second column of `cases_dataset` is assumed to be the labels. " + "Hence, `labels_dataset` should be empty." ) self.labels_dataset = self.cases_dataset.map(lambda x, y: y) @@ -241,11 +247,11 @@ def _initialize_cases_dataset( elif len(self.cases_dataset.element_spec) == 3: assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." + "The second column of `cases_dataset` is assumed to be the labels. " + "Hence, `labels_dataset` should be empty." ) assert self.targets_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels." + "The second column of `cases_dataset` is assumed to be the labels. " + "Hence, `labels_dataset` should be empty." ) self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) @@ -253,7 +259,7 @@ def _initialize_cases_dataset( self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) else: raise AttributeError( - "`cases_dataset` cannot possess more than 3 columns," + "`cases_dataset` cannot possess more than 3 columns, " + f"{len(self.cases_dataset.element_spec)} were detected." ) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 5000e4d4..d21ae9e1 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -126,15 +126,14 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None): y_intersect = np.intersect1d(y1, y2) for i in range(y_intersect.shape[0]): y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0] - y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] - sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) + y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] + sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix kernel_matrix = tf.convert_to_tensor(kernel_matrix) return kernel_matrix self.kernel_fn = custom_kernel_fn - - + if distance is None: def kernel_induced_distance(x1, x2): x1 = tf.expand_dims(x1, axis=0) From 5904a7700f0a24091ae864a092c22cc7a9edb32b Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 10 Jul 2024 17:17:35 +0200 Subject: [PATCH 061/138] prototypes: change constant for memory --- xplique/example_based/search_methods/proto_greedy_search.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index d21ae9e1..238ce8ad 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -73,7 +73,7 @@ class ProtoGreedySearch(BaseSearchMethod): # Avoid zero division during procedure. (the value is not important, as if the denominator is # zero, then the nominator will also be zero). 
- EPSILON = tf.constant(1e-6) + EPSILON = 1e-6 def __init__( self, From dee479377eaea5ced62fcb8811d4213a92b88386 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 11:49:22 +0200 Subject: [PATCH 062/138] test contrastive: add projection test --- tests/example_based/test_contrastive.py | 30 ++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index eab75ca7..b91c5814 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -5,7 +5,10 @@ import numpy as np from xplique.example_based import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss -from xplique.example_based.projections import Projection +from xplique.example_based.projections import Projection, LatentSpaceProjection + +from ..utils import generate_data, generate_model + def test_naive_counter_factuals(): """ @@ -281,3 +284,28 @@ def test_kleor(): assert tf.reduce_all( tf.abs(tf.where(inf_mask_examples, 0.0, examples) - tf.where(inf_mask_expected_examples, 0.0, expected_examples) ) < 1e-5) + + +def test_contrastive_with_projection(): + input_shapes = [(28, 28, 1), (32, 32, 3)] + nb_labels = 10 + nb_samples = 50 + + for input_shape in input_shapes: + features, labels = generate_data(input_shape, nb_labels, nb_samples) + model = generate_model(input_shape, nb_labels) + + projection = LatentSpaceProjection(model, latent_layer=-1) + + for contrastive_method_class in [NaiveCounterFactuals, LabelAwareCounterFactuals, + KLEORGlobalSim, KLEORSimMiss]: + contrastive_method = contrastive_method_class( + features, + labels, + k=1, + projection=projection, + case_returns=["examples", "indices", "distances", "include_inputs"], + batch_size=7 + ) + + contrastive_method(features, labels) \ No newline at end of file From 22849058377160553a4a99f231aa93b5c225e29e Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 11:49:44 +0200 Subject: [PATCH 063/138] contrastive: solve projection problems --- xplique/example_based/contrastive_examples.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index caf4a3fe..3b82c73b 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -99,7 +99,7 @@ def __init__( # initiate search_method self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, + cases_dataset=self.projected_cases_dataset, targets_dataset=self.targets_dataset, k=self.k, search_returns=self._search_returns, @@ -223,7 +223,7 @@ def __init__( # initiate search_method self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, + cases_dataset=self.projected_cases_dataset, targets_dataset=self.targets_dataset, k=self.k, search_returns=self._search_returns, @@ -250,7 +250,7 @@ def filter_fn(self, _, __, cf_targets, cases_targets) -> tf.Tensor: Parameters ---------- cf_targets - The one-hot enoding of the target class for the counterfactuals. + The one-hot encoding of the target class for the counterfactuals. cases_targets The one-hot encoding of the target class for the cases. 
""" @@ -384,7 +384,7 @@ def __init__( # initiate search_method self.search_method = self.search_method_class( - cases_dataset=self.cases_dataset, + cases_dataset=self.projected_cases_dataset, targets_dataset=self.targets_dataset, k=self.k, search_returns=self._search_returns, From 0356279ba8c71b095dd95a560d753ee03b91488a Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 15:21:41 +0200 Subject: [PATCH 064/138] semi-factual: allow to return nuns labels --- xplique/example_based/contrastive_examples.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 3b82c73b..e0dfce7d 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -411,6 +411,8 @@ def returns(self, returns: Union[List[str], str]): self._search_returns.append("nuns_indices") elif isinstance(self._returns, list) and ("nuns_indices" in self._returns): self._search_returns.append("nuns_indices") + elif isinstance(self._returns, list) and ("nuns_labels" in self._returns): + self._search_returns.append("nuns_indices") if isinstance(self._returns, list) and ("dist_to_nuns" in self._returns): self._search_returns.append("dist_to_nuns") @@ -449,6 +451,8 @@ def format_search_output( return_dict = super().format_search_output(search_output, inputs, targets) if "nuns" in self.returns: return_dict["nuns"] = dataset_gather(self.cases_dataset, search_output["nuns_indices"]) + if "nuns_labels" in self.returns: + return_dict["nuns_labels"] = dataset_gather(self.labels_dataset, search_output["nuns_indices"]) if "nuns_indices" in self.returns: return_dict["nuns_indices"] = search_output["nuns_indices"] if "dist_to_nuns" in self.returns: From ac812ae70ecca6f46efa8fcc53db25a9c2e68eaa Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 15:21:54 +0200 Subject: [PATCH 065/138] prototypes: linting --- xplique/example_based/prototypes.py | 1 - 1 file changed, 1 deletion(-) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index a4819952..4f00ff57 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -195,4 +195,3 @@ class ProtoDash(Prototypes): @property def search_method_class(self) -> Type[ProtoGreedySearch]: return ProtoDashSearch - From f8a377dc1236f3bd88eead39e288f8661b9b63fa Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 16:14:57 +0200 Subject: [PATCH 066/138] example based: clarify distances --- tests/example_based/test_cole.py | 2 +- xplique/example_based/cole.py | 7 +- xplique/example_based/contrastive_examples.py | 22 ++- xplique/example_based/prototypes.py | 7 +- .../example_based/search_methods/common.py | 144 ++++++++++++++++++ xplique/example_based/search_methods/kleor.py | 8 +- xplique/example_based/search_methods/knn.py | 30 +--- .../search_methods/mmd_critic_search.py | 7 +- .../search_methods/proto_dash_search.py | 7 +- .../search_methods/proto_greedy_search.py | 19 +-- xplique/example_based/similar_examples.py | 8 +- 11 files changed, 186 insertions(+), 75 deletions(-) create mode 100644 xplique/example_based/search_methods/common.py diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index e96abbb7..3864a71d 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -98,7 +98,7 @@ def test_cole_attribution(): targets_dataset=y_train, k=k, batch_size=2, - distance=np.inf, # infinity norm based 
distance
+        distance="cosine",  # cosine distance
         model=model,
         attribution_method=Saliency,
     )
diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py
index 296bcea2..47b21dbe 100644
--- a/xplique/example_based/cole.py
+++ b/xplique/example_based/cole.py
@@ -45,10 +45,9 @@ class Cole(SimilarExamples):
     k
         The number of examples to retrieve per input.
     distance
-        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
-        Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say:
-        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
-        yielding the corresponding p-norm."
+        Distance function for examples search. It can be an integer, a string in
+        {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable,
+        by default "euclidean".
     case_returns
         String or list of string with the elements to return in `self.explain()`.
         See the base class returns property for details.
diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py
index e0dfce7d..31afc794 100644
--- a/xplique/example_based/contrastive_examples.py
+++ b/xplique/example_based/contrastive_examples.py
@@ -66,11 +66,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
         Number of sample treated simultaneously for projection and search.
         Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
     distance
-        Distance for the FilterKNN search method.
-        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
-        Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say:
-        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
-        yielding the corresponding p-norm." We also added 'cosine'.
+        Distance function for examples search. It can be an integer, a string in
+        {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable,
+        by default "euclidean".
     """
     def __init__(
         self,
@@ -184,10 +182,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
         Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
     distance
         Distance for the FilterKNN search method.
-        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
-        Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say:
-        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
-        yielding the corresponding p-norm." We also added 'cosine'.
+        Distance function for examples search. It can be an integer, a string in
+        {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable,
+        by default "euclidean".
     """
     def __init__(
         self,
@@ -347,10 +344,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
         Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
     distance
         Distance for the FilterKNN search method.
-        Either a Callable, or a value supported by `tf.norm` `ord` parameter.
-        Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say:
-        "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number
-        yielding the corresponding p-norm." We also added 'cosine'.
+        Distance function for examples search. It can be an integer, a string in
+        {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable,
+        by default "euclidean".
""" _returns_possibilities = [ "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns" diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 4f00ff57..5f4017d4 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -67,10 +67,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (these are supposed to be batched). distance - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". nb_prototypes : int For general explanations, the number of prototypes to select. If `class_wise` is True, it will correspond to the number of prototypes per class. diff --git a/xplique/example_based/search_methods/common.py b/xplique/example_based/search_methods/common.py new file mode 100644 index 00000000..bac0bce6 --- /dev/null +++ b/xplique/example_based/search_methods/common.py @@ -0,0 +1,144 @@ +""" +Common functions for search methods. +""" + +import numpy as np +import tensorflow as tf + +from ...types import Callable, List, Union, Optional, Tuple + + +def _manhattan_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor: + """ + Compute the Manhattan distance between two vectors. + + Parameters + ---------- + x1 : tf.Tensor + First vector. + x2 : tf.Tensor + Second vector. + + Returns + ------- + tf.Tensor + Manhattan distance between the two vectors. + """ + return tf.reduce_sum(tf.abs(x1 - x2), axis=-1) + + +def _euclidean_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor: + """ + Compute the Euclidean distance between two vectors. + + Parameters + ---------- + x1 : tf.Tensor + First vector. + x2 : tf.Tensor + Second vector. + + Returns + ------- + tf.Tensor + Euclidean distance between the two vectors. + """ + return tf.norm(x1 - x2, axis=-1) + + +def _cosine_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor: + """ + Compute the cosine distance between two vectors. + + Parameters + ---------- + x1 : tf.Tensor + First vector. + x2 : tf.Tensor + Second vector. + + Returns + ------- + tf.Tensor + Cosine distance between the two vectors. + """ + return 1 - tf.reduce_sum(x1 * x2, axis=-1) / ( + tf.norm(x1, axis=-1) * tf.norm(x2, axis=-1) + ) + + +def _chebyshev_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor: + """ + Compute the Chebyshev distance between two vectors. + + Parameters + ---------- + x1 : tf.Tensor + First vector. + x2 : tf.Tensor + Second vector. + + Returns + ------- + tf.Tensor + Chebyshev distance between the two vectors. + """ + return tf.reduce_max(tf.abs(x1 - x2), axis=-1) + + +def _minkowski_distance(x1: tf.Tensor, x2: tf.Tensor, p: int) -> tf.Tensor: + """ + Compute the Minkowski distance between two vectors. + + Parameters + ---------- + x1 : tf.Tensor + First vector. + x2 : tf.Tensor + Second vector. + p : int + Order of the Minkowski distance. + + Returns + ------- + tf.Tensor + Minkowski distance between the two vectors. 
+ """ + return tf.norm(x1 - x2, ord=p, axis=-1) + + +_distances = { + "manhattan": _manhattan_distance, + "euclidean": _euclidean_distance, + "cosine": _cosine_distance, + "chebyshev": _chebyshev_distance, +} + + +def get_distance_function(distance: Union[int, str, Callable] = "euclidean",) -> Callable: + """ + Function to obtain a distance function from different inputs. + + Parameters + ---------- + distance : Union[int, str, Callable], optional + Distance function to use. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". + """ + # set distance function + if hasattr(distance, "__call__"): + return distance + elif isinstance(distance, str) and distance in _distances: + return _distances[distance] + elif isinstance(distance, int): + return lambda x1, x2: _minkowski_distance(x1, x2, p=distance) + elif distance == np.inf: + return lambda x1, x2: _chebyshev_distance(x1, x2) + else: + raise AttributeError( + "The distance parameter is expected to be either a Callable, " + + f" an integer, or a string in {_distances.keys()}. " + +f"But {type(distance)} was received." + ) + diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index 08baa293..57a238e0 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -42,11 +42,9 @@ class BaseKLEORSearch(FilterKNN, ABC): batch_size Number of sample treated simultaneously. distance - Distance function to use to measure similarity. - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". """ def __init__( self, diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index 70102a24..a1ae0cdd 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -10,6 +10,7 @@ from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod, ORDER +from .common import get_distance_function class BaseKNN(BaseSearchMethod): """ @@ -158,11 +159,9 @@ class KNN(BaseKNN): ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are the best. distance - Distance function to use to measure similarity. - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". 
""" def __init__( self, @@ -182,18 +181,7 @@ def __init__( ) # set distance function - if hasattr(distance, "__call__"): - self.distance_fn = distance - elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( - distance, int - ): - self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance, axis=-1) - else: - raise AttributeError( - "The distance parameter is expected to be either a Callable or in" - + " ['fro', 'euclidean', 1, 2, np.inf] " - +f"but {type(distance)} was received." - ) + self.distance_fn = get_distance_function(distance) @tf.function def _crossed_distances_fn(self, x1, x2) -> tf.Tensor: @@ -327,11 +315,9 @@ class FilterKNN(BaseKNN): ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are the best. distance - Distance function to use to measure similarity. - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". filter_fn A Callable that takes as inputs the inputs, their targets, the cases and their targets and returns a boolean mask of shape (n, m) where n is the number of inputs and m the number of cases. diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py index 7465fcfb..538ed277 100644 --- a/xplique/example_based/search_methods/mmd_critic_search.py +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -37,10 +37,9 @@ class MMDCriticSearch(ProtoGreedySearch): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. distance - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". nb_prototypes : int Number of prototypes to find. kernel_type : str, optional diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index cbe78b40..cb1d9097 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -103,10 +103,9 @@ class ProtoDashSearch(ProtoGreedySearch): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. distance - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". 
nb_prototypes : int Number of prototypes to find. kernel_type : str, optional diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 238ce8ad..0a6a9b28 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -9,6 +9,7 @@ from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod +from .common import get_distance_function from .knn import KNN # from ..projections import Projection @@ -55,10 +56,9 @@ class ProtoGreedySearch(BaseSearchMethod): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. distance - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". nb_prototypes : int Number of prototypes to find. kernel_type : str, optional @@ -134,6 +134,7 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None): self.kernel_fn = custom_kernel_fn + # set distance function if distance is None: def kernel_induced_distance(x1, x2): x1 = tf.expand_dims(x1, axis=0) @@ -141,16 +142,8 @@ def kernel_induced_distance(x1, x2): distance = tf.squeeze(tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2))) return distance self.distance_fn = kernel_induced_distance - elif hasattr(distance, "__call__"): - self.distance_fn = distance - elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance(distance, int): - self.distance_fn = lambda x1, x2: tf.norm(x1 - x2, ord=distance, axis=-1) else: - raise AttributeError( - "The distance parameter is expected to be either a Callable or in" - + " ['fro', 'euclidean', 'cosine', 1, 2, np.inf] ", - +f"but {distance} was received.", - ) + self.distance_fn = get_distance_function(distance) # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset. # We take advantage of the symmetry of this matrix to traverse only its lower triangle. diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index ea16f261..5c785322 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -62,11 +62,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). distance - Distance for the knn search method. - Either a Callable, or a value supported by `tf.norm` `ord` parameter. - Their documentation (https://www.tensorflow.org/api_docs/python/tf/norm) say: - "Supported values are 'fro', 'euclidean', 1, 2, np.inf and any positive real number - yielding the corresponding p-norm." We also added 'cosine'. + Distance for the knn search method. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + by default "euclidean". 
""" def __init__( self, From be1ce6d786c09e299f154fd477ca9d5ce90fbdb9 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 16:35:19 +0200 Subject: [PATCH 067/138] example based: clarify distances --- xplique/example_based/search_methods/knn.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index a1ae0cdd..f6dd7076 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -343,18 +343,7 @@ def __init__( ) # set distance function - if hasattr(distance, "__call__"): - self.distance_fn = distance - elif distance in ["fro", "euclidean", 1, 2, np.inf] or isinstance( - distance, int - ): - self.distance_fn = lambda x1, x2, m: tf.where(m, tf.norm(x1 - x2, ord=distance, axis=-1), self.fill_value) - else: - raise AttributeError( - "The distance parameter is expected to be either a Callable or in" - + " ['fro', 'euclidean', 1, 2, np.inf] " - +f"but {type(distance)} was received." - ) + self.distance_fn = get_distance_function(distance) # TODO: Assertion on the function signature if filter_fn is None: From ae69b785d0aa3c0942a71ccdf327cadd414f2a19 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Thu, 11 Jul 2024 17:01:16 +0200 Subject: [PATCH 068/138] example based: clarify distances --- xplique/example_based/search_methods/knn.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index f6dd7076..18d3fd2b 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -59,7 +59,7 @@ def __init__( @abstractmethod def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> Tuple[tf.Tensor, tf.Tensor]: """ - Compute the k-neareast neighbors to each tensor of `inputs` in `self.cases_dataset`. + Compute the k-nearest neighbors to each tensor of `inputs` in `self.cases_dataset`. Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. 
Parameters @@ -244,7 +244,7 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf """ nb_inputs = tf.shape(inputs)[0] - # initialiaze + # initialize # (n, k, 2) best_indices = tf.Variable(tf.fill((nb_inputs, self.k, 2), -1)) # (n, k) @@ -343,7 +343,11 @@ def __init__( ) # set distance function - self.distance_fn = get_distance_function(distance) + if hasattr(distance, "__call__"): + self.distance_fn = distance + else: + self.distance_fn = lambda x1, x2, m:\ + tf.where(m, get_distance_function(distance)(x1, x2), self.fill_value) # TODO: Assertion on the function signature if filter_fn is None: From 90cb2cf035ba667716e8a6a59c511fff305e6bb4 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Tue, 16 Jul 2024 18:29:02 +0200 Subject: [PATCH 069/138] example base projection: support pytorch --- tests/example_based/test_projections.py | 32 ++-- .../example_based/projections/attributions.py | 41 +---- xplique/example_based/projections/base.py | 72 ++++++++- xplique/example_based/projections/commons.py | 146 ++++++++++++++++-- xplique/example_based/projections/hadamard.py | 12 +- .../example_based/projections/latent_space.py | 16 +- 6 files changed, 245 insertions(+), 74 deletions(-) diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py index f624b68a..d7b7fab8 100644 --- a/tests/example_based/test_projections.py +++ b/tests/example_based/test_projections.py @@ -44,27 +44,27 @@ def _generate_model(input_shape=(32, 32, 3), output_shape=2): return model -def test_model_splitting_latent_layer(): - """We should target the right layer using either int, string or default procedure""" - tf.keras.backend.clear_session() +# def test_model_splitting_latent_layer(): +# """We should target the right layer using either int, string or default procedure""" +# tf.keras.backend.clear_session() - model = _generate_model() +# model = _generate_model() - first_conv_layer = model.get_layer("conv2d_1") - last_conv_layer = model.get_layer("conv2d_2") - flatten_layer = model.get_layer("flatten") +# first_conv_layer = model.get_layer("conv2d_1") +# last_conv_layer = model.get_layer("conv2d_2") +# flatten_layer = model.get_layer("flatten") - # last_conv should be recognized - _, _, latent_layer = model_splitting(model, latent_layer="last_conv", return_layer=True) - assert latent_layer == last_conv_layer +# # last_conv should be recognized +# _, _, latent_layer = model_splitting(model, latent_layer="last_conv", return_layer=True) +# assert latent_layer == last_conv_layer - # target the first conv layer - _, _, latent_layer = model_splitting(model, latent_layer=0, return_layer=True) - assert latent_layer == first_conv_layer +# # target the first conv layer +# _, _, latent_layer = model_splitting(model, latent_layer=0, return_layer=True) +# assert latent_layer == first_conv_layer - # target a random flatten layer - _, _, latent_layer = model_splitting(model, latent_layer="flatten", return_layer=True) - assert latent_layer == flatten_layer +# # target a random flatten layer +# _, _, latent_layer = model_splitting(model, latent_layer="flatten", return_layer=True) +# assert latent_layer == flatten_layer def test_simple_projection_mapping(): diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index 2ebf37c8..0cf5c2af 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -76,7 +76,7 @@ def __init__( get_weights = 
self.method(self.predictor, **attribution_kwargs) # set methods - super().__init__(get_weights, space_projection) + super().__init__(get_weights, space_projection, mappable=False) def get_input_weights( self, @@ -125,42 +125,3 @@ def get_input_weights( false_fn=resize_fn, ) return input_weights - - def project_dataset( - self, - cases_dataset: tf.data.Dataset, - targets_dataset: tf.data.Dataset, - ) -> tf.data.Dataset: - """ - Apply the projection to a dataset without `Dataset.map`. - Because attribution methods create a `tf.data.Dataset` for batching, - however doing so inside a `Dataset.map` is not recommended. - - Parameters - ---------- - cases_dataset - Dataset of samples to be projected. - targets_dataset - Dataset of targets for the samples. - - Returns - ------- - projected_dataset - The projected dataset. - """ - # TODO see if a warning is needed - - projected_cases_dataset = [] - batch_size = None - - # iteratively project the dataset - for inputs, targets in tf.data.Dataset.zip((cases_dataset, targets_dataset)): - if batch_size is None: - batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this - projected_cases_dataset.append(self.project(inputs, targets)) - - projected_cases_dataset = tf.concat(projected_cases_dataset, axis=0) - projected_cases_dataset = tf.data.Dataset.from_tensor_slices(projected_cases_dataset) - projected_cases_dataset = projected_cases_dataset.batch(batch_size) - - return projected_cases_dataset \ No newline at end of file diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 5efb3d27..592a3d1d 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -54,17 +54,23 @@ def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray), An example of projected space is the latent space of a model. See `LatentSpaceProjection` device Device to use for the projection, if None, use the default device. + mappable + If True, the projection can be applied to a dataset through `Dataset.map`. + Otherwise, the dataset projection will be done through a loop. """ def __init__(self, get_weights: Optional[Union[Callable, tf.Tensor, np.ndarray]] = None, space_projection: Optional[Callable] = None, - device: Optional[str] = None): + device: Optional[str] = None, + mappable: bool = True,): assert get_weights is not None or space_projection is not None, ( "At least one of `get_weights` and `space_projection`" + "should not be `None`." ) + self.mappable = mappable + # set get_weights if get_weights is None: # no weights @@ -186,6 +192,31 @@ def project_dataset( """ Apply the projection to a dataset through `Dataset.map` + Parameters + ---------- + cases_dataset + Dataset of samples to be projected. + targets_dataset + Dataset of targets for the samples. + + Returns + ------- + projected_dataset + The projected dataset. 
+ """ + if self.mappable: + return self._map_project_dataset(cases_dataset, targets_dataset) + else: + return self._loop_project_dataset(cases_dataset, targets_dataset) + + def _map_project_dataset( + self, + cases_dataset: tf.data.Dataset, + targets_dataset: Optional[tf.data.Dataset] = None, + ) -> Optional[tf.data.Dataset]: + """ + Apply the projection to a dataset through `Dataset.map` + Parameters ---------- cases_dataset @@ -210,3 +241,42 @@ def project_dataset( ) return projected_cases_dataset + + def _loop_project_dataset( + self, + cases_dataset: tf.data.Dataset, + targets_dataset: tf.data.Dataset, + ) -> tf.data.Dataset: + """ + Apply the projection to a dataset without `Dataset.map`. + Because attribution methods create a `tf.data.Dataset` for batching, + however doing so inside a `Dataset.map` is not recommended. + + Parameters + ---------- + cases_dataset + Dataset of samples to be projected. + targets_dataset + Dataset of targets for the samples. + + Returns + ------- + projected_dataset + The projected dataset. + """ + # TODO see if a warning is needed + + projected_cases_dataset = [] + batch_size = None + + # iteratively project the dataset + for inputs, targets in tf.data.Dataset.zip((cases_dataset, targets_dataset)): + if batch_size is None: + batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this + projected_cases_dataset.append(self.project(inputs, targets)) + + projected_cases_dataset = tf.concat(projected_cases_dataset, axis=0) + projected_cases_dataset = tf.data.Dataset.from_tensor_slices(projected_cases_dataset) + projected_cases_dataset = projected_cases_dataset.batch(batch_size) + + return projected_cases_dataset diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index c8747592..42260f75 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -1,6 +1,7 @@ """ Commons for projections """ +import warnings import tensorflow as tf @@ -8,10 +9,55 @@ from ...types import Callable, Union, Optional, Tuple -def model_splitting(model: tf.keras.Model, +def model_splitting(model: Union[tf.keras.Model, 'torch.nn.Module'], latent_layer: Union[str, int], - return_layer: bool = False, - ) -> Tuple[Callable, Callable, Optional[tf.keras.layers.Layer]]: + device: Union["torch.device", str] = None, + ) -> Tuple[Union[tf.keras.Model, 'torch.nn.Module'], Union[tf.keras.Model, 'torch.nn.Module']]: + """ + Split the model into two parts, before and after the `latent_layer`. + The parts will respectively be called `features_extractor` and `predictor`. + + Parameters + ---------- + model + Model to be split. + latent_layer + Layer used to split the `model`. + + Layer to target for the outputs (e.g logits or after softmax). + If an `int` is provided it will be interpreted as a layer index. + If a `string` is provided it will look for the layer name. + + To separate after the last convolution, `"last_conv"` can be used. + Otherwise, `-1` could be used for the last layer before softmax. + device + Device to use for the projection, if None, use the default device. + Only used for PyTorch models. Ignored for TensorFlow models. + + Returns + ------- + features_extractor + Model used to project the inputs. + predictor + Model used to compute the attributions. + latent_layer + Layer used to split the `model`. 
+ """ + if isinstance(model, tf.keras.Model): + return _tf_model_splitting(model, latent_layer) + else: + try: + return _torch_model_splitting(model, latent_layer, device) + except ImportError as exc: + raise AttributeError( + f"Unknown model type, should be either `tf.keras.Model` or `torch.nn.Module`."\ + +f"But got {type(model)} instead.") + + + +def _tf_model_splitting(model: tf.keras.Model, + latent_layer: Union[str, int], + ) -> Tuple[tf.keras.Model, tf.keras.Model]: """ Split the model into two parts, before and after the `latent_layer`. The parts will respectively be called `features_extractor` and `predictor`. @@ -29,8 +75,6 @@ def model_splitting(model: tf.keras.Model, To separate after the last convolution, `"last_conv"` can be used. Otherwise, `-1` could be used for the last layer before softmax. - return_layer - If True, return the latent layer found. Returns ------- @@ -51,9 +95,6 @@ def model_splitting(model: tf.keras.Model, features_extractor = tf.keras.Model( model.input, latent_layer.output, name="features_extractor" ) - # predictor = tf.keras.Model( - # latent_layer.output, model.output, name="predictor" - # ) second_input = tf.keras.Input(shape=latent_layer.output_shape[1:]) # Reconstruct the second part of the model @@ -72,6 +113,89 @@ def model_splitting(model: tf.keras.Model, name="predictor" ) - if return_layer: - return features_extractor, predictor, latent_layer - return features_extractor, predictor \ No newline at end of file + return features_extractor, predictor + + +def _torch_model_splitting(model: 'torch.nn.Module', + latent_layer: Union[str, int], + device: Union["torch.device", str] = None, + ) -> Tuple['torch.nn.Module', 'torch.nn.Module']: + """ + Split the model into two parts, before and after the `latent_layer`. + The parts will respectively be called `features_extractor` and `predictor`. + + Parameters + ---------- + model + Model to be split. + latent_layer + Layer used to split the `model`. + + Layer to target for the outputs (e.g logits or after softmax). + If an `int` is provided it will be interpreted as a layer index. + If a `string` is provided it will look for the layer name. + + To separate after the last convolution, `"last_conv"` can be used. + Otherwise, `-1` could be used for the last layer before softmax. + Device to use for the projection, if None, use the default device. + + Returns + ------- + features_extractor + Model used to project the inputs. + predictor + Model used to compute the attributions. + latent_layer + Layer used to split the `model`. + """ + import torch + import torch.nn as nn + from ...wrappers.pytorch import PyTorchWrapper + + warnings.warn("Automatically splitting the provided PyTorch model into two parts. "\ + +"This splitting is based on `model.named_children()`. 
"\ + +"If the model cannot be reconstructed via sub-modules, errors are to be expected.") + + if device is None: + warnings.warn("No device provided for the projection, using 'cuda' if available, else 'cpu'.") + device = "cuda" if torch.cuda.is_available() else "cpu" + + first_model = nn.Sequential() + second_model = nn.Sequential() + split_flag = False + + if isinstance(latent_layer, int) and latent_layer < 0: + latent_layer = len(list(model.children())) + latent_layer + + for layer_index, (name, module) in enumerate(model.named_children()): + if name == latent_layer or layer_index == latent_layer: + split_flag = True + + if not split_flag: + first_model.add_module(name, module) + else: + second_model.add_module(name, module) + + # Define forward function for the first model + def first_model_forward(x): + for module in first_model: + x = module(x) + return x + + # Define forward function for the second model + def second_model_forward(x): + for module in second_model: + x = module(x) + return x + + # Set the forward functions for the models + first_model.forward = first_model_forward + second_model.forward = second_model_forward + + # Wrap models to obtain tensorflow ones + first_model.eval() + wrapped_first_model = PyTorchWrapper(first_model, device=device) + second_model.eval() + wrapped_second_model = PyTorchWrapper(second_model, device=device) + + return wrapped_first_model, wrapped_second_model \ No newline at end of file diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index 87234883..e4b3d106 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -47,6 +47,9 @@ class HadamardProjection(Projection): Otherwise, `-1` could be used for the last layer before softmax. operator Operator to use to compute the explanation, if None use standard predictions. + device + Device to use for the projection, if None, use the default device. + Only used for PyTorch models. Ignored for TensorFlow models. """ def __init__( @@ -54,6 +57,7 @@ def __init__( model: Callable, latent_layer: Optional[Union[str, int]] = None, operator: Optional[OperatorSignature] = None, + device: Union["torch.device", str] = None, ): if latent_layer is None: # no split @@ -62,14 +66,18 @@ def __init__( self.predictor = model else: # split the model if a latent_layer is provided - space_projection, self.predictor = model_splitting(model, latent_layer) + space_projection, self.predictor = model_splitting(model, + latent_layer=latent_layer, + device=device) # the weights are given be the gradient of the operator gradients, _ = get_gradient_functions(self.predictor, operator) get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets) # TODO check usage of gpu + mappable = isinstance(model, tf.keras.Model) + # set methods - super().__init__(get_weights, space_projection) + super().__init__(get_weights, space_projection, mappable=mappable) def get_input_weights( self, diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py index 3bfc1d9f..dfa08561 100644 --- a/xplique/example_based/projections/latent_space.py +++ b/xplique/example_based/projections/latent_space.py @@ -29,9 +29,17 @@ class LatentSpaceProjection(Projection): To separate after the last convolution, `"last_conv"` can be used. Otherwise, `-1` could be used for the last layer before softmax. + device + Device to use for the projection, if None, use the default device. 
+        Only used for PyTorch models. Ignored for TensorFlow models.
     """
 
-    def __init__(self, model: Callable, latent_layer: Union[str, int] = -1):
-        features_extractor, _ = model_splitting(model, latent_layer)
-        super().__init__(space_projection=features_extractor)
-        # TODO test if gpu is used for the projection
+    def __init__(self,
+                 model: Union[tf.keras.Model, 'torch.nn.Module'],
+                 latent_layer: Union[str, int] = -1,
+                 device: Union["torch.device", str] = None,
+                 ):
+        features_extractor, _ = model_splitting(model, latent_layer=latent_layer, device=device)
+
+        mappable = isinstance(model, tf.keras.Model)
+        super().__init__(space_projection=features_extractor, mappable=mappable)

From 5f88f74c63d344c8db7329ac5b254cd85e651755 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Wed, 17 Jul 2024 10:48:30 +0200
Subject: [PATCH 070/138] projections: add initialization from splitted model and target free operator

---
 xplique/example_based/projections/hadamard.py | 85 ++++++++++++++++++-
 .../example_based/projections/latent_space.py | 24 ++++++
 2 files changed, 107 insertions(+), 2 deletions(-)

diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py
index e4b3d106..884c0217 100644
--- a/xplique/example_based/projections/hadamard.py
+++ b/xplique/example_based/projections/hadamard.py
@@ -14,6 +14,43 @@ from .commons import model_splitting
 
+def _target_free_classification_operator(model: Callable,
+                                         inputs: tf.Tensor,
+                                         targets: Optional[tf.Tensor]) -> tf.Tensor: # TODO: test
+    """
+    Compute prediction scores, only for the label class, for a batch of samples.
+    It has the same behavior as the `Tasks.CLASSIFICATION` operator,
+    but computes the targets on the fly when they are not provided.
+    Targets are a mask with 1 on the predicted class and 0 elsewhere.
+    This operator should only be used for classification tasks.
+
+    Parameters
+    ----------
+    model
+        Model used for computing predictions.
+    inputs
+        Input samples to be explained.
+    targets
+        One-hot encoded labels or regression target (e.g {+1, -1}), one for each sample.
+
+    Returns
+    -------
+    scores
+        Prediction scores computed, only for the label class.
+    """
+    predictions = model(inputs)
+
+    # `targets is None` is known when the function is traced,
+    # hence a Python conditional is sufficient (and graph-compatible):
+    # fall back to a one-hot mask built from the predicted class
+    if targets is None:
+        targets = tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1])
+
+    scores = tf.reduce_sum(predictions * targets, axis=-1)
+    return scores
+
+
 class HadamardProjection(Projection):
     """
     Projection built on the latent space and the gradient.
@@ -45,7 +82,7 @@ class HadamardProjection(Projection):
         The method as described in the paper applies the separation on the last convolutional
         layer. To do so, the `"last_conv"` parameter will extract it.
         Otherwise, `-1` could be used for the last layer before softmax.
-    operator
+    operator # TODO: make a larger description.
         Operator to use to compute the explanation, if None use standard predictions.
     device
         Device to use for the projection, if None, use the default device.
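For review purposes, here is a minimal eager-mode sketch of what the target-free operator above computes; the toy model, shapes, and values are assumptions chosen for illustration, not part of the patch:

```python
import tensorflow as tf

# toy 3-class classifier standing in for `self.predictor` (illustrative only)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(3, activation="softmax"),
])

inputs = tf.random.normal((2, 4))
predictions = model(inputs)

# without targets, the operator builds a one-hot mask from the predicted class,
# so the returned score is simply the top-class probability of each sample
targets = tf.one_hot(tf.argmax(predictions, axis=-1), depth=predictions.shape[-1])
scores = tf.reduce_sum(predictions * targets, axis=-1)

assert tf.reduce_all(scores == tf.reduce_max(predictions, axis=-1))
```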
@@ -70,7 +107,12 @@ def __init__( latent_layer=latent_layer, device=device) - # the weights are given be the gradient of the operator + if operator is None: + warnings.warn("No operator provided, using standard classification operator."\ + + "For non-classification tasks, please specify an operator.") + operator = _target_free_classification_operator + + # the weights are given by the gradient of the operator based on the predictor gradients, _ = get_gradient_functions(self.predictor, operator) get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets) # TODO check usage of gpu @@ -79,6 +121,45 @@ def __init__( # set methods super().__init__(get_weights, space_projection, mappable=mappable) + @classmethod + def from_splitted_model(cls, + features_extractor: tf.keras.Model, + predictor: tf.keras.Model, + operator: Optional[OperatorSignature] = None, + mappable=True): # TODO: test + """ + Create LatentSpaceProjection from a splitted model. + The projection will project the inputs in the latent space, + which corresponds to the output of the `features_extractor`. + + Parameters + ---------- + features_extractor + The feature extraction part of the model. Mapping inputs to the latent space. + predictor + The prediction part of the model. Mapping the latent space to the outputs. + operator + Operator to use to compute the explanation, if None use standard predictions. + mappable + If the model can be placed in a `tf.data.Dataset` mapping function. + It is not the case for wrapped PyTorch models. + If you encounter errors in the `project_dataset` method, you can set it to `False`. + """ + assert isinstance(features_extractor, tf.keras.Model),\ + f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ + f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." + assert isinstance(predictor, tf.keras.Model),\ + f"predictor should be a tf.keras.Model, got {type(predictor)}"\ + f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." + + # the weights are given by the gradient of the operator based on the predictor + gradients, _ = get_gradient_functions(predictor, operator) + get_weights = lambda inputs, targets: gradients(predictor, inputs, targets) # TODO check usage of gpu + + super().__init__(get_weights=get_weights, + space_projection=features_extractor, + mappable=mappable) + def get_input_weights( self, inputs: Union[tf.Tensor, np.ndarray], diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py index dfa08561..a2d7ca6d 100644 --- a/xplique/example_based/projections/latent_space.py +++ b/xplique/example_based/projections/latent_space.py @@ -43,3 +43,27 @@ def __init__(self, mappable = isinstance(model, tf.keras.Model) super().__init__(space_projection=features_extractor, mappable=mappable) + + @classmethod + def from_splitted_model(cls, + features_extractor: tf.keras.Model, + mappable=True): # TODO: test + """ + Create LatentSpaceProjection from a splitted model. + The projection will project the inputs in the latent space, + which corresponds to the output of the `features_extractor`. + + Parameters + ---------- + features_extractor + The feature extraction part of the model. Mapping inputs to the latent space. + mappable + If the model can be placed in a `tf.data.Dataset` mapping function. + It is not the case for wrapped PyTorch models. + If you encounter errors in the `project_dataset` method, you can set it to `False`. 
+ """ + assert isinstance(features_extractor, tf.keras.Model),\ + f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ + f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." + super().__init__(space_projection=features_extractor, mappable=mappable) + From 25d533173920088fc8ba2a1910c794d75f22dba4 Mon Sep 17 00:00:00 2001 From: Mohamed Chafik Bakey Date: Thu, 18 Jul 2024 18:01:11 +0200 Subject: [PATCH 071/138] add the documentation for the prototypes search methods fix up --- ...search_methods.md => api_example_based.md} | 0 docs/api/example_based/projections.md | 0 .../prototypes/api_prototypes.md | 88 +++++++++++++++++++ .../mmd_critic.md} | 2 +- .../proto_dash.md} | 0 .../proto_greedy.md} | 0 docs/api/example_based/search_method_md.md | 0 .../prototypes_search_methods/prototypes.md | 69 --------------- mkdocs.yml | 11 ++- 9 files changed, 94 insertions(+), 76 deletions(-) rename docs/api/example_based/{search_methods/search_methods.md => api_example_based.md} (100%) create mode 100644 docs/api/example_based/projections.md create mode 100644 docs/api/example_based/prototypes/api_prototypes.md rename docs/api/example_based/{search_methods/prototypes_search_methods/mmd_critic_search.md => prototypes/mmd_critic.md} (52%) rename docs/api/example_based/{search_methods/prototypes_search_methods/proto_dash_search.md => prototypes/proto_dash.md} (100%) rename docs/api/example_based/{search_methods/prototypes_search_methods/proto_greedy_search.md => prototypes/proto_greedy.md} (100%) create mode 100644 docs/api/example_based/search_method_md.md delete mode 100644 docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md diff --git a/docs/api/example_based/search_methods/search_methods.md b/docs/api/example_based/api_example_based.md similarity index 100% rename from docs/api/example_based/search_methods/search_methods.md rename to docs/api/example_based/api_example_based.md diff --git a/docs/api/example_based/projections.md b/docs/api/example_based/projections.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md new file mode 100644 index 00000000..dc23bc9b --- /dev/null +++ b/docs/api/example_based/prototypes/api_prototypes.md @@ -0,0 +1,88 @@ +# Prototypes +Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class. Three classes of prototype-based methods are found in the literature ([Poché et al., 2023](https://hal.science/hal-04117520/document)): [Prototypes for Data-Centric Interpretability](#prototypes-for-data-centric-interpretability), [Prototypes for Post-hoc Interpretability](#prototypes-for-post-hoc-interpretability) and Prototype-Based Models Interpretable by Design. This library focuses on first two classes. + +## Prototypes for Data-Centric Interpretability +In this class, prototypes are selected without relying on the model and provide an overview of +the dataset. As mentioned in ([Poché et al., 2023](https://hal.science/hal-04117520/document)), we found `clustering methods`, `set cover methods` and `data summarization methods`. 
This library focuses on `data summarization methods`, also known as `set cover problem methods`, which can be treated in two ways [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf):
+
+- **Summarization with knapsack constraint**:
+consists in finding a subset of prototypes $\mathcal{P}$ that maximizes the coverage set function $F(\mathcal{P})$ under the constraint that its selection cost $C(\mathcal{P})$ (e.g., the number of selected prototypes $|\mathcal{P}|$) should be less than a given budget.
+
+- **Summarization with covering constraint**:
+consists in finding a low-cost subset under the constraint that it should cover all the data. In both cases, submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to produce a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf).
+
+This library implements three methods from **Summarization with knapsack constraint**: `MMDCritic`, `ProtoGreedy` and `ProtoDash`.
+[Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf) proposed the `MMDCritic` method, which uses a set function based on the Maximum Mean Discrepancy [(MMD)](#what-is-mmd). They added diagonal dominance conditions on the kernel to ensure monotonicity and submodularity. They solve the summarization with knapsack constraint problem to find both prototypes and criticisms. First, the numbers of prototypes and criticisms to be found, respectively $m_p$ and $m_c$, are selected. Second, to find prototypes, a greedy algorithm is used to maximize $F(\mathcal{P})$ s.t. $|\mathcal{P}| \le m_p$ where $F(\mathcal{P})$ is defined as:
+\begin{equation}
+    F(\mathcal{P})=\frac{2}{|\mathcal{P}|\cdot n}\sum_{i,j=1}^{|\mathcal{P}|,n}\kappa(p_i,x_j)-\frac{1}{|\mathcal{P}|^2}\sum_{i,j=1}^{|\mathcal{P}|}\kappa(p_i,p_j)
+\end{equation}
+Finally, to find criticisms $\mathcal{C}$, the same greedy algorithm is used to select points that maximize another objective function $J(\mathcal{C})$.
+
+[Gurumoorthy et al., 2019](https://arxiv.org/pdf/1707.01212) associated non-negative weights, indicative of their importance, with the prototypes. In this way, both prototypes and criticisms (the least weighted prototypes) can be found by maximizing the same set function $F(\mathcal{P})$. They established the weak submodularity of $J(\mathcal{P})$ and presented tractable algorithms (`ProtoGreedy` and `ProtoDash`) to optimize it. Their method works for any symmetric positive definite kernel, which is not the case for `MMDCritic`. First, they define a weighted objective $F(\mathcal{P},w)$:
+\begin{equation}
+F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
+\end{equation}
+where $w$ are the non-negative weights of the prototypes. Then, they find a $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} F(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$. $J(\mathcal{P})$ can be maximized either by `ProtoGreedy` or by `ProtoDash`. `ProtoGreedy` selects the next element that maximizes the increment of the scoring function, while `ProtoDash` selects the next element that maximizes the gradient of $F(\mathcal{P},w)$ with respect to $w$. 
`ProtoDash` is much faster than `ProtoGreedy` without compromising on the quality of the solution (the complexity of `ProtoGreedy` is $O(n(n+m^4))$ compared to $O(n(n+m^2)+m^4)$ for `ProtoDash`). The difference between `ProtoGreedy` and the greedy algorithm of `MMDCritic` is that `ProtoGreedy` additionally determines the weights for each of the selected prototypes. The approximation guarantee is $(1-e^{-\gamma})$ for `ProtoGreedy`, where $\gamma$ is the submodularity ratio of $F(\mathcal{P})$, compared to $(1-e^{-1})$ for `MMDCritic`.
+
+### What is MMD?
+The commonality among these three methods is their use of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes. MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows:
+
+$$
+\begin{align*}
+\text{MMD}(P, Q) &= \left\| \mathbb{E}_{X \sim P}[\varphi(X)] - \mathbb{E}_{Y \sim Q}[\varphi(Y)] \right\|_\mathcal{H}
+\end{align*}
+$$
+
+where $\varphi(\cdot)$ is a mapping function of the data points. If we want to consider all orders of moments of the distributions, the mapping vectors $\varphi(X)$ and $\varphi(Y)$ will be infinite-dimensional. Thus, we cannot calculate them directly. However, if we have a kernel that gives the same result as the inner product of these two mappings in Hilbert space ($k(x, y) = \langle \varphi(x), \varphi(y) \rangle_\mathcal{H}$), then $\text{MMD}^2$ can be computed using only the kernel, without explicitly using $\varphi(X)$ and $\varphi(Y)$ (this is called the kernel trick):
+
+$$
+\begin{align*}
+\text{MMD}^2(P, Q) &= \langle \mathbb{E}_{X \sim P}[\varphi(X)], \mathbb{E}_{X' \sim P}[\varphi(X')] \rangle_\mathcal{H} + \langle \mathbb{E}_{Y \sim Q}[\varphi(Y)], \mathbb{E}_{Y' \sim Q}[\varphi(Y')] \rangle_\mathcal{H} \\
+&\quad - 2\langle \mathbb{E}_{X \sim P}[\varphi(X)], \mathbb{E}_{Y \sim Q}[\varphi(Y)] \rangle_\mathcal{H} \\
+&= \mathbb{E}_{X, X' \sim P}[k(X, X')] + \mathbb{E}_{Y, Y' \sim Q}[k(Y, Y')] - 2\mathbb{E}_{X \sim P, Y \sim Q}[k(X, Y)]
+\end{align*}
+$$
+
+A short numerical sketch of this estimator is given at the end of this page.
+
+### How to choose the kernel?
+The choice of the kernel for selecting prototypes depends on the specific problem and the characteristics of your data. Several kernels can be used, including:
+
+- Gaussian
+- Laplace
+- Polynomial
+- Linear...
+
+If we consider any exponential kernel (Gaussian, Laplace, ...), we automatically consider all the moments of the distribution, as the Taylor expansion of the exponential contains infinite-order moments. It is better to use a non-linear kernel to capture non-linear relationships in your data. If the problem is linear, it is better to choose a linear kernel such as the dot product kernel, since it is computationally efficient and often requires fewer hyperparameters to tune.
+
+!!!warning
+    For `MMDCritic`, the kernel must satisfy a condition ensuring the submodularity of the set function (the Gaussian kernel respects this constraint). In contrast, for `ProtoDash` and `ProtoGreedy`, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity.
+
+### Default kernel
+The default kernel is the Gaussian kernel. It assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data has a complex, non-linear structure. 
However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned.
+
+The Data-Centric prototype methods are implemented as [search methods](../../xplique/example_based/search_methods/):
+
+| Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* |
+|:-------------------------------------- | :----------------------: | :---------------: | :---------------------: |
+| [ProtoGreedySearch](../proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+| [ProtoDashSearch](../proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+| [MMDCriticSearch](../mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+
+*: Before using a PyTorch model, it is highly recommended to read the [dedicated documentation](../pytorch/)
+
+The class `ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.
+
+Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from the `ProtoGreedySearch` class.
+
+The class `MMDCriticSearch` differs from `ProtoGreedySearch` by assigning equal weights to the selected prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedySearch`, for each new candidate, we calculate the best weights for the selection of prototypes. However, in `MMDCriticSearch`, the `compute_objective` method assigns the same weight to all elements in the selection.
+
+The class `ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is different: it maximizes a tight lower bound on $l(w)$ instead of maximizing $l(w)$ itself, as done in `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides the `compute_objective` method to calculate an objective based on the gradient of $l(w)$. It also overrides the `update_selection` method to select the best weights of the selection based on the gradient of the best candidate.
+
+## Prototypes for Post-hoc Interpretability
+
+Data-Centric methods such as `ProtoGreedy`, `ProtoDash` and `MMDCritic` can be used in either the output or the latent space of the model. In these cases, [projection methods](./algorithms/projections/) are used to transfer the data from the input space to the latent/output spaces.
+
+The search method can have an attribute `projection` that projects samples to a space where distances between samples make sense for the model. The `search_method` then finds the prototypes by looking in the projected space. 
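+
+As a rough numerical illustration of the MMD statistic defined above, here is a minimal sketch of the (biased) squared-MMD estimator with a Gaussian kernel. The toy samples and the `sigma` value are assumptions made for this example, not values used by the library:
+
+```python
+import numpy as np
+
+def gaussian_kernel(a, b, sigma=1.0):
+    # k(x, y) = exp(-||x - y||^2 / (2 * sigma^2)), computed for all pairs
+    sq_dists = np.sum(a**2, 1)[:, None] + np.sum(b**2, 1)[None, :] - 2 * a @ b.T
+    return np.exp(-sq_dists / (2 * sigma**2))
+
+def mmd_squared(x, y, sigma=1.0):
+    # MMD^2(P, Q) = E[k(X, X')] + E[k(Y, Y')] - 2 E[k(X, Y)]
+    # (biased estimator: the means include the diagonal terms)
+    return (gaussian_kernel(x, x, sigma).mean()
+            + gaussian_kernel(y, y, sigma).mean()
+            - 2 * gaussian_kernel(x, y, sigma).mean())
+
+rng = np.random.default_rng(0)
+x = rng.normal(size=(100, 2))           # samples from P
+y = rng.normal(loc=1.0, size=(100, 2))  # samples from Q (shifted)
+print(mmd_squared(x, x[::-1]))  # close to zero: same distribution
+print(mmd_squared(x, y))        # clearly positive: distributions differ
+```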
+ + + + diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md b/docs/api/example_based/prototypes/mmd_critic.md similarity index 52% rename from docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md rename to docs/api/example_based/prototypes/mmd_critic.md index cb85d17c..2ec4a219 100644 --- a/docs/api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md +++ b/docs/api/example_based/prototypes/mmd_critic.md @@ -1,3 +1,3 @@ # MMDCriticSearch -MMDCriticSearch ([Kim et al., 2016](https://proceedings.neurips.cc/paper/2016/hash/5680522b8e2bb01943234bce7bf84534-Abstract.html)) \ No newline at end of file +MMDCriticSearch ([Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf)) \ No newline at end of file diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md b/docs/api/example_based/prototypes/proto_dash.md similarity index 100% rename from docs/api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md rename to docs/api/example_based/prototypes/proto_dash.md diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md b/docs/api/example_based/prototypes/proto_greedy.md similarity index 100% rename from docs/api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md rename to docs/api/example_based/prototypes/proto_greedy.md diff --git a/docs/api/example_based/search_method_md.md b/docs/api/example_based/search_method_md.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md b/docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md deleted file mode 100644 index e617985e..00000000 --- a/docs/api/example_based/search_methods/prototypes_search_methods/prototypes.md +++ /dev/null @@ -1,69 +0,0 @@ -# Prototypes - -Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class. - -Three classes of prototype-based methods are found in the literature ([Poché et al., 2023](https://hal.science/hal-04117520/document)): Prototypes for Data-Centric Interpretability, Prototypes for Post-hoc Interpretability and Prototype-Based Models Interpretable by Design. This library focuses on first two classes. - -## Prototypes for Data-Centric Interpretability -In this class, prototypes are selected without relying on the model and provide an overview of -the dataset. 
In this library, the following methode are implemented as [search methods](./algorithms/search_methods/): - -Xplique includes the following prototypes search methods: - -| Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* | -|:-------------------------------------- | :----------------------: | :---------------: | :---------------------: | -| [ProtoGreedySearch](../proto_greedy_search/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | -| [ProtoDashSearch](../proto_dash_search/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | -| [MMDCriticSearch](../mmd_critic_search/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | - -*: Before using a PyTorch model it is highly recommended to read the [dedicated documentation](../pytorch/) - -### What is MMD? -The commonality among these three methods is their utilization of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes. MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows: - -$$ -\begin{align*} -\text{MMD}(P, Q) &= \left\| \mathbb{E}_{X \sim P}[\varphi(X)] - \mathbb{E}_{Y \sim Q}[\varphi(Y)] \right\|_\mathcal{H} -\end{align*} -$$ - -where $\varphi(\cdot)$ is a mapping function of the data points. If we want to consider all orders of moments of the distributions, the mapping vectors $\varphi(X)$ and $\varphi(Y)$ will be infinite-dimensional. Thus, we cannot calculate them directly. However, if we have a kernel that gives the same result as the inner product of these two mappings in Hilbert space ($k(x, y) = \langle \varphi(x), \varphi(y) \rangle_\mathcal{H}$), then the $MMD^2$ can be computed using only the kernel and without explicitly using $\varphi(X)$ and $\varphi(Y)$ (this is called the kernel trick): - -$$ -\begin{align*} -\text{MMD}^2(P, Q) &= \langle \mathbb{E}_{X \sim P}[\varphi(X)], \mathbb{E}_{X' \sim P}[\varphi(X')] \rangle_\mathcal{H} + \langle \mathbb{E}_{Y \sim Q}[\varphi(Y)], \mathbb{E}_{Y' \sim Q}[\varphi(Y')] \rangle_\mathcal{H} \\ -&\quad - 2\langle \mathbb{E}_{X \sim P}[\varphi(X)], \mathbb{E}_{Y \sim Q}[\varphi(Y)] \rangle_\mathcal{H} \\ -&= \mathbb{E}_{X, X' \sim P}[k(X, X')] + \mathbb{E}_{Y, Y' \sim Q}[k(Y, Y')] - 2\mathbb{E}_{X \sim P, Y \sim Q}[k(X, Y)] -\end{align*} -$$ - -### How to choose the kernel ? -The choice of the kernel for selecting prototypes depends on the specific problem and the characteristics of your data. Several kernels can be used, including: - -- Gaussian -- Laplace -- Polynomial -- Linear... - -If we consider any exponential kernel (Gaussian kernel, Laplace, ...), we automatically consider all the moments for the distribution, as the Taylor expansion of the exponential considers infinite-order moments. It is better to use a non-linear kernel to capture non-linear relationships in your data. 
If the problem is linear, it is better to choose a linear kernel such as the dot product kernel, since it is computationally efficient and often requires fewer hyperparameters to tune. - -For the MMD-critic method, the kernel must satisfy a condition ensuring the submodularity of the set function (the Gaussian kernel respects this constraint). In contrast, for Protodash and Protogreedy, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity. - -### Default kernel -The default kernel used is Gaussian kernel. This kernel distance assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data has complexity. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned. - -## Prototypes for Post-hoc Interpretability - -Data-Centric methods such as Protogreedy, ProtoDash and MMD-critic can be used in either the output or the latent space of the model. In these cases, [projections methods](./algorithms/projections/) are used to transfer the data from the input space to the latent/output spaces. - -# Architecture of the code - -The Data-Centric prototypes methods are implemented as `search_methods`. The search method can have attribute `projection` that projects samples to a space where distances between samples make sense for the model. Then the `search_method` finds the prototypes by looking in the projected space. - -The class `ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one. - -Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from the `ProtoGreedySearch` class. - -The class `MMDCriticSearch` differs from `ProtoGreedySearch` by assigning equal weights to the selection of prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedySearch`, for each new candidate, we calculate the best weights for the selection of prototypes. However, in `MMDCriticSearch`, the `compute_objective` method assigns the same weight to all elements in the selection. - -The class `ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is different: it maximizes a tight lower bound on $l(w)$ instead of maximizing $l(w)$, as done in `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides the `compute_objective` method to calculate an objective based on the gradient of $l(w)$. It also overrides the `update_selection` method to select the best weights of the selection based on the gradient of the best candidate. 
\ No newline at end of file
diff --git a/mkdocs.yml b/mkdocs.yml
index eb975edc..f3f3eaf6 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -43,12 +43,11 @@ nav:
           - Tcav: api/concepts/tcav.md
           - Craft: api/concepts/craft.md
       - Example based:
-          - Search Methods:
-              - Prototypes Search Methods:
-                  - Prototypes: api/example_based/search_methods/prototypes_search_methods/prototypes.md
-                  - ProtoGreedySearch: api/example_based/search_methods/prototypes_search_methods/proto_greedy_search.md
-                  - ProtoDashSearch: api/example_based/search_methods/prototypes_search_methods/proto_dash_search.md
-                  - MMDCriticSearch: api/example_based/search_methods/prototypes_search_methods/mmd_critic_search.md
+          - Prototypes:
+              - API Description: api/example_based/prototypes/api_prototypes.md
+              - ProtoGreedy: api/example_based/prototypes/proto_greedy.md
+              - ProtoDash: api/example_based/prototypes/proto_dash.md
+              - MMDCritic: api/example_based/prototypes/mmd_critic.md
       - Feature visualization:
           - Modern Feature Visualization (MaCo): api/feature_viz/maco.md
           - Feature visualization: api/feature_viz/feature_viz.md

From c889d483bff0a8924b353d129b3db8bb9c32543d Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Fri, 19 Jul 2024 16:04:30 +0200
Subject: [PATCH 072/138] example-based search methods: add infinity norm
 distance

---
 xplique/example_based/search_methods/common.py | 8 ++++----
 xplique/example_based/search_methods/kleor.py | 2 +-
 xplique/example_based/search_methods/knn.py | 4 ++--
 xplique/example_based/search_methods/mmd_critic_search.py | 2 +-
 xplique/example_based/search_methods/proto_dash_search.py | 2 +-
 .../example_based/search_methods/proto_greedy_search.py | 2 +-
 6 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/xplique/example_based/search_methods/common.py b/xplique/example_based/search_methods/common.py
index bac0bce6..0f3af3d4 100644
--- a/xplique/example_based/search_methods/common.py
+++ b/xplique/example_based/search_methods/common.py
@@ -123,7 +123,7 @@ def get_distance_function(distance: Union[int, str, Callable] = "euclidean",) ->
     ----------
     distance : Union[int, str, Callable], optional
         Distance function to use. It can be an integer, a string in
-        {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable,
+        {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable,
         by default "euclidean".
     """
     # set distance function
@@ -133,12 +133,12 @@ def get_distance_function(distance: Union[int, str, Callable] = "euclidean",) ->
         return _distances[distance]
     elif isinstance(distance, int):
         return lambda x1, x2: _minkowski_distance(x1, x2, p=distance)
-    elif distance == np.inf:
+    elif distance == np.inf or (isinstance(distance, str) and distance == "inf"):
         return lambda x1, x2: _chebyshev_distance(x1, x2)
     else:
         raise AttributeError(
             "The distance parameter is expected to be either a Callable, "
-            + f" an integer, or a string in {_distances.keys()}. "
-            +f"But {type(distance)} was received."
+            + f" an integer, 'inf', or a string in {_distances.keys()}. "
+            +f"But a {type(distance)} was received, with value {distance}."
         )

diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py
index 57a238e0..ed6b92a3 100644
--- a/xplique/example_based/search_methods/kleor.py
+++ b/xplique/example_based/search_methods/kleor.py
@@ -43,7 +43,7 @@ class BaseKLEORSearch(FilterKNN, ABC):
         Number of sample treated simultaneously.
     distance
         Distance function for examples search. 
It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ def __init__( diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index 18d3fd2b..d5ed1be2 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -160,7 +160,7 @@ class KNN(BaseKNN): the best. distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ def __init__( @@ -316,7 +316,7 @@ class FilterKNN(BaseKNN): the best. distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". filter_fn A Callable that takes as inputs the inputs, their targets, the cases and their targets and diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py index 538ed277..fae99771 100644 --- a/xplique/example_based/search_methods/mmd_critic_search.py +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -38,7 +38,7 @@ class MMDCriticSearch(ProtoGreedySearch): It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". nb_prototypes : int Number of prototypes to find. diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index cb1d9097..21a8ae2a 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -104,7 +104,7 @@ class ProtoDashSearch(ProtoGreedySearch): It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". nb_prototypes : int Number of prototypes to find. diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 0a6a9b28..c1e46862 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -57,7 +57,7 @@ class ProtoGreedySearch(BaseSearchMethod): It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". nb_prototypes : int Number of prototypes to find. 
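For illustration, here is a minimal stand-alone mirror of the dispatch logic this commit adds; the real implementation lives in `xplique/example_based/search_methods/common.py`, and `_chebyshev_distance` below is a simplified stand-in for the library's version:

```python
import numpy as np
import tensorflow as tf

def _chebyshev_distance(x1, x2):
    # L-infinity norm: the maximum absolute coordinate difference
    return tf.reduce_max(tf.abs(x1 - x2), axis=-1)

def get_distance_function(distance="euclidean"):
    # simplified mirror of the patched dispatch:
    # both np.inf and the new "inf" string map to the Chebyshev distance
    if callable(distance):
        return distance
    if distance == np.inf or (isinstance(distance, str) and distance == "inf"):
        return _chebyshev_distance
    raise AttributeError(f"Unsupported distance in this sketch: {distance}")

dist_fn = get_distance_function("inf")
print(dist_fn(tf.constant([0.0, 3.0]), tf.constant([1.0, 1.0])))  # 2.0
```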
From b6c3ffbafec60b534ef9d6980ae7d303d5d8e396 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Fri, 19 Jul 2024 16:06:04 +0200 Subject: [PATCH 073/138] example-based projections: debug and remove get inputs weights --- .../example_based/projections/attributions.py | 48 ----------------- xplique/example_based/projections/base.py | 52 ++++--------------- xplique/example_based/projections/commons.py | 9 ++-- xplique/example_based/projections/hadamard.py | 52 +------------------ 4 files changed, 18 insertions(+), 143 deletions(-) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index 0cf5c2af..8d298afe 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -77,51 +77,3 @@ def __init__( # set methods super().__init__(get_weights, space_projection, mappable=False) - - def get_input_weights( - self, - inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, - ): - """ - For visualization purpose (and only), we may be interested to project weights - from the projected space to the input space. - This is applied only if their is a difference in dimension. - We assume here that we are treating images and an upsampling is applied. - - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - More information in the documentation. - targets - Additional parameter for `self.get_weights` function. - - Returns - ------- - input_weights - Tensor with the same dimension as `inputs` modulo the channels. - They are an upsampled version of the actual weights used in the projection. - """ - projected_inputs = self.space_projection(inputs) - weights = self.get_weights(projected_inputs, targets) - - # take mean over channels for images - channel_mean_fn = lambda: tf.reduce_mean(weights, axis=-1, keepdims=True) - weights = tf.cond( - pred=tf.shape(weights).shape[0] < 4, - true_fn=lambda: weights, - false_fn=channel_mean_fn, - ) - - # resizing - resize_fn = lambda: tf.image.resize( - weights, inputs.shape[1:-1], method="bicubic" - ) - input_weights = tf.cond( - pred=projected_inputs.shape == inputs.shape, - true_fn=lambda: weights, - false_fn=resize_fn, - ) - return input_weights diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 592a3d1d..4a76de29 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -108,43 +108,6 @@ def get_weights(inputs, _ = None): # set device self.device = get_device(device) - def get_input_weights( - self, - inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, - ): - """ - Depending on the projection, we may not be able to visualize weights - as they are after the space projection. In this case, this method should be overwritten, - as in `AttributionProjection` that applies an up-sampling. - - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - More information in the documentation. - targets - Additional parameter for `self.get_weights` function. - - Returns - ------- - input_weights - Tensor with the same dimension as `inputs` modulo the channels. - They are an up-sampled version of the actual weights used in the projection. 
- """ - projected_inputs = self.space_projection(inputs) - assert tf.reduce_all(tf.equal(projected_inputs, inputs)), ( - "Weights cannot be interpreted in the input space" - + "if `space_projection()` is not an identity." - + "Either remove 'weights' from the returns or" - + "make your own projection and overwrite `get_input_weights`." - ) - - weights = self.get_weights(projected_inputs, targets) - - return weights - @sanitize_inputs_targets def project( self, @@ -270,10 +233,17 @@ def _loop_project_dataset( batch_size = None # iteratively project the dataset - for inputs, targets in tf.data.Dataset.zip((cases_dataset, targets_dataset)): - if batch_size is None: - batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this - projected_cases_dataset.append(self.project(inputs, targets)) + if targets_dataset is None: + for inputs in cases_dataset: + if batch_size is None: + batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this + projected_cases_dataset.append(self.project(inputs, None)) + else: + # in case targets are provided, we zip the datasets and project them together + for inputs, targets in tf.data.Dataset.zip((cases_dataset, targets_dataset)): + if batch_size is None: + batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this + projected_cases_dataset.append(self.project(inputs, targets)) projected_cases_dataset = tf.concat(projected_cases_dataset, axis=0) projected_cases_dataset = tf.data.Dataset.from_tensor_slices(projected_cases_dataset) diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index 42260f75..ee9091c2 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -50,7 +50,8 @@ def model_splitting(model: Union[tf.keras.Model, 'torch.nn.Module'], return _torch_model_splitting(model, latent_layer, device) except ImportError as exc: raise AttributeError( - f"Unknown model type, should be either `tf.keras.Model` or `torch.nn.Module`."\ + exc.__str__()+"\n\n"\ + +f"Unknown model type, should be either `tf.keras.Model` or `torch.nn.Module`."\ +f"But got {type(model)} instead.") @@ -150,7 +151,7 @@ def _torch_model_splitting(model: 'torch.nn.Module', """ import torch import torch.nn as nn - from ...wrappers.pytorch import PyTorchWrapper + from ...wrappers import TorchWrapper warnings.warn("Automatically splitting the provided PyTorch model into two parts. "\ +"This splitting is based on `model.named_children()`. 
"\ @@ -194,8 +195,8 @@ def second_model_forward(x): # Wrap models to obtain tensorflow ones first_model.eval() - wrapped_first_model = PyTorchWrapper(first_model, device=device) + wrapped_first_model = TorchWrapper(first_model, device=device) second_model.eval() - wrapped_second_model = PyTorchWrapper(second_model, device=device) + wrapped_second_model = TorchWrapper(second_model, device=device) return wrapped_first_model, wrapped_second_model \ No newline at end of file diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index 884c0217..97dc7acc 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -16,7 +16,7 @@ def _target_free_classification_operator(model: Callable, inputs: tf.Tensor, - targets: Optional[tf.Tensor]) -> tf.Tensor: # TODO: test + targets: Optional[tf.Tensor]) -> tf.Tensor: # TODO: test, and use in attribution projection """ Compute predictions scores, only for the label class, for a batch of samples. It has the same behavior as `Tasks.CLASSIFICATION` operator @@ -158,52 +158,4 @@ def from_splitted_model(cls, super().__init__(get_weights=get_weights, space_projection=features_extractor, - mappable=mappable) - - def get_input_weights( - self, - inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, - ): - """ - For visualization purpose (and only), we may be interested to project weights - from the projected space to the input space. - This is applied only if their is a difference in dimension. - We assume here that we are treating images and an upsampling is applied. - - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - More information in the documentation. - targets - Additional parameter for `self.get_weights` function. - - Returns - ------- - input_weights - Tensor with the same dimension as `inputs` modulo the channels. - They are an upsampled version of the actual weights used in the projection. 
- """ - projected_inputs = self.space_projection(inputs) - weights = self.get_weights(projected_inputs, targets) - - # take mean over channels for images - channel_mean_fn = lambda: tf.reduce_mean(weights, axis=-1, keepdims=True) - weights = tf.cond( - pred=tf.shape(weights).shape[0] < 4, - true_fn=lambda: weights, - false_fn=channel_mean_fn, - ) - - # resizing - resize_fn = lambda: tf.image.resize( - weights, inputs.shape[1:-1], method="bicubic" - ) - input_weights = tf.cond( - pred=projected_inputs.shape == inputs.shape, - true_fn=lambda: weights, - false_fn=resize_fn, - ) - return input_weights + mappable=mappable) \ No newline at end of file From 10438b0003495ff7eda3dfd11a388172c62d17fa Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Fri, 19 Jul 2024 16:07:16 +0200 Subject: [PATCH 074/138] example-based: remove weights from possible returns --- tests/example_based/test_cole.py | 16 +------- tests/example_based/test_similar_examples.py | 11 ----- xplique/example_based/base_example_method.py | 40 +++---------------- xplique/example_based/cole.py | 2 +- xplique/example_based/contrastive_examples.py | 12 ++---- xplique/example_based/prototypes.py | 2 +- xplique/example_based/similar_examples.py | 2 +- 7 files changed, 15 insertions(+), 70 deletions(-) diff --git a/tests/example_based/test_cole.py b/tests/example_based/test_cole.py index 3864a71d..f94ea24a 100644 --- a/tests/example_based/test_cole.py +++ b/tests/example_based/test_cole.py @@ -120,13 +120,6 @@ def test_cole_attribution(): # a different distance should give different results assert not almost_equal(examples_constructor, examples_different_distance) - # check weights are equal to the attribution directly on the input - method_constructor.returns = ["weights", "include_inputs"] - assert almost_equal( - method_constructor.explain(x_test, y_test)["weights"][:, 0], - Saliency(model)(x_test, y_test), - ) - def test_cole_hadamard(): """ @@ -205,7 +198,7 @@ def test_cole_splitting(): cases_dataset=x_train, targets_dataset=y_train, k=k, - case_returns=["examples", "weights", "include_inputs"], + case_returns=["examples", "include_inputs"], model=model, latent_layer="last_conv", attribution_method=Occlusion, @@ -215,14 +208,9 @@ def test_cole_splitting(): # Generate explanation outputs = method.explain(x_test, y_test) - examples, weights = outputs["examples"], outputs["weights"] + examples = outputs["examples"] # Verifications # Shape should be (n, k, h, w, c) nb_samples_test = x_test.shape[0] assert examples.shape == (nb_samples_test, k + 1) + input_shape - assert weights.shape[:-1] == (nb_samples_test, k + 1) + input_shape[:-1] - - -# test_cole_attribution() -# test_cole_splitting() diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py index db4af594..4580ed6d 100644 --- a/tests/example_based/test_similar_examples.py +++ b/tests/example_based/test_similar_examples.py @@ -205,13 +205,11 @@ def test_similar_examples_return_multiple_elements(): assert isinstance(method_output, dict) examples = method_output["examples"] - weights = method_output["weights"] distances = method_output["distances"] labels = method_output["labels"] # test every outputs shape (with the include inputs) assert examples.shape == (nb_samples_test, k + 1) + input_shape - assert weights.shape == (nb_samples_test, k + 1) + input_shape # the inputs distance ae zero and indices do not exist assert distances.shape == (nb_samples_test, k) assert labels.shape == (nb_samples_test, k) @@ -227,9 +225,6 @@ def 
test_similar_examples_return_multiple_elements(): examples[i, 3], x_train[i + 2] ) - # test weights - assert almost_equal(weights[i], tf.ones(weights[i].shape, dtype=tf.float32)) - # test distances assert almost_equal(distances[i, 0], 0) assert almost_equal(distances[i, 1], sqrt(prod(input_shape))) @@ -294,9 +289,3 @@ def test_similar_examples_weighting(): assert almost_equal(examples[i, 2], x_train[i]) or almost_equal( examples[i, 2], x_train[i + 2] ) - - -# test_similar_examples_input_dataset_management() -# test_similar_examples_basic() -# test_similar_examples_return_multiple_elements() -# test_similar_examples_weighting() diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 3fabe7c1..7e6e19e9 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -73,7 +73,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). """ - _returns_possibilities = ["examples", "weights", "distances", "labels", "include_inputs"] + _returns_possibilities = ["examples", "distances", "labels", "include_inputs"] def __init__( self, @@ -157,8 +157,6 @@ def returns(self, returns: Union[List[str], str]): `returns` can be set to 'all' for all possible elements to be returned. - 'examples' correspond to the expected examples, the inputs may be included in first position. (n, k(+1), ...) - - 'weights' the weights in the input space used in the projection. - They are associated to the input and the examples. (n, k(+1), ...) - 'distances' the distances between the inputs and the corresponding examples. They are associated to the examples. (n, k, ...) - 'labels' if provided through `dataset_labels`, @@ -306,7 +304,7 @@ def explain( search_output = self.search_method(projected_inputs, targets) # manage returned elements - return self.format_search_output(search_output, inputs, targets) + return self.format_search_output(search_output, inputs) def __call__( self, @@ -320,7 +318,6 @@ def format_search_output( self, search_output: Dict[str, tf.Tensor], inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, ): """ Format the output of the `search_method` to match the expected returns in `self.returns`. @@ -332,9 +329,9 @@ def format_search_output( inputs Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). - targets - Targets associated to the cases_dataset for dataset projection. - See `projection` for details. + # targets + # Targets associated to the cases_dataset for dataset projection. + # See `projection` for details. 
Returns ------- @@ -348,40 +345,15 @@ def format_search_output( # gather examples, labels, and targets from the example's indices of the search output examples = dataset_gather(self.cases_dataset, search_output["indices"]) examples_labels = dataset_gather(self.labels_dataset, search_output["indices"]) - examples_targets = dataset_gather( - self.targets_dataset, search_output["indices"] - ) # add examples and weights - if "examples" in self.returns or "weights" in self.returns: + if "examples" in self.returns: # or "weights" in self.returns: if "include_inputs" in self.returns: # include inputs inputs = tf.expand_dims(inputs, axis=1) examples = tf.concat([inputs, examples], axis=1) - if targets is not None: - targets = tf.expand_dims(targets, axis=1) - examples_targets = tf.concat([targets, examples_targets], axis=1) - else: - examples_targets = [None] * len(examples) if "examples" in self.returns: return_dict["examples"] = examples - if "weights" in self.returns: - # get weights of examples (n, k, ...) - # we iterate on the inputs dimension through maps - # and ask weights for batch of examples - weights = [] - for ex, ex_targ in zip(examples, examples_targets): - if isinstance(self.projection, Projection): - # get weights in the input space - weights.append(self.projection.get_input_weights(ex, ex_targ)) - else: - raise AttributeError( - "Cannot extract weights from the provided projection function" - + "Either remove 'weights' from the `case_returns` or" - + "inherit from `Projection` and overwrite `get_input_weights`." - ) - - return_dict["weights"] = tf.stack(weights, axis=0) # add indices, distances, and labels if "indices" in self.returns: diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py index 47b21dbe..00955452 100644 --- a/xplique/example_based/cole.py +++ b/xplique/example_based/cole.py @@ -46,7 +46,7 @@ class Cole(SimilarExamples): The number of examples to retrieve per input. distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". case_returns String or list of string with the elements to return in `self.explain()`. diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/contrastive_examples.py index 31afc794..b18302b3 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/contrastive_examples.py @@ -67,7 +67,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ def __init__( @@ -183,7 +183,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar distance Distance for the FilterKNN search method. Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ def __init__( @@ -345,7 +345,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar distance Distance for the FilterKNN search method. 
Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ _returns_possibilities = [ @@ -422,7 +422,6 @@ def format_search_output( self, search_output: Dict[str, tf.Tensor], inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None, ): """ Format the output of the `search_method` to match the expected returns in `self.returns`. @@ -434,9 +433,6 @@ def format_search_output( inputs Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). - targets - Targets associated to the cases_dataset for dataset projection. - See `projection` for details. Returns ------- @@ -444,7 +440,7 @@ def format_search_output( Dictionary with listed elements in `self.returns`. The elements that can be returned are defined with _returns_possibilities static attribute of the class. """ - return_dict = super().format_search_output(search_output, inputs, targets) + return_dict = super().format_search_output(search_output, inputs) if "nuns" in self.returns: return_dict["nuns"] = dataset_gather(self.cases_dataset, search_output["nuns_indices"]) if "nuns_labels" in self.returns: diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 5f4017d4..c1857b48 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -68,7 +68,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Ignored if `tf.data.Dataset` are provided (these are supposed to be batched). distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". nb_prototypes : int For general explanations, the number of prototypes to select. diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index 5c785322..b1433370 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -63,7 +63,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). distance Distance for the knn search method. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev"}, or a Callable, + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". 
""" def __init__( From 797f76e50a35448002bcce21f4f18fb45ebc9e6f Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 22 Jul 2024 15:54:06 +0200 Subject: [PATCH 075/138] example-based projections: put target free operator as common --- .../example_based/projections/attributions.py | 8 +++- xplique/example_based/projections/commons.py | 39 +++++++++++++++++- xplique/example_based/projections/hadamard.py | 41 +------------------ 3 files changed, 47 insertions(+), 41 deletions(-) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index 8d298afe..ef3b0ce8 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -12,7 +12,7 @@ from ...types import Callable, Union, Optional from .base import Projection -from .commons import model_splitting +from .commons import model_splitting, target_free_classification_operator class AttributionProjection(Projection): @@ -71,6 +71,12 @@ def __init__( else: # split the model if a latent_layer is provided space_projection, self.predictor = model_splitting(model, latent_layer) + + # change default operator + if not "operator" in attribution_kwargs or attribution_kwargs["operator"] is None: + warnings.warn("No operator provided, using standard classification operator."\ + + "For non-classification tasks, please specify an operator.") + attribution_kwargs["operator"] = target_free_classification_operator # compute attributions get_weights = self.method(self.predictor, **attribution_kwargs) diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index ee9091c2..45110310 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -199,4 +199,41 @@ def second_model_forward(x): second_model.eval() wrapped_second_model = TorchWrapper(second_model, device=device) - return wrapped_first_model, wrapped_second_model \ No newline at end of file + return wrapped_first_model, wrapped_second_model + + +def target_free_classification_operator(model: Callable, + inputs: tf.Tensor, + targets: Optional[tf.Tensor] = None) -> tf.Tensor: # TODO: test, and use in attribution projection + """ + Compute predictions scores, only for the label class, for a batch of samples. + It has the same behavior as `Tasks.CLASSIFICATION` operator + but computes targets at the same time if not provided. + Targets are a mask with 1 on the predicted class and 0 elsewhere. + This operator should only be used for classification tasks. + + + Parameters + ---------- + model + Model used for computing predictions. + inputs + Input samples to be explained. + targets + One-hot encoded labels or regression target (e.g {+1, -1}), one for each sample. + + Returns + ------- + scores + Predictions scores computed, only for the label class. 
+ """ + predictions = model(inputs) + + targets = tf.cond( + pred=tf.constant(targets is None, dtype=tf.bool), + true_fn=lambda: tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1]), + false_fn=lambda: targets, + ) + + scores = tf.reduce_sum(predictions * targets, axis=-1) + return scores diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index 97dc7acc..05fb77e3 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -11,44 +11,7 @@ from ...types import Callable, Union, Optional, OperatorSignature from .base import Projection -from .commons import model_splitting - - -def _target_free_classification_operator(model: Callable, - inputs: tf.Tensor, - targets: Optional[tf.Tensor]) -> tf.Tensor: # TODO: test, and use in attribution projection - """ - Compute predictions scores, only for the label class, for a batch of samples. - It has the same behavior as `Tasks.CLASSIFICATION` operator - but computes targets at the same time if not provided. - Targets are a mask with 1 on the predicted class and 0 elsewhere. - This operator should only be used for classification tasks. - - - Parameters - ---------- - model - Model used for computing predictions. - inputs - Input samples to be explained. - targets - One-hot encoded labels or regression target (e.g {+1, -1}), one for each sample. - - Returns - ------- - scores - Predictions scores computed, only for the label class. - """ - predictions = model(inputs) - - targets = tf.cond( - pred=tf.constant(targets is None, dtype=tf.bool), - true_fn=lambda: tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1]), - false_fn=lambda: targets, - ) - - scores = tf.reduce_sum(predictions * targets, axis=-1) - return scores +from .commons import model_splitting, target_free_classification_operator class HadamardProjection(Projection): @@ -110,7 +73,7 @@ def __init__( if operator is None: warnings.warn("No operator provided, using standard classification operator."\ + "For non-classification tasks, please specify an operator.") - operator = _target_free_classification_operator + operator = target_free_classification_operator # the weights are given by the gradient of the operator based on the predictor gradients, _ = get_gradient_functions(self.predictor, operator) From d2653300ef5dc54197aeb7789f724f9548705704 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 22 Jul 2024 15:54:57 +0200 Subject: [PATCH 076/138] example-based: split counterfactuals and semifactuals --- xplique/example_based/__init__.py | 6 +- xplique/example_based/cole.py | 125 ---------- ...rastive_examples.py => counterfactuals.py} | 230 ++---------------- xplique/example_based/semifactuals.py | 220 +++++++++++++++++ xplique/example_based/similar_examples.py | 117 ++++++++- 5 files changed, 355 insertions(+), 343 deletions(-) delete mode 100644 xplique/example_based/cole.py rename xplique/example_based/{contrastive_examples.py => counterfactuals.py} (54%) create mode 100644 xplique/example_based/semifactuals.py diff --git a/xplique/example_based/__init__.py b/xplique/example_based/__init__.py index 3de46d18..fa83c1ba 100644 --- a/xplique/example_based/__init__.py +++ b/xplique/example_based/__init__.py @@ -2,7 +2,7 @@ Example-based methods available """ -from .cole import Cole -from .similar_examples import SimilarExamples +from .similar_examples import SimilarExamples, Cole from .prototypes import Prototypes, ProtoGreedy, ProtoDash, MMDCritic -from 
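To illustrate what the shared `target_free_classification_operator` computes, here is a small self-contained example that mirrors its two branches (the `model` below is a toy stand-in returning fixed logits, not a real classifier):

```python
import tensorflow as tf

# toy stand-in for a classifier: fixed logits for two samples
model = lambda inputs: tf.constant([[2.0, 1.0], [0.5, 3.0]])
inputs = tf.zeros((2, 4))

predictions = model(inputs)

# without targets, the operator builds them from the argmax,
# so the score is simply the winning logit of each sample
targets = tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1])
print(tf.reduce_sum(predictions * targets, axis=-1))  # [2.0, 3.0]

# with explicit one-hot targets, the requested class is scored instead
explicit = tf.constant([[0.0, 1.0], [1.0, 0.0]])
print(tf.reduce_sum(predictions * explicit, axis=-1))  # [1.0, 0.5]
```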
.contrastive_examples import NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss +from .counterfactuals import NaiveCounterFactuals, LabelAwareCounterFactuals +from .semifactuals import KLEORGlobalSim, KLEORSimMiss diff --git a/xplique/example_based/cole.py b/xplique/example_based/cole.py deleted file mode 100644 index 00955452..00000000 --- a/xplique/example_based/cole.py +++ /dev/null @@ -1,125 +0,0 @@ -""" -Implementation of Cole method a simlilar examples method from example based module -""" -import numpy as np -import tensorflow as tf - -from ..attributions.base import BlackBoxExplainer -from ..types import Callable, List, Optional, Union, Type - -from .similar_examples import SimilarExamples -from .projections import AttributionProjection, HadamardProjection - - -class Cole(SimilarExamples): - """ - Cole is a similar examples method that gives the most similar examples to a query in some specific projection space. - Cole use the model (to be explained) to build a search space so that distances are meaningful for the model. - It uses attribution methods to weight inputs. - Those attributions may be computed in the latent space for high-dimensional data like images. - - It is an implementation of a method proposed by Kenny et Keane in 2019, - Twin-Systems to Explain Artificial Neural Networks using Case-Based Reasoning: - https://researchrepository.ucd.ie/handle/10197/11064 - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from this dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - targets_dataset - Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's - predictions. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - The number of examples to retrieve per input. - distance - Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, - by default "euclidean". - case_returns - String or list of string with the elements to return in `self.explain()`. - See the base class returns property for details. - batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - latent_layer - Layer used to split the model, the first part will be used for projection and - the second to compute the attributions. By default, the model is not split. - For such split, the `model` should be a `tf.keras.Model`. 
- - Layer to target for the outputs (e.g logits or after softmax). - If an `int` is provided it will be interpreted as a layer index. - If a `string` is provided it will look for the layer name. - - The method as described in the paper apply the separation on the last convolutional layer. - To do so, the `"last_conv"` parameter will extract it. - Otherwise, `-1` could be used for the last layer before softmax. - attribution_method - Class of the attribution method to use for projection. - It should inherit from `xplique.attributions.base.BlackBoxExplainer`. - By default, it computes the gradient to make the Hadamard product in the latent space. - attribution_kwargs - Parameters to be passed for the construction of the `attribution_method`. - """ - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - model: tf.keras.Model, - targets_dataset: Union[tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.Tensor, np.ndarray]] = None, - k: int = 1, - distance: Union[str, Callable] = "euclidean", - case_returns: Optional[Union[List[str], str]] = "examples", - batch_size: Optional[int] = 32, - latent_layer: Optional[Union[str, int]] = None, - attribution_method: Union[str, Type[BlackBoxExplainer]] = "gradient", - **attribution_kwargs, - ): - assert targets_dataset is not None - - # build the corresponding projection - if isinstance(attribution_method, str) and attribution_method.lower() == "gradient": - - operator = attribution_kwargs.get("operator", None) - - projection = HadamardProjection( - model=model, - latent_layer=latent_layer, - operator=operator, - ) - elif issubclass(attribution_method, BlackBoxExplainer): - # build attribution projection - projection = AttributionProjection( - model=model, - method=attribution_method, - latent_layer=latent_layer, - **attribution_kwargs, - ) - else: - raise ValueError( - f"attribution_method should be 'gradient' or a subclass of BlackBoxExplainer," +\ - "not {attribution_method}" - ) - - super().__init__( - cases_dataset=cases_dataset, - targets_dataset=targets_dataset, - labels_dataset=labels_dataset, - projection=projection, - k=k, - case_returns=case_returns, - batch_size=batch_size, - distance=distance, - ) diff --git a/xplique/example_based/contrastive_examples.py b/xplique/example_based/counterfactuals.py similarity index 54% rename from xplique/example_based/contrastive_examples.py rename to xplique/example_based/counterfactuals.py index b18302b3..360fdcda 100644 --- a/xplique/example_based/contrastive_examples.py +++ b/xplique/example_based/counterfactuals.py @@ -6,14 +6,13 @@ import numpy as np import tensorflow as tf -from ..types import Callable, List, Optional, Union, Dict -from ..commons import sanitize_inputs_targets, dataset_gather +from ..types import Callable, List, Optional, Union +from ..commons import sanitize_inputs_targets from .base_example_method import BaseExampleMethod -from .search_methods import ORDER, FilterKNN, KLEORSimMissSearch, KLEORGlobalSimSearch +from .search_methods import ORDER, FilterKNN from .projections import Projection -from .search_methods.base import _sanitize_returns class NaiveCounterFactuals(BaseExampleMethod): """ @@ -131,6 +130,7 @@ def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: mask = tf.not_equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) return mask + class LabelAwareCounterFactuals(BaseExampleMethod): """ This method will search the counterfactuals of a query within an expected class. 
This class should be provided with @@ -165,11 +165,10 @@ class LabelAwareCounterFactuals(BaseExampleMethod): Example of Callable: ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): + def custom_projection(inputs: tf.Tensor, np.ndarray): ''' Example of projection, inputs are the elements to project. - targets are optional parameters to orientated the projection. ''' projected_inputs = # do some magic on inputs, it should use the model. return projected_inputs @@ -239,19 +238,19 @@ def search_method_class(self): return FilterKNN - def filter_fn(self, _, __, cf_targets, cases_targets) -> tf.Tensor: + def filter_fn(self, _, __, cf_expected_classes, cases_targets) -> tf.Tensor: """ Filter function to mask the cases for which the target is different from the target(s) expected for the counterfactuals. Parameters ---------- - cf_targets + cf_expected_classes The one-hot encoding of the target class for the counterfactuals. cases_targets The one-hot encoding of the target class for the cases. """ - mask = tf.matmul(cf_targets, cases_targets, transpose_b=True) #(n, bs) + mask = tf.matmul(cf_expected_classes, cases_targets, transpose_b=True) #(n, bs) # TODO: I think some retracing are done here mask = tf.cast(mask, dtype=tf.bool) return mask @@ -260,7 +259,7 @@ def filter_fn(self, _, __, cf_targets, cases_targets) -> tf.Tensor: def explain( self, inputs: Union[tf.Tensor, np.ndarray], - cf_targets: Union[tf.Tensor, np.ndarray], + cf_expected_classes: Union[tf.Tensor, np.ndarray], ): """ Return the relevant CF examples to explain the inputs. @@ -273,7 +272,7 @@ def explain( Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. - cf_targets + cf_expected_classes Tensor or Array. One-hot encoding of the target class for the counterfactuals. Returns @@ -282,208 +281,11 @@ def explain( Dictionary with listed elements in `self.returns`. The elements that can be returned are defined with _returns_possibilities static attribute of the class. """ - return super().explain(inputs, cf_targets) - - -class KLEORBase(BaseExampleMethod): - """ - Base class for KLEOR methods. KLEOR methods search Semi-Factuals examples. In those methods, one should first - retrieve the Nearest Unlike Neighbor (NUN) which is the closest example to the query that has a different prediction - than the query. Then, the method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction - as the query. - - All the searches are done in a projection space where distances are relevant for the model. The projection space is - defined by the `projection` method. - - Depending on the KLEOR method some additional condition for the search are added. See the specific KLEOR method for - more details. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from this dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. 
- Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - The number of examples to retrieve per input. - projection - Projection or Callable that project samples from the input space to the search space. - The search space should be a space where distances are relevant for the model. - It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in - searching the input space, you should use a `BaseSearchMethod` instead. + # project inputs into the search space + projected_inputs = self.projection(inputs) - Example of Callable: - ``` - def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndarray = None): - ''' - Example of projection, - inputs are the elements to project. - targets are optional parameters to orientated the projection. - ''' - projected_inputs = # do some magic on inputs, it should use the model. - return projected_inputs - ``` - case_returns - String or list of string with the elements to return in `self.explain()`. - See the base class returns property for more details. - batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - distance - Distance for the FilterKNN search method. - Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, - by default "euclidean". 
- """ - _returns_possibilities = [ - "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns" - ] + # look for relevant elements in the search space + search_output = self.search_method(projected_inputs, cf_expected_classes) - def __init__( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - projection: Union[Projection, Callable] = None, - case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = "euclidean", - ): - - super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - targets_dataset=targets_dataset, - k=k, - projection=projection, - case_returns=case_returns, - batch_size=batch_size, - ) - - # set distance function and order for the search method - self.distance = distance - self.order = ORDER.ASCENDING - - # initiate search_method - self.search_method = self.search_method_class( - cases_dataset=self.projected_cases_dataset, - targets_dataset=self.targets_dataset, - k=self.k, - search_returns=self._search_returns, - batch_size=self.batch_size, - distance=self.distance, - ) - - @property - def returns(self) -> Union[List[str], str]: - """Override the Base class returns' parameter.""" - return self._returns - - @returns.setter - def returns(self, returns: Union[List[str], str]): - """ - Set the returns parameter. The returns parameter is a string or a list of string with the elements to return - in `self.explain()`. The elements that can be returned are defined with _returns_possibilities static attribute - """ - default = "examples" - self._returns = _sanitize_returns(returns, self._returns_possibilities, default) - self._search_returns = ["indices", "distances"] - - if isinstance(self._returns, list) and ("nuns" in self._returns): - self._search_returns.append("nuns_indices") - elif isinstance(self._returns, list) and ("nuns_indices" in self._returns): - self._search_returns.append("nuns_indices") - elif isinstance(self._returns, list) and ("nuns_labels" in self._returns): - self._search_returns.append("nuns_indices") - - if isinstance(self._returns, list) and ("dist_to_nuns" in self._returns): - self._search_returns.append("dist_to_nuns") - - try: - self.search_method.returns = self._search_returns - except AttributeError: - pass - - def format_search_output( - self, - search_output: Dict[str, tf.Tensor], - inputs: Union[tf.Tensor, np.ndarray], - ): - """ - Format the output of the `search_method` to match the expected returns in `self.returns`. - - Parameters - ---------- - search_output - Dictionary with the required outputs from the `search_method`. - inputs - Tensor or Array. Input samples to be explained. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - - Returns - ------- - return_dict - Dictionary with listed elements in `self.returns`. - The elements that can be returned are defined with _returns_possibilities static attribute of the class. 
-        """
-        return_dict = super().format_search_output(search_output, inputs)
-        if "nuns" in self.returns:
-            return_dict["nuns"] = dataset_gather(self.cases_dataset, search_output["nuns_indices"])
-        if "nuns_labels" in self.returns:
-            return_dict["nuns_labels"] = dataset_gather(self.labels_dataset, search_output["nuns_indices"])
-        if "nuns_indices" in self.returns:
-            return_dict["nuns_indices"] = search_output["nuns_indices"]
-        if "dist_to_nuns" in self.returns:
-            return_dict["dist_to_nuns"] = search_output["dist_to_nuns"]
-        return return_dict
-
-
-class KLEORSimMiss(KLEORBase):
-    """
-    The KLEORSimMiss method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of
-    the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the
-    method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
-
-    The search is done in a projection space where distances are relevant for the model. The projection space is defined
-    by the `projection` method.
-    """
-    @property
-    def search_method_class(self):
-        """
-        This property defines the search method class to use for the search. In this case, it is the KLEORSimMissSearch.
-        """
-        return KLEORSimMissSearch
-
-class KLEORGlobalSim(KLEORBase):
-    """
-    The KLEORGlobalSim method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of
-    the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the
-    method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
-
-    In addition, for a SF candidate to be considered, the SF should be closer to the query than the NUN in the
-    projection space (i.e. the SF should be 'between' the input and its NUN). This condition is added to the search.
-
-    The search is done in a projection space where distances are relevant for the model. The projection space is defined
-    by the `projection` method.
-    """
-    @property
-    def search_method_class(self):
-        """
-        This property defines the search method class to use for the search. In this case, it is the
-        KLEORGlobalSimSearch.
-        """
-        return KLEORGlobalSimSearch
+
+        # manage returned elements
+        return self.format_search_output(search_output, inputs)
diff --git a/xplique/example_based/semifactuals.py b/xplique/example_based/semifactuals.py
new file mode 100644
index 00000000..b912eb1e
--- /dev/null
+++ b/xplique/example_based/semifactuals.py
@@ -0,0 +1,220 @@
+"""
+Implementation of semi-factual methods for classification tasks.
+"""
+import warnings
+
+import numpy as np
+import tensorflow as tf
+
+from ..types import Callable, List, Optional, Union, Dict
+from ..commons import dataset_gather
+
+from .base_example_method import BaseExampleMethod
+from .search_methods import ORDER, KLEORSimMissSearch, KLEORGlobalSimSearch
+from .projections import Projection
+
+from .search_methods.base import _sanitize_returns
+
+
+class KLEORBase(BaseExampleMethod):
+    """
+    Base class for KLEOR methods. KLEOR methods search for Semi-Factual examples. In those methods, one should first
+    retrieve the Nearest Unlike Neighbor (NUN), which is the closest example to the query that has a different
+    prediction than the query. Then, the method searches for the K-Nearest Neighbors (KNN) of the NUN that have the
+    same prediction as the query.
+
+    All the searches are done in a projection space where distances are relevant for the model. The projection space is
+    defined by the `projection` method.
+
+    Depending on the KLEOR method, some additional conditions are added to the search. See the specific KLEOR methods
+    for more details.
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Be careful, `tf.data.Dataset` may be reshuffled at each iteration; make sure this is not
+        the case for your dataset, otherwise the examples will not make sense.
+    targets_dataset
+        Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` may be reshuffled at each iteration; make sure this is not
+        the case for your dataset, otherwise the examples will not make sense.
+    labels_dataset
+        Labels associated with the examples in the dataset. Indices should match with cases_dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` may be reshuffled at each iteration; make sure this is not
+        the case for your dataset, otherwise the examples will not make sense.
+    k
+        The number of examples to retrieve per input.
+    projection
+        Projection or Callable that projects samples from the input space to the search space.
+        The search space should be a space where distances are relevant for the model.
+        It should not be `None`; otherwise, the model is not involved and thus not explained. If you are
+        interested in searching the input space, you should use a `BaseSearchMethod` instead.
+
+        Example of Callable:
+        ```
+        def custom_projection(inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray] = None):
+            '''
+            Example of projection,
+            inputs are the elements to project.
+            targets are optional parameters to orient the projection.
+            '''
+            projected_inputs = ...  # do some magic on inputs, it should use the model.
+            return projected_inputs
+        ```
+    case_returns
+        String or list of strings with the elements to return in `self.explain()`.
+        See the base class returns property for more details.
+    batch_size
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+    distance
+        Distance function for examples search, used by the underlying FilterKNN search method.
+        It can be an integer, a string in
+        {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable,
+        by default "euclidean".
+    """
+    _returns_possibilities = [
+        "examples", "weights", "distances", "labels", "include_inputs",
+        "nuns", "nuns_indices", "nuns_labels", "dist_to_nuns"
+    ]
+
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        k: int = 1,
+        projection: Union[Projection, Callable] = None,
+        case_returns: Union[List[str], str] = "examples",
+        batch_size: Optional[int] = 32,
+        distance: Union[int, str, Callable] = "euclidean",
+    ):
+
+        super().__init__(
+            cases_dataset=cases_dataset,
+            labels_dataset=labels_dataset,
+            targets_dataset=targets_dataset,
+            k=k,
+            projection=projection,
+            case_returns=case_returns,
+            batch_size=batch_size,
+        )
+
+        # set distance function and order for the search method
+        self.distance = distance
+        self.order = ORDER.ASCENDING
+
+        # initiate search_method
+        self.search_method = self.search_method_class(
+            cases_dataset=self.projected_cases_dataset,
+            targets_dataset=self.targets_dataset,
+            k=self.k,
+            search_returns=self._search_returns,
+            batch_size=self.batch_size,
+            distance=self.distance,
+        )
+
+    @property
+    def returns(self) -> Union[List[str], str]:
+        """Override the base class returns' parameter."""
+        return self._returns
+
+    @returns.setter
+    def returns(self, returns: Union[List[str], str]):
+        """
+        Set the returns parameter. The returns parameter is a string or a list of strings with the elements to
+        return in `self.explain()`. The elements that can be returned are defined in the `_returns_possibilities`
+        static attribute.
+        """
+        default = "examples"
+        self._returns = _sanitize_returns(returns, self._returns_possibilities, default)
+        self._search_returns = ["indices", "distances"]
+
+        # "nuns", "nuns_indices", and "nuns_labels" all require the NUNs' indices from the search method
+        if isinstance(self._returns, list) and any(
+            element in self._returns for element in ("nuns", "nuns_indices", "nuns_labels")
+        ):
+            self._search_returns.append("nuns_indices")
+
+        if isinstance(self._returns, list) and ("dist_to_nuns" in self._returns):
+            self._search_returns.append("dist_to_nuns")
+
+        try:
+            self.search_method.returns = self._search_returns
+        except AttributeError:
+            pass
+
+    def format_search_output(
+        self,
+        search_output: Dict[str, tf.Tensor],
+        inputs: Union[tf.Tensor, np.ndarray],
+    ):
+        """
+        Format the output of the `search_method` to match the expected returns in `self.returns`.
+
+        Parameters
+        ----------
+        search_output
+            Dictionary with the required outputs from the `search_method`.
+        inputs
+            Tensor or Array. Input samples to be explained.
+            Expected shape among (N, W), (N, T, W), (N, W, H, C).
+
+        Returns
+        -------
+        return_dict
+            Dictionary with the elements listed in `self.returns`.
+            The elements that can be returned are defined in the `_returns_possibilities` static attribute of
+            the class.
+        """
+        return_dict = super().format_search_output(search_output, inputs)
+        if "nuns" in self.returns:
+            return_dict["nuns"] = dataset_gather(self.cases_dataset, search_output["nuns_indices"])
+        if "nuns_labels" in self.returns:
+            return_dict["nuns_labels"] = dataset_gather(self.labels_dataset, search_output["nuns_indices"])
+        if "nuns_indices" in self.returns:
+            return_dict["nuns_indices"] = search_output["nuns_indices"]
+        if "dist_to_nuns" in self.returns:
+            return_dict["dist_to_nuns"] = search_output["dist_to_nuns"]
+        return return_dict
+
+
+class KLEORSimMiss(KLEORBase):
+    """
+    The KLEORSimMiss method searches for Semi-Factual examples by searching for the Nearest Unlike Neighbor (NUN)
+    of the query. The NUN is the closest example to the query that has a different prediction than the query. Then,
+    the method searches for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
+
+    The search is done in a projection space where distances are relevant for the model. The projection space is
+    defined by the `projection` method.
+    """
+    @property
+    def search_method_class(self):
+        """
+        This property defines the search method class to use for the search. In this case, it is the
+        KLEORSimMissSearch.
+        """
+        return KLEORSimMissSearch
+
+
+class KLEORGlobalSim(KLEORBase):
+    """
+    The KLEORGlobalSim method searches for Semi-Factual examples by searching for the Nearest Unlike Neighbor (NUN)
+    of the query. The NUN is the closest example to the query that has a different prediction than the query. Then,
+    the method searches for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
+
+    In addition, for an SF candidate to be considered, the SF should be closer to the query than the NUN in the
+    projection space (i.e. the SF should be 'between' the input and its NUN). This condition is added to the search.
+
+    The search is done in a projection space where distances are relevant for the model. The projection space is
+    defined by the `projection` method.
+    """
+    @property
+    def search_method_class(self):
+        """
+        This property defines the search method class to use for the search. In this case, it is the
+        KLEORGlobalSimSearch.
+        """
+        return KLEORGlobalSimSearch
diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py
index b1433370..4c598fd8 100644
--- a/xplique/example_based/similar_examples.py
+++ b/xplique/example_based/similar_examples.py
@@ -4,10 +4,11 @@
 import tensorflow as tf
 import numpy as np
 
+from ..attributions.base import BlackBoxExplainer
 from ..types import Callable, List, Optional, Type, Union
 
 from .search_methods import KNN, BaseSearchMethod, ORDER
-from .projections import Projection
+from .projections import Projection, AttributionProjection, HadamardProjection
 
 from .base_example_method import BaseExampleMethod
 
@@ -103,3 +104,117 @@ def __init__(
     @property
     def search_method_class(self) -> Type[BaseSearchMethod]:
         return KNN
+
+
+class Cole(SimilarExamples):
+    """
+    Cole is a similar-examples method that gives the most similar examples to a query in some specific projection
+    space.
+    Cole uses the model (to be explained) to build a search space so that distances are meaningful for the model.
+    It uses attribution methods to weight inputs.
+    Those attributions may be computed in the latent space for high-dimensional data like images.
+
+    It is an implementation of a method proposed by Kenny and Keane in 2019,
+    Twin-Systems to Explain Artificial Neural Networks using Case-Based Reasoning:
+    https://researchrepository.ucd.ie/handle/10197/11064
+
+    Parameters
+    ----------
+    cases_dataset
+        The dataset used to train the model, examples are extracted from this dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Be careful, `tf.data.Dataset` may be reshuffled at each iteration; make sure this is not
+        the case for your dataset, otherwise the examples will not make sense.
+    labels_dataset
+        Labels associated with the examples in the dataset. Indices should match with cases_dataset.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` may be reshuffled at each iteration; make sure this is not
+        the case for your dataset, otherwise the examples will not make sense.
+    targets_dataset
+        Targets associated with the cases_dataset for dataset projection, oftentimes the one-hot encoding of a
+        model's predictions. See `projection` for details.
+        `tf.data.Dataset` are assumed to be batched as TensorFlow provides no method to verify it.
+        Batch size and cardinality of other datasets should match `cases_dataset`.
+        Be careful, `tf.data.Dataset` may be reshuffled at each iteration; make sure this is not
+        the case for your dataset, otherwise the examples will not make sense.
+    k
+        The number of examples to retrieve per input.
+    distance
+        Distance function for examples search. It can be an integer, a string in
+        {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable,
+        by default "euclidean".
+    case_returns
+        String or list of strings with the elements to return in `self.explain()`.
+        See the base class returns property for details.
+    batch_size
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+    latent_layer
+        Layer used to split the model, the first part will be used for projection and
+        the second to compute the attributions. By default, the model is not split.
+        For such a split, the `model` should be a `tf.keras.Model`.
+
+        Layer to target for the outputs (e.g. logits or after softmax).
+        If an `int` is provided it will be interpreted as a layer index.
+        If a `string` is provided it will look for the layer name.
+
+        The method as described in the paper applies the separation at the last convolutional layer.
+        To do so, pass the `"last_conv"` value and this layer will be extracted.
+        Otherwise, `-1` could be used for the last layer before softmax.
+    attribution_method
+        Class of the attribution method to use for projection.
+        It should inherit from `xplique.attributions.base.BlackBoxExplainer`.
+        By default, it computes the gradient to make the Hadamard product in the latent space.
+    attribution_kwargs
+        Parameters to be passed for the construction of the `attribution_method`.
+    """
+    def __init__(
+        self,
+        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
+        model: tf.keras.Model,
+        targets_dataset: Union[tf.Tensor, np.ndarray],
+        labels_dataset: Optional[Union[tf.Tensor, np.ndarray]] = None,
+        k: int = 1,
+        distance: Union[str, Callable] = "euclidean",
+        case_returns: Optional[Union[List[str], str]] = "examples",
+        batch_size: Optional[int] = 32,
+        latent_layer: Optional[Union[str, int]] = None,
+        attribution_method: Union[str, Type[BlackBoxExplainer]] = "gradient",
+        **attribution_kwargs,
+    ):
+        assert targets_dataset is not None, "Cole requires `targets_dataset` to build its projection."
+
+        # build the corresponding projection
+        if isinstance(attribution_method, str) and attribution_method.lower() == "gradient":
+
+            operator = attribution_kwargs.get("operator", None)
+
+            projection = HadamardProjection(
+                model=model,
+                latent_layer=latent_layer,
+                operator=operator,
+            )
+        elif issubclass(attribution_method, BlackBoxExplainer):
+            # build attribution projection
+            projection = AttributionProjection(
+                model=model,
+                method=attribution_method,
+                latent_layer=latent_layer,
+                **attribution_kwargs,
+            )
+        else:
+            raise ValueError(
+                "attribution_method should be 'gradient' or a subclass of BlackBoxExplainer, "
+                f"not {attribution_method}"
+            )
+
+        super().__init__(
+            cases_dataset=cases_dataset,
+            targets_dataset=targets_dataset,
+            labels_dataset=labels_dataset,
+            projection=projection,
+            k=k,
+            case_returns=case_returns,
+            batch_size=batch_size,
+            distance=distance,
+        )
From 56baf6b315c45f24541eb3cd5a1164d7d4d8c62c Mon Sep 17 00:00:00 2001
From: Mohamed Chafik Bakey
Date: Wed, 24 Jul 2024 18:17:56 +0200
Subject: [PATCH 077/138] add the documentation for the prototypes search
 methods fix up
---
 .../prototypes/api_prototypes.md | 59 ++++++++++++++-----
 ...{search_method_md.md => search_methods.md} | 0
 2 files changed, 43 insertions(+), 16 deletions(-)
 rename docs/api/example_based/{search_method_md.md => search_methods.md} (100%)

diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md
index dc23bc9b..c02132cd 100644
--- a/docs/api/example_based/prototypes/api_prototypes.md
+++ b/docs/api/example_based/prototypes/api_prototypes.md
@@ -1,28 +1,44 @@
 # Prototypes
 
-Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class. Three classes of prototype-based methods are found in the literature ([Poché et al., 2023](https://hal.science/hal-04117520/document)): [Prototypes for Data-Centric Interpretability](#prototypes-for-data-centric-interpretability), [Prototypes for Post-hoc Interpretability](#prototypes-for-post-hoc-interpretability) and Prototype-Based Models Interpretable by Design. This library focuses on first two classes.
+Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class. Three classes of prototype-based methods are found in the literature ([Poché et al., 2023](https://hal.science/hal-04117520/document)):
+
+- [Prototypes for Data-Centric Interpretability](#prototypes-for-data-centric-interpretability)
+- [Prototypes for Post-hoc Interpretability](#prototypes-for-post-hoc-interpretability)
+- Prototype-Based Models Interpretable by Design
+
+This library focuses on the first two classes.
## Prototypes for Data-Centric Interpretability
 
 In this class, prototypes are selected without relying on the model and provide an overview of
-the dataset. As mentioned in ([Poché et al., 2023](https://hal.science/hal-04117520/document)), we found `clustering methods`, `set cover methods` and `data summarization methods`. This library focuses on `data summarization methods`, also known as `set cover problem methods`, which can be treated in two ways [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf):
+the dataset. As mentioned in ([Poché et al., 2023](https://hal.science/hal-04117520/document)), this class comprises **clustering methods** and **data summarization methods**, also known as **set cover methods**. This library focuses on **data summarization methods**, which can be treated in two ways [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf):
 
-- **Summarization with knapsack constraint**:
+- **Data summarization with knapsack constraint**:
consists in finding a subset of prototypes $\mathcal{P}$ that maximizes the coverage set function $F(\mathcal{P})$ under the constraint that its selection cost $C(\mathcal{P})$ (e.g., the number of selected prototypes $|\mathcal{P}|$) should be less than a given budget.
-- **Summarization with covering constraint**:
-consists in finding a low-cost subset under the constraint it should cover all the data. For both cases, submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to have a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf).
+- **Data summarization with covering constraint**:
+consists in finding a low-cost subset of prototypes $\mathcal{P}$ under the constraint that it should cover all the data.
+
+For both cases, submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to have a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf).
+
+This library implements three methods from **Data summarization with knapsack constraint**: `MMDCritic`, `ProtoGreedy` and `ProtoDash`.
+[Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf) proposed the `MMDCritic` method, which uses a set function based on the Maximum Mean Discrepancy [(MMD)](#what-is-mmd). They solve the **data summarization with knapsack constraint** problem to find both prototypes and criticisms. First, the numbers of prototypes and criticisms to be found, respectively $m_p$ and $m_c$, are selected. Second, to find prototypes, a greedy algorithm is used to maximize $F(\mathcal{P})$ s.t. $|\mathcal{P}| \le m_p$, where $F(\mathcal{P})$ is defined as:
\begin{equation}
F(\mathcal{P})=\frac{2}{|\mathcal{P}|\cdot n}\sum_{i,j=1}^{|\mathcal{P}|,n}\kappa(p_i,x_j)-\frac{1}{|\mathcal{P}|^2}\sum_{i,j=1}^{|\mathcal{P}|}\kappa(p_i,p_j)
\end{equation}
-Finally, to find criticisms $\mathcal{C}$, the same greedy algorithm is used to select points that maximize another objective function $J(\mathcal{C})$.
+They use diagonal dominance conditions on the kernel to ensure the monotonicity and submodularity of $F(\mathcal{P})$. To find criticisms $\mathcal{C}$, the same greedy algorithm is used to select points that maximize another objective function $J(\mathcal{C})$.
 
-[Gurumoorthy et al., 2019](https://arxiv.org/pdf/1707.01212) associated non-negative weights to prototypes which are indicative of their importance. In this way, both prototypes and criticisms (which are the least weighted examples from prototypes) can be found by maximizing the same set function $F(\mathcal{P})$. They established the weak submodular property of $J(\mathcal{P})$ and present tractable algorithms (`ProtoGreedy` and `ProtoDash`) to optimize it. Their method works for any symmetric positive definite kernel which is not the case for `MMDCritic`. First, they define a weighted objective $F(\mathcal{P},w)$:
+[Gurumoorthy et al., 2019](https://arxiv.org/pdf/1707.01212) associate non-negative weights with prototypes, which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective $F(\mathcal{P},w)$, defined as:
\begin{equation}
F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
\end{equation}
-where $w$ are non-negative weights for each prototype. Then, they find $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} J(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$. $J(\mathcal{P})$ can be maximized either by `ProtoGreedy` or by `ProtoDash`. `ProtoGreedy` selects the next element that maximizes the increment of the scoring function while `Protodash` selects the next element that maximizes the gradient of $F(\mathcal{P},w)$ with respect to $w$. `ProtoDash` is much faster than `ProtoGreedy` without compromising on the quality of the solution (the complexity of `ProtoGreedy` is $O(n(n+m^4))$ comparing to $O(n(n+m^2)+m^4)$ for `ProtoDash`). The difference between `ProtoGreedy` and the greedy algorithm of `MMDCritic` is that `ProtoGreedy` additionally determines the weights for each of the selected prototypes. The approximation guarantee is $(1-e^{-\gamma})$ for `ProtoGreedy`, where $\gamma$ is submodularity ratio of $F(\mathcal{P})$, comparing to $(1-e^{-1})$ for `MMDCritic`.
+where $w$ are non-negative weights for each prototype. The problem then consists of finding $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} F(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$. They established the weak submodularity of $J(\mathcal{P})$ and presented tractable algorithms (`ProtoGreedy` and `ProtoDash`) to optimize it.
+
+### Method comparison
+
+- Compared to `MMDCritic`, both `ProtoGreedy` and `ProtoDash` additionally determine the weights for each of the selected prototypes.
+- `ProtoGreedy` and `ProtoDash` work for any symmetric positive definite kernel, which is not the case for `MMDCritic`.
+- `MMDCritic` and `ProtoGreedy` select the next element that maximizes the increment of the scoring function, while `ProtoDash` maximizes a tight lower bound on the increment of the scoring function (it maximizes the gradient of $F(\mathcal{P},w)$ with respect to $w$).
+- `ProtoDash` is much faster than `ProtoGreedy` without compromising on the quality of the solution (the complexity of `ProtoGreedy` is $O(n(n+m^4))$ compared to $O(n(n+m^2)+m^4)$ for `ProtoDash`).
+- The approximation guarantee for `ProtoGreedy` is $(1-e^{-\gamma})$, where $\gamma$ is the submodularity ratio of $F(\mathcal{P})$, compared to $(1-e^{-1})$ for `MMDCritic`.
 
 ### What is MMD?
 The commonality among these three methods is their utilization of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes. MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows:
@@ -59,7 +75,9 @@ If we consider any exponential kernel (Gaussian kernel, Laplace, ...), we automa
 ### Default kernel
 The default kernel used is Gaussian kernel. This kernel distance assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data has complexity. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned.
 
-The Data-Centric prototypes methods are implemented as [search methods](../../xplique/example_based/search_methods/):
+### API Implementation
+
+The Data-Centric prototype methods are implemented as [search methods](../../search_methods/):
 
 | Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* |
 |:-------------------------------------- | :----------------------: | :---------------: | :---------------------: |
@@ -71,18 +89,27 @@ The Data-Centric prototypes methods are implemented as [search methods](../../xp
 The class `ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.
 
-Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from the `ProtoGreedySearch` class.
-
-The class `MMDCriticSearch` differs from `ProtoGreedySearch` by assigning equal weights to the selection of prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedySearch`, for each new candidate, we calculate the best weights for the selection of prototypes. However, in `MMDCriticSearch`, the `compute_objective` method assigns the same weight to all elements in the selection.
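+
+To make the shared greedy scheme concrete, here is a minimal NumPy sketch of the `MMDCritic`-style selection (equal weights). The function names and simplifications are ours for illustration only and do not reflect the library's actual implementation:
+
+```python
+import numpy as np
+
+def rbf_kernel(X, gamma=1.0):
+    # kappa(x, y) = exp(-gamma * ||x - y||^2), computed for all pairs of rows of X
+    sq_dists = np.sum(X**2, 1)[:, None] + np.sum(X**2, 1)[None, :] - 2 * X @ X.T
+    return np.exp(-gamma * sq_dists)
+
+def greedy_mmd_prototypes(X, m_p, gamma=1.0):
+    """Greedily select m_p prototype indices maximizing F(P) defined above."""
+    n = X.shape[0]
+    K = rbf_kernel(X, gamma)  # (n, n) kernel matrix
+    selected, candidates = [], list(range(n))
+    for _ in range(m_p):
+        best_f, best_i = -np.inf, None
+        for i in candidates:
+            P = selected + [i]
+            # F(P) = 2/(|P| n) sum_{p in P, x in X} k(p, x) - 1/|P|^2 sum_{p, p' in P} k(p, p')
+            f = 2.0 * K[P, :].sum() / (len(P) * n) - K[np.ix_(P, P)].sum() / len(P) ** 2
+            if f > best_f:
+                best_f, best_i = f, i
+        selected.append(best_i)
+        candidates.remove(best_i)
+    return selected
+```
+
+In the library, this scoring step corresponds to the `compute_objective` method, which is precisely where the subclasses below differ.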
+Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from the `ProtoGreedySearch` class. The class `MMDCriticSearch` differs from `ProtoGreedySearch` by assigning equal weights to the selection of prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedySearch`, for each new candidate, we calculate the best weights for the selection of prototypes. However, in `MMDCriticSearch`, the `compute_objective` method assigns the same weight to all elements in the selection.
 
-The class `ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is different: it maximizes a tight lower bound on $l(w)$ instead of maximizing $l(w)$, as done in `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides the `compute_objective` method to calculate an objective based on the gradient of $l(w)$. It also overrides the `update_selection` method to select the best weights of the selection based on the gradient of the best candidate.
+The class `ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is [different](#method-comparison) from the one used by `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides both the `compute_objective` method and the `update_selection` method.
 
 ## Prototypes for Post-hoc Interpretability
 
-Data-Centric methods such as `Protogreedy`, `ProtoDash` and `MMDCritic` can be used in either the output or the latent space of the model. In these cases, [projections methods](./algorithms/projections/) are used to transfer the data from the input space to the latent/output spaces.
+Data-Centric methods such as `ProtoGreedy`, `ProtoDash` and `MMDCritic` can be used in either the output or the latent space of the model. In these cases, [projection methods](../../projections/) are used to transfer the data from the input space to the latent/output spaces.
 
 The search method can have attribute `projection` that projects samples to a space where distances between samples make sense for the model. Then the `search_method` finds the prototypes by looking in the projected space.
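+
+As an illustration, a post-hoc use could look like the following sketch. It assumes the `MMDCritic` explainer and the `LatentSpaceProjection` helper referenced in this documentation; the exact constructor arguments may differ, and `model`, `cases_dataset`, `labels_dataset` and `targets_dataset` are assumed to be already loaded:
+
+```python
+from xplique.example_based import MMDCritic
+from xplique.example_based.projections import LatentSpaceProjection
+
+# project the cases into the model's latent space before searching for prototypes
+projection = LatentSpaceProjection(model, latent_layer=-1)
+
+explainer = MMDCritic(
+    cases_dataset=cases_dataset,
+    labels_dataset=labels_dataset,
+    targets_dataset=targets_dataset,
+    k=5,
+    projection=projection,
+    nb_prototypes=10,
+)
+global_prototypes = explainer.get_global_prototypes()
+```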
+
+## Common API ##
+
+```python
+explainer = Method(cases_dataset, labels_dataset, targets_dataset, k,
+                   projection, case_returns, batch_size, distance,
+                   nb_prototypes, kernel_type,
+                   kernel_fn, gamma)
+# compute global explanation
+global_prototypes = explainer.get_global_prototypes()
+# compute local explanation
+local_prototypes = explainer(inputs)
+```
diff --git a/docs/api/example_based/search_method_md.md b/docs/api/example_based/search_methods.md
similarity index 100%
rename from docs/api/example_based/search_method_md.md
rename to docs/api/example_based/search_methods.md
From fdee972028f8aa1e7d87dc18554bc63a67ae6b44 Mon Sep 17 00:00:00 2001
From: Mohamed Chafik Bakey
Date: Thu, 25 Jul 2024 12:42:23 +0200
Subject: [PATCH 078/138] add the documentation for the prototypes search
 methods fix up
---
 .../example_based/prototypes/mmd_critic.md | 57 ++++++++++++++++-
 .../example_based/prototypes/proto_dash.md | 61 ++++++++++++++++++-
 .../example_based/prototypes/proto_greedy.md | 61 ++++++++++++++++++-
 3 files changed, 175 insertions(+), 4 deletions(-)

diff --git a/docs/api/example_based/prototypes/mmd_critic.md b/docs/api/example_based/prototypes/mmd_critic.md
index 2ec4a219..8743fdbc 100644
--- a/docs/api/example_based/prototypes/mmd_critic.md
+++ b/docs/api/example_based/prototypes/mmd_critic.md
@@ -1,3 +1,58 @@
 # MMDCriticSearch
 
-MMDCriticSearch ([Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf)) \ No newline at end of file
+
+ [View colab tutorial](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) |
+
+ [View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) |
+📰 [Paper](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf)
+
+`MMDCriticSearch` finds prototypes and criticisms by maximizing two separate objectives based on the Maximum Mean Discrepancy (MMD).
+
+!!! quote
+    MMD-critic uses the MMD statistic as a measure of similarity between points and potential prototypes, and
+    efficiently selects prototypes that maximize the statistic. In addition to prototypes, MMD-critic selects criticism samples i.e. samples that are not well-explained by the prototypes using a regularized witness function score.
+
+    -- [Efficient Data Representation by Selecting Prototypes with Importance Weights (2019).](https://arxiv.org/abs/1707.01212)
+
+First, to find prototypes $\mathcal{P}$, a greedy algorithm is used to maximize $F(\mathcal{P})$ s.t. $|\mathcal{P}| \le m_p$, where $F(\mathcal{P})$ is defined as:
+\begin{equation}
+    F(\mathcal{P})=\frac{2}{|\mathcal{P}|\cdot n}\sum_{i,j=1}^{|\mathcal{P}|,n}\kappa(p_i,x_j)-\frac{1}{|\mathcal{P}|^2}\sum_{i,j=1}^{|\mathcal{P}|}\kappa(p_i,p_j),
+\end{equation}
+where $m_p$ is the number of prototypes to be found. They use diagonal dominance conditions on the kernel to ensure the monotonicity and submodularity of $F(\mathcal{P})$.
+
+Second, to find criticisms $\mathcal{C}$, the same greedy algorithm is used to select points that maximize another objective function $J(\mathcal{C})$.
+
+!!!warning
+    For `MMDCritic`, the kernel must satisfy a condition that ensures the submodularity of the set function. The Gaussian kernel meets this requirement and is recommended. If you wish to choose a different kernel, it must satisfy the condition described by [Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf).
+
+## Example
+
+```python
+from xplique.example_based import MMDCritic
+
+# load data and labels
+# ...
+
+explainer = MMDCritic(cases_dataset, labels_dataset, targets_dataset, k,
+                      projection, case_returns, batch_size, distance,
+                      nb_prototypes, kernel_type,
+                      kernel_fn, gamma)
+# compute global explanation
+global_prototypes = explainer.get_global_prototypes()
+# compute local explanation
+local_prototypes = explainer(inputs)
+```
+
+## Notebooks
+
+- [**Prototypes**: Getting started](https://colab.research.google.com/drive/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2)
+- [**MMDCritic**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X)
+
+
+{{xplique.example_based.search_methods.MMDCriticSearch}}
+
diff --git a/docs/api/example_based/prototypes/proto_dash.md b/docs/api/example_based/prototypes/proto_dash.md
index b54dec50..d694504d 100644
--- a/docs/api/example_based/prototypes/proto_dash.md
+++ b/docs/api/example_based/prototypes/proto_dash.md
@@ -1,3 +1,60 @@
-# ProtoGreedySearch
+# ProtoDashSearch
 
-ProtoDashSearch ([Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212)) \ No newline at end of file
+
+ [View colab tutorial](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) |
+
+ [View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) |
+📰 [Paper](https://arxiv.org/abs/1707.01212)
+
+`ProtoDashSearch` associates non-negative weights with prototypes, which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective function.
+
+!!! quote
+    Our work notably generalizes the recent work
+    by Kim et al. (2016) where in addition to selecting prototypes, we
+    also associate non-negative weights which are indicative of their
+    importance. This extension provides a single coherent framework
+    under which both prototypes and criticisms (i.e. outliers) can be
+    found. Furthermore, our framework works for any symmetric
+    positive definite kernel thus addressing one of the key open
+    questions laid out in Kim et al. (2016).
+
+    -- [Efficient Data Representation by Selecting Prototypes with Importance Weights (2019).](https://arxiv.org/abs/1707.01212)
+
+More precisely, the weighted objective $F(\mathcal{P},w)$ is defined as:
+\begin{equation}
+F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
+\end{equation}
+where $w$ are non-negative weights for each prototype. The problem then consists of finding a subset $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} F(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$.
+
+[Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) proposed the `ProtoDash` algorithm, which is much faster than `ProtoGreedy` without compromising on the quality of the solution. In fact, `ProtoGreedy` selects the next element that maximizes the increment of the scoring function, whereas `ProtoDash` selects the next element that maximizes a tight lower bound on the increment of the scoring function.
+
+## Example
+
+```python
+from xplique.example_based import ProtoDash
+
+# load data and labels
+# ...
+
+explainer = ProtoDash(cases_dataset, labels_dataset, targets_dataset, k,
+                      projection, case_returns, batch_size, distance,
+                      nb_prototypes, kernel_type,
+                      kernel_fn, gamma)
+# compute global explanation
+global_prototypes = explainer.get_global_prototypes()
+# compute local explanation
+local_prototypes = explainer(inputs)
+```
+
+## Notebooks
+
+- [**Prototypes**: Getting started](https://colab.research.google.com/drive/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2)
+- [**ProtoDash**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X)
+
+
+{{xplique.example_based.search_methods.ProtoDashSearch}}
diff --git a/docs/api/example_based/prototypes/proto_greedy.md b/docs/api/example_based/prototypes/proto_greedy.md
index 9213caa1..57644ef3 100644
--- a/docs/api/example_based/prototypes/proto_greedy.md
+++ b/docs/api/example_based/prototypes/proto_greedy.md
@@ -1,3 +1,62 @@
 # ProtoGreedySearch
 
-ProtoGreedySearch ([Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212)) \ No newline at end of file
+
+ [View colab tutorial](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) |
+
+ [View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) |
+📰 [Paper](https://arxiv.org/abs/1707.01212)
+
+`ProtoGreedySearch` associates non-negative weights with prototypes, which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective function.
+
+!!! quote
+    Our work notably generalizes the recent work
+    by Kim et al. (2016) where in addition to selecting prototypes, we
+    also associate non-negative weights which are indicative of their
+    importance. This extension provides a single coherent framework
+    under which both prototypes and criticisms (i.e. outliers) can be
+    found. Furthermore, our framework works for any symmetric
+    positive definite kernel thus addressing one of the key open
+    questions laid out in Kim et al. (2016).
+
+    -- [Efficient Data Representation by Selecting Prototypes with Importance Weights (2019).](https://arxiv.org/abs/1707.01212)
+
+More precisely, the weighted objective $F(\mathcal{P},w)$ is defined as:
+\begin{equation}
+F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
+\end{equation}
+where $w$ are non-negative weights for each prototype. The problem then consists of finding a subset $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} F(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$.
+
+[Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) demonstrated that this problem is weakly submodular, which immediately leads to a standard greedy algorithm, which they call `ProtoGreedy`.
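+
+For intuition, one greedy step with its weight optimization can be sketched as follows. This is a simplified NumPy/SciPy illustration of the objective above, not the library's implementation: `K` stands for the kernel matrix over the dataset and `mu[i]` for $\frac{1}{n}\sum_j \kappa(x_i, x_j)$:
+
+```python
+import numpy as np
+from scipy.optimize import nnls
+
+def best_weights(K_PP, mu_P):
+    # maximize F(P, w) = 2 w.mu - w^T K w over w >= 0, rewritten as a
+    # non-negative least-squares problem with the Cholesky factor K = L L^T:
+    # w^T K w - 2 w.mu = ||L^T w - L^{-1} mu||^2 + constant
+    L = np.linalg.cholesky(K_PP + 1e-8 * np.eye(len(K_PP)))
+    w, _ = nnls(L.T, np.linalg.solve(L, mu_P))
+    return w
+
+def greedy_step(K, mu, selected, candidates):
+    # pick the candidate whose optimally weighted selection maximizes F(P, w)
+    best_f, best_j, best_w = -np.inf, None, None
+    for j in candidates:
+        P = selected + [j]
+        w = best_weights(K[np.ix_(P, P)], mu[P])
+        f = 2.0 * w @ mu[P] - w @ K[np.ix_(P, P)] @ w
+        if f > best_f:
+            best_f, best_j, best_w = f, j, w
+    return best_j, best_w
+```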
+
+`ProtoGreedy` is algorithmically similar to the greedy algorithm used by [Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf), as both methods greedily select the next element that maximizes the increment of the scoring function.
+
+## Example
+
+```python
+from xplique.example_based import ProtoGreedy
+
+# load data and labels
+# ...
+
+explainer = ProtoGreedy(cases_dataset, labels_dataset, targets_dataset, k,
+                        projection, case_returns, batch_size, distance,
+                        nb_prototypes, kernel_type,
+                        kernel_fn, gamma)
+# compute global explanation
+global_prototypes = explainer.get_global_prototypes()
+# compute local explanation
+local_prototypes = explainer(inputs)
+```
+
+## Notebooks
+
+- [**Prototypes**: Getting started](https://colab.research.google.com/drive/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2)
+- [**ProtoGreedy**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X)
+
+
+{{xplique.example_based.search_methods.ProtoGreedySearch}}
From 96fbf96ef3669cbeb8656bdfe02bff359060c81f Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Fri, 26 Jul 2024 12:21:51 +0200
Subject: [PATCH 079/138] docs: create the api page for example based methods,
 search methods and projections
---
 docs/api/example_based/api_example_based.md | 193 ++++++++++++++++++++
 1 file changed, 193 insertions(+)

diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md
index e69de29b..b1f6f76f 100644
--- a/docs/api/example_based/api_example_based.md
+++ b/docs/api/example_based/api_example_based.md
@@ -0,0 +1,193 @@
+# API: Example-based Methods
+
+- [**Example-based Methods**: Getting started]() **WIP**
+
+## Context ##
+
+!!! quote
+    While saliency maps have stolen the show for the last few years in the XAI field, their ability to reflect models' internal processes has been questioned. Although less in the spotlight, example-based XAI methods have continued to improve. It encompasses methods that use examples as explanations for a machine learning model's predictions. This aligns with the psychological mechanisms of human reasoning and makes example-based explanations natural and intuitive for users to understand. Indeed, humans learn and reason by forming mental representations of concepts based on examples.
+
+    -- [Natural Example-Based Explainability: a Survey (2023)](https://arxiv.org/abs/2309.03234)[^1]
+
+As mentioned by our team members in the quote above, example-based methods are an alternative to saliency maps and can be more aligned with some users' expectations. Thus, we have been working on implementing in Xplique some of those methods, which had been put aside in previous developments.
+
+While not exhaustive, we tried to cover a range of methods that are representative of the field and that belong to different families: similar examples, contrastive (counter-factuals and semi-factuals) examples, and prototypes (as concept-based methods have a dedicated section).
+
+At present, we have made the following choices:
+- Focus on methods that are natural example methods (see the paper above for more details).
+- Try to unify the three families of approaches with a common API.
+
+!!! info
+    We are in the early stages of development and are looking for feedback on the API design and the methods we have chosen to implement. Also, we are counting on the community to enrich the collection of available methods. If you are willing to contribute, reach out to us on the [GitHub](https://github.com/deel-ai/xplique) repository (with an issue, pull request, ...).
+
+## Common API ##
+
+```python
+explainer = ExampleMethod(
+    cases_dataset,
+    labels_dataset,
+    targets_dataset,
+    k,
+    projection,
+    case_returns,
+    batch_size,
+    **kwargs
+)
+
+explanations = explainer.explain(inputs, targets)
+```
+
+We tried to keep the API as close as possible to that of the attribution methods to keep a consistent experience for the users.
+
+The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are selected using a [search method](#search-methods) within a defined search space, projected from the input space using a [projection function](#projections).
+
+??? abstract "Table of example-based methods available"
+
+    | Method | Documentation | Family |
+    | --- | --- | --- |
+    | `SimilarExamples` | [SimilarExamples](api/example_based/methods/similar_examples) | Similar Examples |
+    | `Cole` | [Cole](api/example_based/methods/cole) | Similar Examples |
+    | `ProtoGreedy` | [ProtoGreedy](api/example_based/methods/proto_greedy/) | Prototypes |
+    | `ProtoDash` | [ProtoDash](api/example_based/methods/proto_dash/) | Prototypes |
+    | `MMDCritic` | [MMDCritic](api/example_based/methods/mmd_critic/) | Prototypes |
+    | `NaiveCounterFactuals` | [NaiveCounterFactuals](api/example_based/methods/naive_counter_factuals/) | Counter Factuals |
+    | `LabelAwareCounterFactuals` | [LabelAwareCounterFactuals](api/example_based/methods/label_aware_counter_factuals/) | Counter Factuals |
+    | `KLEORSimMiss` | [KLEOR](api/example_based/methods/kleor/) | Semi Factuals |
+    | `KLEORGlobalSim` | [KLEOR](api/example_based/methods/kleor/) | Semi Factuals |
+
+### Parameters ###
+
+- **cases_dataset** (`Union[tf.data.Dataset, tf.Tensor, np.ndarray]`): The dataset used to train the model, from which examples are extracted. It should be batched as TensorFlow provides no method to verify this. Ensure the dataset is not reshuffled at each iteration.
+- **labels_dataset** (`Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]]`): Labels associated with the examples in the cases dataset. Indices should match the `cases_dataset`.
+- **targets_dataset** (`Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]]`): Targets associated with the `cases_dataset` for dataset projection, often the one-hot encoding of a model's predictions.
+- **k** (`int`): The number of examples to retrieve per input.
+- **projection** (`Union[Projection, Callable]`): A projection or callable function that projects samples from the input space to the search space. The search space should be relevant for the model. (see [Projections](#projections))
+- **case_returns** (`Union[List[str], str]`): Elements to return in `self.explain()`. Default is "examples".
+- **batch_size** (`Optional[int]`): Number of samples processed simultaneously for projection and search. Ignored if a `tf.data.Dataset` is provided.
+
+### Properties ###
+
+- **search_method_class** (`Type[BaseSearchMethod]`): Abstract property to define the search method class to use. Must be implemented in subclasses. (see [Search Methods](#search-methods))
+- **k** (`int`): Getter and setter for the `k` parameter.
+- **returns** (`Union[List[str], str]`): Getter and setter for the `returns` parameter. Defines the elements to return in `self.explain()`. + +### `explain(self, inputs, targets)` ### + +Returns the relevant examples to explain the (inputs, targets). Projects inputs using `self.projection` and finds examples using the `self.search_method`. + +- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. +- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the `cases_dataset` for dataset projection. + +**Returns:** Dictionary with elements listed in `self.returns`. + +!!!info + The `__call__` method is an alias for the `explain` method. + +## Projections ## +Projections are functions that map input samples to a search space where examples are retrieved with a `search_method`. The search space should be relevant for the model (e.g. projecting the inputs into the latent space of the model). + +!!!info + If one decides to use the identity function as a projection, the search space will be the input space, thus rather explaining the dataset than the model. In this case, it may be more relevant to directly use a `search_method` ([Search Methods](#search-methods)) for the dataset. + +The `Projection` class is an abstract base class for projections. It involves two parts: `space_projection` and `weights`. The samples are first projected to a new space and then weighted. + +!!!warning + If both parts are `None`, the projection acts as an identity function. At least one part should involve the model to ensure meaningful distance calculations. + +??? abstract "Table of projection methods available" + + | Method | Documentation | + | --- | --- | + | `Projection` | HERE | + | `LatentSpaceProjection`| [LatentSpaceProjection](api/example_based/projections/latent_space_projection/) | + | `HadamardProjection` | [HadamardProjection](api/example_based/projections/hadamard_projection/) | + | `AttributionProjection` | [AttributionProjection](api/example_based/projections/attribution_projection/) | + +### Parameters ### + +- **get_weights** (`Optional[Union[Callable, tf.Tensor, np.ndarray]]`): Either a Tensor or a callable function. + - **Tensor**: Weights are applied in the projected space. + - **Callable**: A function that takes inputs and targets, returning the weights (Tensor). Weights should match the input shape (possibly differing in channels). + + **Example**: + ```python + def get_weights_example(projected_inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None): + # Compute weights using projected_inputs and targets. + weights = ... # Custom logic involving the model. + return weights + ``` + +- **space_projection** (`Optional[Callable]`): Callable that takes samples and returns a Tensor in the projected space. An example of a projected space is the latent space of a model. +- **device** (`Optional[str]`): Device to use for the projection. If `None`, the default device is used. +- **mappable** (`bool`): If `True`, the projection can be applied to a dataset through `Dataset.map`. Otherwise, the projection is done through a loop. + +### `project(self, inputs, targets=None)` ### + +Projects samples into a space meaningful for the model. This involves weighting the inputs, projecting them into a latent space, or both. This method should be called during initialization and for each explanation. + +- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. 
Expected shapes include (N, W), (N, T, W), (N, W, H, C).
+- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Additional parameter for the `self.get_weights` function.
+
+**Returns:** `projected_samples` - The samples projected into the new space.
+
+!!!info
+    The `__call__` method is an alias for the `project` method.
+
+### `project_dataset(self, cases_dataset, targets_dataset=None)` ###
+
+Applies the projection to a dataset through `Dataset.map`.
+
+- **cases_dataset** (`tf.data.Dataset`): Dataset of samples to be projected.
+- **targets_dataset** (`Optional[tf.data.Dataset]`): Dataset of targets for the samples.
+
+**Returns:** `projected_dataset` - The projected dataset.
+
+## Search Methods ##
+
+Search methods are used to retrieve examples from the `cases_dataset` that are relevant to the input samples.
+
+The `BaseSearchMethod` class is an abstract base class for example-based search methods. It defines the interface for search methods used to find examples in a dataset. This class should be inherited by specific search methods.
+
+??? abstract "Table of search methods available"
+
+    | Method | Documentation |
+    | --- | --- |
+    | `KNN` | [KNN](api/example_based/search_methods/knn/) |
+    | `FilterKNN` | [KNN](api/example_based/search_methods/knn/) |
+    | `ProtoGreedySearch` | [ProtoGreedySearch](api/example_based/search_methods/proto_greedy_search/) |
+    | `ProtoDashSearch` | [ProtoDashSearch](api/example_based/search_methods/proto_dash_search/) |
+    | `MMDCriticSearch` | [MMDCriticSearch](api/example_based/search_methods/mmd_critic_search/) |
+    | `KLEORSimMissSearch` | [KLEOR](api/example_based/search_methods/kleor/) |
+    | `KLEORGlobalSimSearch` | [KLEOR](api/example_based/search_methods/kleor/) |
+
+
+### Parameters ###
+
+- **cases_dataset** (`Union[tf.data.Dataset, tf.Tensor, np.ndarray]`): The dataset containing the examples to search in. It should be batched as TensorFlow provides no method to verify this. Ensure the dataset is not reshuffled at each iteration.
+- **k** (`int`): The number of examples to retrieve.
+- **search_returns** (`Optional[Union[List[str], str]]`): Elements to return in `self.find_examples()`. It should be a subset of `self._returns_possibilities`.
+- **batch_size** (`Optional[int]`): Number of samples treated simultaneously. It should match the batch size of the `cases_dataset` if it is a `tf.data.Dataset`.
+
+### Properties ###
+
+- **k** (`int`): Getter and setter for the `k` parameter.
+- **returns** (`Union[List[str], str]`): Getter and setter for the `returns` parameter. Defines the elements to return in `self.find_examples()`.
+
+### `find_examples(self, inputs, targets)` ###
+
+Abstract method to search for samples to return as examples. It should be implemented in subclasses. It may return the indices corresponding to the samples, based on the `self.returns` value.
+
+- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. Expected shapes include (N, W), (N, T, W), (N, W, H, C).
+- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the samples to be explained.
+
+**Returns:** `return_dict` - Dictionary containing the elements specified in `self.returns`.
+
+!!!info
+    The `__call__` method is an alias for the `find_examples` method.
+
+### `_returns_possibilities`
+
+Attribute that lists the possible elements that can be returned by the search methods. For the base class: `["examples", "distances", "labels", "include_inputs"]`.
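+
+To tie these pieces together, here is a hedged end-to-end sketch. The dataset variables, the `model` reference and the projection body are placeholders to adapt to your own use case:
+
+```python
+import tensorflow as tf
+
+from xplique.example_based import SimilarExamples
+from xplique.example_based.projections import Projection
+
+# placeholders: a batched dataset of cases and the matching model predictions
+cases_dataset = ...    # e.g. a batched, non-shuffled tf.data.Dataset
+targets_dataset = ...  # e.g. one-hot predictions of `model` on the cases
+
+# a projection built from the two optional parts described above
+projection = Projection(
+    get_weights=None,                     # no re-weighting of the features
+    space_projection=lambda x: model(x),  # e.g. search in the model's output space
+)
+
+explainer = SimilarExamples(
+    cases_dataset=cases_dataset,
+    targets_dataset=targets_dataset,
+    k=3,
+    projection=projection,
+    case_returns=["examples", "distances"],
+)
+
+explanations = explainer.explain(inputs, targets)
+```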
+ +[^1]: [Natural Example-Based Explainability: a Survey (2023)](https://arxiv.org/abs/2309.03234) \ No newline at end of file From 6e99393fa30fc864a2972e2e99413ae7dc76f809 Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Fri, 26 Jul 2024 15:45:57 +0200 Subject: [PATCH 080/138] docs: create the KLEOR page --- docs/api/example_based/methods/kleor.md | 68 +++++++++++++++++++++++++ 1 file changed, 68 insertions(+) create mode 100644 docs/api/example_based/methods/kleor.md diff --git a/docs/api/example_based/methods/kleor.md b/docs/api/example_based/methods/kleor.md new file mode 100644 index 00000000..66aa986c --- /dev/null +++ b/docs/api/example_based/methods/kleor.md @@ -0,0 +1,68 @@ +# KLEOR + + + + [View colab tutorial]()**WIP** | + + + [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/kleor.py) | +📰 [Paper](https://www.researchgate.net/publication/220106308_KLEOR_A_Knowledge_Lite_Approach_to_Explanation_Oriented_Retrieval) + +KLEOR for Knowledge-Light Explanation-Oriented Retrieval was introduced by Cummins & Bridge in 2006. It is a method that use counterfactuals, Nearest Unlike Neighbor (NUN), to guide the selection of a semi-factual (SF) example. + +Given a distance function $dist$, the NUN of a sample $(x, y)$ is the closest sample in the training dataset which has a different label than $y$. + +The KLEOR method actually have three variants including: + +- The Sim-Miss approach +- The Global-Sim approach + +In the Sim-Miss approach, the SF of the sample $(x,y)$ is the closest training sample from the corresponding NUN which has the same label as $y$. + +Denoting the training dataset as $\mathcal{D}$: + +$$Sim-Miss(x, y, NUN(x,y), \mathcal{D}) = arg \\ min_{(x',y') \in \mathcal{D} \\ | \\ y'=y} dist(x', NUN(x,y))$$ + +In the Global-Sim approach, they add an additional constraint that the SF should lie between the sample $(x,y)$ and the NUN that is: $dist(x, SF) < dist(x, NUN(x,y))$. + +We extended to the $k$ nearest neighbors of the NUN for both approaches. + +!!!info + In our implementation, we rather consider the labels predicted by the model $\hat{y}$ (*i.e.* the targets) rather than $y$! + +## Example + +```python +from xplique.example_based import KLEORGlobalSim, KLEORSimMiss + +cases_dataset = ... # load the training dataset +targets = ... # load the targets of the training dataset + +k = 5 + +# instantiate the KLEOR objects +kleor_sim_miss = KLEORSimMiss(cases_dataset=cases_dataset, + targets_dataset=targets, + k=k, + ) + +kleor_global_sim = KLEORGlobalSim(cases_dataset=cases_dataset, + targets_dataset=targets, + k=k, + ) + +# load the test samples and targets +test_samples = ... # load the test samples to search for +test_targets = ... 
# load the targets of the test samples
+
+# search the SFs for the test samples
+sim_miss_sf = kleor_sim_miss.explain(test_samples, test_targets)
+global_sim_sf = kleor_global_sim.explain(test_samples, test_targets)
+```
+
+## Notebooks
+
+TODO: Add the notebook
+
+{{xplique.example_based.semifactuals.KLEORSimMiss}}
+{{xplique.example_based.semifactuals.KLEORGlobalSim}}
\ No newline at end of file

From 026634e00ede31ace40b5d798930d398316cd699 Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Fri, 26 Jul 2024 16:18:11 +0200
Subject: [PATCH 081/138] docs: add pages for naive and label aware cf, for kleor search methods, knn and filter knn, fix a mistake in the kleor page

---
 docs/api/example_based/methods/kleor.md       |  2 +-
 .../methods/label_aware_counter_factuals.md   | 51 ++++++++++++++
 .../methods/naive_counter_factuals.md         | 51 ++++++++++++++
 .../api/example_based/search_methods/kleor.md | 47 +++++++++++++
 docs/api/example_based/search_methods/knn.md  | 68 +++++++++++++++++++
 5 files changed, 218 insertions(+), 1 deletion(-)
 create mode 100644 docs/api/example_based/methods/label_aware_counter_factuals.md
 create mode 100644 docs/api/example_based/methods/naive_counter_factuals.md
 create mode 100644 docs/api/example_based/search_methods/kleor.md
 create mode 100644 docs/api/example_based/search_methods/knn.md

diff --git a/docs/api/example_based/methods/kleor.md b/docs/api/example_based/methods/kleor.md
index 66aa986c..12b2a9fb 100644
--- a/docs/api/example_based/methods/kleor.md
+++ b/docs/api/example_based/methods/kleor.md
@@ -5,7 +5,7 @@
 [View colab tutorial]()**WIP** |


- [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/kleor.py) |
+ [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/semifactuals.py) |
📰 [Paper](https://www.researchgate.net/publication/220106308_KLEOR_A_Knowledge_Lite_Approach_to_Explanation_Oriented_Retrieval)

KLEOR for Knowledge-Light Explanation-Oriented Retrieval was introduced by Cummins & Bridge in 2006. It is a method that uses counterfactuals, namely the Nearest Unlike Neighbor (NUN), to guide the selection of a semi-factual (SF) example.

diff --git a/docs/api/example_based/methods/label_aware_counter_factuals.md b/docs/api/example_based/methods/label_aware_counter_factuals.md
new file mode 100644
index 00000000..d08f6224
--- /dev/null
+++ b/docs/api/example_based/methods/label_aware_counter_factuals.md
@@ -0,0 +1,51 @@
+# Label Aware Counterfactuals
+
+
+
+ [View colab tutorial]()**WIP** |
+
+
+ [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/counterfactuals.py) |
+📰 [Paper](https://www.semanticscholar.org/paper/Nearest-unlike-neighbor-(NUN)%3A-an-aid-to-decision-Dasarathy/48c1a310f655b827e5e7d712c859b25a4e3c0902)
+
+!!!note
+    The paper referenced here is not exactly the one we implemented. However, it is probably the closest in essence of what we implemented.
+
+In contrast to the [Naive Counterfactuals](api/example_based/methods/naive_counter_factuals/) approach, the Label Aware Counterfactuals leverage an *a priori* knowledge of the Counterfactuals' (CFs) targets to guide the search for the CFs (*e.g.* one is looking for a CF of the digit 8 in MNIST dataset within the digit 0 instances).
+
+!!!warning
+    Consequently, for this class, when a user call the `explain` method, the user is not expected to provide the targets corresponding to the input samples but rather a one-hot encoding of the targets of the CFs to search for.
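+
+For instance, a minimal sketch of such an encoding, assuming an MNIST-like setting with 10 classes (names are placeholders): looking for CFs within the digit 0 instances amounts to providing:
+
+```python
+import tensorflow as tf
+
+# one-hot encoding of the label expected for the CFs (here class 0 out of 10)
+cf_expected_classes = tf.constant([0])
+test_cf_targets = tf.one_hot(cf_expected_classes, depth=10)
+```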
+ +!!!info + One can use the `Projection` object to compute the distances between the samples (e.g. search for the CF in the latent space of a model). + +## Example + +```python +from xplique.example_based import LabelAwareCounterfactuals + +# load the training dataset +cases_dataset = ... # load the training dataset +targets_dataset = ... # load the targets of the training dataset + +k = 5 + +# instantiate the LabelAwareCounterfactuals object +lacf = LabelAwareCounterfactuals(cases_dataset=cases_dataset, + targets_dataset=targets_dataset, + k=k, + ) + +# load the test samples +test_samples = ... # load the test samples to search for +test_cf_targets = ... # WARNING: provide the one-hot encoding of the targets of the CFs to search for + +# search the CFs for the test samples +counterfactuals = lacf.explain(test_samples, test_cf_targets) +``` + +## Notebooks + +TODO: Add notebooks + +{{xplique.example_based.counterfactuals.LabelAwareCounterfactuals}} \ No newline at end of file diff --git a/docs/api/example_based/methods/naive_counter_factuals.md b/docs/api/example_based/methods/naive_counter_factuals.md new file mode 100644 index 00000000..35ed8779 --- /dev/null +++ b/docs/api/example_based/methods/naive_counter_factuals.md @@ -0,0 +1,51 @@ +# Naive Counterfactuals + + + + [View colab tutorial]()**WIP** | + + + [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/counterfactuals.py) | +📰 [Paper](https://www.semanticscholar.org/paper/Nearest-unlike-neighbor-(NUN)%3A-an-aid-to-decision-Dasarathy/48c1a310f655b827e5e7d712c859b25a4e3c0902) + +!!!note + The paper referenced here is not exactly the one we implemented as we a "naive" of it. However, it is probably the closest in essence of what we implemented. + +We define here a "naive" counterfactual method that is based on the Nearest Unlike Neighbor (NUN) concept introduced by Dasarathy in 1991[^1]. In essence, the NUN of a sample $(x, y)$ is the closest sample in the training dataset which has a different label than $y$. + +Thus, in this naive approach to counterfactuals, we yield the $k$ nearest training instances that have a different label than the target of the input sample in a greedy fashion. + +As it is mentioned in the [API documentation](api/example_based/methods/api_example_based/), by setting a `Projection` object, one can use the projection space to compute the distances between the samples (e.g. search for the CF in the latent space of a model). + +## Example + +```python +from xplique.example_based import NaiveCounterfactuals + +# load the training dataset +cases_dataset = ... # load the training dataset +targets_dataset = ... # load the targets of the training dataset + +k = 5 + +# instantiate the NaiveCounterfactuals object +ncf = NaiveCounterfactuals(cases_dataset=cases_dataset, + targets_dataset=targets_dataset, + k=k, + ) + +# load the test samples and targets +test_samples = ... # load the test samples to search for +test_targets = ... 
# load the targets of the test samples
+
+# search the CFs for the test samples
+counterfactuals = ncf.explain(test_samples, test_targets)
+```
+
+## Notebooks
+
+TODO: Add notebooks
+
+{{xplique.example_based.counterfactuals.NaiveCounterfactuals}}
+
+[^1] [Nearest unlike neighbor (NUN): an aid to decision making](https://www.semanticscholar.org/paper/Nearest-unlike-neighbor-(NUN)%3A-an-aid-to-decision-Dasarathy/48c1a310f655b827e5e7d712c859b25a4e3c0902)
\ No newline at end of file
diff --git a/docs/api/example_based/search_methods/kleor.md b/docs/api/example_based/search_methods/kleor.md
new file mode 100644
index 00000000..9ad70ba8
--- /dev/null
+++ b/docs/api/example_based/search_methods/kleor.md
@@ -0,0 +1,47 @@
+# KLEOR Search Methods
+
+These search methods are used by the [KLEOR](api/example_based/methods/kleor/) methods.
+
+They encompass the following two classes:
+- `KLEORSimMissSearch`: looks for semi-factual (SF) examples by searching for the Nearest Unlike Neighbor (NUN) of the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the method searches for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
+- `KLEORGlobalSim`: in addition to the previous method, the SF should be closer to the query than the NUN to be a candidate.
+
+## Examples
+
+```python
+from xplique.example_based.search_methods import KLEORSimMissSearch
+from xplique.example_based.search_methods import KLEORGlobalSim
+
+cases_dataset = ... # load the training dataset
+targets = ... # load the targets of the training dataset
+
+test_samples = ... # load the test samples to search for
+test_targets = ... # load the targets of the test samples
+
+# set some parameters
+k = 5
+distance = "euclidean"
+
+# create the KLEORSimMissSearch object
+kleor_sim_miss_search = KLEORSimMissSearch(cases_dataset=cases_dataset,
+                                           targets_dataset=targets,
+                                           k=k,
+                                           distance=distance)
+
+# create the KLEORGlobalSim object
+kleor_global_sim = KLEORGlobalSim(cases_dataset=cases_dataset,
+                                  targets_dataset=targets,
+                                  k=k,
+                                  distance=distance)
+
+# search for the K-Nearest Neighbors of the test samples
+sim_miss_neighbors = kleor_sim_miss_search.find_examples(test_samples, test_targets)
+global_sim_neighbors = kleor_global_sim.find_examples(test_samples, test_targets)
+```
+
+## Notebooks
+
+TODO: add the notebook for KLEOR
+
+{{xplique.example_based.search_methods.kleor.KLEORSimMissSearch}}
+{{xplique.example_based.search_methods.kleor.KLEORGlobalSim}}
diff --git a/docs/api/example_based/search_methods/knn.md b/docs/api/example_based/search_methods/knn.md
new file mode 100644
index 00000000..7f0eb423
--- /dev/null
+++ b/docs/api/example_based/search_methods/knn.md
@@ -0,0 +1,68 @@
+# K Nearest Neighbors
+
+KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors` [see the documentation](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html).
+The `kneighbors` method is implemented in a batched way to handle large datasets and tries to be memory-efficient.
+
+In addition, we also added a `FilterKNN` class that allows one to filter the neighbors based on a given criterion, potentially avoiding the computation of the distances for all the samples. It is useful when the candidate neighbors are sparse and the distance computation is expensive.
+
+## Examples
+
+```python
+from xplique.example_based.search_methods import ORDER
+from xplique.example_based.search_methods import KNN
+
+# set some parameters
+k = 5
+cases_dataset = ... # load the training dataset
+test_samples = ... # load the test samples to search for
+
+distance = "euclidean"
+order = ORDER.ASCENDING
+
+# create the KNN object
+knn = KNN(cases_dataset = cases_dataset,
+          k = k,
+          distance = distance,
+          order = order)
+
+k_nearest_neighbors = knn.kneighbors(test_samples)
+```
+
+```python
+import tensorflow as tf
+
+from xplique.example_based.search_methods import ORDER
+from xplique.example_based.search_methods import FilterKNN
+
+# set some parameters
+k = 5
+cases_dataset = ... # load the training dataset
+targets = ... # load the targets of the training dataset
+
+test_samples = ... # load the test samples to search for
+test_targets = ... # load the targets of the test samples
+
+distance = "euclidean"
+order = ORDER.ASCENDING
+
+# define a filter function
+def filter_fn(cases, inputs, targets, cases_targets):
+    # filter out the cases that have the same target as the input
+    mask = tf.not_equal(targets, cases_targets)
+    return mask
+
+# create the FilterKNN object
+filter_knn = FilterKNN(cases_dataset=cases_dataset,
+                       targets_dataset=targets,
+                       k=k,
+                       distance=distance,
+                       order=order,
+                       filter_fn=filter_fn)
+
+k_nearest_neighbors = filter_knn.kneighbors(test_samples, test_targets)
+```
+
+## Notebooks
+
+TODO: add all notebooks that use this search method
+
+{{xplique.example_based.search_methods.knn.KNN}}
+{{xplique.example_based.search_methods.knn.FilterKNN}}
\ No newline at end of file

From 393914eb711ecb8b14370dcd727214cbaed35f1d Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Tue, 30 Jul 2024 09:21:23 +0200
Subject: [PATCH 082/138] docs: add the page for similar examples

---
 .../example_based/methods/similar_examples.md | 45 +++++++++++++++++++
 1 file changed, 45 insertions(+)
 create mode 100644 docs/api/example_based/methods/similar_examples.md

diff --git a/docs/api/example_based/methods/similar_examples.md b/docs/api/example_based/methods/similar_examples.md
new file mode 100644
index 00000000..4a5748a1
--- /dev/null
+++ b/docs/api/example_based/methods/similar_examples.md
@@ -0,0 +1,45 @@
+# Similar-Examples
+
+
+
+ [View colab tutorial]()**WIP** |
+
+
+ [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/semifactuals.py)
+
+We designate here as *Similar Examples* all methods that, given an input sample, search for the most similar **training** samples given a distance function `distance`. Furthermore, one can define the search space using a `projection` function (see [Projections](api/example_based/projections.md)). This function should map an input sample to the search space where the distance function is defined and meaningful (**e.g.** the latent space of a Convolutional Neural Network).
+Then, a K-Nearest Neighbors (KNN) search is performed to find the most similar samples in the search space.
+
+## Example
+
+```python
+from xplique.example_based import SimilarExamples
+
+cases_dataset = ... # load the training dataset
+k = 5
+distance = "euclidean"
+
+# define the projection function
+def custom_projection(inputs: tf.Tensor, targets: tf.Tensor = None):
+    '''
+    Example of projection,
+    inputs are the elements to project.
+    targets are optional parameters to orientate the projection.
+    '''
+    projected_inputs = ...  # do some magic on inputs, it should use the model.
+    return projected_inputs
+
+# instantiate the SimilarExamples object
+sim_ex = SimilarExamples(
+    cases_dataset=cases_dataset,
+    k=k,
+    projection=custom_projection,
+    distance=distance,
+)
+```
+
+## Notebooks
+
+TODO: Add the notebook
+
+{{xplique.example_based.similar_examples.SimilarExamples}}
\ No newline at end of file

From 37312daf90bcb37913fbfad9053002e4cc2ac07b Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Tue, 30 Jul 2024 10:00:18 +0200
Subject: [PATCH 083/138] docs: create the documentation page for Cole

---
 docs/api/example_based/methods/cole.md | 62 ++++++++++++++++++++++++++
 1 file changed, 62 insertions(+)
 create mode 100644 docs/api/example_based/methods/cole.md

diff --git a/docs/api/example_based/methods/cole.md b/docs/api/example_based/methods/cole.md
new file mode 100644
index 00000000..38d52b43
--- /dev/null
+++ b/docs/api/example_based/methods/cole.md
@@ -0,0 +1,62 @@
+# COLE: Contributions Oriented Local Explanations
+
+
+
+ [View colab tutorial]()**WIP** |
+
+
+ [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/similar_examples.py) |
+📰 [Paper](https://researchrepository.ucd.ie/handle/10197/11064)
+
+COLE for Contributions Oriented Local Explanations was introduced by Kenny & Keane in 2019.
+
+!!! quote
+    Our method COLE is based on the premise that the contributions of features in a model’s classification represent the most sensible basis to inform case-based explanations.
+
+    -- [COLE paper](https://researchrepository.ucd.ie/handle/10197/11064)[^1]
+
+The core idea of the COLE approach is to use [attribution maps](api/attributions/api_attributions/) to define a relevant search space for the K-Nearest Neighbors (KNN) search.
+
+More specifically, the COLE approach is based on the following steps:
+- (1) Given an input sample $x$, compute the attribution map $A(x)$
+- (2) Consider the projection space defined by: $p: x \rightarrow A(x) \odot x$ ($\odot$ denotes the element-wise product)
+- (3) Perform a KNN search in the projection space to find the most similar training samples
+
+!!! info
+    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and three attribution methods (LRP, Integrated Gradient, and DeepLift). We decided to implement a COLE method that generalizes to a broader range of Neural Networks and attribution methods that are gradient-based (see [API Attributions documentation](api/attributions/api_attributions/) for definition).
+
+## Example
+
+```python
+from xplique.example_based import Cole
+from xplique.attributions import Saliency
+
+model = ... # load the model
+cases_dataset = ... # load the training dataset
+target_dataset = ... # load the target dataset (one-hot encoding of the model's predictions)
+k = 5
+
+# instantiate the Cole object
+cole = Cole(
+    cases_dataset=cases_dataset,
+    model=model,
+    k=k,
+    attribution_method=Saliency,
+)
+
+# load the test samples
+test_samples = ... # load the test samples
+test_targets = ... 
# load the test targets
+
+# search the most similar samples with the COLE method
+similar_samples = cole.explain(test_samples, test_targets)
+```
+
+## Notebooks
+
+TODO: Add the notebook
+
+{{xplique.example_based.similar_examples.Cole}}
+
+[^1]: [Twin-Systems to Explain Artificial Neural Networks using Case-Based Reasoning:
+Comparative Tests of Feature-Weighting Methods in ANN-CBR Twins for XAI (2019)](https://researchrepository.ucd.ie/handle/10197/11064)
\ No newline at end of file

From 6d1247cca0959202536e8325980ff64cefc73f27 Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Tue, 30 Jul 2024 10:01:05 +0200
Subject: [PATCH 084/138] fixup: wrong github link

---
 docs/api/example_based/methods/similar_examples.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/api/example_based/methods/similar_examples.md b/docs/api/example_based/methods/similar_examples.md
index 4a5748a1..1dc9f1b8 100644
--- a/docs/api/example_based/methods/similar_examples.md
+++ b/docs/api/example_based/methods/similar_examples.md
@@ -5,7 +5,7 @@
 [View colab tutorial]()**WIP** |


- [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/semifactuals.py)
+ [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/similar_examples.py)

We designate here as *Similar Examples* all methods that, given an input sample, search for the most similar **training** samples given a distance function `distance`. Furthermore, one can define the search space using a `projection` function (see [Projections](api/example_based/projections.md)). This function should map an input sample to the search space where the distance function is defined and meaningful (**e.g.** the latent space of a Convolutional Neural Network).
Then, a K-Nearest Neighbors (KNN) search is performed to find the most similar samples in the search space.

From 27af808c52d0bd916097d0a5c90f945716a67916 Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Tue, 30 Jul 2024 10:32:32 +0200
Subject: [PATCH 085/138] fixup: in cole documentation, change misleading information

---
 docs/api/example_based/methods/cole.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/api/example_based/methods/cole.md b/docs/api/example_based/methods/cole.md
index 38d52b43..0ba3916e 100644
--- a/docs/api/example_based/methods/cole.md
+++ b/docs/api/example_based/methods/cole.md
@@ -23,7 +23,7 @@ More specifically, the COLE approach is based on the following steps:
- (3) Perform a KNN search in the projection space to find the most similar training samples

!!! info
-    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and three attribution methods (LRP, Integrated Gradient, and DeepLift). We decided to implement a COLE method that generalizes to a broader range of Neural Networks and attribution methods that are gradient-based (see [API Attributions documentation](api/attributions/api_attributions/) for definition).
+    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and three attribution methods (LRP, Integrated Gradient, and DeepLift). We decided to implement a COLE method that generalizes to a broader range of Neural Networks and attribution methods (see [API Attributions documentation](api/attributions/api_attributions/) to see the list of methods available).
## Example

From 6d15e53ed331e0983ca44d08284069a7b43b5ffc Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Thu, 1 Aug 2024 10:29:25 +0200
Subject: [PATCH 086/138] docs: add a documentation page for projections

---
 docs/api/example_based/projections.md | 25 +++++++++++++++++++++++++
 1 file changed, 25 insertions(+)

diff --git a/docs/api/example_based/projections.md b/docs/api/example_based/projections.md
index e69de29b..2918a573 100644
--- a/docs/api/example_based/projections.md
+++ b/docs/api/example_based/projections.md
@@ -0,0 +1,25 @@
+# Projections
+
+In example-based explainability, one often needs to define a notion of similarity (distance) between samples. However, the original feature space may not be the most suitable space to define this similarity. For instance, in the case of images, two images can be very similar in terms of their pixel values but very different in terms of their semantic content. In addition, computing distances in the original feature space does not take the model into account whatsoever, questioning the explainability of the method.
+
+To address these issues, one can project the samples into a new space where the distances between samples are more meaningful with respect to the model's decision. Two approaches are commonly used to define this projection space: (1) use a latent space and (2) use a feature weighting scheme.
+
+Consequently, we defined the general `Projection` class that will be used as a base class for all projection methods. This class allows one to use one or both of the aforementioned approaches. Indeed, one can instantiate a `Projection` object with a `space_projection` method, which defines a projection from the feature space to a space of interest, and a `get_weights` method, which defines the feature weighting scheme. The `Projection` class will then project a sample with the `space_projection` method and weight the projected sample's features with the `get_weights` method.
+
+In addition, we provide concrete implementations of the `Projection` class: `LatentSpaceProjection`, `AttributionProjection`, and `HadamardProjection`.
+
+## `Projection` class
+
+{{xplique.example_based.projections.Projection}}
+
+## `LatentSpaceProjection` class
+
+{{xplique.example_based.projections.LatentSpaceProjection}}
+
+## `AttributionProjection` class
+
+{{xplique.example_based.projections.AttributionProjection}}
+
+## `HadamardProjection` class
+
+{{xplique.example_based.projections.HadamardProjection}}

From 64903ab4c7c7d94b6d349f7f58f0795ae00756bb Mon Sep 17 00:00:00 2001
From: lucas Hervier
Date: Thu, 1 Aug 2024 10:56:56 +0200
Subject: [PATCH 087/138] docs: add some details in the api documentation

---
 docs/api/example_based/api_example_based.md | 3 +++
 docs/api/example_based/search_methods.md | 0
 2 files changed, 3 insertions(+)
 delete mode 100644 docs/api/example_based/search_methods.md

diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md
index b1f6f76f..0677a265 100644
--- a/docs/api/example_based/api_example_based.md
+++ b/docs/api/example_based/api_example_based.md
@@ -147,6 +147,9 @@ Applies the projection to a dataset through `Dataset.map`.

Search methods are used to retrieve examples from the `cases_dataset` that are relevant to the input samples.

+!!!info
+    In an Example method, the `cases_dataset` is the dataset that has been projected with a `Projection` object (see the previous section). The search methods are used to find examples in this projected space.
+ The `BaseSearchMethod` class is an abstract base class for example-based search methods. It defines the interface for search methods used to find examples in a dataset. This class should be inherited by specific search methods. ??? abstract "Table of search methods available" diff --git a/docs/api/example_based/search_methods.md b/docs/api/example_based/search_methods.md deleted file mode 100644 index e69de29b..00000000 From 5b6f026634deffa12114b756230f6d487b649d8e Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Fri, 2 Aug 2024 11:42:43 +0200 Subject: [PATCH 088/138] docs: update the mkdocs.yml --- docs/api/example_based/api_example_based.md | 4 ++-- mkdocs.yml | 11 +++++++++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md index 0677a265..09a87b01 100644 --- a/docs/api/example_based/api_example_based.md +++ b/docs/api/example_based/api_example_based.md @@ -1,6 +1,6 @@ -# API: Example-based API +# API: Example-based -- [**Example-based Methods**: Getting strated]() **WIP** +- [**Example-based Methods**: Getting started]() **WIP** ## Context ## diff --git a/mkdocs.yml b/mkdocs.yml index f3f3eaf6..c9a1391e 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -43,11 +43,22 @@ nav: - Tcav: api/concepts/tcav.md - Craft: api/concepts/craft.md - Example based: + - API Description: api/example_based/api_example_based.md + - Methods: + - Cole: api/example_based/methods/cole.md + - Kleor: api/example_based/methods/kleor.md + - LabelAwareCounterFactuals: api/example_based/methods/label_aware_counterfactuals.md + - NaiveCounterFactuals: api/example_based/methods/naive_counterfactuals.md + - SimilarExamples: api/example_based/methods/similar_examples.md - Prototypes: - API Description: api/example_based/prototypes/api_prototypes.md - ProtoGreedy: api/example_based/prototypes/proto_greedy.md - ProtoDash: api/example_based/prototypes/proto_dash.md - MMDCritic: api/example_based/prototypes/mmd_critic.md + - Projections: api/example_based/projections.md + - Search Methods: + - Kleor: api/example_based/search_methods/kleor.md + - KNN: api/example_based/search_methods/knn.md - Feature visualization: - Modern Feature Visualization (MaCo): api/feature_viz/maco.md - Feature visualization: api/feature_viz/feature_viz.md From 70106fe66f27c267264c9f6652c2387312dd6787 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Fri, 2 Aug 2024 16:36:13 +0200 Subject: [PATCH 089/138] example based docs: small modifications --- docs/api/example_based/api_example_based.md | 16 +++++++++++----- docs/api/example_based/methods/kleor.md | 3 +++ .../methods/naive_counter_factuals.md | 2 +- mkdocs.yml | 4 ++-- 4 files changed, 17 insertions(+), 8 deletions(-) diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md index 09a87b01..23704ce7 100644 --- a/docs/api/example_based/api_example_based.md +++ b/docs/api/example_based/api_example_based.md @@ -5,7 +5,7 @@ ## Context ## !!! quote - While saliency maps have stolen the show for the last few years in the XAI field, their ability to reflect models' internal processes has been questioned. Although less in the spotlight, example-based XAI methods have continued to improve. It encompasses methods that use examples as explanations for a machine learning model's predictions. This aligns with the psychological mechanisms of human reasoning and makes example-based explanations natural and intuitive for users to understand. 
Indeed, humans learn and reason by forming mental representations of concepts based on examples. + While saliency maps have stolen the show for the last few years in the XAI field, their ability to reflect models' internal processes has been questioned. Although less in the spotlight, example-based XAI methods have continued to improve. It encompasses methods that use samples as explanations for a machine learning model's predictions. This aligns with the psychological mechanisms of human reasoning and makes example-based explanations natural and intuitive for users to understand. Indeed, humans learn and reason by forming mental representations of concepts based on examples. -- [Natural Example-Based Explainability: a Survey (2023)](https://arxiv.org/abs/2309.03234)[^1] @@ -14,7 +14,7 @@ As mentioned by our team members in the quote above, example-based methods are a While not being exhaustive we tried to cover a range of methods that are representative of the field and that belong to different families: similar examples, contrastive (counter-factuals and semi-factuals) examples, and prototypes (as concepts based methods have a dedicated sections). At present, we made the following choices: -- Focus on methods that are natural example methods (see the paper above for more details). +- Focus on methods that are natural example methods (post-hoc and non-generative, see the paper above for more details). - Try to unify the three families of approaches with a common API. !!! info @@ -39,7 +39,7 @@ explanations = explainer.explain(inputs, targets) We tried to keep the API as close as possible to the one of the attribution methods to keep a consistent experience for the users. -The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are selected using a [search method](#search-methods) within a defined search space, projected from the input space using a [projection function](#projections). +The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are projected from the input space to a search space using a [projection function](#projections). The projection function defines the search space. Then, examples are selected using a [search method](#search-methods) within the search space. ??? abstract "Table of example-based methods available" @@ -63,7 +63,13 @@ The `BaseExampleMethod` is an abstract base class designed for example-based met - **k** (`int`): The number of examples to retrieve per input. - **projection** (`Union[Projection, Callable]`): A projection or callable function that projects samples from the input space to the search space. The search space should be relevant for the model. (see [Projections](#projections)) - **case_returns** (`Union[List[str], str]`): Elements to return in `self.explain()`. Default is "examples". -- **batch_size** (`Optional[int]`): Number of samples processed simultaneously for projection and search. Ignored if `tf.data.Dataset` is provided. +- **batch_size** (`Optional[int]`): Number of samples processed simultaneously for projection and search. Ignored if `cases_dataset` is a `tf.data.Dataset`. 
+ +!!!tips + If the elements of your dataset are tuples (cases, labels), you can pass this dataset directly to the `cases_dataset`. + +!!!tips + Apart from contrastive explanations, in the case of classification, the built-in [Projections](#projections) compute `targets` online and the `targets_dataset` is not necessary. ### Properties ### @@ -148,7 +154,7 @@ Applies the projection to a dataset through `Dataset.map`. Search methods are used to retrieve examples from the `cases_dataset` that are relevant to the input samples. !!!info - In an Example method, the `cases_dataset` is the dataset that has been projected with a `Projection` object (see the previous section). The search methods are used to find examples in this projected space. + In an search method, the `cases_dataset` is the dataset that has been projected with a `Projection` object (see the previous section). The search methods are used to find examples in this projected space. The `BaseSearchMethod` class is an abstract base class for example-based search methods. It defines the interface for search methods used to find examples in a dataset. This class should be inherited by specific search methods. diff --git a/docs/api/example_based/methods/kleor.md b/docs/api/example_based/methods/kleor.md index 12b2a9fb..187d6923 100644 --- a/docs/api/example_based/methods/kleor.md +++ b/docs/api/example_based/methods/kleor.md @@ -30,6 +30,9 @@ We extended to the $k$ nearest neighbors of the NUN for both approaches. !!!info In our implementation, we rather consider the labels predicted by the model $\hat{y}$ (*i.e.* the targets) rather than $y$! +!!!tips + As KLEOR methods use counterfactuals, they can also return them. Therefore, it is possible to obtain both semi-factuals and counterfactuals with an unique method. To do so "nuns" and "nuns_labels" should be added to the `cases_returns` list. + ## Example ```python diff --git a/docs/api/example_based/methods/naive_counter_factuals.md b/docs/api/example_based/methods/naive_counter_factuals.md index 35ed8779..e81350c0 100644 --- a/docs/api/example_based/methods/naive_counter_factuals.md +++ b/docs/api/example_based/methods/naive_counter_factuals.md @@ -9,7 +9,7 @@ 📰 [Paper](https://www.semanticscholar.org/paper/Nearest-unlike-neighbor-(NUN)%3A-an-aid-to-decision-Dasarathy/48c1a310f655b827e5e7d712c859b25a4e3c0902) !!!note - The paper referenced here is not exactly the one we implemented as we a "naive" of it. However, it is probably the closest in essence of what we implemented. + The paper referenced here is not exactly the one we implemented as we use a "naive" version of it. However, it is probably the closest in essence of what we implemented. We define here a "naive" counterfactual method that is based on the Nearest Unlike Neighbor (NUN) concept introduced by Dasarathy in 1991[^1]. In essence, the NUN of a sample $(x, y)$ is the closest sample in the training dataset which has a different label than $y$. 
diff --git a/mkdocs.yml b/mkdocs.yml index c9a1391e..cd59b533 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -47,8 +47,8 @@ nav: - Methods: - Cole: api/example_based/methods/cole.md - Kleor: api/example_based/methods/kleor.md - - LabelAwareCounterFactuals: api/example_based/methods/label_aware_counterfactuals.md - - NaiveCounterFactuals: api/example_based/methods/naive_counterfactuals.md + - LabelAwareCounterFactuals: api/example_based/methods/label_aware_counter_factuals.md + - NaiveCounterFactuals: api/example_based/methods/naive_counter_factuals.md - SimilarExamples: api/example_based/methods/similar_examples.md - Prototypes: - API Description: api/example_based/prototypes/api_prototypes.md From 467c79bded4af5166061dc724c5508c7aa599975 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Fri, 2 Aug 2024 16:37:15 +0200 Subject: [PATCH 090/138] example based: small fixes --- xplique/example_based/projections/attributions.py | 2 +- xplique/example_based/projections/base.py | 1 + xplique/example_based/projections/hadamard.py | 2 +- xplique/example_based/search_methods/knn.py | 6 ++++++ 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index ef3b0ce8..46074fab 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -74,7 +74,7 @@ def __init__( # change default operator if not "operator" in attribution_kwargs or attribution_kwargs["operator"] is None: - warnings.warn("No operator provided, using standard classification operator."\ + warnings.warn("No operator provided, using standard classification operator. "\ + "For non-classification tasks, please specify an operator.") attribution_kwargs["operator"] = target_free_classification_operator diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 4a76de29..9d076bb4 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -99,6 +99,7 @@ def get_weights(inputs, _ = None): if space_projection is None: self.space_projection = lambda inputs: inputs elif hasattr(space_projection, "__call__"): + self.mappable = False self.space_projection = space_projection else: raise TypeError( diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index 05fb77e3..9ac2b8c8 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -71,7 +71,7 @@ def __init__( device=device) if operator is None: - warnings.warn("No operator provided, using standard classification operator."\ + warnings.warn("No operator provided, using standard classification operator. "\ + "For non-classification tasks, please specify an operator.") operator = target_free_classification_operator diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index d5ed1be2..fe9b50fc 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -302,6 +302,12 @@ class FilterKNN(BaseKNN): `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. 
+ targets_dataset + Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. + Batch size and cardinality of other datasets should match `cases_dataset`. + Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not + the case for your dataset, otherwise, examples will not make sense. k The number of examples to retrieve. search_returns From 3e638a86a2c4ac4429fcba0f32cf25e69bbb8383 Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Mon, 5 Aug 2024 16:42:10 +0200 Subject: [PATCH 091/138] docs: improve documentation with antonin's feedbacks, modify a parameter name such it matches its documentation --- docs/api/example_based/api_example_based.md | 134 ++++-------------- .../label_aware_counter_factuals.md | 17 ++- .../naive_counter_factuals.md | 17 ++- docs/api/example_based/projections.md | 65 +++++++-- .../api/example_based/search_methods/kleor.md | 47 ------ docs/api/example_based/search_methods/knn.md | 68 --------- .../{methods => semifactuals}/kleor.md | 51 ++++++- .../{methods => similar_examples}/cole.md | 16 ++- .../similar_examples.md | 14 +- mkdocs.yml | 21 ++- tests/example_based/test_projections.py | 2 +- .../example_based/projections/attributions.py | 6 +- xplique/example_based/projections/hadamard.py | 4 +- xplique/example_based/similar_examples.py | 2 +- 14 files changed, 192 insertions(+), 272 deletions(-) rename docs/api/example_based/{methods => counterfactuals}/label_aware_counter_factuals.md (68%) rename docs/api/example_based/{methods => counterfactuals}/naive_counter_factuals.md (74%) delete mode 100644 docs/api/example_based/search_methods/kleor.md delete mode 100644 docs/api/example_based/search_methods/knn.md rename docs/api/example_based/{methods => semifactuals}/kleor.md (68%) rename docs/api/example_based/{methods => similar_examples}/cole.md (83%) rename docs/api/example_based/{methods => similar_examples}/similar_examples.md (69%) diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md index 23704ce7..6485b09a 100644 --- a/docs/api/example_based/api_example_based.md +++ b/docs/api/example_based/api_example_based.md @@ -23,6 +23,8 @@ At present, we made the following choices: ## Common API ## ```python +projection = ProjectionMethod(model) + explainer = ExampleMethod( cases_dataset, labels_dataset, @@ -39,21 +41,31 @@ explanations = explainer.explain(inputs, targets) We tried to keep the API as close as possible to the one of the attribution methods to keep a consistent experience for the users. -The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are projected from the input space to a search space using a [projection function](#projections). The projection function defines the search space. Then, examples are selected using a [search method](#search-methods) within the search space. +The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are projected from the input space to a search space using a [projection function](#projections). 
The projection function defines the search space. Then, examples are selected using a [search method](#search-methods) within the search space. For all example-based methods, one can define the `distance` function that will be used by the search method. + +We can broadly categorize example-based methods into four families: similar examples, counter-factuals, semi-factuals, and prototypes. + +- **Similar Examples**: This method involves finding instances in the dataset that are similar to a given instance. The similarity is often determined based on the feature space, and these examples can help in understanding the model's decision by showing what other data points resemble the instance in question. +- **Counter Factuals**: Counterfactual explanations identify the minimal changes needed to an instance's features to change the model's prediction to a different, specified outcome. They help answer "what-if" scenarios by showing how altering certain aspects of the input would lead to a different decision. +- **Semi Factuals**: Semifactual explanations describe hypothetical situations where most features of an instance remain the same except for one or a few features, without changing the overall outcome. They highlight which features could vary without altering the prediction. +- **Prototypes**: Prototypes are representative examples from the dataset that summarize typical cases within a certain category or cluster. They act as archetypal instances that the model uses to make predictions, providing a reference point for understanding model behavior. ??? abstract "Table of example-based methods available" - | Method | Documentation | Family | + | Method | Family | Documentation | | --- | --- | --- | - | `SimilarExamples` | [SimilarExamples](api/example_based/methods/similar_examples) | Similar Examples | - | `Cole` | [Cole](api/example_based/methods/cole) | Similar Examples | - | `ProtoGreedy` | [ProtoGreedy](api/example_based/methods/proto_greedy/) | Prototypes | - | `ProtoDash` | [ProtoDash](api/example_based/methods/proto_dash/) | Prototypes | - | `MMDCritic` | [MMDCritic](api/example_based/methods/mmd_critic/) | Prototypes | - | `NaiveCounterFactuals` | [NaiveCounterFactuals](api/example_based/methods/naive_counter_factuals/) | Counter Factuals | - | `LabelAwareCounterFactuals` | [LabelAwareCounterFactuals](api/example_based/methods/label_aware_counter_factuals/) | Counter Factuals | - | `KLEORSimMiss` | [KLEOR](api/example_based/methods/kleor/) | Semi Factuals | - | `KLEORGlobalSim` | [KLEOR](api/example_based/methods/kleor/) | Semi Factuals | + | `SimilarExamples` | Similar Examples | [SimilarExamples](../similar_examples/similar_examples/) | + | `Cole` | Similar Examples | [Cole](../similar_examples/cole/) | + | | | | + | `NaiveCounterFactuals` | Counter Factuals | [NaiveCounterFactuals](../counterfactuals/naive_counter_factuals/) | + | `LabelAwareCounterFactuals` | Counter Factuals | [LabelAwareCounterFactuals](../counterfactuals/label_aware_counter_factuals/) | + |||| + | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | + | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | + |||| + | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | + | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | + | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | ### Parameters ### @@ -93,110 +105,22 @@ Returns the relevant examples to explain the (inputs, targets). 
Projects inputs Projections are functions that map input samples to a search space where examples are retrieved with a `search_method`. The search space should be relevant for the model (e.g. projecting the inputs into the latent space of the model). !!!info - If one decides to use the identity function as a projection, the search space will be the input space, thus rather explaining the dataset than the model. In this case, it may be more relevant to directly use a `search_method` ([Search Methods](#search-methods)) for the dataset. + If one decides to use the identity function as a projection, the search space will be the input space, thus rather explaining the dataset than the model. -The `Projection` class is an abstract base class for projections. It involves two parts: `space_projection` and `weights`. The samples are first projected to a new space and then weighted. +The `Projection` class is a base class for projections. It involves two parts: `space_projection` and `weights`. The samples are first projected to a new space and then weighted. !!!warning - If both parts are `None`, the projection acts as an identity function. At least one part should involve the model to ensure meaningful distance calculations. - -??? abstract "Table of projection methods available" - - | Method | Documentation | - | --- | --- | - | `Projection` | HERE | - | `LatentSpaceProjection`| [LatentSpaceProjection](api/example_based/projections/latent_space_projection/) | - | `HadamardProjection` | [HadamardProjection](api/example_based/projections/hadamard_projection/) | - | `AttributionProjection` | [AttributionProjection](api/example_based/projections/attribution_projection/) | - -### Parameters ### - -- **get_weights** (`Optional[Union[Callable, tf.Tensor, np.ndarray]]`): Either a Tensor or a callable function. - - **Tensor**: Weights are applied in the projected space. - - **Callable**: A function that takes inputs and targets, returning the weights (Tensor). Weights should match the input shape (possibly differing in channels). - - **Example**: - ```python - def get_weights_example(projected_inputs: Union[tf.Tensor, np.ndarray], - targets: Optional[Union[tf.Tensor, np.ndarray]] = None): - # Compute weights using projected_inputs and targets. - weights = ... # Custom logic involving the model. - return weights - ``` - -- **space_projection** (`Optional[Callable]`): Callable that takes samples and returns a Tensor in the projected space. An example of a projected space is the latent space of a model. -- **device** (`Optional[str]`): Device to use for the projection. If `None`, the default device is used. -- **mappable** (`bool`): If `True`, the projection can be applied to a dataset through `Dataset.map`. Otherwise, the projection is done through a loop. - -### `project(self, inputs, targets=None)` ### + If both parts are `None`, the projection acts as an identity function. In general, we advise that one part should involve the model to ensure meaningful distance calculations with respect to the model. -Projects samples into a space meaningful for the model. This involves weighting the inputs, projecting them into a latent space, or both. This method should be called during initialization and for each explanation. - -- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. Expected shapes include (N, W), (N, T, W), (N, W, H, C). -- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Additional parameter for `self.get_weights` function. 
- -**Returns:** `projected_samples` - The samples projected into the new space. - -!!!info - The `__call__` method is an alias for the `project` method. - -### `project_dataset(self, cases_dataset, targets_dataset=None)` ### - -Applies the projection to a dataset through `Dataset.map`. - -- **cases_dataset** (`tf.data.Dataset`): Dataset of samples to be projected. -- **targets_dataset** (`Optional[tf.data.Dataset]`): Dataset of targets for the samples. - -**Returns:** `projected_dataset` - The projected dataset. +To know more about projections and their importance, you can refer to the [Projections](../../projections/) section. ## Search Methods ## Search methods are used to retrieve examples from the `cases_dataset` that are relevant to the input samples. -!!!info +!!!warning In an search method, the `cases_dataset` is the dataset that has been projected with a `Projection` object (see the previous section). The search methods are used to find examples in this projected space. -The `BaseSearchMethod` class is an abstract base class for example-based search methods. It defines the interface for search methods used to find examples in a dataset. This class should be inherited by specific search methods. - -??? abstract "Table of search methods available" - - | Method | Documentation | - | --- | --- | - | `KNN` | [KNN](api/example_based/search_methods/knn/) | - | `FilterKNN` | [KNN](api/example_based/search_methods/knn/) | - | `ProtoGreedySearch` | [ProtoGreedySearch](api/example_based/search_methods/proto_greedy_search/) | - | `ProtoDashSearch` | [ProtoDashSearch](api/example_based/search_methods/proto_dash_search/) | - | `MMDCriticSearch` | [MMDCriticSearch](api/example_based/search_methods/mmd_critic_search/) | - | `KLEORSimMissSearch` | [KLEOR](api/example_based/search_methods/kleor/) | - | `KLEORGlobalSimSearch` | [KLEOR](api/example_based/search_methods/kleor/) | - - -### Parameters ### - -- **cases_dataset** (`Union[tf.data.Dataset, tf.Tensor, np.ndarray]`): The dataset containing the examples to search in. It should be batched as TensorFlow provides no method to verify this. Ensure the dataset is not reshuffled at each iteration. -- **k** (`int`): The number of examples to retrieve. -- **search_returns** (`Optional[Union[List[str], str]]`): Elements to return in `self.find_examples()`. It should be a subset of `self._returns_possibilities`. -- **batch_size** (`Optional[int]`): Number of samples treated simultaneously. It should match the batch size of the `cases_dataset` if it is a `tf.data.Dataset`. - -### Properties ### - -- **k** (`int`): Getter and setter for the `k` parameter. -- **returns** (`Union[List[str], str]`): Getter and setter for the `returns` parameter. Defines the elements to return in `self.find_examples()`. - -### `find_examples(self, inputs, targets)` ### - -Abstract method to search for samples to return as examples. It should be implemented in subclasses. It may return the indices corresponding to the samples based on `self.returns` value. - -- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. Expected shapes include (N, W), (N, T, W), (N, W, H, C). -- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the samples to be explained. - -**Returns:** `return_dict` - Dictionary containing the elements specified in `self.returns`. - -!!!info - The `__call__` method is an alias for the `find_examples` method. 
- -### `_returns_possibilities` - -Attribute thet list possible elements that can be returned by the search methods. For the base class: `["examples", "distances", "labels", "include_inputs"]`. +Each example-based method has its own search method. The search method is defined in the `search_method_class` property of the `ExampleMethod` class. [^1]: [Natural Example-Based Explainability: a Survey (2023)](https://arxiv.org/abs/2309.03234) \ No newline at end of file diff --git a/docs/api/example_based/methods/label_aware_counter_factuals.md b/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md similarity index 68% rename from docs/api/example_based/methods/label_aware_counter_factuals.md rename to docs/api/example_based/counterfactuals/label_aware_counter_factuals.md index d08f6224..93a20c9b 100644 --- a/docs/api/example_based/methods/label_aware_counter_factuals.md +++ b/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md @@ -11,10 +11,10 @@ !!!note The paper referenced here is not exactly the one we implemented. However, it is probably the closest in essence of what we implemented. -In contrast to the [Naive Counterfactuals](api/example_based/methods/naive_counter_factuals/) approach, the Label Aware Counterfactuals leverage an *a priori* knowledge of the Counterfactuals' (CFs) targets to guide the search for the CFs (*e.g.* one is looking for a CF of the digit 8 in MNIST dataset within the digit 0 instances). +In contrast to the [Naive Counterfactuals](../../counterfactuals/naive_counter_factuals/) approach, the Label Aware CounterFactuals leverage an *a priori* knowledge of the Counterfactuals' (CFs) targets to guide the search for the CFs (*e.g.* one is looking for a CF of the digit 8 in MNIST dataset within the digit 0 instances). !!!warning - Consequently, for this class, when a user call the `explain` method, the user is not expected to provide the targets corresponding to the input samples but rather a one-hot encoding of the targets of the CFs to search for. + Consequently, for this class, when a user call the `explain` method, the user is not expected to provide the targets corresponding to the input samples but rather a one-hot encoding of the label expected for the CFs. !!!info One can use the `Projection` object to compute the distances between the samples (e.g. search for the CF in the latent space of a model). @@ -22,23 +22,26 @@ In contrast to the [Naive Counterfactuals](api/example_based/methods/naive_count ## Example ```python -from xplique.example_based import LabelAwareCounterfactuals +from xplique.example_based import LabelAwareCounterFactuals # load the training dataset cases_dataset = ... # load the training dataset -targets_dataset = ... # load the targets of the training dataset +targets_dataset = ... # load the one-hot encoding of predicted labels of the training dataset +# parameters k = 5 +distance = "euclidean" # instantiate the LabelAwareCounterfactuals object -lacf = LabelAwareCounterfactuals(cases_dataset=cases_dataset, +lacf = LabelAwareCounterFactuals(cases_dataset=cases_dataset, targets_dataset=targets_dataset, k=k, + distance=distance, ) # load the test samples test_samples = ... # load the test samples to search for -test_cf_targets = ... # WARNING: provide the one-hot encoding of the targets of the CFs to search for +test_cf_targets = ... 
# WARNING: provide the one-hot encoding of the expected label of the CFs # search the CFs for the test samples counterfactuals = lacf.explain(test_samples, test_cf_targets) @@ -48,4 +51,4 @@ counterfactuals = lacf.explain(test_samples, test_cf_targets) TODO: Add notebooks -{{xplique.example_based.counterfactuals.LabelAwareCounterfactuals}} \ No newline at end of file +{{xplique.example_based.counterfactuals.LabelAwareCounterFactuals}} \ No newline at end of file diff --git a/docs/api/example_based/methods/naive_counter_factuals.md b/docs/api/example_based/counterfactuals/naive_counter_factuals.md similarity index 74% rename from docs/api/example_based/methods/naive_counter_factuals.md rename to docs/api/example_based/counterfactuals/naive_counter_factuals.md index e81350c0..93d35307 100644 --- a/docs/api/example_based/methods/naive_counter_factuals.md +++ b/docs/api/example_based/counterfactuals/naive_counter_factuals.md @@ -1,4 +1,4 @@ -# Naive Counterfactuals +# Naive CounterFactuals @@ -15,28 +15,31 @@ We define here a "naive" counterfactual method that is based on the Nearest Unli Thus, in this naive approach to counterfactuals, we yield the $k$ nearest training instances that have a different label than the target of the input sample in a greedy fashion. -As it is mentioned in the [API documentation](api/example_based/methods/api_example_based/), by setting a `Projection` object, one can use the projection space to compute the distances between the samples (e.g. search for the CF in the latent space of a model). +As it is mentioned in the [API documentation](../../api_example_based/), by setting a `Projection` object, one will map the inputs to a space where the distance function is meaningful. ## Example ```python -from xplique.example_based import NaiveCounterfactuals +from xplique.example_based import NaiveCounterFactuals # load the training dataset cases_dataset = ... # load the training dataset -targets_dataset = ... # load the targets of the training dataset +targets_dataset = ... # load the one-hot encoding of predicted labels of the training dataset +# parameters k = 5 +distance = "euclidean" # instantiate the NaiveCounterfactuals object -ncf = NaiveCounterfactuals(cases_dataset=cases_dataset, +ncf = NaiveCounterFactuals(cases_dataset=cases_dataset, targets_dataset=targets_dataset, k=k, + distance=distance, ) # load the test samples and targets test_samples = ... # load the test samples to search for -test_targets = ... # load the targets of the test samples +test_targets = ... 
# load the one-hot encoding of the test samples' predictions # search the CFs for the test samples counterfactuals = ncf.explain(test_samples, test_targets) @@ -46,6 +49,6 @@ counterfactuals = ncf.explain(test_samples, test_targets) TODO: Add notebooks -{{xplique.example_based.counterfactuals.NaiveCounterfactuals}} +{{xplique.example_based.counterfactuals.NaiveCounterFactuals}} [^1] [Nearest unlike neighbor (NUN): an aid to decision making](https://www.semanticscholar.org/paper/Nearest-unlike-neighbor-(NUN)%3A-an-aid-to-decision-Dasarathy/48c1a310f655b827e5e7d712c859b25a4e3c0902) \ No newline at end of file diff --git a/docs/api/example_based/projections.md b/docs/api/example_based/projections.md index 2918a573..c0495b20 100644 --- a/docs/api/example_based/projections.md +++ b/docs/api/example_based/projections.md @@ -8,18 +8,67 @@ Consequently, we defined the general `Projection` class that will be used as a b In addition, we provide concrete implementations of the `Projection` class: `LatentSpaceProjection`, `AttributionProjection`, and `HadamardProjection`. -## `Projection` class - {{xplique.example_based.projections.Projection}} -## `LatentSpaceProjection` class +!!!info + The `__call__` method is an alias for the `project` method. -{{xplique.example_based.projections.LatentSpaceProjection}} +## Defining a custom projection -## `AttributionProjection` class +To define a custom projection, one needs to implement the `space_projection` and/or `get_weights` methods. The `space_projection` method should return the projected sample, and the `get_weights` method should return the weights of the features of the projected sample. -{{xplique.example_based.projections.AttributionProjection}} +!!!info + The `get_weights` method should take as input the original sample once it has been projected using the `space_projection` method. + +For the sake of clarity, we provide an example of a custom projection that projects the samples into a latent space (the final convolution block of the ResNet50 model) and weights the features with the gradients of the model's output with respect to the inputs once they have gone through the layers until the final convolutional layer. 
+
+```python
+import tensorflow as tf
+from xplique.attributions import Saliency
+from xplique.example_based.projections import Projection
+
+# load the model
+model = tf.keras.applications.ResNet50(weights="imagenet", include_top=True)
+
+latent_layer = model.get_layer("conv5_block3_out")  # output of the final convolutional block
+features_extractor = tf.keras.Model(
+    model.input, latent_layer.output, name="features_extractor"
+)
+
+# reconstruct the second part of the ResNet50 model
+second_input = tf.keras.Input(shape=latent_layer.output.shape[1:])
+
+x = second_input
+layer_found = False
+for layer in model.layers:
+    if layer_found:
+        x = layer(x)
+    if layer == latent_layer:
+        layer_found = True
 
-## `HadamardProjection` class
+predictor = tf.keras.Model(
+    inputs=second_input,
+    outputs=x,
+    name="predictor"
+)
+
+# build the custom projection
+space_projection = features_extractor
+get_weights = Saliency(predictor)
+
+custom_projection = Projection(space_projection=space_projection, get_weights=get_weights, mappable=False)
+
+# build random samples
+rdm_imgs = tf.random.normal((5, 224, 224, 3))
+rdm_targets = tf.random.uniform(shape=[5], minval=0, maxval=1000, dtype=tf.int32)
+rdm_targets = tf.one_hot(rdm_targets, depth=1000)
+
+# project the samples
+projections = custom_projection(rdm_imgs, rdm_targets)
+```
+
+{{xplique.example_based.projections.LatentSpaceProjection}}
+
+{{xplique.example_based.projections.AttributionProjection}}
 
-{{xplique.example_based.projections.HadamardProjection}}
+{{xplique.example_based.projections.HadamardProjection}}
\ No newline at end of file
diff --git a/docs/api/example_based/search_methods/kleor.md b/docs/api/example_based/search_methods/kleor.md
deleted file mode 100644
index 9ad70ba8..00000000
--- a/docs/api/example_based/search_methods/kleor.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# KLEOR Search Methods
-
-Those search methods are used for the [KLEOR](api/example_based/methods/kleor/) methods.
-
-It encompasses the two following classes:
-- `KLEORSimMissSearch`: looks for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the method searches for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
-- `KLEORGlobalSim`: in addition to the previous method, the SF should be closer to the query than the NUN to be a candidate.
-
-## Examples
-
-```python
-from xplique.example_based.search_methods import KLEORSimMissSearch
-from xplique.example_based.search_methods import KLEORGlobalSim
-
-cases_dataset = ... # load the training dataset
-targets = ... # load the targets of the training dataset
-
-test_samples = ... # load the test samples to search for
-test_targets = ...
# load the targets of the test samples - -# set some parameters -k = 5 -distance = "euclidean" - -# create the KLEORSimMissSearch object -kleor_sim_miss_search = KLEORSimMissSearch(cases_dataset=cases_dataset, - targets_dataset=targets, - k=k, - distance=distance) - -# create the KLEORGlobalSim object -kleor_global_sim = KLEORGlobalSim(cases_dataset=cases_dataset, - targets_dataset=targets, - k=k, - distance=distance) - -# search for the K-Nearest Neighbors of the test samples -sim_miss_neighbors = kleor_sim_miss_search.find_examples(test_samples, test_targets) -global_sim_neighbors = kleor_global_sim.find_examples(test_samples, test_targets) -``` - -## Notebooks - -TODO: add the notebook for KLEOR - -{{xplique.example_based.search_methods.kleor.KLEORSimMissSearch}} -{{xplique.example_based.search_methods.kleor.KLEORGlobalSim}} diff --git a/docs/api/example_based/search_methods/knn.md b/docs/api/example_based/search_methods/knn.md deleted file mode 100644 index 7f0eb423..00000000 --- a/docs/api/example_based/search_methods/knn.md +++ /dev/null @@ -1,68 +0,0 @@ -# K Nearest Neighbors - -KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors` [see the documentation](https://scikit-learn.org/stable/modules/generated/sklearn.neighbors.NearestNeighbors.html). -The kneighbors method is implemented in a batched way to handle large datasets and try to be memory efficient. - -In addition, we also added a `FilterKNN` class that allows to filter the neighbors based on a given criterion avoiding potentially a compute of the distances for all the samples. It is useful when the candidate neighbors are sparse and the distance computation is expensive. - -## Examples - -```python -from xplique.example_based.search_methods import ORDER -from xplique.example_based.search_methods import KNN - -# set some parameters -k = 5 -cases_dataset = ... # load the training dataset -test_samples = ... # load the test samples to search for - -distance = "euclidean" -order = ORDER.ASCENDING - -# create the KNN object -knn = KNN(cases_dataset = cases_dataset - k = k, - distance = distance, - order = order) - -k_nearest_neighbors = knn.kneighbors(test_samples) -``` - -```python -from xplique.example_based.search_methods import ORDER -from xplique.example_based.search_methods import FilterKNN - -# set some parameters -k = 5 -cases_dataset = ... # load the training dataset -targets = ... # load the targets of the training dataset - -test_samples = ... # load the test samples to search for -test_targets = ... 
# load the targets of the test samples
-
-distance = "euclidean"
-order = ORDER.ASCENDING
-
-# define a filter function
-def filter_fn(cases, inputs, targets, cases_targets):
-    # filter the cases that have the same target as the input
-    mask = tf.not_equal(targets, cases_targets)
-    return mask
-
-# create the KNN object
-filter_knn = FilterKNN(cases_dataset=cases_dataset,
-                       targets_dataset=targets,
-                       k=k,
-                       distance=distance,
-                       order=order,
-                       filter_fn=filter_fn)
-
-k_nearest_neighbors = filter_knn.kneighbors(test_samples, test_targets)
-```
-
-## Notebooks
-
-TODO: add all notebooks that use this search method
-
-{{xplique.example_based.search_methods.knn.KNN}}
-{{xplique.example_based.search_methods.knn.FilterKNN}}
\ No newline at end of file
diff --git a/docs/api/example_based/methods/kleor.md b/docs/api/example_based/semifactuals/kleor.md
similarity index 68%
rename from docs/api/example_based/methods/kleor.md
rename to docs/api/example_based/semifactuals/kleor.md
index 187d6923..99ad2486 100644
--- a/docs/api/example_based/methods/kleor.md
+++ b/docs/api/example_based/semifactuals/kleor.md
@@ -33,34 +33,74 @@ We extended to the $k$ nearest neighbors of the NUN for both approaches.
 
 !!!tips
    As KLEOR methods use counterfactuals, they can also return them. Therefore, it is possible to obtain both semi-factuals and counterfactuals with a unique method. To do so, "nuns" and "nuns_labels" should be added to the `cases_returns` list.
 
-## Example
+## Examples
 
 ```python
-from xplique.example_based import KLEORGlobalSim, KLEORSimMiss
+from xplique.example_based import KLEORSimMiss
 
+# loading
 cases_dataset = ... # load the training dataset
-targets = ... # load the targets of the training dataset
+targets = ... # load the one-hot encoding of predicted labels of the training dataset
 
+# parameters
 k = 5
+distance = "euclidean"
+case_returns = ["examples", "nuns"]
 
-# instantiate the KLEOR objects
+# instantiate the KLEOR object
 kleor_sim_miss = KLEORSimMiss(cases_dataset=cases_dataset,
                               targets_dataset=targets,
                               k=k,
+                              distance=distance,
+                              case_returns=case_returns,
                               )
+# load the test samples and targets
+test_samples = ... # load the test samples to search for
+test_targets = ... # load the one-hot encoding of the test samples' predictions
+
+# search the SFs for the test samples
+sim_miss_sf = kleor_sim_miss.explain(test_samples, test_targets)
+
+# get the semi-factuals
+semifactuals = sim_miss_sf["examples"]
+
+# get the counterfactuals
+counterfactuals = sim_miss_sf["nuns"]
+```
+
+```python
+from xplique.example_based import KLEORGlobalSim
+
+# loading
+cases_dataset = ... # load the training dataset
+targets = ... # load the one-hot encoding of predicted labels of the training dataset
+
+# parameters
+k = 5
+distance = "euclidean"
+case_returns = ["examples", "nuns"]
+
+# instantiate the KLEOR object
 kleor_global_sim = KLEORGlobalSim(cases_dataset=cases_dataset,
                                   targets_dataset=targets,
                                   k=k,
+                                  distance=distance,
+                                  case_returns=case_returns,
                                   )
 
 # load the test samples and targets
 test_samples = ... # load the test samples to search for
-test_targets = ... # load the targets of the test samples
+test_targets = ...
# load the one-hot encoding of the test samples' predictions
 
 # search the SFs for the test samples
-sim_miss_sf = kleor_sim_miss.explain(test_samples, test_targets)
 global_sim_sf = kleor_global_sim.explain(test_samples, test_targets)
+
+# get the semi-factuals
+semifactuals = global_sim_sf["examples"]
+
+# get the counterfactuals
+counterfactuals = global_sim_sf["nuns"]
 ```
 
 ## Notebooks
diff --git a/docs/api/example_based/methods/cole.md b/docs/api/example_based/similar_examples/cole.md
similarity index 83%
rename from docs/api/example_based/methods/cole.md
rename to docs/api/example_based/similar_examples/cole.md
index 0ba3916e..004dd7a3 100644
--- a/docs/api/example_based/methods/cole.md
+++ b/docs/api/example_based/similar_examples/cole.md
@@ -15,15 +15,18 @@ COLE for Contributions Oriented Local Explanations was introduced by Kenny & Kea
 
 -- [COLE paper](https://researchrepository.ucd.ie/handle/10197/11064)[^1]
 
-The core idea of the COLE approach is to use [attribution maps](api/attributions/api_attributions/) to define a relevant search space for the K-Nearest Neighbors (KNN) search.
+The core idea of the COLE approach is to use [attribution maps](../../../attributions/api_attributions/) to define a relevant search space for the K-Nearest Neighbors (KNN) search.
 
 More specifically, the COLE approach is based on the following steps:
+
 - (1) Given an input sample $x$, compute the attribution map $A(x)$
+
 - (2) Consider the projection space defined by: $p: x \rightarrow A(x) \odot x$ ($\odot$ denotes the element-wise product)
+
 - (3) Perform a KNN search in the projection space to find the most similar training samples
 
 !!! info
-    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and three attribution methods (LPR, Integrated Gradient, and DeepLift). We decided to implement a COLE method that generalizes to a more broader range of Neural Networks and attribution methods (see [API Attributions documentation](api/attributions/api_attributions/) to see the list of methods available).
+    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and three attribution methods (LRP, Integrated Gradients, and DeepLIFT). We decided to implement a COLE method that generalizes to a broader range of Neural Networks and attribution methods (see [API Attributions documentation](../../../attributions/api_attributions/) to see the list of methods available).
 
 ## Example
 
@@ -34,7 +37,10 @@ from xplique.attributions import Saliency
 model = ... # load the model
 cases_dataset = ... # load the training dataset
 target_dataset = ... # load the target dataset (predicted one-hot encoding of model's predictions)
+
+# parameters
 k = 5
+distance = "euclidean"
 
 # instantiate the Cole object
 cole = Cole(
@@ -44,9 +50,9 @@ cole = Cole(
     attribution_method=Saliency,
 )
 
-# load the test samples
-test_samples = ... # load the test samples
-test_targets = ... # load the test targets
+# load the test samples and targets
+test_samples = ... # load the test samples to search for
+test_targets = ...
# load the one-hot encoding of the test samples' predictions
 
 # search the most similar samples with the COLE method
 similar_samples = cole.explain(test_samples, test_targets)
diff --git a/docs/api/example_based/methods/similar_examples.md b/docs/api/example_based/similar_examples/similar_examples.md
similarity index 69%
rename from docs/api/example_based/methods/similar_examples.md
rename to docs/api/example_based/similar_examples/similar_examples.md
index 1dc9f1b8..be875a5d 100644
--- a/docs/api/example_based/methods/similar_examples.md
+++ b/docs/api/example_based/similar_examples/similar_examples.md
@@ -7,7 +7,7 @@
 
 [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/similar_examples.py)
 
-We designate here as *Similar Examples* all methods that given an input sample, search for the most similar **training** samples given a distance function `distance`. Furthermore, one can define the search space using a `projection` function (see [Projections](api/example_based/projections.md)). This function should map an input sample to the search space where the distance function is defined and meaningful (**e.g.** the latent space of a Convolutional Neural Network).
+We designate here as *Similar Examples* all methods that, given an input sample, search for the most similar **training** samples given a distance function `distance`. Furthermore, one can define the search space using a `projection` function (see [Projections](../../projections/)). This function should map an input sample to the search space where the distance function is defined and meaningful (**e.g.** the latent space of a Convolutional Neural Network).
 
 Then, a K-Nearest Neighbors (KNN) search is performed to find the most similar samples in the search space.
 
 ## Example
 
@@ -16,8 +16,12 @@
 from xplique.example_based import SimilarExamples
 
 cases_dataset = ... # load the training dataset
+targets = ... # load the one-hot encoding of predicted labels of the training dataset
+
+# parameters
 k = 5
 distance = "euclidean"
+case_returns = ["examples"]
 
 # define the projection function
 def custom_projection(inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray] = None):
@@ -32,10 +36,19 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
 # instantiate the SimilarExamples object
 sim_ex = SimilarExamples(
     cases_dataset=cases_dataset,
+    targets_dataset=targets,
     k=k,
     projection=custom_projection,
     distance=distance,
+    case_returns=case_returns,
 )
+
+# load the test samples and targets
+test_samples = ... # load the test samples to search for
+test_targets = ...
# load the one-hot encoding of the test samples' predictions + +# search the most similar samples with the SimilarExamples method +similar_samples = sim_ex.explain(test_samples, test_targets) ``` # Notebooks diff --git a/mkdocs.yml b/mkdocs.yml index cd59b533..ee5082d3 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -44,21 +44,20 @@ nav: - Craft: api/concepts/craft.md - Example based: - API Description: api/example_based/api_example_based.md - - Methods: - - Cole: api/example_based/methods/cole.md - - Kleor: api/example_based/methods/kleor.md - - LabelAwareCounterFactuals: api/example_based/methods/label_aware_counter_factuals.md - - NaiveCounterFactuals: api/example_based/methods/naive_counter_factuals.md - - SimilarExamples: api/example_based/methods/similar_examples.md + - Similar Examples: + - SimilarExamples: api/example_based/similar_examples/similar_examples.md + - Cole: api/example_based/similar_examples/cole.md + - Counterfactuals: + - LabelAwareCounterFactuals: api/example_based/counterfactuals/label_aware_counter_factuals.md + - NaiveCounterFactuals: api/example_based/counterfactuals/naive_counter_factuals.md + - Semifactuals: + - Kleor: api/example_based/semifactuals/kleor.md - Prototypes: - API Description: api/example_based/prototypes/api_prototypes.md - ProtoGreedy: api/example_based/prototypes/proto_greedy.md - ProtoDash: api/example_based/prototypes/proto_dash.md - MMDCritic: api/example_based/prototypes/mmd_critic.md - Projections: api/example_based/projections.md - - Search Methods: - - Kleor: api/example_based/search_methods/kleor.md - - KNN: api/example_based/search_methods/knn.md - Feature visualization: - Modern Feature Visualization (MaCo): api/feature_viz/maco.md - Feature visualization: api/feature_viz/feature_viz.md @@ -106,8 +105,8 @@ markdown_extensions: custom_checkbox: true clickable_checkbox: true - pymdownx.emoji: - emoji_index: !!python/name:materialx.emoji.twemoji - emoji_generator: !!python/name:materialx.emoji.to_svg + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg extra: version: diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py index d7b7fab8..9e84694e 100644 --- a/tests/example_based/test_projections.py +++ b/tests/example_based/test_projections.py @@ -123,7 +123,7 @@ def test_attribution_projection_mapping(): model = _generate_model(input_shape=input_shape, output_shape=nb_labels) - projection = AttributionProjection(model, method=Saliency, latent_layer="last_conv") + projection = AttributionProjection(model, attribution_method=Saliency, latent_layer="last_conv") # Generate tf.data.Dataset from numpy train_dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(3) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index 46074fab..ad8ad878 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -57,11 +57,11 @@ class AttributionProjection(Projection): def __init__( self, model: Callable, - method: BlackBoxExplainer = Saliency, + attribution_method: BlackBoxExplainer = Saliency, latent_layer: Optional[Union[str, int]] = None, **attribution_kwargs ): - self.method = method + self.attribution_method = attribution_method if latent_layer is None: # no split @@ -79,7 +79,7 @@ def __init__( attribution_kwargs["operator"] = target_free_classification_operator # compute attributions - get_weights = 
self.method(self.predictor, **attribution_kwargs) + get_weights = self.attribution_method(self.predictor, **attribution_kwargs) # set methods super().__init__(get_weights, space_projection, mappable=False) diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index 9ac2b8c8..5eca3c65 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -45,13 +45,13 @@ class HadamardProjection(Projection): The method as described in the paper apply the separation on the last convolutional layer. To do so, the `"last_conv"` parameter will extract it. Otherwise, `-1` could be used for the last layer before softmax. - operator # TODO: make a larger description. + operator Operator to use to compute the explanation, if None use standard predictions. device Device to use for the projection, if None, use the default device. Only used for PyTorch models. Ignored for TensorFlow models. """ - + # TODO: make a larger description of the operator arg. def __init__( self, model: Callable, diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index 4c598fd8..1b213288 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -198,7 +198,7 @@ def __init__( # build attribution projection projection = AttributionProjection( model=model, - method=attribution_method, + attribution_method=attribution_method, latent_layer=latent_layer, **attribution_kwargs, ) From 410ed98730142dd498f6666ae42fca7f1eb4b744 Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Mon, 12 Aug 2024 16:11:45 +0200 Subject: [PATCH 092/138] docs: change the wording of the prototypes such that the search methods are abstracted --- .../prototypes/api_prototypes.md | 85 +++++++++++-------- .../example_based/prototypes/mmd_critic.md | 6 +- .../example_based/prototypes/proto_dash.md | 8 +- .../example_based/prototypes/proto_greedy.md | 6 +- 4 files changed, 59 insertions(+), 46 deletions(-) diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md index c02132cd..2dfba112 100644 --- a/docs/api/example_based/prototypes/api_prototypes.md +++ b/docs/api/example_based/prototypes/api_prototypes.md @@ -5,9 +5,43 @@ Prototype-based explanation is a family of natural example-based XAI methods. Pr - [Prototypes for Post-hoc Interpretability](#prototypes-for-post-hoc-interpretability) - Prototype-Based Models Interpretable by Design -This library focuses on first two classes. +For now, the library focuses on the first two classes. + +## Common API ## + +```python + +explainer = Method(cases_dataset, labels_dataset, targets_dataset, k, + projection, case_returns, batch_size, distance, + nb_prototypes, kernel_type, + kernel_fn, gamma) +# compute global explanation +global_prototypes = explainer.get_global_prototypes() +# compute local explanation +local_prototypes = explainer(inputs) + +``` + +??? 
abstract "Table of methods available"

    The following Data-Centric prototypes methods are implemented:

    | Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* |
    |:-------------------------------------- | :----------------------: | :---------------: | :---------------------: |
    | [ProtoGreedy](../proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
    | [ProtoDash](../proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
    | [MMDCritic](../mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |

    *: Before using a PyTorch model it is highly recommended to read the [dedicated documentation](../pytorch/)

+!!!info
+    Using the identity projection, one is looking for the **dataset prototypes**. In contrast, using the latent space of a model as a projection, one is looking for **prototypes relevant for the model**.

+!!!info
+    Prototypes share a common API with other example-based methods. Thus, to understand some parameters, we recommend reading the [dedicated documentation](../../api_example_based/).

## Prototypes for Data-Centric Interpretability
+
In this class, prototypes are selected without relying on the model and provide an overview of the dataset. As mentioned in ([Poché et al., 2023](https://hal.science/hal-04117520/document)), we find in this class: **clustering methods** and **data summarization methods**, also known as **set cover methods**. This library focuses on **data summarization methods** which can be treated in two ways [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf):
@@ -19,17 +53,22 @@ consists in finding a low-cost subset of prototypes $\mathcal{P}$ under the con
For both cases, submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to have a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf).

-This library implements three methods from **Data summarization with knapsack constraint**: `MMDCritic`, `ProtoGreedy` and `ProtoDash`.
+The library implements three methods from **Data summarization with knapsack constraint**: `MMDCritic`, `ProtoGreedy` and `ProtoDash`.
+
[Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf) proposed the `MMDCritic` method that uses a set function based on the Maximum Mean Discrepancy [(MMD)](#what-is-mmd). They solved the **data summarization with knapsack constraint** problem to find both prototypes and criticisms. First, the numbers of prototypes and criticisms to be found, respectively $m_p$ and $m_c$, are selected. Second, to find prototypes, a greedy algorithm is used to maximize $F(\mathcal{P})$ s.t.
$|\mathcal{P}| \le m_p$ where $F(\mathcal{P})$ is defined as:
+
\begin{equation}
F(\mathcal{P})=\frac{2}{|\mathcal{P}|\cdot n}\sum_{i,j=1}^{|\mathcal{P}|,n}\kappa(p_i,x_j)-\frac{1}{|\mathcal{P}|^2}\sum_{i,j=1}^{|\mathcal{P}|}\kappa(p_i,p_j)
\end{equation}
+
They used diagonal dominance conditions on the kernel to ensure monotonicity and submodularity of $F(\mathcal{P})$. To find criticisms $\mathcal{C}$, the same greedy algorithm is used to select points that maximize another objective function $J(\mathcal{C})$.

[Gurumoorthy et al., 2019](https://arxiv.org/pdf/1707.01212) associated non-negative weights to prototypes which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective $F(\mathcal{P},w)$ defined as:
+
\begin{equation}
-F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
+    F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
\end{equation}
+
where $w$ are non-negative weights for each prototype. The problem then consists in finding $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} J(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$. They established the weak submodular property of $J(\mathcal{P})$ and presented tractable algorithms (`ProtoGreedy` and `ProtoDash`) to optimize it.

### Method comparison
@@ -41,6 +80,7 @@
- The approximation guarantee for `ProtoGreedy` is $(1-e^{-\gamma})$, where $\gamma$ is the submodularity ratio of $F(\mathcal{P})$, compared to $(1-e^{-1})$ for `MMDCritic`.

### What is MMD?
+
The commonality among these three methods is their utilization of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes. MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows:

$$
@@ -70,46 +110,19 @@ The choice of the kernel for selecting prototypes depends on the specific proble
If we consider any exponential kernel (Gaussian kernel, Laplace, ...), we automatically consider all the moments for the distribution, as the Taylor expansion of the exponential considers infinite-order moments. It is better to use a non-linear kernel to capture non-linear relationships in your data. If the problem is linear, it is better to choose a linear kernel such as the dot product kernel, since it is computationally efficient and often requires fewer hyperparameters to tune.

!!!warning
-    For `MMDCritic`, the kernel must satisfy a condition ensuring the submodularity of the set function (the Gaussian kernel respects this constraint). In contrast, for `Protodash` and `Protogreedy`, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity.
+    For `MMDCritic`, the kernel must satisfy a condition ensuring the submodularity of the set function (the Gaussian kernel respects this constraint). In contrast, for `ProtoDash` and `ProtoGreedy`, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity.
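
To make the kernel's role concrete, here is a minimal sketch (illustrative only, not the library's implementation; the function names and the `gamma` bandwidth parameter are ours) of the biased empirical $MMD^2$ estimate between a dataset and a candidate prototype set, using a Gaussian kernel:

```python
import tensorflow as tf

def gaussian_kernel(a: tf.Tensor, b: tf.Tensor, gamma: float = 1.0) -> tf.Tensor:
    # pairwise squared euclidean distances between rows of `a` and rows of `b`
    sq_dists = tf.reduce_sum((a[:, None, :] - b[None, :, :]) ** 2, axis=-1)
    return tf.exp(-gamma * sq_dists)

def mmd_squared(x: tf.Tensor, prototypes: tf.Tensor, gamma: float = 1.0) -> tf.Tensor:
    # biased empirical estimate: E[k(x, x')] - 2 E[k(x, p)] + E[k(p, p')]
    k_xx = tf.reduce_mean(gaussian_kernel(x, x, gamma))
    k_xp = tf.reduce_mean(gaussian_kernel(x, prototypes, gamma))
    k_pp = tf.reduce_mean(gaussian_kernel(prototypes, prototypes, gamma))
    return k_xx - 2.0 * k_xp + k_pp

# toy usage: 100 samples in a 3-d feature space, 5 candidate prototypes taken from the data
x = tf.random.normal((100, 3))
prototypes = x[:5]
print(float(mmd_squared(x, prototypes)))
```

A good prototype set is precisely one that keeps this quantity small while respecting the size budget.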
### Default kernel

The default kernel used is the Gaussian kernel. This kernel distance assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data exhibits complex, non-linear structure. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned.

-### API Implementation
-
-The Data-Centric prototypes methods are implemented as [search methods](../../search_methods/):
-
-| Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* |
-|:-------------------------------------- | :----------------------: | :---------------: | :---------------------: |
-| [ProtoGreedySearch](../proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
-| [ProtoDashSearch](../proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
-| [MMDCriticSearch](../mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
+### Implementation details

-*: Before using a PyTorch model it is highly recommended to read the [dedicated documentation](../pytorch/)
+The search method for `ProtoGreedy` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.

-The class `ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.
+The search methods for both the `MMDCritic` and `ProtoDash` classes inherit from the one defined for `ProtoGreedy`. The search method for `MMDCritic` differs from `ProtoGreedy` by assigning equal weights to the selection of prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedy`'s search method, for each new candidate, we calculate the best weights for the selection of prototypes. However, in `MMDCritic`, the `compute_objective` method assigns the same weight to all elements in the selection.

-Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from the `ProtoGreedySearch` class. The class `MMDCriticSearch` differs from `ProtoGreedySearch` by assigning equal weights to the selection of prototypes. The two classes use the same greedy algorithm. In the `compute_objective` method of `ProtoGreedySearch`, for each new candidate, we calculate the best weights for the selection of prototypes. However, in `MMDCriticSearch`, the `compute_objective` method assigns the same weight to all elements in the selection.
-
-The class `ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is [different](#method-comparison) from the one used by `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides both the `compute_objective` method and the `update_selection` method.
+`ProtoDash`, like `ProtoGreedy`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDash` is [different](#method-comparison) from the one used by `ProtoGreedy`. Therefore, the search method of `ProtoDash` overrides both the `compute_objective` method and the `update_selection` method.
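
To illustrate the greedy scheme these classes share, here is a simplified sketch with equal weights, as in `MMDCritic` (it does not reproduce the library's actual `compute_objective` / `update_selection` code; `ProtoGreedy` and `ProtoDash` would instead re-optimize, or lower-bound, the weights for each candidate):

```python
import numpy as np

def greedy_prototype_selection(kernel_matrix: np.ndarray, nb_prototypes: int) -> list:
    # `kernel_matrix` is the (n, n) Gram matrix kappa(x_i, x_j) of the dataset
    n = kernel_matrix.shape[0]
    selected = []
    for _ in range(nb_prototypes):
        best_score, best_idx = -np.inf, None
        for candidate in range(n):
            if candidate in selected:
                continue
            p = selected + [candidate]
            # F(P) = 2/(|P| n) sum_ij kappa(p_i, x_j) - 1/|P|^2 sum_ij kappa(p_i, p_j)
            coverage = 2.0 * kernel_matrix[p, :].mean()
            redundancy = kernel_matrix[np.ix_(p, p)].mean()
            score = coverage - redundancy
            if score > best_score:
                best_score, best_idx = score, candidate
        selected.append(best_idx)
    return selected
```

The outer loop is shared by the three methods; only the per-candidate scoring (the `compute_objective` step) differs.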
## Prototypes for Post-hoc Interpretability

-Data-Centric methods such as `Protogreedy`, `ProtoDash` and `MMDCritic` can be used in either the output or the latent space of the model. In these cases, [projections methods](../../projections/) are used to transfer the data from the input space to the latent/output spaces.
-
-The search method can have attribute `projection` that projects samples to a space where distances between samples make sense for the model. Then the `search_method` finds the prototypes by looking in the projected space.
-
-## Common API ##
-
-```python
-
-explainer = Method(cases_dataset, labels_dataset, targets_dataset, k,
-                   projection, case_returns, batch_size, distance,
-                   nb_prototypes, kernel_type,
-                   kernel_fn, gamma)
-# compute global explanation
-global_prototypes = explainer.get_global_prototypes()
-# compute local explanation
-local_prototypes = explainer(inputs)
-
-```
+Data-Centric methods such as `ProtoGreedy`, `ProtoDash` and `MMDCritic` can be used in either the output or the latent space of the model. In these cases, [projection methods](../../projections/) are used to transfer the data from the input space to the latent/output spaces.
diff --git a/docs/api/example_based/prototypes/mmd_critic.md b/docs/api/example_based/prototypes/mmd_critic.md
index 8743fdbc..e9f436a1 100644
--- a/docs/api/example_based/prototypes/mmd_critic.md
+++ b/docs/api/example_based/prototypes/mmd_critic.md
@@ -1,4 +1,4 @@
-# MMDCriticSearch
+# MMDCritic

@@ -8,7 +8,7 @@

[View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) | 📰 [Paper](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf)

-`MMDCriticSearch` finds prototypes and criticisms by maximizing two separate objectives based on the Maximum Mean Discrepancy (MMD).
+`MMDCritic` finds prototypes and criticisms by maximizing two separate objectives based on the Maximum Mean Discrepancy (MMD).

!!! quote
    MMD-critic uses the MMD statistic as a measure of similarity between points and potential prototypes, and
@@ -52,7 +52,7 @@ local_prototypes = explainer(inputs)

- [**MMDCritic**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X)

-{{xplique.example_based.search_methods.MMDCriticSearch}}
+{{xplique.example_based.prototypes.MMDCritic}}

[^1]: [Visual Explanations from Deep Networks via Gradient-based Localization (2016).](https://arxiv.org/abs/1610.02391)

diff --git a/docs/api/example_based/prototypes/proto_dash.md b/docs/api/example_based/prototypes/proto_dash.md
index d694504d..3684dcf2 100644
--- a/docs/api/example_based/prototypes/proto_dash.md
+++ b/docs/api/example_based/prototypes/proto_dash.md
@@ -1,4 +1,4 @@
-# ProtoDashSearch
+# ProtoDash

@@ -8,7 +8,7 @@

[View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) | 📰 [Paper](https://arxiv.org/abs/1707.01212)

-`ProtoDahsSearch` associated non-negative weights to prototypes which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximmizing the same weighted objective function.
+`ProtoDash` associates non-negative weights to prototypes which are indicative of their importance.
This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective function.

!!! quote
    Our work notably generalizes the recent work
@@ -28,7 +28,7 @@ F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\su
\end{equation}
where $w$ are non-negative weights for each prototype. The problem then consists in finding a subset $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} J(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$.

-[Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) proposed `ProtoDash` algorithm, which is much faster that `ProtoGreedy` without compromising on the quality of the solution. In fact, `ProtoGreedy` selects the next element that maximizes the increment of the scoring function, whereas `ProtoDash` selects the next element that maximizes a tight lower bound on the increment of the scoring function.
+[Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) proposed the `ProtoDash` algorithm, which is much faster than [`ProtoGreedy`](../proto_greedy/) without compromising on the quality of the solution. In fact, `ProtoGreedy` selects the next element that maximizes the increment of the scoring function, whereas `ProtoDash` selects the next element that maximizes a tight lower bound on the increment of the scoring function.

## Example

@@ -55,6 +55,6 @@ local_prototypes = explainer(inputs)

- [**ProtoDash**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X)

-{{xplique.example_based.search_methods.ProtoDashSearch}}
+{{xplique.example_based.prototypes.ProtoDash}}

[^1]: [Visual Explanations from Deep Networks via Gradient-based Localization (2016).](https://arxiv.org/abs/1610.02391)
diff --git a/docs/api/example_based/prototypes/proto_greedy.md b/docs/api/example_based/prototypes/proto_greedy.md
index 57644ef3..35900522 100644
--- a/docs/api/example_based/prototypes/proto_greedy.md
+++ b/docs/api/example_based/prototypes/proto_greedy.md
@@ -1,4 +1,4 @@
-# ProtoGreedySearch
+# ProtoGreedy

@@ -8,7 +8,7 @@

[View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) | 📰 [Paper](https://arxiv.org/abs/1707.01212)

-`ProtoGreedySearch` associated non-negative weights to prototypes which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximmizing the same weighted objective function.
+`ProtoGreedy` associates non-negative weights to prototypes which are indicative of their importance. This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective function.

!!!
quote Our work notably generalizes the recent work @@ -57,6 +57,6 @@ local_prototypes = explainer(inputs) - [**ProtoGreedy**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) -{{xplique.example_based.search_methods.ProtoGreedySearch}} +{{xplique.example_based.prototypes.ProtoGreedy}} [^1]: [Visual Explanations from Deep Networks via Gradient-based Localization (2016).](https://arxiv.org/abs/1610.02391) From fbef589cc2e599366c5585f0f0b2b3ce844516ed Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Mon, 12 Aug 2024 16:18:44 +0200 Subject: [PATCH 093/138] docs: update the main example-based doc page considering antonin's comments and modifications for prototypes --- docs/api/example_based/api_example_based.md | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md index 6485b09a..6412e674 100644 --- a/docs/api/example_based/api_example_based.md +++ b/docs/api/example_based/api_example_based.md @@ -15,7 +15,7 @@ While not being exhaustive we tried to cover a range of methods that are represe At present, we made the following choices: - Focus on methods that are natural example methods (post-hoc and non-generative, see the paper above for more details). -- Try to unify the three families of approaches with a common API. +- Try to unify the four families of approaches with a common API. !!! info We are in the early stages of development and are looking for feedback on the API design and the methods we have chosen to implement. Also, we are counting on the community to furnish the collection of methods available. If you are willing to contribute reach us on the [GitHub](https://github.com/deel-ai/xplique) repository (with an issue, pull request, ...). @@ -41,14 +41,14 @@ explanations = explainer.explain(inputs, targets) We tried to keep the API as close as possible to the one of the attribution methods to keep a consistent experience for the users. -The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are projected from the input space to a search space using a [projection function](#projections). The projection function defines the search space. Then, examples are selected using a [search method](#search-methods) within the search space. For all example-based methods, one can define the `distance` function that will be used by the search method. +The `BaseExampleMethod` is an abstract base class designed for example-based methods used to explain classification models. It provides examples from a dataset (usually the training dataset) to help understand a model's predictions. Examples are projected from the input space to a search space using a [projection function](#projections). The projection function defines the search space. Then, examples are selected using a [search method](#search-methods) within the search space. For all example-based methods, one can define the `distance` that will be used by the search method. We can broadly categorize example-based methods into four families: similar examples, counter-factuals, semi-factuals, and prototypes. - **Similar Examples**: This method involves finding instances in the dataset that are similar to a given instance. 
The similarity is often determined based on the feature space, and these examples can help in understanding the model's decision by showing what other data points resemble the instance in question. - **Counter Factuals**: Counterfactual explanations identify the minimal changes needed to an instance's features to change the model's prediction to a different, specified outcome. They help answer "what-if" scenarios by showing how altering certain aspects of the input would lead to a different decision. - **Semi Factuals**: Semifactual explanations describe hypothetical situations where most features of an instance remain the same except for one or a few features, without changing the overall outcome. They highlight which features could vary without altering the prediction. -- **Prototypes**: Prototypes are representative examples from the dataset that summarize typical cases within a certain category or cluster. They act as archetypal instances that the model uses to make predictions, providing a reference point for understanding model behavior. +- **Prototypes**: Prototypes are representative examples from the dataset that summarize typical cases within a certain category or cluster. They act as archetypal instances that the model uses to make predictions, providing a reference point for understanding model behavior. Additional documentation can be found in the [Prototypes API documentation](../prototypes/api_prototypes/). ??? abstract "Table of example-based methods available" @@ -93,8 +93,8 @@ We can broadly categorize example-based methods into four families: similar exam Returns the relevant examples to explain the (inputs, targets). Projects inputs using `self.projection` and finds examples using the `self.search_method`. -- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. -- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the `cases_dataset` for dataset projection. +- **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. Shape: (n, ...) where n is the number of samples. +- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the `cases_dataset` for dataset projection. Shape: (n, nb_classes) where n is the number of samples and nb_classes is the number of classes. **Returns:** Dictionary with elements listed in `self.returns`. 
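
As a rough usage sketch (assuming an `explainer` built as in the snippets above, with `case_returns=["examples", "distances"]`; the shapes shown are illustrative):

```python
import numpy as np
import tensorflow as tf

inputs = np.random.rand(3, 32, 32, 3).astype(np.float32)  # (n, ...)
targets = tf.one_hot([0, 1, 1], depth=2)                   # (n, nb_classes)

outputs = explainer.explain(inputs, targets)
examples = outputs["examples"]    # (n, k, ...), or (n, k + 1, ...) if "include_inputs" was requested
distances = outputs["distances"]  # distances between each input and its examples
```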
From 4c2dbc07772a293dc82ed2d610780ea5ad350690 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Tue, 13 Aug 2024 14:16:01 +0200 Subject: [PATCH 094/138] projections tests: adapt to commons evolution --- tests/example_based/test_projections.py | 44 +++++++++++++++++++++---- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py index 9e84694e..22d083f4 100644 --- a/tests/example_based/test_projections.py +++ b/tests/example_based/test_projections.py @@ -11,7 +11,7 @@ ) from xplique.attributions import Saliency -from xplique.example_based.projections import Projection, AttributionProjection, LatentSpaceProjection +from xplique.example_based.projections import Projection, AttributionProjection, LatentSpaceProjection, HadamardProjection from xplique.example_based.projections.commons import model_splitting @@ -81,13 +81,18 @@ def test_simple_projection_mapping(): space_projection = lambda x, y=None: tf.nn.max_pool2d(x, ksize=3, strides=1, padding="SAME") - projection = Projection(get_weights=weights, space_projection=space_projection) + projection = Projection(get_weights=weights, space_projection=space_projection, mappable=True) # Generate tf.data.Dataset from numpy - train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(3) + train_dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(3) + targets_dataset = tf.data.Dataset.from_tensor_slices(y_train).batch(3) # Apply the projection by mapping the dataset - projected_train_dataset = projection.project_dataset(train_dataset) + projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset) + + # Apply the projection by iterating over the dataset + projection.mappable = False + projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset) def test_latent_space_projection_mapping(): @@ -105,10 +110,37 @@ def test_latent_space_projection_mapping(): projection = LatentSpaceProjection(model, "last_conv") # Generate tf.data.Dataset from numpy - train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(3) + train_dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(3) + targets_dataset = tf.data.Dataset.from_tensor_slices(y_train).batch(3) + + # Apply the projection by mapping the dataset + projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset) + projected_train_dataset = projection._map_project_dataset(train_dataset, targets_dataset) + projected_train_dataset = projection._loop_project_dataset(train_dataset, targets_dataset) + + +def test_hadamard_projection_mapping(): + """ + Test if the hadamard projection can be mapped. 
+    """
+    # Setup
+    input_shape = (7, 7, 3)
+    nb_samples = 10
+    nb_labels = 2
+    x_train, _, y_train = get_setup(input_shape, nb_samples=nb_samples, nb_labels=nb_labels)
+
+    model = _generate_model(input_shape=input_shape, output_shape=nb_labels)
+
+    projection = HadamardProjection(model, "last_conv")
+
+    # Generate tf.data.Dataset from numpy
+    train_dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(3)
+    targets_dataset = tf.data.Dataset.from_tensor_slices(y_train).batch(3)
 
     # Apply the projection by mapping the dataset
-    projected_train_dataset = projection.project_dataset(train_dataset)
+    projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset)
+    projected_train_dataset = projection._map_project_dataset(train_dataset, targets_dataset)
+    projected_train_dataset = projection._loop_project_dataset(train_dataset, targets_dataset)
 
 def test_attribution_projection_mapping():
From e7f9d4a96d46235c3490ed975d74270b0f1d24e4 Mon Sep 17 00:00:00 2001
From: Antonin POCHE
Date: Tue, 13 Aug 2024 17:31:16 +0200
Subject: [PATCH 095/138] projections tests: add simple splitting test

---
 tests/example_based/test_projections.py | 26 ++++++++++++++++++++++++-
 1 file changed, 25 insertions(+), 1 deletion(-)

diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py
index 22d083f4..ec303d05 100644
--- a/tests/example_based/test_projections.py
+++ b/tests/example_based/test_projections.py
@@ -3,6 +3,7 @@
 from tensorflow.keras.layers import (
     Dense,
     Conv2D,
+    Conv1D,
     Activation,
     Dropout,
     Flatten,
@@ -14,6 +15,8 @@
 from xplique.example_based.projections import Projection, AttributionProjection, LatentSpaceProjection, HadamardProjection
 from xplique.example_based.projections.commons import model_splitting
 
+from ..utils import almost_equal
+
 
 def get_setup(input_shape, nb_samples=10, nb_labels=2):
     """
@@ -95,6 +98,27 @@ def test_simple_projection_mapping():
     projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset)
 
 
+def test_model_splitting():
+    """
+    Test if projected samples have the expected values
@@ -162,4 +186,4 @@ def test_attribution_projection_mapping(): targets_dataset = tf.data.Dataset.from_tensor_slices(y_train).batch(3) # Apply the projection by mapping the dataset - projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset) \ No newline at end of file + projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset) From 85fc97d3d0de4d0f848b21eeca9119df7aa16889 Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Wed, 14 Aug 2024 10:36:07 +0200 Subject: [PATCH 096/138] fix: correction on the labelawarecf method for computation of the mask, add a missing case returns possibility for semi factuals --- xplique/example_based/counterfactuals.py | 6 +++--- xplique/example_based/semifactuals.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/xplique/example_based/counterfactuals.py b/xplique/example_based/counterfactuals.py index 360fdcda..6f0dac19 100644 --- a/xplique/example_based/counterfactuals.py +++ b/xplique/example_based/counterfactuals.py @@ -250,9 +250,9 @@ def filter_fn(self, _, __, cf_expected_classes, cases_targets) -> tf.Tensor: cases_targets The one-hot encoding of the target class for the cases. """ - mask = tf.matmul(cf_expected_classes, cases_targets, transpose_b=True) #(n, bs) - # TODO: I think some retracing are done here - mask = tf.cast(mask, dtype=tf.bool) + cases_predicted_labels = tf.argmax(cases_targets, axis=-1) + cf_label_targets = tf.argmax(cf_expected_classes, axis=-1) + mask = tf.equal(tf.expand_dims(cf_label_targets, axis=1), cases_predicted_labels) return mask @sanitize_inputs_targets diff --git a/xplique/example_based/semifactuals.py b/xplique/example_based/semifactuals.py index b912eb1e..572d508d 100644 --- a/xplique/example_based/semifactuals.py +++ b/xplique/example_based/semifactuals.py @@ -80,7 +80,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar by default "euclidean". """ _returns_possibilities = [ - "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns" + "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns", "nuns_labels" ] def __init__( From 1bb8a1631ea1e765f62179c58457a638dea7c7f8 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Aug 2024 10:48:26 +0200 Subject: [PATCH 097/138] projections: add warning to tensorflow splitting --- xplique/example_based/projections/commons.py | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index 45110310..ea859690 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -86,6 +86,13 @@ def _tf_model_splitting(model: tf.keras.Model, latent_layer Layer used to split the `model`. """ + + warnings.warn( + "Automatically splitting the provided TensorFlow model into two parts. "\ + +"This splitting is not robust to all models. "\ + +"It is recommended to split the model manually. "\ + +"Then the splitted parts can be provided through the `from_splitted_model` method.") + if latent_layer == "last_conv": latent_layer = next( layer for layer in model.layers[::-1] if hasattr(layer, "filters") @@ -153,9 +160,12 @@ def _torch_model_splitting(model: 'torch.nn.Module', import torch.nn as nn from ...wrappers import TorchWrapper - warnings.warn("Automatically splitting the provided PyTorch model into two parts. 
"\ - +"This splitting is based on `model.named_children()`. "\ - +"If the model cannot be reconstructed via sub-modules, errors are to be expected.") + warnings.warn( + "Automatically splitting the provided PyTorch model into two parts. "\ + +"This splitting is based on `model.named_children()`. "\ + +"If the model cannot be reconstructed via sub-modules, errors are to be expected. "\ + +"It is recommended to split the model manually and wrap it with `TorchWrapper`. "\ + +"Then the wrapped parts can be provided through the `from_splitted_model` method.") if device is None: warnings.warn("No device provided for the projection, using 'cuda' if available, else 'cpu'.") From ac74a9d4365ce6b7199655b9f61ae6a854362d0c Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Aug 2024 10:48:56 +0200 Subject: [PATCH 098/138] commons: linting --- xplique/commons/tf_dataset_operations.py | 103 ++--------------------- 1 file changed, 8 insertions(+), 95 deletions(-) diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py index 783b7e57..a08d2902 100644 --- a/xplique/commons/tf_dataset_operations.py +++ b/xplique/commons/tf_dataset_operations.py @@ -150,103 +150,13 @@ def sanitize_dataset( assert dataset_cardinality == cardinality, ( "The number of batch should match between datasets. " + f"Received {dataset.cardinality().numpy()} vs {cardinality}. " - + "You may have provided non-batched datasets or datasets with different length." + + "You may have provided non-batched datasets "\ + + "or datasets with different lengths." ) return dataset -# def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: -# """ -# Imitation of `tf.gather` for `tf.data.Dataset`, -# it extract elements from `dataset` at the given indices. -# We could see it as returning the `indices` tensor -# where each index was replaced by the corresponding element in `dataset`. -# The aim is to use it in the `example_based` module to extract examples form the cases dataset. -# Hence, `indices` expect dimensions of (n, k, 2), -# where n represent the number of inputs and k the number of corresponding examples. -# Here indices for each element are encoded by two values, -# the batch index and the index of the element in the batch. - -# Example of application -# ``` -# >>> dataset = tf.data.Dataset.from_tensor_slices( -# ... tf.reshape(tf.range(20), (-1, 2, 2)) -# ... ).batch(3) # shape=(None, 2, 2) -# >>> indices = tf.constant([[[0, 0]], [[1, 0]]]) # shape=(2, 1, 2) -# >>> dataset_gather(dataset, indices) -# -# ``` - -# Parameters -# ---------- -# dataset -# Tensorflow dataset to verify or tensor to transform in `tf.data.Dataset` and verify. -# indices -# Tensor of indices of elements to extract from the `dataset`. -# `indices` should be of dimensions (n, k, 2), -# this is to match the format of indices in the `example_based` module. -# Indeed, n represent the number of inputs and k the number of corresponding examples. -# The index of each element is encoded by two values, -# the batch index and the index of the element in the batch. - -# Returns -# ------- -# results -# A tensor with the extracted elements from the `dataset`. -# The shape of the tensor is (n, k, ...), where ... is the shape of the elements in the `dataset`. 
-# """ -# if dataset is None: -# return None - -# if len(indices.shape) != 3 or indices.shape[-1] != 2: -# raise ValueError( -# "Indices should have dimensions (n, k, 2), " -# + "where n represent the number of inputs and k the number of corresponding examples. " -# + "The index of each element is encoded by two values, " -# + "the batch index and the index of the element in the batch. " -# + f"Received {indices.shape}." -# ) - -# example = next(iter(dataset)) -# # (n, bs, ...) -# with tf.device('/CPU:0'): -# if dataset.element_spec.dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']: -# results = tf.Variable( -# tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(-1, dtype=dataset.element_spec.dtype)), -# ) -# else: -# results = tf.Variable( -# tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)), -# ) - -# nb_results = product(indices.shape[:-1]) -# current_nb_results = 0 - -# for i, batch in enumerate(dataset): -# # check if the batch is interesting -# if not tf.reduce_any(indices[..., 0] == i): -# continue - -# # extract pertinent elements -# pertinent_indices_location = tf.where(indices[..., 0] == i) -# samples_index = tf.gather_nd(indices[..., 1], pertinent_indices_location) -# samples = tf.gather(batch, samples_index) - -# # put them at the right place in results -# for location, sample in zip(pertinent_indices_location, samples): -# results[location[0], location[1]].assign(sample) -# current_nb_results += 1 - -# # test if results are filled to break the loop -# if current_nb_results == nb_results: -# break -# return results - def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: """ Imitation of `tf.gather` for `tf.data.Dataset`, @@ -289,7 +199,8 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: ------- results A tensor with the extracted elements from the `dataset`. - The shape of the tensor is (n, k, ...), where ... is the shape of the elements in the `dataset`. + The shape of the tensor is (n, k, ...), + where ... is the shape of the elements in the `dataset`. 
""" if dataset is None: return None @@ -306,9 +217,11 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: example = next(iter(dataset)) if dataset.element_spec.dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']: - results = tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(-1, dtype=dataset.element_spec.dtype)) + results = tf.fill(dims=indices.shape[:-1] + example[0].shape, + value=tf.constant(-1, dtype=dataset.element_spec.dtype)) else: - results = tf.fill(indices.shape[:-1] + example[0].shape, tf.constant(np.inf, dtype=dataset.element_spec.dtype)) + results = tf.fill(dims=indices.shape[:-1] + example[0].shape, + value=tf.constant(np.inf, dtype=dataset.element_spec.dtype)) nb_results = product(indices.shape[:-1]) current_nb_results = 0 From d709f0675ed37f3fab9cc1f19d956eb55a66fa5a Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Aug 2024 15:15:29 +0200 Subject: [PATCH 099/138] requirements: limit to tensorflow < 2.16 --- requirements.txt | 2 +- setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/requirements.txt b/requirements.txt index da889c05..b7985ca3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -tensorflow>=2.1.0 +tensorflow >= 2.1.0, < 2.16 numpy scikit-learn scikit-image diff --git a/setup.py b/setup.py index b5dc14d7..96f0aebf 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ author="Thomas FEL", author_email="thomas_fel@brown.edu", license="MIT", - install_requires=['tensorflow>=2.1.0', 'numpy', 'scikit-learn', 'scikit-image', + install_requires=['tensorflow>=2.1.0,<2.16', 'numpy', 'scikit-learn', 'scikit-image', 'matplotlib', 'scipy', 'opencv-python', 'deprecated'], extras_require={ "tests": ["pytest", "pylint"], From cedf74481aa666d8fcecabe346063160728c0675 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Wed, 14 Aug 2024 15:15:49 +0200 Subject: [PATCH 100/138] linting --- xplique/commons/tf_dataset_operations.py | 12 ++-- xplique/example_based/base_example_method.py | 31 ++++++---- xplique/example_based/counterfactuals.py | 61 +++++++++++-------- xplique/example_based/prototypes.py | 14 +++-- .../example_based/search_methods/common.py | 6 +- .../search_methods/proto_greedy_search.py | 10 +-- 6 files changed, 75 insertions(+), 59 deletions(-) diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/commons/tf_dataset_operations.py index a08d2902..20933bfa 100644 --- a/xplique/commons/tf_dataset_operations.py +++ b/xplique/commons/tf_dataset_operations.py @@ -204,18 +204,18 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: """ if dataset is None: return None - + if len(indices.shape) != 3 or indices.shape[-1] != 2: raise ValueError( - "Indices should have dimensions (n, k, 2), " - + "where n represent the number of inputs and k the number of corresponding examples. " - + "The index of each element is encoded by two values, " - + "the batch index and the index of the element in the batch. " + "Indices should have dimensions (n, k, 2), "\ + + "where n represent the number of inputs and k the number of corresponding examples. "\ + + "The index of each element is encoded by two values, "\ + + "the batch index and the index of the element in the batch. "\ + f"Received {indices.shape}." 
) example = next(iter(dataset)) - + if dataset.element_spec.dtype in ['uint8', 'int8', 'int16', 'int32', 'int64']: results = tf.fill(dims=indices.shape[:-1] + example[0].shape, value=tf.constant(-1, dtype=dataset.element_spec.dtype)) diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 7e6e19e9..c03c1665 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -22,9 +22,10 @@ class BaseExampleMethod(ABC): """ Base class for natural example-based methods explaining classification models. - An example-based method is a method that explains a model's predictions by providing examples from the cases_dataset - (usually the training dataset). The examples are selected with the help of a search method that performs a search in - the search space. The search space is defined with the help of a projection function that projects the cases_dataset + An example-based method is a method that explains a model's predictions by providing + examples from the cases_dataset (usually the training dataset). The examples are selected with + the help of a search method that performs a search in the search space. The search space is + defined with the help of a projection function that projects the cases_dataset and the (inputs, targets) to explain into a space where the search method is relevant. Parameters @@ -41,8 +42,8 @@ class BaseExampleMethod(ABC): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's - predictions. See `projection` for detail. + Targets associated to the cases_dataset for dataset projection, + oftentimes the one-hot encoding of a model's predictions. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -52,8 +53,7 @@ class BaseExampleMethod(ABC): projection Projection or Callable that project samples from the input space to the search space. The search space should be a space where distances are relevant for the model. - It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in - searching the input space, you should use a `BaseSearchMethod` instead. + It should not be `None`, otherwise, the model is not involved thus not explained. Example of Callable: ``` @@ -73,6 +73,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). 
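
    To make the wiring concrete, a minimal sketch with `SimilarExamples` (one of
    the concrete subclasses of this module). Variable names are hypothetical, the
    projection shown simply flattens the inputs, and the exact subclass signature
    may add parameters on top of those documented above:
    ```python
    import tensorflow as tf
    from xplique.example_based import SimilarExamples

    # cases / targets: batched tf.data.Dataset of training inputs and of the
    # one-hot model predictions on them
    explainer = SimilarExamples(
        cases_dataset=cases,
        targets_dataset=targets,
        k=3,
        projection=lambda inputs, targets=None: tf.reshape(inputs, (tf.shape(inputs)[0], -1)),
        case_returns=["examples", "distances"],
    )

    outputs = explainer.explain(test_inputs, test_targets)
    examples, distances = outputs["examples"], outputs["distances"]
    ```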
""" + # pylint: disable=too-many-instance-attributes _returns_possibilities = ["examples", "distances", "labels", "include_inputs"] def __init__( @@ -104,8 +105,7 @@ def __init__( self.projection = Projection(get_weights=None, space_projection=projection) else: raise AttributeError( - "projection should be a `Projection` or a `Callable`, not a" - + f"{type(projection)}" + f"projection should be a `Projection` or a `Callable`, not a {type(projection)}" ) # project dataset @@ -115,7 +115,10 @@ def __init__( # set properties self.k = k self.returns = case_returns - + + # temporary value for the search method + self.search_method = None + @property @abstractmethod def search_method_class(self) -> Type[BaseSearchMethod]: @@ -257,7 +260,7 @@ def _initialize_cases_dataset( self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) else: raise AttributeError( - "`cases_dataset` cannot possess more than 3 columns, " + "`cases_dataset` cannot possess more than 3 columns, "\ + f"{len(self.cases_dataset.element_spec)} were detected." ) @@ -295,7 +298,8 @@ def explain( ------- return_dict Dictionary with listed elements in `self.returns`. - The elements that can be returned are defined with _returns_possibilities static attribute of the class. + The elements that can be returned are defined with the `_returns_possibilities` + static attribute of the class. """ # project inputs into the search space projected_inputs = self.projection(inputs, targets) @@ -337,7 +341,8 @@ def format_search_output( ------- return_dict Dictionary with listed elements in `self.returns`. - The elements that can be returned are defined with _returns_possibilities static attribute of the class. + The elements that can be returned are defined with the `_returns_possibilities` + static attribute of the class. """ # initialize return dictionary return_dict = {} diff --git a/xplique/example_based/counterfactuals.py b/xplique/example_based/counterfactuals.py index 6f0dac19..8b486dbf 100644 --- a/xplique/example_based/counterfactuals.py +++ b/xplique/example_based/counterfactuals.py @@ -16,8 +16,8 @@ class NaiveCounterFactuals(BaseExampleMethod): """ - This class allows to search for counterfactuals by searching for the closest sample to a query in a projection space - that do not have the same model's prediction. + This class allows to search for counterfactuals by searching for the closest sample to + a query in a projection space that do not have the same model's prediction. It is a naive approach as it follows a greedy approach. Parameters @@ -28,7 +28,8 @@ class NaiveCounterFactuals(BaseExampleMethod): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + Targets are expected to be the one-hot encoding of + the model's predictions for the samples in cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -44,8 +45,7 @@ class NaiveCounterFactuals(BaseExampleMethod): projection Projection or Callable that project samples from the input space to the search space. The search space should be a space where distances are relevant for the model. 
- It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in - searching the input space, you should use a `BaseSearchMethod` instead. + It should not be `None`, otherwise, the model is not involved thus not explained. Example of Callable: ``` @@ -109,16 +109,17 @@ def __init__( @property def search_method_class(self): """ - This property defines the search method class to use for the search. In this case, it is the FilterKNN that - is an efficient KNN search method ignoring non-acceptable cases, thus not considering them in the search. + This property defines the search method class to use for the search. + In this case, it is the FilterKNN that is an efficient KNN search method + ignoring non-acceptable cases, thus not considering them in the search. """ return FilterKNN def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: """ - Filter function to mask the cases for which the model's prediction is different from the model's prediction - on the inputs. + Filter function to mask the cases for which the model's prediction + is different from the model's prediction on the inputs. """ # get the labels predicted by the model # (n, ) @@ -133,8 +134,8 @@ def filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: class LabelAwareCounterFactuals(BaseExampleMethod): """ - This method will search the counterfactuals of a query within an expected class. This class should be provided with - the query when calling the explain method. + This method will search the counterfactuals of a query within an expected class. + This class should be provided with the query when calling the explain method. Parameters ---------- @@ -144,7 +145,8 @@ class LabelAwareCounterFactuals(BaseExampleMethod): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + Targets are expected to be the one-hot encoding of the model's predictions + for the samples in cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -160,8 +162,7 @@ class LabelAwareCounterFactuals(BaseExampleMethod): projection Projection or Callable that project samples from the input space to the search space. The search space should be a space where distances are relevant for the model. - It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in - searching the input space, you should use a `BaseSearchMethod` instead. + It should not be `None`, otherwise, the model is not involved thus not explained. 
Example of Callable: ``` @@ -207,11 +208,13 @@ def __init__( batch_size=batch_size, ) - # raise a warning to specify that target in the explain method is not the same as the target used for - # the target dataset - warnings.warn("If your projection method requires the target, be aware that when using the explain method," - " the target provided is the class within one should search for the counterfactual.\nThus," - " it is possible that the projection of the query is going wrong.") + # raise a warning to specify that target in the explain method is not the same + # as the target used for the target dataset + warnings.warn( + "If your projection method requires the target, "\ + + "be aware that when using the explain method,"\ + + "the target provided is the class within one should search for the counterfactual."\ + + "\nThus, it is possible that the projection of the query is going wrong.") # set distance function and order for the search method self.distance = distance @@ -228,20 +231,21 @@ def __init__( filter_fn=self.filter_fn, order=self.order ) - + @property def search_method_class(self): """ - This property defines the search method class to use for the search. In this case, it is the FilterKNN that - is an efficient KNN search method ignoring non-acceptable cases, thus not considering them in the search. + This property defines the search method class to use for the search. + In this case, it is the FilterKNN that is an efficient KNN search method ignoring + non-acceptable cases, thus not considering them in the search. """ return FilterKNN def filter_fn(self, _, __, cf_expected_classes, cases_targets) -> tf.Tensor: """ - Filter function to mask the cases for which the target is different from the target(s) expected for the - counterfactuals. + Filter function to mask the cases for which the target is different from + the target(s) expected for the counterfactuals. Parameters ---------- @@ -263,8 +267,10 @@ def explain( ): """ Return the relevant CF examples to explain the inputs. - The CF examples are searched within cases for which the target is the one provided in `cf_targets`. - It projects inputs with `self.projection` in the search space and find examples with the `self.search_method`. + The CF examples are searched within cases + for which the target is the one provided in `cf_targets`. + It projects inputs with `self.projection` in the search space and + find examples with the `self.search_method`. Parameters ---------- @@ -279,7 +285,8 @@ def explain( ------- return_dict Dictionary with listed elements in `self.returns`. - The elements that can be returned are defined with _returns_possibilities static attribute of the class. + The elements that can be returned are defined with the `_returns_possibilities` + static attribute of the class. 
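
        A short usage sketch, mirroring the documentation page added later in this
        series (`cases` and `targets` are hypothetical names for the batched cases
        dataset and the one-hot model predictions on it; `test_cf_targets` encodes
        the classes within which the counterfactuals are searched):
        ```python
        from xplique.example_based import LabelAwareCounterFactuals

        lacf = LabelAwareCounterFactuals(
            cases_dataset=cases,
            targets_dataset=targets,
            k=1,
            distance="euclidean",
        )
        counterfactuals = lacf.explain(test_samples, test_cf_targets)
        ```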
""" # project inputs into the search space projected_inputs = self.projection(inputs) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index c1857b48..ac1c9e7a 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -11,7 +11,7 @@ from ..commons.tf_dataset_operations import dataset_gather -from .search_methods import BaseSearchMethod, ProtoGreedySearch, MMDCriticSearch, ProtoDashSearch +from .search_methods import ProtoGreedySearch, MMDCriticSearch, ProtoDashSearch from .projections import Projection from .base_example_method import BaseExampleMethod @@ -82,6 +82,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar gamma : float, optional Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ + # pylint: disable=too-many-arguments def __init__( self, @@ -97,8 +98,8 @@ def __init__( kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None - ): - # set common example-based parameters + ): + # set common example-based parameters super().__init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, @@ -129,12 +130,12 @@ def __init__( kernel_fn=self.kernel_fn, gamma=self.gamma ) - + @property @abstractmethod def search_method_class(self) -> Type[ProtoGreedySearch]: raise NotImplementedError - + def get_global_prototypes(self) -> Dict[str, tf.Tensor]: """ Provide the global prototypes computed at the initialization. @@ -179,18 +180,21 @@ def get_global_prototypes(self) -> Dict[str, tf.Tensor]: class ProtoGreedy(Prototypes): + # pylint: disable=missing-class-docstring @property def search_method_class(self) -> Type[ProtoGreedySearch]: return ProtoGreedySearch class MMDCritic(Prototypes): + # pylint: disable=missing-class-docstring @property def search_method_class(self) -> Type[ProtoGreedySearch]: return MMDCriticSearch class ProtoDash(Prototypes): + # pylint: disable=missing-class-docstring @property def search_method_class(self) -> Type[ProtoGreedySearch]: return ProtoDashSearch diff --git a/xplique/example_based/search_methods/common.py b/xplique/example_based/search_methods/common.py index 0f3af3d4..3daa89ee 100644 --- a/xplique/example_based/search_methods/common.py +++ b/xplique/example_based/search_methods/common.py @@ -137,8 +137,8 @@ def get_distance_function(distance: Union[int, str, Callable] = "euclidean",) -> return lambda x1, x2: _chebyshev_distance(x1, x2) else: raise AttributeError( - "The distance parameter is expected to be either a Callable, " - + f" an integer, 'inf', or a string in {_distances.keys()}. " - +f"But a {type(distance)} was received, with value {distance}." + "The distance parameter is expected to be either a Callable, "\ + + f" an integer, 'inf', or a string in {_distances.keys()}. "\ + + f"But a {type(distance)} was received, with value {distance}." 
) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index c1e46862..0625e8fe 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -96,9 +96,9 @@ def __init__( if kernel_type not in ['local', 'global']: raise AttributeError( - "The kernel_type parameter is expected to be in" - + " ['local', 'global'] ", - +f"but {kernel_type} was received.", + "The kernel_type parameter is expected to be in"\ + + " ['local', 'global'] "\ + +f"but {kernel_type} was received."\ ) self.kernel_type = kernel_type @@ -109,8 +109,8 @@ def __init__( kernel_fn = lambda x, y: rbf_kernel(x,y,gamma) elif not hasattr(kernel_fn, "__call__"): raise AttributeError( - "The kernel_fn parameter is expected to be a Callable", - +f"but {kernel_fn} was received.", + "The kernel_fn parameter is expected to be a Callable"\ + +f"but {kernel_fn} was received."\ ) # define custom kernel function depending on the kernel type From 0316d559df0702c0180382c48d1f49f1788cae88 Mon Sep 17 00:00:00 2001 From: lucas Hervier Date: Wed, 14 Aug 2024 17:39:10 +0200 Subject: [PATCH 101/138] docs: update the README, add warning concerning the tensorflow version, add the notebook link and update the tutorials tables --- README.md | 35 +++++++++++++++++++ TUTORIALS.md | 9 +++++ docs/api/example_based/api_example_based.md | 25 ++++++------- .../label_aware_counter_factuals.md | 4 +-- .../counterfactuals/naive_counter_factuals.md | 4 +-- docs/api/example_based/semifactuals/kleor.md | 4 +-- .../example_based/similar_examples/cole.md | 4 +-- .../similar_examples/similar_examples.md | 4 +-- docs/index.md | 30 ++++++++++++++++ docs/tutorials.md | 9 +++++ 10 files changed, 106 insertions(+), 22 deletions(-) diff --git a/README.md b/README.md index 56fc5746..b4d4b719 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,9 @@ + + + PyLint @@ -41,8 +44,13 @@ Feature Visualization · Metrics + . + Example-based

+> [!IMPORTANT] +> With the release of Keras 3.X since TensorFlow 2.16, some methods may not function as expected. We are actively working on a fix. In the meantime, we recommend using TensorFlow 2.15 or earlier versions for optimal compatibility. + The library is composed of several modules, the _Attributions Methods_ module implements various methods (e.g Saliency, Grad-CAM, Integrated-Gradients...), with explanations, examples and links to official papers. The _Feature Visualization_ module allows to see how neural networks build their understanding of images by finding inputs that maximize neurons, channels, layers or compositions of these elements. The _Concepts_ module allows you to extract human concepts from a model and to test their usefulness with respect to a class. @@ -54,6 +62,9 @@ Finally, the _Metrics_ module covers the current metrics used in explainability.
+> [!NOTE]
+> We are proud to announce the release of the _Example-based_ module! This module is dedicated to methods that explain a model by retrieving relevant examples from a dataset. It includes methods that belong to different families: similar examples, contrastive (counter-factuals and semi-factuals) examples, and prototypes (as concept-based methods have a dedicated section).
+
 ## 🔥 Tutorials
@@ -110,6 +121,8 @@ Finally, the _Metrics_ module covers the current metrics used in explainability.

+- [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) + You can find a certain number of [**other practical tutorials just here**](https://github.com/deel-ai/xplique/blob/master/TUTORIALS.md). This section is actively developed and more contents will be included. We will try to cover all the possible usage of the library, feel free to contact us if you have any suggestions or recommendations towards tutorials you would like to see. @@ -361,6 +374,28 @@ TF : Tensorflow compatible
+Even though we are only at the early stages, we have also recently added an [Example-based methods](api/example_based/api_example_based/) module. Do not hesitate to give us feedback! Currently, the methods available are summarized in the following table: + +
+Table of example-based methods available + +| Method | Family | Documentation | Tutorial | +| --- | --- | --- | --- | +| `SimilarExamples` | Similar Examples | [SimilarExamples](../similar_examples/similar_examples/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| `Cole` | Similar Examples | [Cole](../similar_examples/cole/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| | | | +| `NaiveCounterFactuals` | Counter Factuals | [NaiveCounterFactuals](../counterfactuals/naive_counter_factuals/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| `LabelAwareCounterFactuals` | Counter Factuals | [LabelAwareCounterFactuals](../counterfactuals/label_aware_counter_factuals/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +|||| +| `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +|||| +| `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | **TODO** | +| `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | **TODO** | +| `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | **TODO** | + +
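
As a teaser, a minimal sketch of the new API (hypothetical variable names: `cases` and `targets` stand for a batched `tf.data.Dataset` of training samples and the one-hot model predictions on them):

```python
from xplique.example_based import NaiveCounterFactuals

ncf = NaiveCounterFactuals(
    cases_dataset=cases,      # dataset the model was trained on
    targets_dataset=targets,  # one-hot predictions of the model on `cases`
    k=1,
    distance="euclidean",
)
counterfactuals = ncf.explain(test_samples, test_targets)
```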
+ ## 👍 Contributing Feel free to propose your ideas or come and contribute with us on the Xplique toolbox! We have a specific document where we describe in a simple way how to make your first pull request: [just here](https://github.com/deel-ai/xplique/blob/master/CONTRIBUTING.md). diff --git a/TUTORIALS.md b/TUTORIALS.md index 759964ec..e3681cd6 100644 --- a/TUTORIALS.md +++ b/TUTORIALS.md @@ -20,6 +20,8 @@ Here is the lists of the available tutorial for now: | Metrics | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1WEpVpFSq-oL1Ejugr8Ojb3tcbqXIOPBg) | | Concept Activation Vectors | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1iuEz46ZjgG97vTBH8p-vod3y14UETvVE) | | Feature Visualization | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1st43K9AH-UL4eZM1S4QdyrOi7Epa5K8v) | +| Example-Based Methods | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| Prototypes | **TODO** | ## Attributions @@ -74,3 +76,10 @@ Here is the lists of the available tutorial for now: | :------------------------------------- | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | | Feature Visualization: Getting started | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1st43K9AH-UL4eZM1S4QdyrOi7Epa5K8v) | | Modern Feature Visualization: MaCo | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1l0kag1o-qMY4NCbWuAwnuzkzd9sf92ic) | + +## Example-Based Methods + +| **Tutorial Name** | Notebook | +| :------------------------------------- | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | +| Example-Based Methods: Getting started | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| Prototypes: Getting started | **TODO** | \ No newline at end of file diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md index 6412e674..fd45c8f0 100644 --- a/docs/api/example_based/api_example_based.md +++ b/docs/api/example_based/api_example_based.md @@ -1,6 +1,7 @@ # API: Example-based -- [**Example-based Methods**: Getting started]() **WIP** +- [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) +- [**TODO: Add the Getting Started on Prototypes**]() ## Context ## @@ -52,20 +53,20 @@ We can broadly categorize example-based methods into four families: similar exam ??? 
abstract "Table of example-based methods available" - | Method | Family | Documentation | - | --- | --- | --- | - | `SimilarExamples` | Similar Examples | [SimilarExamples](../similar_examples/similar_examples/) | - | `Cole` | Similar Examples | [Cole](../similar_examples/cole/) | + | Method | Family | Documentation | Tutorial | + | --- | --- | --- | --- | + | `SimilarExamples` | Similar Examples | [SimilarExamples](../similar_examples/similar_examples/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | `Cole` | Similar Examples | [Cole](../similar_examples/cole/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | | | | | - | `NaiveCounterFactuals` | Counter Factuals | [NaiveCounterFactuals](../counterfactuals/naive_counter_factuals/) | - | `LabelAwareCounterFactuals` | Counter Factuals | [LabelAwareCounterFactuals](../counterfactuals/label_aware_counter_factuals/) | + | `NaiveCounterFactuals` | Counter Factuals | [NaiveCounterFactuals](../counterfactuals/naive_counter_factuals/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | `LabelAwareCounterFactuals` | Counter Factuals | [LabelAwareCounterFactuals](../counterfactuals/label_aware_counter_factuals/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | |||| - | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | - | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | + | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | |||| - | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | - | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | - | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | + | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | **TODO** | + | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | **TODO** | + | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | **TODO** | ### Parameters ### diff --git a/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md b/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md index 93a20c9b..2701410c 100644 --- a/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md +++ b/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md @@ -2,7 +2,7 @@ - [View colab tutorial]()**WIP** | + [View colab tutorial](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/counterfactuals.py) | @@ -49,6 +49,6 @@ counterfactuals = lacf.explain(test_samples, test_cf_targets) ## Notebooks -TODO: Add notebooks +- [**Example-based Methods**: Getting 
started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) {{xplique.example_based.counterfactuals.LabelAwareCounterFactuals}} \ No newline at end of file diff --git a/docs/api/example_based/counterfactuals/naive_counter_factuals.md b/docs/api/example_based/counterfactuals/naive_counter_factuals.md index 93d35307..1982ea8e 100644 --- a/docs/api/example_based/counterfactuals/naive_counter_factuals.md +++ b/docs/api/example_based/counterfactuals/naive_counter_factuals.md @@ -2,7 +2,7 @@ - [View colab tutorial]()**WIP** | + [View colab tutorial](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/counterfactuals.py) | @@ -47,7 +47,7 @@ counterfactuals = ncf.explain(test_samples, test_targets) ## Notebooks -TODO: Add notebooks +- [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) {{xplique.example_based.counterfactuals.NaiveCounterFactuals}} diff --git a/docs/api/example_based/semifactuals/kleor.md b/docs/api/example_based/semifactuals/kleor.md index 99ad2486..f8aa571c 100644 --- a/docs/api/example_based/semifactuals/kleor.md +++ b/docs/api/example_based/semifactuals/kleor.md @@ -2,7 +2,7 @@ - [View colab tutorial]()**WIP** | + [View colab tutorial](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/semifactuals.py) | @@ -104,7 +104,7 @@ counterfactuals = global_sim_sf["nuns"] ## Notebooks -TODO: Add the notebook +- [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) {{xplique.example_based.semifactuals.KLEORSimMiss}} {{xplique.example_based.semifactuals.KLEORGlobalSim}} \ No newline at end of file diff --git a/docs/api/example_based/similar_examples/cole.md b/docs/api/example_based/similar_examples/cole.md index 004dd7a3..8f717ae5 100644 --- a/docs/api/example_based/similar_examples/cole.md +++ b/docs/api/example_based/similar_examples/cole.md @@ -2,7 +2,7 @@ - [View colab tutorial]()**WIP** | + [View colab tutorial](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/similar_examples.py) | @@ -60,7 +60,7 @@ similar_samples = cole.explain(test_samples, test_targets) ## Notebooks -TODO: Add the notebook +- [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) {{xplique.example_based.similar_examples.Cole}} diff --git a/docs/api/example_based/similar_examples/similar_examples.md b/docs/api/example_based/similar_examples/similar_examples.md index be875a5d..a36eadc4 100644 --- a/docs/api/example_based/similar_examples/similar_examples.md +++ b/docs/api/example_based/similar_examples/similar_examples.md @@ -2,7 +2,7 @@ - [View colab tutorial]()**WIP** | + [View colab tutorial](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | [View source](https://github.com/deel-ai/xplique/blob/master/xplique/example_based/similar_examples.py) @@ -52,6 +52,6 @@ similar_samples = sim_ex.explain(test_samples, test_targets) # Notebooks -TODO: Add the notebook +- [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) {{xplique.example_based.similar_examples.SimilarExamples}} \ No 
newline at end of file diff --git a/docs/index.md b/docs/index.md index 55320f23..eb6062ca 100644 --- a/docs/index.md +++ b/docs/index.md @@ -7,6 +7,9 @@ + + + PyLint @@ -41,8 +44,13 @@ Feature Visualization · Metrics + . + Example-based +!!! warning + With the release of Keras 3.X since TensorFlow 2.16, some methods may not function as expected. We are actively working on a fix. In the meantime, we recommend using TensorFlow 2.15 or earlier versions for optimal compatibility. + The library is composed of several modules, the _Attributions Methods_ module implements various methods (e.g Saliency, Grad-CAM, Integrated-Gradients...), with explanations, examples and links to official papers. The _Feature Visualization_ module allows to see how neural networks build their understanding of images by finding inputs that maximize neurons, channels, layers or compositions of these elements. The _Concepts_ module allows you to extract human concepts from a model and to test their usefulness with respect to a class. @@ -54,6 +62,9 @@ Finally, the _Metrics_ module covers the current metrics used in explainability.
+!!! info "🔔 **New Module Available!**"
+    We are proud to announce the release of the _Example-based_ module! This module is dedicated to methods that explain a model by retrieving relevant examples from a dataset. It includes methods that belong to different families: similar examples, contrastive (counter-factuals and semi-factuals) examples, and prototypes (as concept-based methods have a dedicated section).
+
 ## 🔥 Tutorials
 
 ??? example "We propose some Hands-on tutorials to get familiar with the library and its api"

- [**Modern Feature Visualization with MaCo**: Getting started](https://colab.research.google.com/drive/1l0kag1o-qMY4NCbWuAwnuzkzd9sf92ic) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1l0kag1o-qMY4NCbWuAwnuzkzd9sf92ic) + - [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) You can find a certain number of [**other practical tutorials just here**](tutorials/). This section is actively developed and more contents will be included. We will try to cover all the possible usage of the library, feel free to contact us if you have any suggestions or recommendations towards tutorials you would like to see. @@ -333,6 +345,24 @@ There are 4 modules in Xplique, [Attribution methods](api/attributions/api_attri TF : Tensorflow compatible +Even though we are only at the early stages, we have also recently added an [Example-based methods](api/example_based/api_example_based/) module. Do not hesitate to give us feedback! Currently, the methods available are summarized in the following table: + +??? abstract "Table of example-based methods available" + + | Method | Family | Documentation | Tutorial | + | --- | --- | --- | --- | + | `SimilarExamples` | Similar Examples | [SimilarExamples](../similar_examples/similar_examples/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | `Cole` | Similar Examples | [Cole](../similar_examples/cole/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | | | | + | `NaiveCounterFactuals` | Counter Factuals | [NaiveCounterFactuals](../counterfactuals/naive_counter_factuals/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | `LabelAwareCounterFactuals` | Counter Factuals | [LabelAwareCounterFactuals](../counterfactuals/label_aware_counter_factuals/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + |||| + | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | + |||| + | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | **TODO** | + | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | **TODO** | + | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | **TODO** | ## 👍 Contributing diff --git a/docs/tutorials.md b/docs/tutorials.md index 38957e89..0e3e9429 100644 --- a/docs/tutorials.md +++ b/docs/tutorials.md @@ -20,6 +20,8 @@ Here is the lists of the availables tutorial for now: | Metrics | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1WEpVpFSq-oL1Ejugr8Ojb3tcbqXIOPBg) | | Concept Activation Vectors | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1iuEz46ZjgG97vTBH8p-vod3y14UETvVE) | | Feature Visualization | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1st43K9AH-UL4eZM1S4QdyrOi7Epa5K8v) | +| Example-Based Methods | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| Prototypes | **TODO** | ## Attributions @@ -79,3 +81,10 @@ Here is the lists of the availables tutorial for now: | :------------------------------------- | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | | Feature Visualization: Getting started | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1st43K9AH-UL4eZM1S4QdyrOi7Epa5K8v) | | Modern Feature Visualization: MaCo | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1l0kag1o-qMY4NCbWuAwnuzkzd9sf92ic) | + +## Example-Based Methods + +| **Tutorial Name** | Notebook | +| :------------------------------------- | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | +| Example-Based Methods: Getting started | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | +| Prototypes: Getting started | **TODO** | \ No newline at end of file From 02b248a47782cc5337c380eaac347a8097cf2ae2 Mon Sep 17 00:00:00 2001 From: Antonin POCHE Date: Mon, 19 Aug 2024 16:56:51 +0200 Subject: [PATCH 102/138] linting --- tests/FreeMono.ttf | Bin 0 -> 592752 bytes tests/example_based/test_image_plot.py | 3 - xplique/concepts/craft_torch.py | 1 + xplique/example_based/base_example_method.py | 7 +- xplique/example_based/counterfactuals.py | 5 + .../example_based/projections/attributions.py | 6 +- xplique/example_based/projections/base.py | 23 +- xplique/example_based/projections/commons.py | 204 ++++++------- xplique/example_based/projections/hadamard.py | 19 +- .../example_based/projections/latent_space.py | 8 +- xplique/example_based/prototypes.py | 1 + xplique/example_based/search_methods/base.py | 19 +- .../example_based/search_methods/common.py | 22 +- xplique/example_based/search_methods/kleor.py | 100 +++++-- xplique/example_based/search_methods/knn.py | 88 +++--- .../search_methods/mmd_critic_search.py | 27 +- .../search_methods/proto_dash_search.py | 78 +++-- .../search_methods/proto_greedy_search.py | 271 ++++++++++++------ xplique/example_based/semifactuals.py | 81 +++--- xplique/example_based/similar_examples.py | 30 +- xplique/plots/image.py | 16 +- 21 files changed, 614 insertions(+), 395 deletions(-) create mode 100644 tests/FreeMono.ttf diff --git a/tests/FreeMono.ttf b/tests/FreeMono.ttf new file mode 100644 index 0000000000000000000000000000000000000000..f88bcef9c138ae61473f852d10803c195601d51e GIT binary patch literal 592752 zcmeF)4_Kvh{`mjb{kPLhGZPbHGM$vBnaPL^u_0t8gb*4+Lx^n%A%xg5GC~Mp5JGHr 
z4?+mBAvUyx5JEF^tdKQq)Xe!lU#D~X^yxC}etzHUcm1yG_i=q5`+nc=Kd<-ee&6@G z ?CVu;A*{EsdE#fu9MUb>_8fJ9F(q%~nl?*5D8-gipG_x_-W*|DVH&?BP1EVx8` zYvV+=-)qScNAA1r${EjyZ(y}Zbk(6p?6~t^b~@>y*=J|J>f5%Etxb=f%;GFNtByfh~x;{WUSs})}JnT`R)^DWq)tpWd&)MzYXs8b2+{)lx03+`Dyk+ zcQ^dMmOqq5UPa^UHaurO6KM~3>i@(Zpe?b78G}+KEfOmx$EIwlQ{8Dqa>O!=jCx8j zS22G_snaraobie5WSEX)&a$IT(=ly(NEU@&luJXBQlCa7S#rpcescGRLK8WVTX%^V zp-_nEcbmUsdB50Zk)&xRl%5glH=r12;b@$OVKPajyb;=-hbtrHXOT8-SA{5n&eO6s zQrAAM>-ejoF@A?buo5~x8Vj*^q|~~1ybE+O{Vj_ z>o7Hj*EczTt?T+;&U)@TZfdD(dF%gDsd03Sp1+pf@o$wn_SfjSXbi8^z1H*nt#Vt& zR6}z;7hWEVsq1uL^DOO3Dh_NGdW zsdJ{5X_5ASw~S-FzV0+XkFI+Trk1DC#<_;pQl?MTWl^N`u92P#*EsYddB_4Uj(4CkWzpAB!XPN9AnG}dO3GLO>R z6Mc@-7y6n~<9X*79d@&u^8wA6^Wv^zI++o6OrkZw;^1bJO$Edwp`w-rQK)bsv@P_0)AV$I0zpzqg)O zmNHJS!_;&2@<^d?^8G&b+`Vg<$2g5y0o_ksTaWc#^{&^{9K1Dtt<*l<1C6ieo(Mf> z%}IIt;g$Q*rt5j~soywh+`-f{q4&1tr}34Zhu+gyM(TR3bzN)ve+fNHUC(V-Xij>+ zc<1fqr17V-X?Bs}aPFyeEA^$+g$_ zwa^H4{frPxUGKR#T!v`qYkRLrb$y-cb3~s#XF>aQ+;r$OXMbp)(!NumuSY#zgfz^? zV(5Hr-y3=_YFvGm={#-IXV0Fbhq+&}W$DtMqy5 zLZ3}KW(Vk8eJyLL`>4+yjidAZk-Co6I66k@yxlP!dfr~U&eMI<^U;|4OiGK?58!;> zqHbd==v*DIIqGMe8PIu$p&VyJpCcMi?+v}~`$Xuz7a=optn;)_ufLw-DbSoWe=Rk~ z66pCUT~qf!*Euj!*Lb>?(!6wi?bCJj+|{|zYnK2Wqj~7Mx`ytT-nZHwi#gEq(sjIb zg}MVDQlb4?R!7XH)c6`t&q?Raom6Tp<@I~(g-c#G^7@ z`=t5mIZiD#H{EYNUp@B}=y^^lL+fbMTy$>_|F1Ii9rewj`>5B#EA@Qz^`rUdIeBH; zL_Pc&Ro6Zcdab6Ep;qpnHxMbq_p_I)mz&;`dL1X{r04F%nod0v`dZMv)ttOi&qdE$ z_eXQy8Tpe+Z!K?MG{+O*?eFALN-ZBVS2swIk^BDHy(Pve&{?s7tL4uH5aWffeFn|V{8EL72(e@*HixhPvJV~KInDR z*t%|dq<%4_=A-wsmg)fL>q_HiLEE&xFZB7jD~`pb(01)V4!VYStnt;U(0<*A3!r08 zgI-t7S;u^a$1t1kqs`(i_tuEU$T9DuGTHaEPoEg0|7o~U>y57pj@#~Z?;B7G8a387Dl|Idc5Or1qWJ<7dEUy>O}Zk0SLqD2MSR-kzY>N(T7+R7d%8#5m=r zXu_4m{uFD_1br53o||c1#^jws8zQ0yZ z-p{G^D~R!1yuJ6zIOceDuk^0R=yQD<^t|*~pA`o}V@0D7I##cluAdMo_oCF? 
zw0&RbT+L1A=-g!JGuw-|AX49*(tA(Sb#y;w!iC1z272BaN9(#~8gyMR7tK}AITc=> z2UGt&G{!N|`8sYsbd0X)(O8Q+36_an6K>7nk8)AB~>9;;61TyOqY)OFqu6J_{0qHSf+XTSEn zhKr$dc0dK1BhMTidr5@m_h;xF%}dMsaax4#w~p7>K{xcC(l*WOP;3E>@0HrFa~TuP zOV?R3aV-C!RM$e=HI9mg?z8TvzGkPEdi@F`?ONCV|68TzsH8(R2b#mCO7FhhRH^w)-A8Z#eyP+P_4W8$rFV@sRi+U`*Z!?iulHPd zrQS1o{9C2Q)pPbrtt-v_oL?w47p1XvTpsj#>$+N>TFNHcr;hurV_j3P`6kNH7mNxMd@=7oFDP`z;=IVLqXS@td>HoKnbw4$S|FqQgevR(IuL$uh2>pN_JP4k1 zA-?w!dI>Ku_xfL^Utfkcfd0@;;8_w{$yj~O{EE;&8TS*O!V_r4Luki)%zZfG6w1>g z^(IPv&Yj7aaZ0XPXc!u62$~D^pWnyydQ9cg7Q;B6zu}zMQtD?7-Sc0={TR9%x;F#A zF_t*t{Ct$0N0{ce4m`Uf>(IUl;5 zb-i^qQEu|OT*G|5E>rWFa=gj&Ud{X)CY|>$)Tz za6fcEYjApm-cL881jpd43E}Ur=;tu5Z@6FYwJGNoeqY^UQhVg} z7G86ZwY%uMIWoV0(zr=+LVsnPULQTDnOKY1*WK8ja!=^z zpq1=Jh_W=|5svkm={b<2u_YzXpzwL5QpQ4KABw{w^cr3bjiGhD*4IalHBK2ZKd02( zbRYCwbYHYj>%Vqxo%BD3C-FD}k-E0Ofk*#~F^#(J^-bV9%Zt!{-OHyVrN+{__WvEf zG+%RPU&7y`xhzumUN3qtX?;3$Z{|SHN#C!tq5b|yspmHX`kw5~i>AI8G=}yUO#GY4 zhA;TuCSRLx_&X)vN7?Yisb>zYv|_&oulWP37x0+Wg3zr$GZR8 zZ$o3~YfI_KJ#(ECYqJP}((>R#z`tmmi4 zTK7t=XGM9}t^gMJ8t!s{6p3|x8KI#3geR`~W;ea^$z8xN` zd3yV&;|nA0I(|O%p4&B2>VE36zWy#k6>6Y4sDmT4uH(E?$OmN-SZ|Xp{}%D z_v|%Hn?&yky{`INUJTAz_T#+uo_YlOY`F#Ay}N|^0ni-v9QC};m{QkcrF*M>g3kBy z(z@O+9=&d-AQ)*okW%S*@4U41#%rI}wXHN#P9Epg7tyD@<4u*bH!)@(+7Fr3r+cOJ znWEl+uCICOIK4)CzTUA{UPaqv+OB)Osd7_!PHorg;L$zN*NInZY!x4oK&kY8no94{ z$#k80(AT_rC_>xMhpwrwdoQ00BF7g|suQ4XUJhGQS9;C#IjVb>J&E3XYKI8#-hY>4 zy>}X+v2?!Y3hF8!dLJq;AH7FaRisU?jdCKMp;X>H?^@^@TGt%CSXyek&iDHD{PZ=Y z$4YbLH52|`y51MNLHE=b1_k_mv+VoiG>+!Gk-A=#G z(^8*B8cVN}&eQtj`TFcu+ODznI_S0cu7k$$?uDzVd%5WSuk`&_uZ51&XR5C0t*^&A z-h_<@kPY1@rTed?UPCQ)oR;1iT5cX`*FMd0GQDnkKkGQnL&tcCV-)_E@aM?^`VXUx z_Xdw%S3`5tQm?=EQHJX$O(}I9&0kHAn4If&v`s$Nx<1=g+9Z1IJbDhEqp7P$BW6PYi$v9pY(OErLL8V8k~-EP=s?)hBI*qG+t7q)Vk7h zN`V*WZ0b5jkC#DTqo?5*tc3RIGf2LndZF}wA6UfI1%1C{+ng^dg=K` z#&7C4+^*M_vEgezCH{Zf@5S*-Z~gzTmwKMzuNU?Fc};|m^|hfU?a6=IK6&n@>fxA^ zu9J=njp3V#wY5L|+SDBB3-7JICL`B(YPqR({+)JR=jU<8MBSVbS$A^Zy{e?EKZxdyB zZ#2hA=dN*oer`^l=be{VzQ#PSKDnGcN8?WJ)B5CotxxUO}pLZsB^rk9iU{~GTZ=8f0fu7rNRIB$|1>iQm~ zYi%2G6s7LR9+6V(yf=n6Qd*Hx?@xU`dgqr%UGGJ`SM+_tJ3cIOypmGw8nK*G_e8%p zbKoZE`?2P)rRQ1dO5aPSM;uFeFm$Z8>wA>mV`_Fp6s6{?Zi%>EZ&yI)gE z|Nr{mJ^0^y;QycZfIbgB`k7QeTYC1Vu3xk0*BYK2>iS-=0QW~|-(K*>X#ahf`b_tp z-(IQDT{RNnJ%2S1#^`64aBkl7PoJ?$-;>{s(65iwAvg&aL7&&flQdI*0E?l|!dcMV zs_`0b$7kSkMEEn2P5p@fp(B|$|9?35H`nBQ!~8th#$?RHk_g&1jH!ur!=D2+2Ynw| z0)5}m^WGN^;c4jmx4yUQd+D#yz0`YPr%9#Go9v27#~NRcx5T}Z+P(3U$7-8i2aT)yWnPs?IrC~yAM;}!HNuYct1B7Ncc+NRGojT3-Am#=`{m)frNv!MNIId+J|r3{a|l2V_knzuTO zXTL@HGCv*s@Wfw92%iPNbD@8aK`f-h=KqHIgAM!_6HBYs(kZ8f+ikm(pX*JhF^bkp zOuzX%b2sZu>s;$X>k?~~RcGC8yyo+@yn&4oSK+>7AsHlD9+7%#>Y1rm%zJHK&%F2MeL3&DdE;rJ1@3}v7wo-o z#=@-^E?ii+D1OnLMTv_REy{SH?!g_$oUvWUvd8us+h;6y?76WQ#=aN}ZLHilzA+Tq zpns)8{QR74in*IvU{!{9YX!S?t#z-}YkgpiSnF-E|7fprVx4WA6lW)Ask6+viror0 zz0Qd1vRiZArS8|Uz3kT5?4~W7=E~frL`jTK^e5&e7DRUIgv6S}ClX&yd^ho<#D6CK z;1_>|ztUgtzuVvL@Abdy|IojN-TFt8B+W=lNb<8=sY&z0yR}QwKJ3=PNk=5rCk-U6 zWw(UgGLwDmR$Ov?azgU_c|-I5Iq&;4Ss>xvl7*WuoU`zVMO!Y~X3^Y5Y3x?*gEA&#+m2O8uuHX3G#t*-r{=M(}sDGAz^HlJm;Qhg- z;GMzM!P|n3!CQhi2X6{C1aAmlAFK~v8@wiXb#PVis^BHT>flAeGlR>6M+XaohXo6Q zhXxM`?iKufaF5{b!K~nJ!Cixy!JUFT1{VZVg2}pogHue^@5cC2k(bM~6E z)||2C#5Kihj$KnUR5Y~jP}1kiK0ogB;?H*(tQN484WY4}Kps{QPU; zNv_8yQZh#Ws*J(US(v*`FutSY=Qyl0Xzg93i&@m8DCbOYHlv)2qiNrql6Bp8 zsOM9D4f1zh);uh3tOXS)lZcF_MNg)jCD%NW zYv|*8C2B0r%SWulAFz=+@kmqrjP-F%{1rHhI(z1?#I@AP&0miO{-zZ_XXNKz^FK~K zma-jBQQw}j7jIFYM@bI;_oy$RBo{yTesntJ8my(h3nk~TqIaX@I{V2pI!h#pe3GK6 z??pKS+-K43Us3|N2+^El5=kd*N1eS(N|_K{OqmMSk7oap=3^ms_AhDCgy<6~cb*V^ 
z5+!*g{hl$Lf6_k4rOxXj>7WVGXHs(Ak`AH0g7OGlO8sm~_9W>Y>UU8NfW409{E|4o zq%WwiqvV>Y=r2V`SGMEYMh^?`hZClKOliUunf3{#5A5r-A9Rh*ZJric^ zN!bhTu^D?&vS0Jq;~9%7*^7CsGlOe3?|Y0>FOcvD)wB@xBH{A{T>b`|)>&K4Jb`lW z2{XxSA!8QKpnVx7*J2@iK9l?w&H&!^-(78X*k6d{stvEj&WUQvgc!2$fo`^ zCFieZa?WEp;69$Yo^n5MKhOMul6@I_j`}ENCteVl#p`_RZG1uf97^_SjQyEK{u?V$ zN&Q;N^FZIM8!72mv7BG%M{J-zM+}R%F>5Y$Kc(UZkKLQc=`>D2jPU0eo6Bs8lP&oBa9c^dY%SZ!97&LE zd1NPwp9hM508QaWO_h1v2J^Z63;CL32U*15jmwaoWM}!EWXdkGtL!FOvb*fT-`mKR zJ!LQ1TXJL{*;n?HT-je1%M!_x1LQzCNb==iIYbWSr!fzc!{rG6gx`^JlpHPmdx;z) z$4aprC&$YPQXY){Pe?nzW!b^kSkKCHz6a%ODV3AuG~b{2i+FcSh5RA&!Q2_WB-@3w-zb9`HRSE2K{T zDA&j;xlZckdbw6^lAAfh23ak4$nE@8^yy{|UsG0_7n_&xd+wK+mzsYNPAokjvkwX^ zC@2aXc;3>0B<~#`a2FQsxis8%@lt59AaSB?WSp?hvrkb!t*_(uGAI&3S$49L5!GnDf=rImt-@XJJub@p((b9ZQ!I z&q<$Ae9R63H~o?Aj9OOr*A^Gg2?$}L(jQ3)H|6}?8?huGh_g}1wbP?Yl zu(m%S*&nb|4-7~_(W;YIE%R$7Yfe(q(mAWbM~6=w>4MWHvWuM)o5Uj1)BSISPbVhb zzhfY3VR4b)za)9_veW%V{?ZdCLg=^|x-v`qSNWH$TD&ZIm48)oc)8?&99|U2VI=3L z%LZ~z)&u6u46n53yYWd$bNuhFVjGxr0J$9*$&LINHY+`uFW4p)Pxcobbi|ybfU&e_ z6(@8+@~UM2ssmOfFVjr)bo75UpIE&Nn{yd9(-Y9j=2K39M3WaMFFU{$Uy;L zLD3_IaqZGa7VEtdh~-9$JDf_*(rvl0#e6@+^6ww87UmiOd!n*55XW;p{5K%y^OQ*C zsj-OXL=w-EZFrXKM0ox)il^XNYnJjkm1m~QbKK|SJ>HKJ@P0LoXZ3X6NT;{+p2z%{ z95nMjM14j*&*b=t=kly!o|&|7#{0nLReS~|&g@ZEJ~a$YtSXK74HM_ zBO+T9W1AHsbI5BB&*^RB_-tzP`3`>S{w6+y6!O_46a6B|1H5;4^1fZbdo2C)Qh0B~ z{49{e0{Rwa^R1dnksUHX+oBec9m!|M640OFM-c-2ihi5O&J7~Jqc1ZT#NVZZKS$au zvKzAUK-=z3B6~3J_l(P~64{e@dyb0iH6*fkD}RqP0sSKT5_7*akz8`i?G@R-Ok^?h z7LV|^JJT^PlD7h^c|f+vf%zf_Rfyzwi5xsEa!9Voq3t3CS^OQ+ou&XLq8}q7XOYuc%sq>_XBVOztaCQ&oXt9C zv(DM9b56BLC4H6jRnk{UUnPB&^qt#{L5zx=7mGCHh@4*}Qbk`CeO2^bK;H%QT|nOj z^j%PaS~Q^@jJ+@@a*-e&8OTEkszjs%zmfTk%x`3VBl8=3FobdbL`R&+ zZAButH=!N92nuiIh(`wUP=YGdgSGD<=AE@7cXgqUKl9KnayRvRf+F{Jh%^t1+_zrj zej6ivwq;BUV_F9IYr{!^YvYp%${X(E5F5cvxN;=ND#0Wm))K#RzS z<)}sjTG7Ry%g7M%`b5O^i8NVkA%|w23h{4%8D% z#qjrvku)wwa)uZwDd2c}a+*gD^M=JpqfBFb8ZqZHcR{Tf3u8e}>Ey5jYwlPf2Jg$p zPBGwkXVoXh?^ruCAjU4_w=3)KnvW7xim_XU7+INO?2bK%_xo}&vJ1r6lf3p^FUDRy zV(i@_MoyC$`%vynj=AKwKV$dTvR917ePS%(91o!Fz-%!NV$8u+VjR*hMnSw7hYgEy z1jk3R#!&*=j_MNQXyz{E_!#0GTME`Irmc8LjN^!T{D2rI1jHyQz-qLKvCM};kjse) zs2Ag;T(Cwd?WJX4ZfQ`AlbLri^G{wc#wj-9kb*4agFQH;^ zqiVev7qHF+xhO(8YQ(rO3;8Gk=Xqf*hb7XrV*{^1i4(s zyvtbQGS;|^c$a4(2=e=54I0pb4)kIW>%~~%LjoATq5$M}MK(%M4d$(kiSScqf^jPu zx3UU#Anr=yt|actA&iP~lJ?p{R6y&Dxr(?~5%VfyUX_Pp(0>(kuWCdq zI?;z9n6rvGtH^5=>#m~hYSzA*F?A`(25Z!ngBW$I(S{xjU|5W6n0L*%7}v%i0qJ5~ z$J*Bwp$v?Ne8ORai9$SoiOHnCCGxP3akM3v9 z``g5LphS$81gsb1!4@$dY82y70d!*!X8ot*LENWvP$0%L)nasXiSe9(zUMNK zC&u%+C={cUah;6o91-J%egwt%Yq}U+m14Y9izYE%rhJ7suhd~RTE*z5t-BE|XcyyE zV!Xzf*BJA+3NdWV}SmTnEP>~7@stQb04e)$Ac|md|HfhF+O9>&zSo;xesx?rdo`( zj9*84Fd)VkwEu(lFPp>|X5Mfo`o;L#k1R31$priIO(7Wf4f97>_uD=(zKcUT=>LxO z)-&dN#(dA1AF9Oo7i;_&E5=W4VvI3oe1#YrnG>qvdxJ1q#kBbU{9sp$>Bfi|)qrNS zp$9`^`pPjZX0(k2F{jZV6OU0bXAEFm%$c;$%tSu=!T6c9&x(Ox%-Ak5H|M9fXJ?5S z7boTx>0)kKCgxW3Z^c^iwPJ2vD&{s-V$P`(GhtZFxy54oGsN7^CuTA+Qkcir(`G8; z=8=0EW9L_kxgcN6g%x6^6Jv)~F&FiSnGrAMP9W{xf9J}F}EOOE?BiJ2P^bN@~;7c+ke4p=SbK?z7l zE{es>Zx-|5E-?>b{X-dB5ES$97%`7vox(gZk1P@MC~`i!o*zJI6mu!@mu907w_OzC;%~!tw1fB(2icPcCmot?_*yYf$ilFgB{n$EWqD$rNRB71=G!nmVxHV4<|))q4T^bMhM1=_{tubRMIlO2 zDQ0<{n9Em)c?R_}iO<)}=2_%&cB7c*G>BQrTD(7-=i&S&F{_Hjynt92lE+1iuO`mL zwwQcvYF@%xmyU{A6A<&VE-^1>ecqqU6;)zhL476lD~VZKCgxRb{J$(=d>wP^m~%~^ znAZ+ry_nZ^i&@_<=JmB=-q0cDjnr>s>`lbsnQY$Fh!!z#_8}gubxXRKjb&oqS}Nvk z^ZkoX35PyMKk44-mJdP0R;rf2c*w zKgD5G%+?+;AMO|P&y3+YYX;iIeAEVQk2QliZCxPl;{{?q!SR#zV85Qq6Z7d<_|YKd zGb3X1wVL@XIX>qD$2?Qb=R3sgq^+}6%oozZUcJEh7ioJjfN?SZO02&U^RGcMyK+#7 
zQN3+CSz<6Z&Bwzry~@=bc$qSJsor1YFjuy60sa>CXIy(-VYX7AhN^DXU=gB6d}Y|{ zMQP&0fnMDCxCs%D0(~{&fdAq$xyMlA& zF*^|0TL5pe*jup7UA#EUsWZhqJL3+NbqaNJ3f*NW6Nb+)Qm!!;?|^Itk_JR|kyUbm zAY$4~Kwmc%H7uQ`0+ZOeL#7mHMu;&(tIu;6HFRR)GEIu--nP^sK+)dciJF z!5EYCeuA8YaH4>UiKieuq4X*0Aj5$86otxg)(sgVAykR?;G9-U;-He3Zn%;-8mF2_ zvwe+>cQ8UVd`cohqee&yYg`nBOn5=_g_OEAQ`4l|5BNzR3G&B=Q5n~fEexp2xUNhd zUG{)RcvUmL$6>_vjCRLzH51Wb_~GS+>d@DxqKTB>)Hlb@)L4^gxy)}ZPmg)Mqm`~$ zNU2=3V%+N;uWI_R97>tt^Kh8{NKy9&d4ILiA8{#6v)WgNY32qp1$#)KOw^8w|ApQ> z0wIu}IoKO$CUPbOo7TcK0ghaLK8E;cexM{d^du{DMc7g%AEJz}%u!3m+nv@2)}+Db zFK?xwV1FEm+@-Hy!%V5mjq)h!QHFO}J>>z<1Wu=+Icb%a$GaxWg{WotO)dzZRuya`vIidt6)T1EF~c083=dW?H_ZuQv_Tkcbj|6m zCWc8ld)cK20D*8?X6f{0_4ivX5j5k)NMvuBsp=}(B6Yo4%@l2u?b2)L;UoUz1OjAA zL})5T*fOJyOyYxcEG$n_q1{`LVX(PDNisq&3ZW<;uw+G$AXsn~$HCID8Uqyt?36`A z3v3bRBoCMfkeiKSA=15JSe)0W=g?50(XImU1GkT++b_(#z6828&?a1|75Zsay(I8>kqW)32#yWAQ>sT zw6Tc&=vlq?@B#72+jlNc|fR8d_bi#M!Q2;=Y*;gPl%&G%< zL_aI&k&TrKdZQY594GJ<`eTu52uda1A%T2HwY&9;{j3xfQf*A(Fd;KAbf76fYS-{K zrlMPVvHTAB_vBk|hCE^@^pviRd>1gWd|NwB%V=9##~@X5tOWyYLP%NUj)06nOj$0c z?6vw`b?2WwJ5v4Nk#i3&ovOWgPygwb8b?*_d*@F*>GWD9^xgTrdp7R7@qz1hoos0Y z9bVJe{YRGtqI5=KhEDK1{kh4XM&1qBnjT}w)Rvs5zfh$B!zy+MXe600^hQHNkPIlY zRsBJsEy)M!jDhAUcCk24m6sK&5~h?2O{rFm9XL`uICnwYK?A)*Db@rOYd1vKMC`Q< zZvC}Nj*Jh z>gl}pMRRr}N@9#z?h}XCQCZPPS8%9(a!C6OyEqEnO+n1U;!$Tq5>zvm77Z3>7W`<< z7mJY!I3CcNCF3J-)G_^;@;~V3MlhUa6Ix8Q+UhtU~`bRdPG%P+Q)Iqz@;l~5BV?(V{;uiId9qzQY_>ztDM1ZS$acMC&a8afmq%-uZIj3m65klf{_C zI&=L>gq<~V>q{L8!V=v2b+AF0-CgHq4+M*$(0Uro7R3fjZuE-SdgByZFVue4fs#dO zP!zF6T42w|bG;s@!cPndEL7$6xdFZ5L%PC_shPss!-?C$Y@hvcHf)|EY@^D_*ZzCptR1D5I zS!5gW1{5jEl&MPOGEfx{h+~wu`ieP;D#kC$$RiXM5F9+C({1(rh%NLzNLiaL;eR~R z5t#`1cYA`J`fsY3)kMbsW@&w*v%5XSX^GU6Ly!Y!LiTD8X$HJ!smE<5+=J3N)l4)S z#&8>>nel|yj5y0O9uke>LZuAM*^~p>fsytyDx~X1#zVHLDKQ|&F@^2On*G>o42Ue|1C~cbV$H+A)4kaHQP%2O=i)Wd#vpr&Rx|Je zV>}#;yE-6kRSiwWD|<}1e%L|Lnz2mABk!_bXvh8NwOpJP(rFMgu#Ku6bHuRxemOGz z)Fns19GN8F=@`br8D|(}2cJo9cs$o&t#BCjTxp?Ik=I0bNKSsuiW%S>;R91>H+u$# zM4zxS{Qfs;%1_@Y^_%U5oLF}?TFR69a4}-$Q4@iUHRV**CUOD|wVt~oIUzwr_KKfF zNJSg+*7Y)ic&L+uMDxFj8M#d9RY^jBYn?Ttimwip%!ufcrG5{WofH!>F1MwI7+Y1z zty9Geykwd%aaA%+Q4Vk`H0ixQs1@2kBn|(w4Cz5}6;sR-a0y((aVo|zdx2($S*Hc% zMUE8*i@lA-8mueTq+%0+V5<7-m(?WG-stXCdbAW%XAd$W*97O1O8s7fGE(yyuZU@) zpttx2AS0xML`GM{FY#uq5-ffUKq|0DB#}U(0hVXgh*GP$yd5nf0q@gC}9|^lX_bu@)6$EwKzw6aBro_`ieqm$t#i$t|G>~qEREn+%@#b*dxfy zDp4L=GjB2-zFrP9-7vnus1W9m3S?M}h6*Z810oiDWjkhti~=C2VZ_9(LtKx}S#e-E z0Eb5_p)Prue#3cJ`5*`uw7kcKml`U9AfdRutlZP|>LMnR<0-Q*p18g#{oyLOzRXX+ zl{xd`htA@f!nOWbB(hNT3H|EjqPwM!>c@z|n>YL4KrTa&3-fYm`f`{XA$T7kcEjmI zD?aLA#f57_EN=Aj<^s@6^tm#??77J0%?N^X5fz7n9LVsKW^b1FI{~kHh=Fc^9mfnW z{TOSL*FXlO&4g$T!EV{ISJG_ zV8nb1iTi2)}GxpM46`pFnN#6q?lanP{|F zgscAzeF(&N&&jvgY&(qS=$qnm=Xd0}djHpo@7J!sberY< zM~m;*u6O*qJcs(*c1oQ2g#28Ge|NpiKK{7;jFuV9KI4D?5%u5s7xmwtA&w<;XFzGj z|Ng&!7dFSHW$;{Kf?dE%)p$MNf0D3fxKz*^s#t1|YiKcC1$HT!HUma{q%@hwbi$fI zY+&G)l%|bGAsj&fk2?F4fc;>M#G1{$URVN z{XaQtYw1?EtztbSz~}N_+X+6e{tr9{!Kpk~F}%EgjTrto->?0o7{lY=IdH#3`l$1+y?gF<=pSCX+Hv>G_t<7X^}s!!eC^*~Tl(j-k3VrHb>@jDDHwZ^>r|@+ zGh0|YXnt;HCt*Z;=$97!UisWmcwuV6=Vl9?mo0Qc2#hQ|KR0X4eCV#k$~~gmUbb3R z%PgGI=Y@}5x%`0vQ|rB_9(nA<$wwd6!_Mm#w%*{-?^t?I)pc7o-=P1-!zYiu^Pv;R z9{!UpCr)ij%^y7mJyLQu_YwMtU9jwnxsQ0h&wM_MeQC7)ciCS|mb3qg`CR7i74;6! zKW{$AZbPr0!~1X54&wQ<=5sZhs$QP|F`gS`?_d7+%imv`UAN->--3R=Q^Pr1@$Vm! 
z=l_@G-$!*SK2K@A_79r>-BY;ye`5LkpXPIH*);CGCinwQ+Bu88FXre~U17=eH)0Hs zpp!138hS1ww*brb>tv*>WZT^tmH-#i%*gyPLMJ?d@jq~q%M$G#TB{;&jYmJ}5B^nC ziLmDPOJ#(LtVY!k-iDm{H*rr#v}-MKw-fEp(zw~c3!0&&P`z+CF+Z6W#I!FNOAmtG z@;lg!u`M?-B1r-ez#`0v^t!4RTIp}dN7s>q{fjLumx=Zvk)-d?(Yt?iDPO!^Wv*E* z_GFLJDNZ4@e~6-oZmRoddC!HKAA2&2n3;PrqJ31|A}o+VJecbQnnLS9N+eXVNne*1 zAfL{lf*8CFRbjx-YE%chR`ndHR~4xn3uJ&b)Ju~hdED~u5W+ju%&=7=jZI7ovXEvL z9KfeTx``b__$jTx7qW1I=>Z4>5z8S?yOkb_Qt%9klo;QvJBy^`GHsDrl|3}FRbuCl zpig|*!cz1eMPAfa3ZA}sSJo&Q%tI?PE!9o07Yh-4t>~XY#ce(r$9EDCBKX0nDC?RnQy8gF zMKa5r5Q4W_>o)a+kHexYLI+Twwp_cJdw(SI^h+!2H}$cUU{Fd!{vq|J3`A7@Cf7-y zka}_1by6$-)9Xz9eCxa#Ma8$girvFHmv{J8l%-zx)XM73@1$nW}hG zd;;Zrv$q9Ph=Q;eDpgNJl01|~igysohzgn`l7fc{WJT&2S}sZ`n;_{Ka?8eq#LN>Y zA0j-9GzEtP!WCE?@He6fXb3kF>idw7D^@rRp*in$M_r_?J;juBqbJ{o=`eZ!04xVb zfom$8<_Tkh+IUX$NK!vHiu_tP-nSem3q!hD(||aL#?`bLqo@^u=~%OeLmxqL>(tgm z$2ZNNw&_0fR?S?0uiRnED^-+Idc6fB_{9t3`LL}0><;2#J z%?(c1`kwWNCovG^nn+{cy7hyZN$89@vlyHnuV@Cj4GkX+Fifj(r5kO$qtJg}7+L3Q zgDf%XL?}BjTNob5VoVD{Qc@41szXrpfVA3TSDhQrQBB1mlwdyrC`Z1Id><-9KJBwd z+alwf%9(R}^2r{^stcaARO_z;{$zmcoB|y2iwvJj6|;Zk4gWKOc%yuAhluOc%c7zf!E0<6Vok_?M9v8%KJbMi)~ zTyLF{ixelxK~@WvmNW%mit1>hkyg)PkWpQEY9(&Tq(R+Q<=s}KRLsPhCF54Ix3-~D zYRy;5yOpvjv_Xd;2j<$++B;1-F!lsj0y#A*t`?NZgX9x929P6%7GChtIkTD|^qF~zn#mAaHDky!3Mh_Pqg4rHY1KhI!J#Ae_U4(%%W8Rhhte~bAY zsCMM?G(@AyS1EeLrMoF0qpVD<3kz~~s zEivCD7zsJkYqcO^x?*F4D9WzmoTku`YFDT_jjW^kqiMoNuryJYIvTRuQOp25d=x@Bx&YTeE8VvjDtH zQS&m~E#>CeXXelvvCk=Ty$bIMM$urn{i3s);}SAPixeKTKuQYD0sft@1oxDzH*O}2 zRt z7dqUR@Ny8+kz#{L3>mi4>Nj8_znwsOnng4(_#h?Wl;11rO ze6R&(vjX==1=C_o{`iFXxkX1+YFUy{fw*fI@y0(F+SSstFg+xEA0yK{+jscNYQ$|d zf9v&orv_gk_<%l~^k69PRqB4RikCWH%!>^$&%t{%hsu99bU<^&6V!n^>71#PG4s4i zn68gz3a$232m%5dnRL^EH_!nAHG>P%<3creAY1Guz$kj~7L#}xjF5&V+M=QMndFez zerf&A;Ja20V`-PhvsUKBS|Apb4!%VpZ?isNkr*m@&zCw1aGl%)ZAr{MqZFp=xgOO{ zP%Poy`mz#?+yz~iZsA>l-U8gC(tAN3MRH@77scnae&v%*z`DIf(L1_j>5>_Jt<;%5 zB)W{&KE7;4T3yHiT0`)@DxGLQhBh>@l9_aBl}wL{gK8Q_v&aCB#I#+N@D$lzfay2r zLEaX&jHiDrTh}~r_~y8DQ*JxFp<}!~RJA=CeJ&bpuk(OVvMPFy=b8GJsV?{3S8*^L zj*YSLM5MDnQD6KqnrVyHI~=z)*u3ns?803zYTJOGrKePX8k_J)>nt4Ca9a!YPCx09 zaZMJwlk!q=uS=^|_#)}h$k*+X-T{|@11==HjId-D!$1ymF4eQu3R$hm%+4mId=Jwf ztYL&%1V=JJYt$-3GV${IgW_m`5?7A?hsWnO9M<0*i7ef*UAjx-x4w1@WOP0fY0~fB za^%GPfAI%yl3Q}ufjjR!FiyE(+2r51ZL-a4t#}2-G;!47UE!8fpgf8=3z04#KzWi8 z!p8yxFRMo2LwF6=9Ih2$H>rGs4C+J%?6pM4B_v^lRKe0)90dsGj0}OBw=r5civD z2<~^fTV9f5v;!;0IOqP0Ojj8lQi-%;U~H~K9#sE&l-L};0m)SzD++{80 zm(db>a9~?kr0Ou*U$D!c#!Td+>l2f0L1&ZfdHjNHL;K*mUiW_q_|09P#}OEa*UQF_ zAH0KdXb0|345MFrT(N}+uWB`e3Z(FO0Nk{ONYE6L?Hb!;U^j&h8zBa0LO9qSok<}n z`U^=aT-svn(iS6)h-iy>a|^*(gBC(b0rVhcXFp&nKX-n5MF*DOp?4mlj^pHS)0tPj z)3gSdeR>Yo;DY;}efm!7{Mms2cT5vtuK!Nvb=oie&^BUQ4@^1={{S3W9_WG!w1^Wf zE3P3j!97q&+C8KP79qeNf=dXC4>O-@AOOIAOK-o>a0Y?SOT}UXD;9ANu#FSFx^f>h z1LrZS!$|uG88f@#!zh`>I9UA4_znlM)q|;Z4!i-=D2l;EHsfmpBflS;MC>38|Z?mcq!-usRmy+>cX`1I2^-1ziQzYq`m z{df5NEn_Kt%aOb9IgJ0y{V;JUXLCXyV(dqj+#CW05-T{Q<;j7BfV zyJ_k3Aoqh8{JBWusGeN<`k(Bno($A(Ec*IqyOv(tgABN7!v}GuJIHNVtlPyLPMPB^ z6!-C1J*IaH_7=uL##jQ?q+{vOvG(S`iR-|YVH}DPmVqkl12a0rx^=)c;&_652d=Yx zfGADTUI#o%WmOb(IYa#p7;5-2tBiEftvhh%9R~-MiH1$Waxfck2E0D&v=5Z8PEsSh zp}NQFkp*D|ej%OZOIAVAXfT);IinX%djgSlaZ{Wwk?~?&5;2MZZ%oiog zA1PThw&B>r?85>Fd>Rv-r#ObdqwE%A6H zUlPa^s=LInMYG7lQhTMW1-8xXx#@%~z~V#47z*DaQA`V4|2oAh?nGs>)Ub4-8H6S* z-vt7DB%uceL&WQ+?dG70VkvtG&hIqsnFB}ne;&L5W8LR|?o>4TNf@q^(dfww2gR{@ ze*VM}_uU86Te~9Fk5eHY5Bgs`cxRWmGcO+5u<-=s$^*75BJ?*WzjW!VAPx^|Q84)>wls;^6Q?7Q@8Fy6W*YDR=I2~$MY@kV z$=uz)*>+>EMzxnsZwpAW6kiGZPee8P3X9Sph)9V;pidrtdY)7PlCMk3RCb1D%Y)g$ zF~|<^tHPPaGb!_H}(`_vNW&6D7$4oj9n 
z@^jLnT6WT{??9vy)Ndc!-XVTS{|NL_kdNbLrs()XBc@FvFk06myA7z8Dx_}Bm#NIF-` z^N6MhFkpsQhtGG}Q$B85?m4!kbXAR%l((jd_+ z?nW~@C@{4H*RD92%}67_LW9(WO%f7sW~Oarn`+(4VMb(SzvXuJ_O5Sj=!-`0iALkt z$^RaWK0a~vatQW9a%{k}Ui4hZTt2Yt02I?TYhSIjU zQ@IdoWS4qPP|eAjFz{LYzAHA}^~p%&<8%A=415M2^Tun&Q>Rm}xDq{hLtt#EYdGNF zwXSF0-GA&=KVky8Kk#J^bLF~KqyR?)MFTjAb}vw7R3Fd{iudQCkSGgLWU|=3j*F~P zBGpuj_zi-iAS~dt06-@^l&~d+CRN88n&@StsFoND+SyMUF5Ij@3UUrkDwDwwIFC4# zjnH7+wOEzhsdS#u&T7NmGAdI{+2<+=HI?lN0+yfD5kA-&HjTUw9>=414abIBQi19r z*Yx~7Cw3gaB@%w|?o;Qt?^xQ|IbAmsZ_p3ze`VMgJiM)QptHI=H+$ml(>qhi6K~(N zdFfZ1g9psJW+VKpIQ=Z*X-ui3l3>}-^3=C*OG(b-gu%kDzLlRvPGRx0MB&BHii@8` zCIcy&jj!^v%B~Nr7sXzp$nj26_voKKJ~w{|HVCOII$4MoKN@KqQjxGYR)dFbzw-dy zEghL$_zG?KGHx5f_Y`Lc7H^I`Ysi(ufT9Y83!^+3xG;E_1R``?WFHPgVN?n^R1yU@ zhpG`^c$~uONZi#dC|olgDPlq?_HqTrbIqQdpYCJ%tul>vWd_5tqoxzp$)QP}Hd|t; z&F`C7@cXyvlOtdC`@h^hxS7 z`~>(z+pxxd?S9i6V(ncWj1qaHuqMM3VPpGZhZbX@CZfE{UXFb=#r9fPPjYJjxja5`?{jy9mYRQd!Jd@9G*!$IK}2;MBf; z_x5Pyxs$0-d#tJ{wb@-8>)qN{#C7IQACC3tJG<}q`+GW<40rFk!8q29ObF^EkK+`h zZwY`0I2f_kD2*{(Aj2iGiHlxcRPdT82I>@?6+LX|hlQ4)AN}em`l1s=n2(fN33o2W zpbxi9uC`waziV0Ttv{%egOQ;Bgx^1qJ~Wrw=nbCi&+e?U`2u5*kr%o#fjg^< zTw|4#5p#_$%7{Ur59JH7N*DY_WQ$O~AXU6o7dijT2via83lQ(B62cl}Ux1FPkXKBd zTv8ekWK5dkdz*Q#$zasXEMq0rKcU+8%c6geg#G2WmHtytV%Ny7n-4`IuSOz?gOgp; zF&D~Vo`-ArdH=et*?ad=8^GCP%$`Y&4|-11r%?Jog*Aq4XW@I@XXX*olbwS7@Cu&q-qP7t>+On=O?ypTX zg!{W6i!^y!8hSc1Gku=J4=R~-!|r8Z&lB3sCg%s32oOGCJc4k3*eMd237L7PSUxNi z9~N>C9u2m0UpC6*(7U`>HOXMZeTtmH>V0^W<7Itx?S(@JidgT_^&5@=+$7Me-@Whd zI}R43Wx|{4HP?g>eCc;>4Ymbf0R!4^Yq@B@1j3m5guIU+`CtyGN|h$# z33#Zm1liE2Q_8{c+G+7tZ4K3aPt4V(-+0mO(JO3Ao8?>KV8`i)AG!MU!(BJ++I{0q zyLQ~7UlZw1Mxu2={o($KhJbz&u$JC=w&J0yPrvh_t9M^_{jS~D-vC+ah@&*y4D@3} z`u`u^#{gg=n%iX4wNc2spZ(&vwPz7vY|Pmi4)3GO`id(@{^ z=+jQgnuAMp|OCH_odK_eAAnL`l zu9pl1YgUa$sb_60W*{0{AsSh1g_dhjC3aZD5}kl0x)CD;{ku(PWR*D@O#M)z z*j?rJ2>B)4JB$ZYw6Ux$WA&I3!{$>h0HDIyqHsm|lgK7XMT$7UQjrptiWDhX!2k|c zF0YMOV5#C+I!I0$luLDWF3rI@(iJH;z1NyVyRd&bvG z7`>OsuAG=YdDR7^;t@_X=?AwRKREgof1q1c8|1IL??6&H8K}Q8>j;rl9TE8y@7E6c zRhY_A1fZ3oGqvKWk5SQ!@e*)pa6euacFu+0>YAl7^|E8iqT(x}CGsR$Wq@0DKQk$` zaz`FJ@`S|ra>?F#Kt)Rym9}Yf*z+peUa@Zvn1~~QXQP_|`)oHV#!1ymw{ft);I(Tv zSHO3ISwq(sAZEiQ%yTXjo#m>8@QK*7S*Ast3Vyb9kP=;9ba-n~ra=IMb!fO;5&DA> zW)QGDYWxJEl3;;eb6ST1D|*V}F4Iw#D-#^)UNnvgLzG`XH??m2=>Lo~-rCZ8ZtIp4 ziH>`Rx_p`E(S6734hTxW1ExNAt{le1@K;&?YmywWwC?%bs=Gv-54%nD+1Oe@L#Kh(VmbYxd` zE_~0ec~s3+QVmk6BvolFsY-JXQny;&t)3^l+itt<9^5vjX&Vn=vYpsqAPEUW0&xf- zKnVCbFHlvI?T`tF1&Td!6CmPFX1IP33-GhICklM-#)iAw40FpYrTK1zn875 zx++!ObIv~d?BUzr9uu%;J&B@OsdXx_9(z5=$by9R=p(6QZ2I75QVd1;MtwMh6*k#+ z4eoc8#&eQRk|e-1)0tIJIYm4xjx4K^03!ztaROnxVtBi9g@Xzfvl)KzTwc>-Ft5R?|W}OuP)W@v1hq;v$p%MKQ%XDLvk$^FWUcb&uM$} zN&8K=s{OW`_AK6vGu_0T?MI%xfNK1o+04vHi&GlAW;)}{lOtY>0}N4t73UZ))TQ)X z+6-Y5m>sOUhcGu5uObjbx{&yqSSseASUE;s-yX12S`}~oC`YN=+!7D6>fDM*mRQqf>;t$TYX9l`?mzk3o$3RzSn~oV=F6G(nVVvnvSKXaDGySm-w=QpX*k<@no`CohtQf%U#v^OpNso*!Xon3is;MEI6cJ z*M}!7fc13D2$EQPu~+p`O$z=RA-XLpsbw{1w{bBNoyeON0gF;inxDZNW-!T7Wji%< zie z6!STUE7>`vjxC{ZJ@BiOmB7E)cM}u(vU*)q;qsAtX7z)9qcnzyW@>#;JK`c|BF1CbGi*I_@c@tr}zt31ex<(nFGLNgz+PtX8yT!oC z!TNikvl<~H>I~*8YWQ+cJV^B&dUX+ZfOYj?OAvSiWDOlp2gCsNcJ&BWsbRY4eWT9- z@kU6e$LNOB;FCBC6BdjZ61EHU6_%oV;!~gKA3wd@7x8tC%`Q!s%s0KWI&|`c$LI45 zjZRH3=$NoF-f13CpEv)=XoGJ_oD@zuPzvze^MdSN)k5W(y0Q|`vlOOUL$#{~fGb+G zd)o6Tyx(`i=XLr0mH!8I;0FpPe150bUx@>qf;sWRW<07sj-1|aS)E)^dV^>c6lViA zg_u*CL@-f6T@&yc^)SGGlFvi>Qi4(;1hI@5IH)kEKCPWWeF4zoN=#pmVXk#L0jz8~ zrUfEP`4c2lvZ0ldywHT4pzE(2z|+@L)YE9`o=4|llZXFmC(#d}4gQYch1GOLZZk_S?CE z;|LJOo4?mNq@O$-kM&RQ+QqpWU)^aAs~UU^^p_QTo8p9D3<~pWiBA~f@N?Y^KLT~N 
zY?-dBPZ!sE)+*`XF+z#C1)z$f@I#YTv@miRgi`EvMB5IA0%lOFI3nOlD#x zmA+-Ss+~J@EYtq#u^o3zJrj$+S5$@>iN{W!#Sw+r$4+y%dJ1vuAp^FVWXzal$}m(3 zBaK^0quYRh&{Zs`ra(Z%>McSr7f${k4QLaFoc0ohLV8oX5WA>((N6@zQ11jI3WvPT zLAnvsBqq0VG~>Ji6&a5bhYk8v+x+>%#CNMBs5A?ylQ zHx{wNB}dS=W^fvS@LHb+iRujt-*&B9X>^N&-Ax;|JG3&UH^W%DF(zU#HXzrf{`@~M zMa-+V>y#BjfPe_p^MFC>iD?<&8z)BGPOv0zw9R!;JdQ)V+&kNya3O(u8cxaQGnv=! zn8@26#VEY(>zp_7*->RNU<@-B*%~fln)Jk=0S62^T!$3SSfGj$^G-V7yzl`FH z?%^Jh+7ftVn!bT`IyQ;vvX8dzutGi*X;A~qMzAwrL_t!6{1;gvMahDhywS!$uOW0O z=uP6)q6F)Orv~(p#Cg1|bz@;k7dSPM;)1P&dGQKUsY0ZCT^op>3j4(ZfoTmgF|PhF zMs;xP?AEtt^$Z&a(DS3*{Q>_zC?$+K!G%Fa<+IBHofkk7ITa z_lEr@NFf_Cc&OE)LX088u;K~YQ|NGxgo6mE?qk8_*+GW4A>9%{`g>MB;D0 zyZ{u)wA!C}R}0Vhcuc9h$oq}^2VVZJy0?X721=VHBC`@*0@rujg1-(t{dEB$&G_Lm#|Qgh#rI-MUKZbnBg9d0Hprx?cKv=3dq`DAF4YH_+Y0k0B4t29+Sq6pco~Uwn2Z}I|6qgK3 zfdG!U#7vlX!MSBY41rMGauss@-;R@4nk?>el8*eCK!WIH#U^@P?b-c;gS~Lt*^;Z~W5k zn{V1v+IsXD&Lxe-+6oSXetOA}6{%d4>?30zE1n+159cYVfmjh(G6MoKwLr!ss0R=W zaS@2_N>J+)rjWXsN97Xt@BrG3ywbYF(y&OMT@!S*A1PvNxoaV=J{6C>D-r!*3vczo z(;9CjGHz~zjDu%OPFNkh(d^fh@in<#F%m+1jYa;yE#is)fQWC1cw6VnwL}~>HdlV6 zfImL)wS&+;G%1aa5LPi-t|3ntf&_=;ObPK!aZAC48#y;( ztKdmMMQ+kF*o9G$7Vhp{7#K5C&}GQ{zO zyT(~x)kocSHHWq=^x&=cs4JZly<7KQ*l^s{`#g|J*MN&#n3R z!26nC?yn|2-GP8|4>TVE=-)K<;OswvGouSO;++=Km)+<{v?ys`_B>>7aE5G&ABBa8 zW5co#f#203TVX0SfQl7UFb@^8;zXHpP{MU=D+kvH?m`q4d9^TyjC{U8K*uN15FTMU z4RL^fkN8aziL5fFgl=(V(hL`;c8x`D+_mTJr;9%s$bLE&d-q#cVzGw&R_ zw%o`{EHsOfBH^gb0uEU>KvZwk({aql5X}OVz+4@QM|4KfOIlxaeMC7`WMm;1unwYy zvBGR;u8_$bfG9klPQQIA*j*{MsbApH)4aPplkMzLYT#u-%6?@(?Su(92RWIGcV9El zE5!X!T~JtE3rD<8Ze-x4FIH-L{8s`7i(F(4~9!9=`KV$i`0WlVh;i%H)&Db0WC$ z2}R{x&>Sg+YVnq>;zu9|mz>xt00>!N)ZvX_RA5h62J}uCC^rVujE)R2UU2&HBMCk(Wv9Oyy@*ntQ`eTZ9TKue0bNm8`OWM1ttQMbT)J2*I(3tX2@ zqmp=V`$$KrKh{2y$vle2hg6jimi$;L*xW|Ai29x3X@4P+Z8=J)<`;PZlw5rS`zD7j zf)}lmhDTeU?trLKGE|oxQdTWBLM0w~tdK?`kB3X29DIFb#db?Bj|X23xjZ5w7`^Pr zW06@qBs!=kH=l*@l(nYp2JSDBbRm|@6qfo%toci^=*}In*p7*@$78Wyn^T95URI^v zslm1*e;th?Ee#}?ZTpGRYaTdmj_u6{1qh~$N$CH6oSj3)$Bae?*BcwZk>nHld6I&K z7t!eE@^{j5!2tjke^xzZiD@(l;9;o~u~0hHBvcpZY9om{CSZ*);wnhgp$tUM2Lpm% z?*O-XJ;lb40by$k(llqtBsNp;g#FS;^|P`7{Yp^_Q}aM&2E=5S8IWqXc#K=ZIuP}i zvD78~DDpL^YG(|y@&0z-`QTJKee1Eq+EE-U9XWmuc0nc&12y3};&(NFJr>`3fXC$k zPsy8_hjr}j^y*IaUf48YMBj7?6p@~Gv1t?ve60F-T@G&$=ta~j!0}?pFy}O0O(NUG z8>{Bkdv92HGwQahJHD66s7!9@4}0&&u@;@?gYd&qpDfylrAMNTfL3XU zHe$R2Wkt9NalN5>5?BeTy)_a@bCZ=7^j+AobyBI@8LBYs3{craEuF-ypq)``C#JmJ zQbt+CCIGw;djwj>n$Gdbxfb?tEo!IFBeOGo;P|<%dmlJ+UnXAZ0aDHF&k1(BLaBz%!#72AyJLrN@OP1xp1*IR_Z;F)B#4@^pnUpr1KpY|? 
z#P&%fmICib!!I1Fep2;zN-be0VhyrqIu%656Gr?AgX*T<@Iy&VNxezY$^-_L6>in` zC2CHnM1!{1#(hrvo6ntpB%AAd;-zS;`8ohUzCM5H$k6U$yXR|;lsA0-;YZF5?=JNH zIu*p9&+k8SXXnrq;i4FmoTuXF!nWwb)?q4t18(YC?(Y9>51_~`_9L9!|J0Gnn9>hF zQcsABi@CU2*7tMp?M@pthpZ}UzG^rqXm7=3&FH2I8y%9s0$&iMWfUc4%kxp-EgL~8 zx(G7(z*4y=qWB0W8ud5>s3|O~1Gy7RF-8mQ>5O1e42lt5uWhEO7jX2OpW9XH`&Q8T zm+uv&zT)1sI~LpB6Z8a|OR{cW-0M;7?Je{ha0OYUwYa+U!#Xf+`2q&g55 zb#g6`W+Cnxh7ctgK=+G*oZ~5gL!mdk5^{hMSd69^5}vgJg6N2+)eUs$6DQ)T4Nb@_ z#es=`M-1LXvbDv{@TR?O&i-+4`7LM9KhivxdSP3*aOp_@LNVk0%K8@HI@n{LefW{1 z!@iy6*&}ym`p1A!(tF*DagG_!8jS*FCQy|l(=EMs8^e~#SQ`k{+AT9lu`4awT+m%a zwZ}U6n`xCHZ5$1i8$$w(9b&i{P9uVX@d5NdfL-bRjdTmJ9-%yHJD zNO8uValp0xL9pg}1`vtZ$cS2YxubZrQkDt2pp1?TAWX9CNMy>GSh2WG?wbwTz*3Be zgKfB|k5*YBP_(n0j9(lRN_A>GAo>21m4nq(kRmkKgCiv9OnH!D6{I+cKvH zwwHQk|1s{|iM&?M00v#oJJjyz^A4e9i6GFFVxQjxwj7WWIPMrd@GxXxctB#o?xsBM z;!>m7uoJ#**x4C#{kQ{OqHbpjhR0phI_`XgSK6gpSnv9Ar=6|VUfO)n(W`#NA!k_& z*6{}U7@|%Q8xbA8*2cu97{vVPs0asysECBmf7qZ9V{Q#9BN65dVZ|XG4v&6N~n#~-POkM zq?Pc5#a;`x+_*J6OcG$oc;b+P0$b_Rx~UH&bs?6vvi$+NJuzA`DOG5bFvMz;p_QtB zW3}9<3a+=x9XKgzAEraC+_M1UtCAB0!+YI3Y3ZY3f>d4T+GU4c3T4Za^oL74^sQuR z%f^R?2kK_gRC4-P=QOX~rgIN*CGGJ&Zsm`s=lyC8{xe=_WtFQFs=Fh7U{XDGsH^!+ zI7qv41J%yv7!Nn~=`Bbuubq6qHt_r8vU?TQx68Z@*v)%1&!c1yQBUgT_C}OM+@(ej zT|iiX2tp!0Ermh3D<}~X`f4A1MV1R2Q5d-}cLc#0r8}4tqbG-a2KaW+FM#+>qb`*) zgeypdp0(4=7i)1A?feS=P(3_zF=0E`GdllFI{nNFG8CUi3iXCe=7t;1+d`3;f4F}R za-*v(Pk`iaW)Eapc)CTFk3pV@+u13&ol!{CcH`eH`>01dwp-DqheJjwYHh4i8!Ssc zl|@iF`1hZH{1G&PVWX6KIdH%eBBeEX3KAw%rWoNiMwc+9;mON080` zb~=6I8k(ogadKz-HBmgh`%c>*l}yU7EoIB2Yj7UgBRoU1*!NlEUMqJ?pV3YK{fFdk zRV^o^fb<*NXk6$dE&N6%Y2i0+NYc{n^3qdelD6GS(uO5Ti^>EFWlI*X;eKD8oqa`K zwmD-ZWg&mx0)PFFsSkr{J1hEZ+4zq74YeE9zGb5pEU!3p0Sz=E9v+BW9Jw+8 zk$iT;jW|0@CV1UZltGeD0HI{I13+AL{8@LntuI|oMg7r{_R0KsI-quk}A|-T91fvpQq4g!KP8WOW!Kf2E&emFuR+?+Wv1fHzZarwnxAlH2 z(mH8T$si53pZ~)ZAf3!lrqe&k|F2Ceb1%r{_Z9Sl0AuJftl+YCf6Ss{O#41nEqmTr9q^e%hf9Tjp7FCxdDQv zfMN|_;Vs}2^^40l$lKs6pj0b(HLP!MSb}!gF~mvn*NF;lG-2In(Sl;)PA5SWiQ;nJ z{nUo5-D^Ynt5hVC3g_hB!{LARhplnw$NOcFn@1SFpoe~8(>pP8>TW&uKLJ-r5}D?C z19`WVpdPeaa6W7phV)dC@Pz9C$*zEOmUffK<$yxB}A2OCtyw&6P zd-&t!tL}c#@8+-E{`2~d&$sTFd(LJ&ChPR4M#9QKp+c(`t*#{X?J686RY@P_*&cbN zXx%YlkRXVaM54GyA#sl+k(dB_fnEVi@^Y1x9zCRX49SHE(X!pwGaza`$&{PK5)oC$ z;P8W2ut3*Xs^>=Bfq?sM9LOlg^XyfFJjwy`7w7rVch%ZJw?O`XN#x&y%s&$Qlzxbe z^`L;PM3~r;_Dw>M+u9)P11$l^+?mGL@R@IcjC-KDrueC~wyO1s$d9jmHh>=bvn>$@ zK$4(Dywc)oeZ~6D(&G3wie04jjV+Pnntcsx#_#1`P8)kS?`CA}tql#91Bvp4uCbfA zy|J4*u$$toTWM|azO{X^eoNM7ACQfr@9e4#u{JZxnwQT){()K560dVu*zZ0J-flPi zGI#_C^x}&s{LS0AcyGf>Z*9peWt5*ctsY&ajLsUhHIZy6Vp8W7(0~e&%zsxPG0~1| z%bQAGC0d(BNxuL4W=56GlUV2fgzOk=v#6Y+K+KcSj9f;+`TEQbwP;=!a@qgqYSa7} zzNe)(xUSThucE$eDNAT+tt6L~8+IL!W5|z~5}TDJrdR81E-xUsD@nb)^&q>O*uY{{ zQ{lSl!lOG<_W?k?uG_DzGOeY*;VSKT9Ksn%&9=0Or4Kn@s?GSST2jIP*Yl+h78P6* zDST!MGBCgTzYHH{2D^2|Zleq-1U74i^weOnE0-BMMR%p`?o%m$$dCV4-cG^d58NK) z>4JJ1ePy2){d~22f;SVN0)|iuCTx4^sMo7I_yNtw@KZg1m25&b-eNA|vwC5p(@tUS ze(QPG3IN z8Y?)&GUiUmuFsNPh#B>sevl9Lh~DExeahmaTWBN>-t4t>?V9|C>3ffv8RL?967u_e zli3sML(7Jgd)}g1E%`+uWNb;|yAqY>(oD!J=N%M55Q?Y`{HOBF>evM)dK>}_C5}&W zzFN;!(VH!mvYUV0*E`+UH{Gj-`}o)=7<#P9lD@Z9P;26!Qi0X`)SlH({^w*88nLDS zbFznzu8Sn(=uv#|Cm=`ds4dlfEts1^xfNUhX*~p@0@Oh^0<;~CfH<51#NOF@Xl<4U zRZtS{THdgxRds~tTu-sL8OFp*ImGIe};$tu;FQ? 
zg1ZSUw;>T2UUtTFOqtiB#I7OP!{49|45SLJ--u0Y)pRD&)|)go`m~Qbg=-pTeG#<| zw14$>KoL=~Z)?!$ajNv}+ytfgjC%M1C$BXBYRh(g<&g)R>We;JNlXtt#GUx@3zH8x zJ-DROatTxO@r&aRI9qcuin-_n6BQDK*qeF=pjUM&6;yfPLxI25unDGwG>MIfg!_mz zVYo#z%0fxYh+?C4ZD|n#o{Y&b!1e8P1y zs59Q9{GO1#^ZK6Tc){yyyET+`|3tAj8uQ}1+wV#gw)em5UCO51{?Nnzcwk27Pg}B= zFs3!1*7{-nBTs1Nthpz&#w7_9b{=_0#41#eHuZcy%?)t_7hXNxn%Cydym9EC*7pa1 zD4EyQ1~p%`mM?2|wW1sqZb-LV6N57M)h<%r4!@4%Fh)I3Z`+PTaf(|JccJgV#v8J3 zCX6Y?fBH^uX&nbRIQE_7Eh%p-UMM!7!%czkgSUnb@;ErYdgP>BbgSOJ-k3F&vS*)w zu+Bpkg3!?oJNGP%CLx4F!*ZR2Z8(K8%?Uf3r9Yf0+zr+hp*JpWRn7ZDfgAX3>Um2H ze^o?ngRI4`wJr%kwTdY(F0PpJaWUoNFy+}ki{EB_&4zWs_p0QBTo2X5pAX>%Ua=^f zbkYF7AKYs|k5iWOK&PRGu;C!?-f*D$nVoq_!~>{oqeB>oItrLYpwyTX z!WQimHn*yUH`5z4j9Nh0!rM(Or^HOu_?!aru+YqmRdW;cea?7GJ;ghFiY4Efd!1gJ z$8CS;10N6M_f!;$vfTdWvwb)&l&{p+t3p5Jwt0h&Uur%p%>N!fRKThF%UYb)%t?_s{$Q8@;GckKv7n@&KC)=s<}AVb!& zjbIfm**%fPF!@P1;ChNL>80Y}no)WFcSX>qi|T2~lYCb%xFpy0dS{tXDd9#zh7%&x zNJ`OKk|!9@J#ykw1FqS1TNHV0^=@9f6%n9=$D1$nn_t4L`jjsk>+Q4dN(AUVhqMav zV!GecTWsmY)|rzrUxit~nM1i@>Ydy*F-g70kUVTPu0tmpwj@Nq zhf{FIVpv8(f}t3+kQG1?PClGr1N*hqtj!nXX4JT#YbI)0 zopJ2O*(}WYFjRynrc3h)_E!jL>$MvlupCjK4;-5x_lnzZ`0aEEF(n8V;*!4wUh8Hq zzXM<&*;I+GV8loKa1P3M|WA)F(5ezJX$GbA$NuAMk~h98}|5&0yhDc5(l^MHKM!2k zPgp#A7)jmm0+C{t>wy31<|H1pVkRhzrB%$9JoAn3zg@rqYNX~JA&Sal1Ah^adbS_@ z*4$70D@fpg90((m5(09doydU>AO}JnKn^@@D33Q3ZpU-|j~Noq0e(`#j@@PRgpWF zKH~zEE$PxygfBrdT;*wjXmkG+=C~U9erk_A;U9So|KJ&tkTWC!sDqRdBtY^5ob>ux z(wg=P)Qu)@iTZY_UR6$jcHbki@V$p3cSwmzr>64?9vCfP5Nt z9}Ck9xM3KEvKyetS~n|azOA(YVhOzD^4i?bWwqy{zBv4x<18nc_vrT6e9n(1yPxER zznJR&HM@#8r(`Mq06#aLxheV^OjXigld}O<0s2kxZ1C}HFnmaiTTIIm5^LSWMi1!7 zH5?!H{I#7O-NSGeb2iF45aBEZroHY#b$6{;hE+OHg*Js*Qa7aU&zQIdsCH?TBx_%y zF>SHKL4`U+h4zos+#{rwpnipKH?UE^2BBXA(60&V*Ch07AW8k|_5^~7WdFcb`gJWH z0g%JwM)hhhIPbY9FMev zXl%oEGU^CeaYNL4!hrJ8@Y!g3ivw%5O_R`ou5^*jp&nef#$&Ob;zu4Bq@~|8+OK4b ztLMcsr+*7qwpfV6m2LjvBOHmDpHvf+JH0>1_4ygBbr3PzB`YFet5Q=TUrQi0EjuK` zp{eM9iqmO`rb7M*WK#qmZ*+W-4X5IT5Mmp*9A7rS+wf9-rsW48X+7EO5N~phP2nSL z-6J9~NI}#4t|c@O6&*FiQJU9|67KaC?l{kNFmPNo0s^LSuhGr!Q-1Tkes}E-PpmzA ze%-Cwbf?reH^Pr=-Roh|_-pgnNJ_aksSzZ^_n)8D&3B4b#jLLTeb<`R^*ddECNlgY zEU|mcn~(+WG62Z8miU~5ul@%r3+K>&D#jLuVhy=#YY_*Tu&6{5C>A^TZh`;}W z{JoFN9(LdktJa4|sS9HM_#E(Qko`og4zzo#E1$ScQTfCv`T3jjv0q;Q@R#`o_yzg_-ps6w!nSb^JLyoXu}#vKr;&(w3?AJpQ{EXQ(ja9DQoX9GXi;W1<& zeliws@5ps(-J~|X5~Pgk4M#xd)X(w^SGeH_mIZZR9>tcQnyE+7%ZCA3RBnW3>Juz@ zWGyaItS-_&HM%0cb! 
z*lb2GCVg!&K>EEBmumu{^Iu`D1^#|HV)&6R%gKvIPF6Apn_=0=0oB1Z0vUugZPM-q z8Z#e89ayarsYk%!h-o8px1hMic$k{dCXv3v5=pBd$cYFJ%VO51?JZk|_C+HHr}vk_ z{X0ioa8A(^g0q^K7&U)0X^|Q!q@X zG=5@hg;qOT7`P-}H7x8f~wb!AO_% z%B`}=Vr@L~R9kjvxMPaVUjY!uguuGT4;Yf$)D&Y|n7uiRLb(mUp`)#PyjmfSq5!yX zb-d3^Ba*k{@`1ybJcCZB%kMjFce(>+$H}>^$5NTTsj0r=T8zt}|MgE!jl@i`}$a>#*n{Sl&UC{4e{Zs4xW&QrA`TpDB<9}WKb*K-Y z`>jx)W8dz74!IoXAnWfFzNJsC{PpmlXh7ZX({U8`CRANEjIU`tlkV36`6Q(niLAaR#Ly*)cgwc6W zqeKqS6$ukr9NamG3bI_%-NC^mL^1_o=?K>{5E3_F-pGGJd~mN*EGO0s50nh_WmK}I zH~?1{?2t>H1LE8GJ} z-Sy)EAtjcKmocb0JhqXy%7Grn?7CY^N5JHYVvr{T{(R3csm6@@7O*3dEpICPq|McD zl-bI-o?``2030OfBQ1rcNvEp_ZdY`oK`-NLC=sct3!)Yvi8Ymt(>(=7EEg5b6YcR- zxZTT+#@o*BOQn;kjjfDrZ@A;O(dm5`|6%LV`$rG#nR{O-s;$VEWXyv z7|F6-HxCV+8lU4?`@5@uuij;LAuk$74WdL-S3*{v7O8>Y74lOdNy|JyJ;vG>L&-VY zTZn4Wb1@{Lfjnpfw)fF@2O|Ek`N&$9wyEIf_-*c&|JlmW%076)xE{GvBjjqk!!Vjp zi9Eg)7{qUzI|MHVz9gK~gDd?umhd+Oz>m!ON;ufl0sW6c;5f9qt*3+J_z1)!47TStxbHnOn1gJwA!Waud!chb8%; z`gDyinEd(5>fN1Fw)LcRRed`i+qG@`k%On=u`_$l9VyLie`iN$XGgldtFyxl?@Fip zyXPu{i@kg{bYOb%x=^;cBQuo2e|jH6hE1FC4eb7{#%~)nPq{t~(XEulMh1NaaO1Th zgq64p(LPEyR#}du*@Bz=YgPz!0#DIih8-Lry0vns<|M#;7SCtPbPRAw$`&{a>>rh6 zfE3l96D;dOjpr=rm^=W6J3&^0pj*9*%=id))GO22#ZEs6$D~IBel^A_ATaIC)P};1 zWPt>4&_xhhm(DIiyr-z~TiS{AZj%$T+gpbz$);Qu#{iUq`ubcoFqrEcbUm7J2BGHD zZ=OF_oSZD8ZgdKJKlNxZ677gYj`o+gwIvd{!NJZ*&~!S@D3tP}eBQ2TW zG<{%J1=~1XVa9%Xtux9Rp3Av*uyI=htISD2gDtfumCht%LA%Et2o^569j-uNaeT6` zv%}*HxkC?iB;skG-3zBZHfP77skuw^P}h+*`+E}_7Vac+9T_p2#;6Pe5qtsU;a`oW>#Zz zcAkUH@@M$Yv%JT)+bHh1zOwpV^-;BgGZL6k+~F4MdI{&zs5)Z3--X{As|WZwa{V1^ z*P$_1cjEds?3;Eqi8Gm>{0@8)4>r}hj(F{&!SoA48yh+ckOc)vJP?vtL zAXl56Q6v6#Y!VD35|RXHza{voosfOS5jgb3-SfzDTtgb`{Q0{PgKY=Z~{CIgI??+ z?Ffk>Q^c#H?h;E<0O3L)@d31CI^1x(VB&}(5FGY{RIwuflR?in3fh{x_PjpPD;Pwc}7c7+egY?kgSh z`Y|UmcFai;=jJP#$xFU|VenKx@ninn>1-5JGbqeSDILk%SthAxs5Yrc}a>Tqk|c zx&wAzJthaL8x?*}qWf=mFU=q*OU{P2`dmd6a%6!A5}TFo=QXEpLe1Q`)ITuPT|A!y zp;`Kt(aOTE8PHyDnzvHhV0W%`Xyi~T6))}DUYVQ=y<+k(HJj$_g7L^IC8=NLjQrr= z2Vc4mn&KfV8cC=Wn3suCAXl(%3jo;AiV;t=lh;_pgp(M$P;)+`nWfRQ;`sB4)vTBUg zLzobmnO{n$%ht3UI)R>?lmGS5h+F1Ey&bhhXRP^%M@`C%HBRp7BOLvpwU6Ec{pp77 zJZ#(`RtE}J&{$r&a)N^s<&sh)4@HA5upFZ!t!Z}1&t!Co7u`jwci@rGY?0MF@SY23 zy$V!mElbk^+kTVx{O?=7^;0`PluiHIb$9&`Gk2jh9*ggvxu#Xib(ZmHePM8%a0X=k z<@_-B^1P|-!u1cdu4DY8=5AbnwVa`?>vATVew?9)V4J>xzxcF#jy@}?k1oEz&&SnG z*Rdy8Uywb?-zfeTda8u*SI*F7wJZt3UX|J)MHUMfrL-sF`3dvX%a4~zC5}U%cV7`- z^NH0j-)E9@3U60_5XXkvyf$Z>cdlf2 zfYqr|EM}4sm&f6B_~!p>81-|9)$7A=kHCiwswu>m;%-~e?d(a}+)j@p8!dGd18{WR zHrKzt{DWw`G@;%(VLCisPQSuD9Y+6|ajd}2*x|(If}WQqHfrajq#5E3wYD5gQoIR$ z1dA+54`44hoHAAHwJUaWly&hTn!t6Bs5%EGr5-^0Ld=2Qw5>=7h9+LQ4sf7z>OZB^ zqn8dId9|hZ8Nx6j%`bmxao7CPn!;zW0)bE5_xoV4T~Yhc4f&Ws!Vy-LMWhc!1okMc zSfG#^1_L_=`z0yR<}!1}5m>#z5L2(yITz9|4_)%Q0~9Va*rb~ky%|w5gmzF7PlWTZ z!?$(x$D&RkjB-1AcOP&54d4zcF^RXt!#40ycIR)|H_-eOiT_kx z+!w#WsfS!X1^xOFHDtKKNLRH^YJs)E0&}hTYh)oZajPJotA~NqXxJUn9TK-GW;6t4 zO3N2c5jFHhkyI8wuaG-Q*=rCl2 z>j}7w>;3>@R`3kP%weR90#2e~9rc=2Q&z0%3eBGLtq7J_KM!XT@l1Lk*Zez}K~5d6 zx~^pkagIgz7~jxZ2d^v8U8nH6*mgVY>96qmm-)WmHg8rRuyw*#-f4UcS9cn)889y& z2jl7|Y+Z=SkE7FiEezKl74SnywsCfF;yG!|%{!qMI5Vy8H{~lUrk$KyH8aG#65dI| zM_xd8m*5cp1`ZfjkiKGA+`-U5h1QXH*7Yc0H#KLt(Vjys6Q(Ru?}otu%b`{j%>l=- zZ>^GH<@<>*($CBW^f4ZOeMv+mdmA-WIWYJm$3D)qS$PQ0UA?yLm`(-mu?3 zu=*lmb#sRC(sQ_|FqcnbF1sXu1*FG{*JebS#{fep^fp8N9w}pq>IIgtCiLHP9;eG2 z`-IQY7BPR@?so;;+ih(@0iv76H^5o`8hoY)F}EeD+(D@ow6Db1Vpmq&cH??XF0z;= zzM@0FLZ<>ZkzszQnwlA2@dcB)P8e6U4&i0NPO%Iu#Ey!t;?#&;lq<2u*}Ps%^TEDnV&=U_|1%00;k373b zz0-;Xt_Cm3-qX9$dZA9lye_A&Gnw<4fn2oH8}tTU-hkKdGvAO)yL~=)Cf(g0%O_nf zuNR9L2>U%QkH^)9L^b5$1@qnNPMZs732-r}=|d7iRt_8z7>Z6fGz>|}8cfORmN_dS 
zoqTZ0;GNto?2A*6psfaVv4p@U#(5yNA_r7?&>cKWl?NwY)Nc>E-NBymzOc*P=JcCi z4u<0)f6$IoGm^^n^rm8AN5J9anTIud+I*Y(Fk%L6=Eu!|vXAoGSIv_WFKBbbjd}Au zBkjBfnZJ4U1+48}tnD}XxA6OSqR!^4xON`b{#HKk&mpsyY+a!?$AY=!xbzI(xBAoO z+l>45cNpKazT>~4cffn_K4%D@htF@lZgU(oPdV;=hI9DR>Z{b5)i1&3MlF>`<`D5c zP9+|!LKjk0Bf4ij(03F~s?!KBbtGcBTrAmsTR5MK#j{-)GxXaxagT43ebsH8l?oxB ztPU$(kr)HpE6-~wn(2UMpc=iF2bX?$+2CXg ztYmG!xh5)8XrXD{AcF8oR}rb}OvvXCk92g{A|6j)D$!^6Iim{uFV8LKjf8#v+j9wf z+!yt5zqx}Uchp=AFF?*PZyy4C`WK>6%|DAdd>hv7uGO!CQ~w_N z;+BxPhFZS_eXZfLSw8im^kbTM5ku5}7m^O5B5>*Y8|6|AbW0*5`pS zpC$PNV%SawNB!_sXz19aFQ#LIu_q@RBBYYl;bB+RR-JBp=JxFqi_YvQ_H=ukf!^+Dv7;~GaJAW~FcoC0pkAjwh4Xwwv?UY` zqS4ehcu=Tnv1%)M5%o)WAPj>0iXmEkV`?b4ZF?ZNZBBh^M{3))pdTup^ZZw=-e4GwjFTdnxJ@LI53!s zIy*`A&cAr0PW1*tHUBg95GWB`&p4KyAs09ip4tB5W&GB+9Olagey%5d4YXJbgu;^W zRZrr`YP}F<1dc|8AK6+M*GSiH%+{)}?VH_S=>uLchR$Xs)8^kdT9_O#donW%JWsLFr7~>!!qPjP|Cts~LddZ#Ps0tl>u+oU&Wg{u0g{*)= z38S?#-K2wKDskh$Yk7ceQpjNB; z+~R~#V6sJzXQ+J_tXBk}^;~SqV*`xDg}(OW+~f|Fw+vj2ceMq&D(PHg!#o8FJ^3Ea zQ+uN4cte zD9K1zezPQ?bqngNv(Hi#P_+DJbnuYl4f&-APBrApT7h!zsUMy&a>8rAoK4?*;_j{O z+b--GN+*XV4(>WKnojT9fACi)Kn-wa*VJ*Zv-fEASS-G+yM(rYaBHbEb$_&3=;DO2 z(~2^JnqI0GrCOk`>87hi9EWI$~;Y#t=Jco0iNT)pLz()bCMFdT}Z z>)-hB;hXMzHQHXVc*}h`aSiMIggktsuxXo(O}`nb_i8f4FOkpED-sc zKU7EJQZVvNi3OQEF>y?INo+-=RWF1QTU3sOWZFt~yVRW1A9DPb89-?Ni6fThf~ zklE$5qw7r5qyEJoaM=*&rCze`1Ly6Hz^GK8;aa@`bMv?8e;$TMcuF&!(@rM?OuS6> zf0^nKJ|G&-Fed~r7Io&N)Lw4$MBv&YC@bDz1Wv3l(W8=29QwHEcJ0Kr?CdyzOB{t0 z?Y%60KdExN&fdId@0nsOVs?)8tAlsW@2*VmRUJ3YFC0F)*gR^BM%2lz@2_m%QmyQq z$2eY#yvw((-1*zBT>0yDu3Xmp>D8Z8A4Z>{DC&Z)iYfp5(d4>~=M#CXuN77P1GrZs zTT9n(LsWSxu%ntxrLuFU7*)nSUDa>Nc&%|neOsZw5v0yo)jV4ge=+_HUMnzo2;&HT z9wn^Q|9)v@=@*Y1ZY*XS1!b)k9UzmUy)Qhhc4NhI@K!C=DOba1TfUHm!u}GFMo4 zw45tuu;2wq0YccQ7Hc`T)YforS!}`*!UEa)yJl*kFni_N$m&b1M*!LuQA$7z8on&} z(dp+dw}rY2I-1)Ohw;87weChAcT>KPhWCVe&~kZmEpL}-jtw!8hb=_L1KQ>NF+p&& zQhJPrK;^2Gc?!9xzd0b-EAI$Lnvy^}B2?#QBb){C*tv6@HKF?}mN#2FaBj+njT=Y-lI#WwDiCKri#3 zlU&>TR^Pt%_c{l+EZ2?Zjpq|3@1>+sf_u)Dit?+#%o&LR0gz4aLsI`6U{n1P-|GR0v?!-Jz zUt_*>{8#2{?fq}Y{9gJY?|;qu{NnwRhcqcikc;L^`24)AhrT|E^|%XIAS<`mdcWi? 
zZ-+1W{>|@~>sUWyW%G5j8=rHB@u%1PoLA#>C{G){S3c)8xc;HF>yq0u$1yHQ(rOIX zncXvuM_1nqEQTGK-a+H_77obK4f$Y~WB}U1NX^FGlKOxd2-aBehc$x26cz*n9YR1P zI0j!BSKlhIWfNdzNCa)xOWa5Z)WCG&)4*vGWkEK`FjP?84YLkVS&5Zxlmt19He@fO zYvRB>=0PnEy9m0r(wU1|B$WNlCkI+0L0IzcOH6MzUs@9itihwJ=Qw7}>20!>lILt) zH@mDkg0^s=s}HY35q)Aq&U*d*7p>2^ee>tY`8}Glg={j?u zkC_A3{djRdy>dUi{vI<7nL7xX`$v5pYsu?hH=aj)sYD*w5JD5ukq%ru4TnS^91@x+ zlS{r)GH|BNmn0X^PZlDfNe3G`psWJw0*g<*4>d1HaG8ShqZtlc0eJvoZUcy6AV0pg zFl!7VVv%dFSi+$~5bRR#(km3~930Fg``lYk;fQwWB(UQ~YkB&II&*^^u~8n_R!%qu zR6N#zV{01EV=itpJ|wfP=OV^D4zFV_KKRmKaW0lOz8`B`n}rs|yMZ^7zg_#hcj+~K zfj9*Ge(m}v1jDD-ZsYa$t3SZ^Us`RhKD4$L#_BnoP?zPtfbzrR1>^OrcjEU&`8?L1N@*cd7 zOdqc6yx4a!0|<5khjGV0e@ynhK1b~EtqaD(7JD5@Qj=1unuMjZ;!FdX4$Te?06U|L z=K=~5EZ%5~CYlK&q#T%Pu%hVL`!qCYa`K2Ef3!r(0VKq;be9+yMAN|TKoC7EINKJe z1};M~zu^?f3$>jNu&A$E_QakIW>d?wc+RpVo@*Tgjwx+ba1KgXqfzX4%8p(Gom11- zS8)9T>N9I=`T)O`x#Jw|f-ErK{yxbaxBhM#*ZFy^HMOqG`~MNwFBy-oG6Ju!iyfzK zl<(OsYjaLMAJfm; ztJd%ehQ*6YO37HYA$GMEXw=!a(M@v;1g)5)F_3e8kGZyJpID=P>ft?4{?t!9Q6}S5 zuWQz$@hz9dG`;M&`NcCA903G1@z1(X^mpur9j|-WF)zhlE-P_|KL^=Cjh9u{MRrwW zdK#wmBrrj^&8!0RKQJPsiYp8!s_vB5EODZbJ%CX{&j{zZP67!50aJ(?Qy>TzFkLYC z6m$J}IT}@WwO0-{7u8ddXf)h>tiP|r8pby}!)=Mq!oE@@;ER+y`lHq`U>{-4kOzj& z9HosNrhUY&!QcwDaIoxfUSKNKF&r*nOvoWhO)}aovko#UUGAgoOoTc}>P`^23W6d< zpkbOPpDVmpLL->|l1ZIT~j z(vjwVhKNVzbxTGaWI$Zx2r#~bnKa{R`l`-uIOlNBsbWrZD5^K{-VgMVSFBP zJSFlf*KKZGzhm{Y#_z3Pmt6W5>>q7Q;oWQdM{?|;CDD8zxca)|gS^h)%KKxo77yU_ zFRtEdydIzb5&b!<7p+{qK4)>&y57p+Yn!)q-HXpTWWHbS^H$>sKPN{$p!30h4_!Dd z-@DiNvV4xd{s|fHW#b9FAHQFF|7UoPe~Wj6tN3Tf=S)jpncQz#;5PrtXh7Kyu1ss3 z%~l45U`CTz4&m8032%c{XBkWp$cG?&DdBFQIR~!P0o+_3nizo3#@^k4aKeFH32RE< zaEOR5G6UidmR;#2{u*&kybw=FKLY@C3xuu*qS&*AUTO~@C<5^eOknqn%+zgTaOb>1 z7~M1VB!aptc85`%hDTRV1KnJY0fnNwEZA( zv+XRdJp%n{q(3aP| z!b4OIt`mGKvtXUUpIf~m>uAUs{2N#=rE}EAi-?{ecLU&sbVHDcZXEWIL{1RkL8qx@ zPsD|lU`J7Om!cKR2rmz}Aaevh!nuX&8xChL(&pHIuk?GvaBI}pU$Z&iAB+EG^ZSd7 zJ2t2Ll`pC9A31b0f%~SH_s@_t-nY(Ye;xNt-SmJkYY@MHvlzuNqJn^2?ZaJX`_i~; zz!=%fhTQemQA}k7AOIN|k-n89naHkgcDU(;T#)Vtr9v?4q9`Y-g$Qx8jJr?*7htdl zAsicxJQlTWfccJq-DzDtSp^*&f2USTzp;DDPtVOay~}d^Z`iiwcr1RirHj;jZE|-2 z?a<-ECBpt)vr)_ccT^@O(9{>xAnQwv!MZ)o*&%0M1vbxP#-Af*mqW%M7!Mz{H&gur zniP}=GW<}~+#S&#+PhKWd(kn6Fb@VbncnY#qNb;s4X-%eMv4g_5D_Ce%2tiRu$(}> zR)Die;sM|)D+KdT0@g3Fj&axI1f!m6XnMR{9qJnxn@go;N}2Gk z56l0& zM*|p_Gcc0~4upTbtpcFLbenwtxA{G4NqpQ`b35d7MXrBdzW-a0+lxl5`4{>+)}@8R z@Sv(d`T(=CQVNv-Mg)7|Ya}Tp4GM>?0uz+eK^aYw=)aoscU{-BHS`7%7 zxorTNsS_t@eH3!MY%j&%F6+WpBV(5(v372 z5MW}T_{BbfD~yJWm{&h4QWEA62gx`7~DV+jRXCmfelGufC3R!ot}g$8%rnoT#CC2-+5o z4=V>Gw{xwdf*@iGHrzTjkl@n)%sopeu6d$Z z!4NrNX&zS9*B@QlbJi-AI5sx*>W#G$NBj4@7Nu}&fz*kcs%ym&R459)aPKw;rRL-z z87IrJyEd1aplJ%mi7Kx~2T3sII~X18*jQk)R!vgHKs#Vaz+k5#8?__&H&b7$AL$}e z5GUtYxbAMpfQQg_FHwj>{5FmlnN8H8UR@mc%Ayg|x8*ots?X^m@Mmk-tkUi1y z48|mW4$hE1$LBa!$KKUDn~y^lmRJ8;y&JU>5n%iEu_@=L281CK(+E0whahytSpupE z5}E=P2Jn;BG1jW{BoF`xA`V70sd*?m(K(yWxFVCe*+_eEF&fTy;1+|>X0&VMJj1!| zm8wVmtvII_*LtdPBQ|h+q+A^vJ2#Wb=F)#& z+|fTa&_A)GV`Mm+N_C(~7|+*7u_&KljD&ik6$8a}9Z%bM9Z?@#x9|s8=i~Cag&A-< zPRMHuD}di$C$Fu%I$rBs(j&;*{XKlkUgN{4qoG#-h27}FlSTnfGD|C$+X^YzZnQ|* z>{Yl!K#U~q))=2N`~XbRNRj;K+>5trj6r9pmV2h&jZ}L(Fb1jacGQy>_|s-Ax92*o zjCzVs3(Sa0PPFbP^eEXwb3es`hR96agLu5uyhYbr`%%HS^3TP>2G~Kh?^~Pe53d_N zl3&9f1S&4|-`1-Q3(cQehadbLR-QTMRpeM_k#Q~>2dv0OTUBGuHPL@7^I(;iGJr%8 zU}(7)smg}0P2iz*1Gl|m^Tm?7O@w5hgJuC}0Q{7;1Cy;RH1-TkG!~g57wt`!yZTCz zP`EGMHIno&E!|rT_l2WVJqL7dx@U25K*i%B6k;##F6u1x&b#An(b>kSC$V3?wfZ8~ zbP6Lt)hya206J|g>`Gg2)`kWJ(JI?-$w)R*B^fD#=I4deCIyHHtu!PGY0eW(!6Ev) zuozjaj)%uGD-mD0Duh@|ew|5LF-M4y{MPxstP7nyuy}Yhoj49yv>6Mz%U#`LdtR-6 
zr?M1_kLR;vCAD=Zv(%Q@VT<%GcAP5o_5ch6IdA2hoer>eY5c;4dDIXK>qbIa$H5nK4U}$`MCNl9*I-irKSYC z_IpN;`Xq9+e9vvxdz$i|8RL-gTk;<912$py>-YkNt^=3U|29kT`>vPVA$a&(nQ~pJ z#1;eeumJ?aFBrdpkx5e-uf*;1q0XI7KoY2Lz^CgWoZHJmTa3Ojla8&-Rg!y#QK=DI z8S(}`GWEajJ$C%QuCDZ%x8K}5JzH_x3!(kf3##Mj%{LtzoQV$g?%ca~=fGqxQ!Pf^ z$5WA$oD>s%pf5qbz6bf5LQOMKa++}hT^fbmz;|V(qVKk%o176R%A4;fue7&*A>`la zfz0b@&teUV=rF(%L9s@39ft@CgnAZ4M1XrC6Z~YGC*F-hO1v-_IH;9yWDnuUb_xw! z9d&+@C!M}zNBfAJ-YN!GFTswpsu{A_hj0|@oF}HhX$f-~zl>r=aLuwA)tTJDK<@63 z+|xgPDywpRm7p_PKxs`n8YzvcONs8DZujW!`O~`=ZZt>6=JQ=MLq|@ZJ~F)6yDiJ*~oVw>dW8E;j!nJJOjgB?krvckf)>&H1|-IrD#0UqDQ<%D6VWKd}Og zGlA^!m?Z&V{&ETn^@yINP^C5!lCur7#(Bc5p_aBK3akhpp(sSjbo`Xf6G^59$2d`_ zbScs7sdph;4%jn7El)a)XuN|76FAN-=Rvx<(R5C_xM^i>;P%?sM79{TYm-|2+Z(Pw z`TBiZF3ud8Zp%jwUOuRD1B1Dq!tkgkJUua%JHL0&1>1?Uw_dmHOeFVEX?ms@Dh5m0 z$wP-Gy}h0ie9Y~GrP!dxsS;Cq>K&>g2tclUk{|lGoGHTanWU zmhMXX+C+`4PgL$o3-uioW}cNnJ!gXM0s|0DXJez+$F0D4z8gisK{k zLa*BrF7TYx+p(A6b=SQkQ0#^ic>qk<+K!@sYEk{iQwlB` zbSvR?s1l2EpRWMH*`P{DQkEo?8TMQV@?!?v?HvSh8&X!k>~M8tTJl1rKBW&YC=}_d zye2l>Q|h1Y`oz6QlHK_;@4E5u)`=TCbG@(LyZ?gYx-)04JGLvGoht5LTH3AVXDVZu zgZTK=;@;g7V5NV<8lo=%aq|w;!*FK+M#8%VLKKKuK1-C@{sYfcZH5RJB4&9V$QBqO ztf=QJr2m0~V)vDCeF2m=vlwHZ{*8k%lc&*+H&N&vpr$bD3B*#^)M=c{pA1vC&bW=+qRU) zWjewE%*Q7WByw$FsM&zL1{XvAA#Vp8W7kPW9{=(^KQ~a&CV4c*)HYBuy^%N>{V)CwR?85Z)9jl zoj)=)dHB%O^kJK0&$0C0(}DMx9g#Vw?cCz-3+MOb7hmO%Elmtn_cvb}p33)4>?n`U ze`o6Ok;$pU78Y^dxL~H#7ZJr9VpT8OT@$`+sqIV!F{~9>wlcw>Zv@`HS5GNW2jJSe zN`}pKmH9vNb%1j?0 zZeO~-b1VFja`o`^x!s3fH8S#=*~x4!-Z}Rz$?w-Bz)YjhPoj%{m>zD4jEV}PJ^u+hoS8!h3Dt$~n=x@)>L zax~O=Y!>!p=eM_a6ij;|F{gUEqTA2UJFeez zW4g2Sp*Qwzafh#)+2M{3WCt_3TXX55b87H4XJV&sdQ+j}-N%;9c;=+L)4A`Lqq#Tj ztp$`jFfkO~J`^Z;9Obv}%Z!cZ`tXl-nAqOXqr{!P)oA1?)#zeFIffn9r=Ov-O@}4v zK%6D7SQ?QD3rc!H&v`+2$3sIYYirY23b7tDiUbwSGE@k8!^jM#uz))abL$xeFZvC{qH;W?7jW=y{AWSdFw6lye;Uu@7VEs zdyd_6?C9)_>O6emj-S0_&mZqNaBxR%DCF+TwjDWl>(S=V+;jZc%!u{gZEUSD4a4yV;dCxga6j zE(~wQ^oLX8h13gRd}{>TK;A;(phg$bkm^ote(G)p*hW(cTT6f8mHPhJ$>{#r=wfPr zY3cUcm-b$~aeHikw0AU-IC^Y$>cp{`Ek|v(!->Vw|K8Pm@jVxB->YV~pHAeuLe2Qh zu@h6XM>$;Vty$wXGY9=ufWByiu+lYmmE|ZLK^`=ZK#ic%II4#PvpE@2p5ggO z4%$XM#ULVf3G!4mN2FoeXiRDi*HUn(0hb?b~uBpMLDItw**j++qgpXRlkjI6Zw)#&Q8`_fM$v8elaw z>pxgQgn>x&R*G)R_(S6Y=SJgi-P~L5Gr-Q%^2Pwx)r%=PxOP}VPrGfNV63x17eO0< zFn`^8`Cj2rg1$prLhz%YA1SIaXh`)>_no-z-eN9)%TL{MWcs?-^o$Pe*g5X(G3P2{ zi`xfARp!{4n~qnv#>NH~_AV_KU3;I}@p*!{$+y!OFhj*I#$?^@lvc9q0G%K0mt@IXE@D=Sq_a9hPztCr9x~2{t+-fh+GV{;=;}^^h=(GxJgV5gt zMt^l>Sj+h+|TVv;sAAIfn;83Cj#c_W)x!b9?4dzDT7MPn(<8FzySpEc@%9cL? zT55Pp>;420@{(X6rNn#(C>be8@GDyL!Zrjks&5-nqo`mvSql`@A+i)UQr!Ue6c0qH zUq{ajXm6TsQj!t&H_+bEHHAf)Slwk7(8nQ#EL;gT*=LMK23`PrSWy%7P%Ct`Dh!2cZnV5o zlJYxYUPM=|Nxkf7!%V{BP=zZmaRpK6sCTWkSJ=yvvJtP;(9RNvT5I6*7G2^3cGVs z`T=H~_Lr@+QdRcXSdlewvmY-vN9aB1V$~CiwSk>xtU$69gDmkvB+ss>01wa|I%kkp zOf!x;{MdAB>{kTs*6drb+}AEeCjg2@nsV8pm}1y}jlMFrAAEVYWm#j?RcXdu);d05 zvNU_-FZw12w7uD=z_ny>fyJoHUo~nyfmFTuP*dXYDqdDe35Q2szs~=!g`vQ=PjG9$MNn$=)v@ zfg{08+W*eKvU&_|t2t4bOjicc_B3F3#xse&Trye9!fq>L4PuND%#io1rI{XE^~LN( z9OhI{h-MMi+Y~=qu1ry1LU_-3S=85a6W2*)swWlT03I=kKWYmBb6jIE)9%o46IZ$- z8X(U?_6GnXG}7ak&k@VxcF+;Vrf7-{D47#utyr`yYu!PbPe5)sIhuV*5Ll3^I)!x! 
z5~F3sU~!I2@`T6RzU>9x3M)g*Cjh4Z%-cX7NwNZrY1Jeg5}$Cdug3F1_x6R2yLQdb z_NT|n8>cg_YxXVO)EzY(T$tbYmM-^>sT-$jW23c42D6(JL;acI4cA`1xNt0=-&Eeb zsn0nzy65n|9g&ow&%I%OVMBZ$Ci=F~9Xm!Rwj;g-9eU*aCh=QM2pz1z_JOyhm>T5$ zqFXG@PlW5d@+707)kWq9BGA;$lRkx_Rxn2DA#@?-7r3d0Po=XsaJIr!1lb3>$L)#n z99nxJp?=P5~N&^H}`$=Fhxovuoo#aYZ~V*7gNJr*ulcIA^@|JZ#VrGVchDS2j}FvCC2KUe^#koauV9ly z(tYZg3eR70&FUqT{%5Ybs8@*}vT9OsBFY-KUwdK04nuxCL=A}nki~vnarh%CLZRs3 z7-Z4VbOgfWN|B=#A}fT7F$Mgb?&PpEHN7}+m16}*E9#^u_H?wYI@YVGBwiQpu5+7)zu^w$1E+PbPfw$HuBG51S5bscuYKC!*CYqHk<(&YNFa zOb(AE?se{QB}Rsmv-5AZdT!WZ`F^LfN4t2}>=3++=;k41u8zT0dsTb`9@XtqGeL^b zrz%2KQjiU(KvL|XUxZDPhP&Xyf_IB9R&6Cm{*(nwnKK$$iy{zS#jkKcC=>;jpb2t^ z2u4|>zuH-z70VxKtO-#PuU`9^lUs4x?%y!K%eR!QUB21lvU;`@29L(HO9u`tZJQ4G zEAhDva~p&vQrKRb-0b!^^NzyAp8Q_up&D>F7wn0CSY6A!8_LQa$Z=Nu%>_&WE^`&TnA5Vo16}su#eA1K-8Wr6G8X&@E*yDNke9D(?0Lpc~FAxVvpT z0;DR^Fk$drmZk0jSzc}ORK3T#?C>YagFz!7knfw?9GhyL zoVw?2;%~c3p>@?Q=@SoMzA(N!CW2XSx>Kuj#zRh*301SllOP5UFYSNpXW#k+DAE_O z86zy*9g4@k2Af%fJ>rD9$ZMzVqB<=EKa#7CBv&#;&EViQQG}$eIWYu9_DieMO^f-f z8t_#MK404_0y_>J{@7*n;#A8$v$%O?Zaq9xnK{-vO&AiJmHq{t z14+^OODhA)R?ifHpHN~pnggua{pg*E-LHm0k*-KSxdNO}9{U>b6zDqKNs4s4^k^k36TDD@;dBBf-Ml@t@r&e|jMuvc!Us zLM~{`8w>-+aK1m7_{q6s`~tG>cM7-EoF;Ngi9#NT zJS&nwfB-n`v^hZE+@U<$l3$4{kD!l7_>rhDQDst9VwNK|D!#u>(Y6465JO$#6;aiU zWRckHblr7D3W*S~ssyH~h{-Gn5-Zb+**KkFndL`~T@br%MBgk?9qxK@q&~#Qy5CYW znDkCdD4rW%*s=rjiUMEw*ap-wnZ|E{S+|gk8oQ8?!ApXEwjsEXsA%2TvIAk;Kt!%> zNYY{%uhY?) z9Io|gJ#uJ&>1b!do3=)Zg=p~~d{=z9cYFDgrQO@cwwT=p|Gw$z+Eig&_l1SoVqtFU z^X|B?kItG7ECp?1H2JR^-y&<@d90A{Ot_qd^#!siF6aUix;O4e4-`5ZRJRHaRvVN= zpC%jws(q;~swQSz7ZCf=3`Al@BX=8mSpQMoxrPK#3lS=?|JFF^RTp6q6i8tvDJg}U z9=$2{g#VV5cl*sTV6F%M^q}x>JA8P%__x+QFE~G-KXS>`=1o)U7Zz}G6(O$)_$LRY zZ%d5Q*Oy8wbBZYLE)r0s#x!9Ip=K9J7Y;U5HOC`SKa{V}oL!kvq;7)SA*`{ANY!sI zwk?GH#l~R_Y+?pNkfiMRn%>v%FBFGrQ*#(2golK%2cb@r+2Pr`Yc~(vLAGZcZZDig zsQtRhx?&Aw^Gp^n!(H_Gi}c5%*3V0x($vh>{dfie3+nhZNLE$-pGSja#Wexl94J1- zhw5p-;zSrb*Rj?hFBOBGA+Ycve2f*+6~s=x!(i6;bocb??7^|leWj=`r=NFjH4RK` zC|X8_Y@MD=ELto@^gi+70bz#KXZ5Fj9?OP_0c+50vS>|mtZsCg@4YvanfRP%BARje zdW;=GN2({`1Ywx{}CTDHaBl(q`YB{W<`TG3X`Zhk!>D~Rs<~-*E&u@k)2$m2;e`UAQ&t^B5 zvt6kv&uXAd8NeSYu8b)!REtd)B_7pWNE&S%!x~8Q*fJYN>W3}$og?+#*2YX1W?FAK zWA}!lnS7c2@_LN`6REL?_)XJ1S);WHSUxYgY-97=Xj9Z9jHn#3EN7~_57P@f5le$X z1xqT}!An7tuaZvN3!^>-WI{ZGB+gFg8vIih^a4-}q7qCLj)C*RO6AtHDvk<%-Mu1N zUN>kS+dd)t^>Mv+V_>k~rwMi^%C98e zTnHdusIdn_1zmGh^BlF$U+nJv`0eA}H<=Q-rLpmyyGAXJoTru@s14s1?I^@7(Qv0_ zp}#z~YiYc^KV|LEda~j2z|_=0xya`aVIK41H;DbGq$7$?OSB|NWHW@5NFgSnD+>Hu zl2QZN1kmO57;zQE4)I8Ue*sPalHAF9ptsQ79=Ypa7?B4{8Ku;bgu5UC3a4|NNTN`+ z+;v&ino7{_Gf7*|nKmywW@959Ru!)!hTJ za-VJEzP%eaP529MUOzjYC@*>pLNk#~O|2`SK6;-uAB^@dRnv2p07%vvX}5HZED)ay zfh$r$HaFKVLtELli3+wzXTDK}hnJ@=$8BCyu!!cYYABCU0$rO&xoj=!V5p{)nJX$S zl8j*-ElTmzh9>|%fSe@_Cv*9PVuP@PEb#V5XNh!Jp5R~|csm*VrUG7qTBxyp!Aw=u z0{oy<18^)kqK;CM&uX49^w>f>N}gO<96qvs_R=Hk-#U1>JU?F=F3%r6eE6PnY&cpj z#|{s^6`YZ4H;MPZZT*qUKzrCTUoI^yl=j5-+_@)Ku0-Rd*1x=M)3qBnGPm|1>dpLK zJOM5(*4KCAR@saurdvW_Bq6OB9;WVqPO zk{f3zamlox6w4x=n$v1V^Y!Fv`j;NFs}`A|j3w$QVBqOD3?UyO%gqI{1sqeL+yU02 zbj_xl+Yz{_3Bnj&(!=&|24kGZ!nqIDeg)oEYezoP2oit@j@C`!`>A?Uu1i9pgKe#wV6WbJpuN z?D6}5;P>zS`9%M~#Kf+Nr*)_H9eAt5etdDuHMO#Ux?EbC%x~(Q+1qu%VYfS=*Sj%~ zo8=lf6KSf&Liu~#ufz^np*EzUz~Jc`EF* zc^&rFb@!jR{(<|iz2O0}TfinqKEO%Ij69H?YW4|XGZLx`lc0cg2bhYJrI8te=A_`u zHzi(w2{nt5?n#@{fYdgK!g9o5L(d<;dl5iYNE)G35n>FYOD`=NrR6%Rh>wceBYuDD zLq6Xl4!eFVZFeAT^Mud$?6G5{x9712W~{*s>1{Ld`wg@VnDpi#mO&|(I7$`&;)V>^ z{%E954JbN+JtFRb3_QwKbQPCVy;OK@0ymtRpu0>(ahD*_Qr;<54byRjX)pnZYh15F z#n-cFON>kaih!g*O@m8?#i~xgHI=+!Lnc9D8?c~uPQ7Gp3|46U`Y0(v*Mkp^G)9T+ 
zmdfIF(m*uwj9MfhNx1@0-SAM^kAU_`DwCpWZ>16wKEz}qiu`qmqe#x~FWkKE_&SH( zVH5I^^>YVHgRS5Ad{^wedh4!n$gEvxwvSG3v$_VuQzBz`#5OD(nh*GVCw=b1`i1I1 zz87D-dwTDf&F4Gnb7#k=3(ly+s-xarCCqai^Bjh@xk2f63MdJ6&PmoO1t|#$P?Q5Q z8fE(*Jbe!Iv1V+0e85}+}VT6h! zx95U3HJga4%En_h1^Gl1!M>raE3mh;bjNMGeZJ%Oz3=F;2eyx9%AfHS%cFhcVlFaW z3>%9hA3S>PgAV&+r*8Y3&)m6t|MdSb$EN~U&8{DVOf5p#d!ZkPkR8@^lQ>a@32Twd zN>IaFvZlMrZmKSUYAY)PCVEI%7g;j{W-8G~2N-3{e zfLcBGfiS6bpeRDnipc6+mWm{3INU&2{SnAOd)xc}|f~g|)U1yac8QA$NMgZ^CR*y#PPaHMM*=prtTKXUiSl7z1Segl>06 z3Cx75ag?eZOrmp@)*g_Kl0y@*_A*{BU^;?<(K0pFPSQQn`GkK`PQq#S)BRYmf_(oTzo^IuD>LH3B8dezEXtd zC4rIpi!2KbBj(8HB>6C+Aj^ucE8@h=P=VHLnjY3WN{uWXnm~$eXX~+IyT&O9tkm&_ zTE62uOZ6;?|1e=&A=R!Cn~9Q z)nGOBxomx{ua#Dxgl_)SO|6H-#DUWX#qrkj@v-3qb)v9GMBMtRPpx?xvkC9g%{PB^ zVdmgL;#Vs&Q-ZgA+r?JKn(egWj(1rwL&(W&#yMp^jv7TxHy65d`-xD3QWA;{g7bZi z83J9i))Fxl2|hvc5Ewp<5CjFH^@pa&TIGo*QB($zPw1$q{-Mfzz-hBNA5g3mR+0aZ z_4)4c`LfU}lh6}h(jyw=t3npakcAlb#*nm5`l8Ycrk*Krl>pqaK2Q5Xk3?Q>tlJoe zSolJDD)EI4g1sQ>HHw5$DMtYC#KdY$;vpCulqW`~4vrOy`@q(0CaGaeEg8Ypyu4w& z=rG%W8PMn@GaL399V3mANjxz-vIctMC98(GOH#w9$Lj`1mbf#Gao}^y)?SsbQRRb& zH8Yc!X~zJ#Ascj!Vz&1}XA_9S3|<%6r)0|CrZW!uO37?r`m)Q@ec5Ei1KVTK@9)(Z zy}4*%c(~ACadegHBfd$`PmNug#Xu%HKXT8|%P$YzGcrH(Ceqr&qcy*6(#T>LeB!As zuf_SjV6G7k>FOc8XV&07)00oEd-v;C@OzeAJ&+@sp}+HY4U-vhDull}e}}5EX6hv^ zYCV!L_Vk*}76ht7VUAL@f2j46OBbJlrHc>3s6pI5Zu0rA^ZP5q*R4N3GVJqR@AH`^ zXRbRbck^i%lRMA-Ui^q2pZks6bM7_BowM5>OMtBeFj>$27?x$y$BJqXnBQ8*4;p^}LCANm2C(8?Bjm;OJq6qYO2)sB#t! zkvjSHawk79lDQ#Lj$Er)iL0`W<25~!s~Qm;U`Rn}xRH|4=%=)J%0-hJ9mALbWEB)J zEsYo_Pa#8vx>&0Y6t(lisYL)H(S9b;E*()nRe@J>FdaE-kqqHiCei`g3V~6uUpB^ZrFD( z(ZeujsvQY?_@5PY0DwS@bPoAT9icSr-zHb2ce?Q+8rL=Dz(Ct5HYq(L|I>+Fu9<+G z5VeD9e&Wz-hWbf(LcqK5lazcvHxKMjDwZ?{&%gOHcRZ-e&}c4bG}QBRcKwcs&!X#h zPfRSn`~YhvcdjnwD7Sh55p<_ZN5`Y9>vvKe37wypgzR*=2UJa@Dx$t93`bJ?Z8=l) zzE5>(FMdm`&;5li;`d+YL;Dt=@A8j4bcN67i9k`|1)bd83bEAT>})|D!6sLqV?NOz z5&p#tG}9+8whM|RfvCFEOcqHwB?q9KjM1(lgb^0ziZ#Fw$?Le_gst(wSas!AT~x32gxT+{x+-{rr`_b*| z_d~@O($_3v;-cq&C0C%EM$tzXr=N?o(=;YvfTjxNWb1|EN+jqSfQ{W-YEUP|Aq1E7 z^a2$RZtB@<({t_g4U`XphMSFbn3Z*AMm+3zXjjZpfFR8h>#xixuj9@Fzq7gBAUf_|P5ZFcu-U@YVLuoS!JZQKtAOS2}?Vd3@9FQhyL=_CG zNsZ4)76hVj2++PJK_m6gA$&Y1L;wvKa3s(scLD$AN2=e;$AR40=INqC+5Zd`FvCWB zZ`3pHb>~BqmF(n}LKxkj4`sKuyfY45LH?*?Jo2&n3AAo3b5Sz+CSoHRxhj890bWuPh6vbD$Q2(leT~_6S@tunmiZ1*+PKD&Wh#vA$kv zhCn@=dVxteik#SyCho0HPoYz#DO1A_L1o;f*~JMA4J%v$MzSEjZAg(2#kIEF+6`Ul zT#GP1CT=2d@f#=QMeOa}e!m01_W6A7)=zfq+fOLtZ*Tg*C-1%c(U0Bz0&VkFh-T~` z{^bJ7NP4q`35!E^OVaz*sw05f$W@dHGC)mL%u~IO%XTzT$qFy6c!kb}DdG}x@RtCv zqIfAR`ld|nt!n5oh+OF+$lmm<2YsFJlS+U>2O{Hzh|DQSaf(GZg%ZmE0O3D2ODXz% zi7I9f7Rj%ol%_s*3NO}mto+a!Qqg?;Z0#(G<<_r;e7<-4{aH-Nn{nWZUOdMGPtNCi z4?Q>Ygb2lF_y?L6Me|`k79l%5n2#*^LsY|r>p?Thj9J0rB1%=DRP?@2f_P=I$-mZa;QZ zyxn54nsh=RB4Eh2B5Jmae=yeEbkb%NUZ1r<%Ju z(9Zy49l=;b7;C@uF14yHX<=%D+4K^&4sk~pY#@hNYl|0JyQicqHvl8#cPRo49oIRe+q7}nVH58G6WS^^Wwz2}Dp^DTl8n^{>l0#tX#&y+6;Qk=hTnetUng=Dn_7o)7v&maSExOMd9dU8dU+hr9#Q83wU!zNC$>FX*fc8+3$3aH-LDk4 zgm*pWA$FE_F%5<6oppcq`gh5o=>=D<-T|_pmdS!ob^NctqL(}CPO^i*6g235*IVy? 
zQhFSj=B_|cv2DZ|JTo{~pQ~MJ8WQBT(aWXOt4Q^*@xkrl(!Z)k(6Cjh1ro_%EKQK} zm^0Lw>YN=+%=BCm^!p(ru{m8n;2-Qrbxfs$19pecQ%;H_smV&AwS#6$oE{h*&b2J89bskk&`M!_YqOMtCW+>iUfOTC!8Maa;U^btv^L`X%b^(ZD!2TKu_kTYxQ z#4dp(tpp?d5aB!`_DxOjvnf2Q!VjL#*8^uME|2-d3(XW`*Qq!pX%yHs%T)k%Yl#U6 z(SnCk&lRaMbW_jbr-!xQv=fqDvqQ}`W*}X5lCDll3bmpB44LJuA=PjXlnOc_u2G+$ z*piIlD93rdYFw6f6*pxqTTZm?qz@Paxpdq$;P*rDvhj(C<$b%xO?{KeEWaW?TA1-ONoRBy=5M=PlMJvhQ31?InwI2^V;ltZ zr$R<6rjM%ehiDEYGPPG+gY@cZ-W=Ru*zRhp4Gjif#vGOvr5nHu1v2!P0h&wr{8aXU zR55wP8h|VfBPGq1Ee!{(Nvg(z90&xF$Ri#E?6RUac{NI}ZipXG4~6j=9v{M-a?u0W zL5%IQ>%&`?av`Wbg3J6PHQKk6Vt^a3bo$N3f$@mN(WNuS{Ha8()`jxop7wd1reNTq zZF1V{nR8?kJzCSiNWitt7?k^Ct?$x)ZTrA|RCh&3GY0pMB7*doqWLD6A81+XCJ}@M zPAZBXQ*58!@gp+4c zjq5(L3E}BQ#nhuPFNW=U<0n6ffZ>0`gGbW~KHrTu`b@?J@srhu7=!oxEApf~0UYjL zrQ-@IS2BsS#b$(Cu0)_imunF;;$!ScX}Jm>y5lT)Uv*R&NZ^LmkQXD(Sm-=Qa`K4e zFbAEQU@lwTz6hPIptYIEq|H+~PZS72#>TQnZ=#Z@*dG+^pw(2$StQ$bnEHwWwOxpZ zsGFpv?Y(MrfO9$lTF(bj~8pB4!}LB(fhZLMk{WI_{*OgwMIC#b8;*!sI%c0is58B-XoguICRpeFHq&!$LpH`x)jS6Hxf%U8R zE2R;kU0h`QOD^DvD;<(KfenHdd`y4|F~~u12GHpSqFWTwB1uy824o^%`Ia9eYoG z^_On;<4ri;5UR#z0>s!!x~8`iz~$I4d-&}T-bPJAV;A1*c@lL9jQ|?y>=S1QcO=wN z_41h=-!%lwDMo<=OurSP43==iIf7-d_vJaQLT70t@wT!66ik8U7FtbIqp<#98N!0X zNoiAsyRE9!;utK|qDB2et)ZmumzBiz7GYM|zK5qdY1uh-5pYXg)G_3X(nAjy!f;rY`w|7EPLQ8$ag$?|%u}nS z5e0j)o@m!|w<-F7=Lz!M)(|A#I3)RHRTHq?@t^3A+Z27^4_LZ~B5@>|`|SCl0_z2r zUH3=oL)_ce48gvne|)Zz@cA5wtZyX^;j$Zb7poCWG3NLfYG1z!oiRZggHO;QOkV&~ zsvH48NLQpzqZ{*zqje=y;D;F!@JT*6>9llUV<-+8Q3Yx#*za_LQdq+q0dW`*GWEzA z;bN>Kq&C*!tq~X)j~lHvH>dePNkC*9-86?NUW2%KfeGLlR8OZPHjb(l5hOd2l}j1| zv}~Zr%G4}iM2*kw(J4jx(8XUV$a{a_>H|<=y5e6$deJzUdMSifc_4WNT(+2?UeQxRIn|6t9WO4oY$Mm!H z`oj7Hi21eSqBGJo`uLrdns5eir=`Xm)FiA2A`Wm)KLyiQlBIq>1`NP&Wz!%(+=PcC z6Z{Zb6XwUVwI6-;M14!Xo;}-`!Fzj@) zIH$H&%PEf!aG}oUc<-%?Z@RBKTAG}ntZtslcjtRkv^4i*ibKip*eKQp)pO+z$YK!u zhCe}$Imw~{(~R~u3_=i-p*Nxh0)h781a31UjCvQ>dVxqcBV3(0f|AtST*i2o>^Uq% z-2zer_b(DiXjGau){H1(yj(L9Qh|^g=vRdNG4i=s5KIUw#>?3;tE+{CUJU ziipoX%(_hByOrP#AW}a_qb8nezXCw9or^?6*)T}R6T*lq{vc=`a6qFf4BA(KO)b|dMZ9KVsw&!8%Mc_u7jzCJEzA0zIMta$b%^Co zomQ!PMx9IP3@8#S>S9PnwQMNV3Y&i|gF(1r;rMo=Ihb}?^KE!D5ggcL)OI_qVVirP z9nOBmdx=eFNF)<4kSu)Dv)!UKx&!Xm^E7+JdD#U0WR%zA#GHf=@Ezp#(EJc1EJVjA zNE-~(HQ-S;v)rvZ3y)_-Qy!(+($EwwsEn-psljeJ|@yrBh)Q537p% zgq9hNMfxC|aLOfHXZgIYM`J6G_R{n?*;xWSoW~v|3TZuKS&oO2RH8*{Xmr?+)d|dK zne))HNIO9^(B~Okw|)aY(Fjp70_szW2okm@Z#61olLLR6E$=SJB=Q*AD6?YWsHIxe z7b|5OtRAu`OGtzrj>krZa zvxk`!J#=EY88~$lT@8T{guPJ#hkBMYBRZ1g2+6R{T(8hk4H<6>GY(@#m>U!Le1(f@ za0P3|lwwRcBZvLqy2}PNhIDRAKZ$T=W42GL z#hejmjj-kb*sV=3$uNa4O~jUvOrTmR*onZTb;DB)5-viFo2HQw5o*)W46q8LuAEfz zj|3UuD<@rBK)I=V8S)?{u41NK_ok{iu!?K<*-HVFro7(2SQ=kMym4r0aLe^-jQ(MS zb>4-*!x~AU&hpZ{1l^N|ei{9K{x)eU`f&KJa>QzbO8cIos~dK@nEOTN&^s zN8#hY{B!mMD48~icp%)g1Q-rUkVc5%a6o;#I|rnUO0YOOfsOoUz=#%6r=(`VFgvYz%@{-|zrXrhaj5V6CVK*3cnjEj9dz0Ig_s?lkAuq3TU1*d;J9lnthF zm29hrT}Ei4Ni8Ca?b9zKY^AEq2V+QxU0So&I5na<;rIEj^7%q{jg*TccZK+g|Ac0^ zX4G1``emQnpwqP8cvZpG&;90^XViHOy)2||=rrPJZ-xvQV6)aJqn2c#iS48?xY-vV z42WtA)-k;>FPa)WZszb~#b9D&q4hHcO}u8Yj!2Jdb^3IFMp+qg(KBAA9FgI?XlK@#1p6K`6Qf0G zBukK4GuFyXbV92OAD|T?trqM6v;4oJDFeYHU3t-_O^DE>LOJgJGoP>AW++=_W+<08 z@84X$dh5gL)-B7fHs`qGkNC;`WsAeLt~kB-e%iqmM1!$*W9MI!1K7U_sbq>O?fOaOlH=y$q#ibhVWp3bHsAZZ23-su$M`G9Vr#RxTVmSRBksjWOjVMt%ly zSz;)gjIs!DFieD*N6csQkRwy-JA9Sc{)o@_Eg)Bq?c6Yh>_i^p{Q1|JXA-08glPGeQQ87iJiw@e zyKMy(X9Y#}Kg@gtvI)awb;Xll1XUC@3foO1F(oMG!_ZZfx_(g7g>Yik-C;6lvFd9! 
z-$go_$`-0z6CrNg=k@*wnG<{c{vUb0`&$3erE@xMLbP`pt(LI2zN+;B%0nAGuJ?m_*nuQAL=rKY&;G$+%uZ2gb<{GRWPlYkW)zu@3kBa`a18beP z*~L4&-k~8cFru|XyYC3t(@NwH_T&W~@d(?O)bsNVj&)Fh7+Fd~@8sngVjU&v!&0-8 zCV;qB21sumjTX|F0Klbb&Y(!cd|GyN8px|bgcIFffSgewG6c3*(I6wA^oY9ll(ztt z!d%cIaa}vni2*JX9vMg_M=jJxC_r!nk*cJI0}jiwR+Hs)=d@*60;UapK-rrrh(>No z8?<5Lvt7qd!PsDHwz0IoB0AsXWdk$L^wxpNZE>A37z@Vy<*LIznwVo8?e9lUS6+U3 z{U_gCDlHD=HuY$c4HEUZO9N%>iksUQT8GLhuVF3+q%v$xu!xc51hQtS0RmE8TWp$W zufs6s3Sx-+sq7+3sH5&m+_-^AX)(^aW42_Gj203h&?f0rr1QFl-=lyPo5;1mgfxmd z996w0TezFFBIP1AQ3IT(stndqty^QXxcuQ*I!{$$FIbJmAr0H*WWOMN16VxU>D8o? zoTNM%QVG@?#!^1)sUl^R9ADm0wWI^ikwrz#jlx5#O%T$aX&sg7>%;zWbtpnjk83fN3g0 zU)F#t998xm)6?O}^)Oo917}2ucOWp-3^Mo~1R_AxNNOcZ?sG5W1ZX$L9vT%B!2D5I zS;j*hrViQ|#1K3*=SoYba+;qT5gwDx=xU=<` zq6)?Ks}B3KDZ9_zU)jL>NErjgz4FR)>?0@dE6PLAFvofvx+Mzawc6$okccP{0w}}K zF)KNPj!=Y-JWll-VvLdl$dbix4nL{ZutJYBj5vd_p>#gTYM|V5(?{qX)FuLq4Ihrh zDN<~9F{D5^EUBz2as6sl93zZb=_${U;;Jdg6ngeSu__pfJxt!c3gsb+u{Qn%G6DM` z#xtZzw z)+fn>dvYsva_GyYk8Rqya}#vWA8?N;bBY+oN_wW^Hc;fy#lb2UV9}LW*o*`!%4@58 zVOoNF99KI|G3T22#LYPC>UadxfMQMr`j24xMidiB1^o%SCTx_r$5vtl5yQAKNlBeY>gC{BxnamQJJf0}tUUXh5SpP*fNE`4cgl`Sv|KtAy^0C~^wac7eqyg@NtFRVS?TVIQ?4Z?g-Dp| zK{n%6pBR+{C@BQ47{M$N=J`e7d^AIhxP+*4Tmf^7294H699h9qql-*Er($Zab*SLy zQjwz|Sdo}S$Rv(+ls7V?aj$rz*Y5{kW#=W6gPR6{A&PKza?m|{S?bQ6fsuuwxai$< zQXD4P+IqDkSWJxv-L=aqVf~g#k8jJBhQ#E_1(CXr$KCcz{ggFGxiK6>L$6iNkujJ& zZXn4_t_U z{Xpssuh%JmHSG5v^LoSIos+xUhjiO1ptVUsFB0B5L9qcsxX9RtUEdVBVY{U?emf=Tb8_LgUIILAE)P zqlY7K5QsuewkB~qQw*O_@SUL%Rw90^Y6ccf!-HS}yy>;#-u9r$Qg>8gkFTN=>Q1@H zltYZ>FRNbfgNixx*GTa>`l-=K%x&&8S#2gqKLY6xgCYB(`NgN$l7qEscf34yq#W@~ z>bpAzY!1J@^=W=<+K|-~j)u8}{)K+dVg1`Zo7baaO-#C4Wo{zjrD-Q|2dA9m*hmpC zG`17S`=+L1YPt)O8lY_yq?%Dg0XwjL0$DL56nE`Kt_uQLBmt#nf7bDJ`O)~MJ|6Pgf5d&Ho5rP@b4AUSo2pDss26)dFrp8nA z2#V58_P_c~#jh&^L*XzSC@4=n_48M6rMwuRi;7lwWb0mt9Ubyup6+a=JeSZxA0?;|=1?HP7$~@(n-VZE(5M<%;UIi!<~* zb>RgH`3Xo^fean^k+Q`TjwUGD3PXoHJiAFGwMkUoSUtW_(#BO)I^E@kmR$Gb>NKNK zV98Z2x#YpFZlgIdm`WL4C|?c+!s3F%m`V*M%tk|}KkN;LgMQNupb73AndtW8nK%4Z zeUH^WK0aP5)cT2y5E&aR;;Gx*t1Au;4qjEJ>||f!s{9}jS^?hchveN@mqlcJG*PpH z1*|y*No#VSXZZ5ochHbP(!$>e{!ZgG~8w3(O;rb*6bLUGGYiMl3hpEb7Q!1b;>PUT(G0CvCF*Qd* zYR0qzq-@Ml#WCFw110rI(qxJ{?u0tNn0Lje|LR6=r_o%@W=AgYZ==Ieudz!AU9Uei znT+SN*8ma&BP-SC$Z52ONl$K6zc4X#u;B1##>)Q7XRw!TLUXC?a>pu}5wF?ZH<~I0 z9X*Z*X_tSbG}U9&S|_dAm`7|OoQ!B6^(1BpvrHr-(2AkuRjYuy2F_xtXU%MhgAzj)L>#Y<;^8P4 z0Op(kA`&~ylSDk(B^1?3pVe}rAmJe&=G-JJTM>!=Q1Y_NGm-vi{~mX4 z^Ug{(chyw6SP>K5ZeP$+ecl@ZeE^N@C`m~*C1!@-QGNO4>Yt3v+;wT-#Kh^PgL|I1 zZ^H&rC{AviL}BQikC$(bQ8v>-(m9&d9PDn%3!ibLs!1sW4oVi{N!k-MIA~8b^M;&2 zU{Fl%3pg-MT@q^S6B-^yy{{E8fo)VflB)wtx068O7{H%EY*L{N+*k>Cu`*}!!tl>8CZrYvK0aQmsd z5jrNf)8MEC1>$+!rKb#|5}B}&hi*VMj72uG@Z=Cdk{Uxm$yV&XzE#>tuSRiXiJV53 z;u__KCrbV~ue~70myZzfMVq%*E9?$aU!OOUb?Tzgq0}J$-5#G!d>O2v`)kjFZ z)JJ-VO%sl&aN~wH9f2?{H8qfYT!;j3r!GpBal^$WbhZbPaD+xMIcU{LRS6&p>gk9I zmqTio21PaquvXj<@o$JQmVARrPk8XrO@qT5-*@!8VAt)6?-TExU4Qr>w2)1|bm#c} z$4~Cx{pFv$0@Ua5I*er-#-hbosK>}ns`p8Xm_2yJC7NDdb1%%L6-`KQf=1?Hq1Olu zD`9@xC5BQ#)av(3i**Emv7y2g>Xbt2lnnieCWVK zS3MX^@ACO(3U@E{&fULZZf+6hPn%1&NqXDGT(X(9Tr!14HcuON^B=OvY8Y8fNwNTm zu(1V$CFB1kBs;vbe^cbrK-j* zmS|uTPQ(roVNNPJ&*~{nCL|OQB=XA!4>1B%`G^q44Ye1_*-lEeN@P@se z-92WvC+c9vS*(l>XlD&Nd+h?_ta7WN%{VJ8y20U~~2jE@R zQDBr*2Kf?Y4sMkxhK0x$8QLU9lx|kWnY_~NG$#oBLXreIQEqfVLM(Bm!`2rgh90r@ zkca{v0J$GkgkW73mD0+!l-Q3_i+FJ5U>7sYAZ2i}CeiJH*p46h#PDve|ESq1sNR2KJ&qssYpLl;?{@*GH%R8X=2=xJQjxL;EhL-e56EE zA1lO?s6qipuY+gV#f&I~2pev0`+K!{*RiqI1z0^9bhSH1Em z8L{<2*sz5sv4?+Gmtn*H=-)r6*tU7Ro_i7@a~-|c3v#vuW`9Snz~-pfvaDTIl&NYuZ z-#{*jMJ34hkbpuznV7S 
zShIfV$1Kgi!n!z12hstbw^aNxy8|fAKEMIvzd`}@L45#y@Zqm;nwQ;Vx-sx1Hvap7 zZ*gE;lW~QnxPl-!E5rZDZ=Io=CG;#vLbg_f49v2QRO(C^YbQVj!LqXsHR6&2L!eA9 zh8fDbhH^;d9YKoI+;ox?CuTH^8;?0mPj#YxI%dxz8!{t_@W5cW-4S+s!pJtx%t9)> zP$B(NuO<{%ceJ*w>1b^|e0{H)GqZQL8Lhp64oP+?QJQ*Aor+`&_k z@$>c>r)aZCqDKP)Bx{#1bTur!-_eqio0_8Zq*tuiv|?pnOD4TMoo<{!U#?%7>07aK(_zWkEv;!3 zxOobCW1SyBZ+x_&mz|cR73bWtYw`K^2**<J?}l{Y z2qTZPo=_XoR|xzR(W_E85pWg^H~=P^o>DnNRaF8^!J(`(4AHxgStPs6`}~!f%!Van zT-lB?>*VWpuCsQL-xn56*gPinMd+wS>+Qb4a}GGmxnvSj#8g4Iq%Ure`<$nO`&$@& zvPiza62yp;rY8ugd9ArqT_^6m?qtV#pT(ATr~3WQlTO~Det+jV=k5ZIPJl}USyMzp z%dMGEfP`Ns7Hpds>0U)*JVVG1FS0H}g}iB&TJ zlgrMe$i5H`g=)g#uC3?nQeGQK_+jnu_PX>VD<}42kz8AI1b20njl&zSI7i%%>Ej52 zr{0ePRe*{;IS?$BPfmJZumwAS&3@?Z_&W89vN1-l59J%jZ`l1oz7ykcBVP&pwdctc zA8|opc*m7w(hLx0qm4|J7YGZ4AvOnwvSt{R5a7x5kDsWOuWi`T!CBF>^eP!5dy-_G zLbJtnRFthi0HFgPqA~j{1G17sxuog3MBf8M$NYxf*Xp#shC5q!WkznDAc={Kq`kh+3>MSn`VFe1&-Lb8#P{ z4~XY^+Q2Vv79<;xNg>&uMiJ2TX9_RfK6CC+XL9ZkwYyO2y*1}(`Hd=xr0dY8A3Ibn z?5{+l#5CMnL8k&>{zemWJ?b)Q6G+gF^EK#{cxP7c8?AJ*921;d>@A*I?CofcJfjz8 zj%knb*q=21@JLe$cFrPjnp4n>tWX{5zEtYCE3aC2!j)H@q?dW&gVJ@!RoSbqT7Sa! zw2kI1V-TEhor#|p#^J>{u;wG|c~E*za_ED?gGVempn^y9?_(G;b{%?{N&pZEX7Edx z&r0V1_3*$6MCxb4J){Y5f|di$;ysEKV3l*nN$ak06{=gzbuF$t&OB!qy!!m>w_kCB zpRyzWS)ac3Z0QqW3(N@WKFR3}^T}o-snH7QjyRP_&TB|&pfct|sEn;iwfhvrrk1N9 zssm`$4`$+$?rX5EDK4ozJIsE7qQ$xEoHOr0$J8^fYu25#1KpzX)PMG^pFYd)KjDh) z>j5j^JET^%M70peDKDWHXb1v?!UaU7iHjAu}GuTq8 zJTQ-eaxkM(73(f{CY`r#s#L#sU3R?Rf95B#(@}5iM;>&mJfvxN;Rk`U_GJwix_APTlKJ0RkPwCJJugCjJM7+XfzmZ9P^12B79)U4tNlf?mQ z`q68*$61zWX901y4%qF1jFkn)fzQ9R=jC~sBMjhVB_nK0v|wy#5aWdQ)gEE8G(dX} zh2Dg5e&MdhczsTZ*XLxk6UH~uDcnoy&s-SfoCi-YFr3uV2y=VGHB`X>t%cn(n}YOa zg~pElCjx z96bl48E>C3+tTb=3TqJ^i+zH%Ycc8yyj$hOp;ad&K^;&FV0dr;=ts6zV2XMPVTtAh zzTd7F@-2q9!PW(a)ZScmy`7jYYLxXfyv>@$0s>tw>$8bnHf{A;R<_OpgE*v$ngi6& zI1@@}mt?olyY3)Vrsy}d>3v!;jW2s{N_bJMo1_sSkvgH@ge*C4N=qg~OS4W;GT<;p zv@m(0rd}09!L4IVS#?jeSy-gyUNZLxfB`)vq9cfq0CW&GA?Ujupo1z)Z73kH*Agqj zo_XM-aM1+(3x9d~>EzQK2wT;fcI@kVZGIbPk04H;8O>dZfdZ0|;5o9sNv=1q96j($ z)~4hq41n*(N^C1K+zG9@=#EcsNH#RQVk3Br=gTEU#&{Vs^dMj`u;K~Ck{-kn z)_M?%V$0MZlin3w-h_9<4!+wxQH6qq2LvG>!@TiJ%QApsb&0GgQNFrtSyK&=TFaWk zWM}pI%2i=`VMfQF3+3zq#&2Sx(N&%T2|()qWdrFP`BdYJ=`n4L@Bt z={M4YKHy+kZ>Ac*hwby^gE2z-3Z+BVX7bTK_J8Ya$U~|nGKGN*ddmf2N3&%dm41%j z0XqmTHn0Y0H2R z@dSNf!*Re!i-D|-lSg(6e8^2#BRyr-J(LZEa|V_wZMj^)-5~V-+)v}-oCDaLO^}fR z9+#GJyv5ED>&o;aU3*gV*4Mq^y?Wh->n*OjZm05G5NR4$6Ekflq)I6yC9HQ=_GFOf9@YyD6J})mzV0$+TDAaKp|U4J zC8w=G&cPO?Rk`+AAQNu$!DJCz-(0n!@J02Xs_-GTVH}?z^^q%s7M;5uDiK52a@DYd zoVOD*6NSc8VN2EZ$d3ALR0|E;ggR$f8hV_C?F!3ucC-y}P*vVus8H&|sKM&sxO)C! 
z`w-zh3j^wlg$?YYwhJxsq5oUad zHy2#~4b?bvXF;b^ zH~X^7DK>8n=ORh+KIg4M(bgkTbV+z**7EIF@abuh?Bzo_=NVWcmks4MY(tS55RGIj z!Z{yQm6SwM{*Y1-icwTDLshq>nKiyjL6O6h@r%^J3X`teWa%!i7`%a6htThwicT++{YUKAEXcx8VfZF3aAK!6ZmTV-oN@_8JoqQKb1rU`2S zky54;oRkJLP9^~Vx2&vdnch{pg2xiXIm#0?2>suV+k96#QPT-lI~$8+JAia4Y?6;TT-9|=9kYOMFvzkrhKTc*9^mArtAPk`$b=M*;`b^w z@ze9?wk`lO8LGi&;hn^ehjt{I>VgeE-=xn>2j@PWsC}K^udh?5E<5qWWsM=Px9pGO zuQXcU{S#zXw?QI2jL3WEWA!sq#us60l{cO;zHR)#c-dhZ`Qu1q+5wmIKo)XFHEfEv zAUK~hl1`6A&YOme2Mt0QBA^Ti!FLl%Y+)A6kL&sIF?@_(dDU?h6~guh442)#yv)yCbt)~EyV2IdH7CNFc=S8p%NekYXuVdK7V?k9Nb zkq71BNGA6P9)3MNy7>&Lv41%#*^DkO-ij)ATrH(;%j9;T)F;vBtM5dgKbIN3#bL0| zH-bj}Je9q6U+x~zpL;^N6EK@CxcqoJe?kjIcqeQCDzt>MpJL1R;mLie+^6u>63U$( z=*rE(sd>)nEI4OA3eE}To(DZUJDqz0WzI|IpMQbRp5KIL&xf)PvyvR1yb#J=iIy&V z3EJ%|fJEDV24m_gLb=^|d22fN9EvPX=bzinm!DgTmv@J?S6dUb1mrP)jO{L0x97M z;YV-0Y1ilOy`Mksi{x&(6Ik359y#%ox82TLpNeEp8_GRD2a@i&i#`Yx&%N+4UVreB z?|cuBbC2c1kB3p_8@#up2*m=C6kT^ib|6-w8(^ z-TmD2FT4Z~;-T#K!XtB5oSx>BM;=7uk3ES-?*v`XE!~8xt2g7-+jo88UY6Yv9=Yef zFFnA^^CGzm@5l2kx1hF|dBH^&U&`Ac|A9;NDY?HdW04xD??GZbRQ{9HG!34b(?z%c>q=J{h!Z+8=9XesQ zD^U|%)R|lotgUhPzBa9{E|~EiQdgI5I)CM%7tNSeSGU;L@2snvc3X1!{%QDkZ)Zox zi&NKF#phq62|=t+KIP+D`D~R>^RRd<;PD1RA%4c<9)Bzx4tTwVBWEvL)_Zh4hCc${BCT|RPjW*R9fDl5+Gs~ah2_jJ00thO zFZ9KMh)yBQ70mz>cq`snz5a+pGwBt{+09L}Gc%gfl_!}&U(kuqfbY{^@dv$b=fi;t zk1PELcct6w{)4B=gF_hhZuh%?GuN0&*n7nEc^@Zym2FPZT!VtO1hCx=p2-9Lh73}N zATat2_Gu6}=uw|vp@NmJ&wb8S8B{AC^jEpR+wW*_WWF2q{@`iT1D*>*f?+nuY!qx#*(u)jJ{Q&->C z-qF=aB_0@p7nLOj~ z_C;g2pt==*QEs={=JPsyuHWxl8N*F)@IP=*`m+ArKbof5B{4Gch*ZdwTj&rD{P~c? zm=7_75YGN}Tx~N_*=Zty$;YR0pNli%G`yEA3y{21w0%xw#>&x@$O%bI&LJ8Gp7yae zu?y9OVNHaC65v0f=MbcerXNSqYV}s2G=Se1_JRjfH+h3*!WSqF;`d`?WVp;On5c}C zB~7%U^h%;^I2ccDLkXue>PYMH96Hi+8%jml1#BqY6KyEzZj`MR8mWg<`Q#C_3Ku=bgrC6>p3qO6p{rAH?Rh^avp{c$JLR)+_KFU1uz_ zIBBRA@)4dJ5er|$9Q9MVgVN+l4A;GJ62}O2lQjRN27rg^gmvVKT05_!rK6qV5K);l zI$*lyUiHszzc=W3x#=W_B`tW<;r010R(;4(=XMuHKT7n%oRs%7MbA>)mD6*6Cj05l z6ua6xRafDMuDcp+WLIaRtGVh~15lf|I&Qc@ zAM5x%;kPxQq@1y)cy5=+NR)B@yN{@UHFtrZPs7ooC6#U52WfY%4jU4(vDArtFI$VW zX}A$f=UOoye;wc*5v8Oh7o8hw%*oI_HeSq z%W)hy>#8-&afmqQ!VmGhc9xviMtaV=3Ps^^mYtuK1a45HW1!m%^q1-yyu4bb-^Dm`z zBexu<&s_gp6r2Mo2o2Cek=%;a_&z+GYidVT^J6ICb>NOWoJ-W>ds-y7XsJj(irk~C zc%|)K$wxvo0W#04($m4>y{XDY&#m%V*eze9@(B?GI7|It z?LnWF&Ch5SZ@vE7s=wCguU-1u({@SR&#vVDiW3~xi(e}BY4lI{glAFh1dzr9{3lqx| z^T3<|E&Y0_;;#DYn!?^z$BNpfmRi+%_uXgIHaT0h{`9rVzRE!B(iw17Otv;xgev=B z%c%9`AWfi@GiYlmwsX`Z)u$NYl_pxy7K^Jltp4HZjT7Z47&3MguA4OTucDlpy)?Nc z!SJF!oUKGpqtN9JV%9zQb9XVbOD2wumEtcs7d^CQ&FjuqXH)GNci*jAYg?LXS2)5)yGRnF@>twQ?d}b6ZYkrv2vSrDywc>c879Q_8|Pg$n3c_ zSoX5AQ@PoAhIj;sLoiwuZRqGjC?e5!LfOf5E?o4a9Nk=~&Y|3Fs(|PLF*qL=2v?8^ z<7c)flC2-gwuh-S@`(zpH4HnJ_nYj&CNYL<@<3-iwP){@AgRpBoQ|C(nngL#GVlBZAX~c)o;R`9@9=lxw-lkWAAMCS*=J6vZ@w+K(n@%Oz;|Xj6)pkVNO2 zfR{El4%^sBy(W~I^f?%F^Ha+z>Q~ZfFjC46^>AY&uL$*KC=Sq|8q$l5IE$Lkrs$GLt^ zoVbaz3(|J#G+$lUNt`x!9QAVBG;Pzg&DkbtE`#5H-tO{%MN1^|{eGXL5WoWacHX>s z^XARWo4*MJfEvV$eQLc$#t z;|D)dV0LG*nu?K~oyt-U3V3ckTlGiV=*&L>7;rRxm+SU-8AJSd4ZCH3!J)>~BaHXNY@~tKNpk&+)L;Nb6(~Ngm z1ss;S{q%P~uBva;i}=MML*LuztH;+Fop!6C?{55rZoY^vUZj0^lz&Njux0*Eaie%0 zcKDYFcOhy6vqQeP#KZXmFse!l1hg7W7tmiSzTj$tzmeBjpja?M-djkW?<~kY98o(5 z!E$T?YkG3Tz8(UF$$6CRkTuFW!0{Gkdk!KNhb|a}tj#*JdDL#iNfcNF$YbN{u<9q$ z7@_%!uzlxB`*G7NCp$upEtkbLTZT?0?K`hxi^F0Itk|~+)d8|zhtZila$E{> zx7xEfP+j=WnGq~a4*leLp)TyM%bVl_WP zz0DVKTXV5GlBZ82gZKog!_g`n8j%w!x3C7$6oXF;-WNTc`El?IwKk9=;@4vN zur9!c2-pzuC~PoE+S}5Yn$5c*8T6 z1B09Py+wNIfd^hf%Ek1-5zA*F5rdG# zJ2)}Yq$LWZB`D`&&ST`kh%ir0nu{5Q2!0k*&ygUiOa1^@5-D_=6iP2%=`fTM=|bmY zUL}x*CC?~@I?+iCFrXAt)fej~U%SOy4W|(QB*UJpi+~q#s@^^UUdKG5PnDt>tCI^p 
z>~N9%1Z2{O^vrrV3%C>^E9~={v*4TB>(NW!I#wvI-&Dwt-gQ(vsp8W-2W8!Z;JWKj zZeVct^p>8(Z^>zi9`$>1yU?xf#p!%8LdtX!0AW9R2WJVg$?=XV9Z_Zt z|I<#IC%zIK(Tn^a=^f*;y*}Y1AeWLdqqNr07{#*b36y-nLEPyBLnsItK1ksuHG=e9 zpr#EFj2bI2t$a6jqbnp|?)G#ttjHNld`bZRiSa|-D=TW~X zDH_itC_8lRMfHLF8T@%C(B`>nARSe$DLT_YPoqSy(i{S)^nn1#>iY;xDi^XaJ;frO z3b5fA^Dr4}n(l}7g0&xRiD?}f- z;VS6F+6o$zmhh#5F)-ozR4&Sd|Nr_$hG+6oSDB)X{mQ(dQ#Z>w;wk97`knKJ#_rs# zfv~9mtu%(D4`s1L=+?v;p2Y~yHtiJYGn_d70s#=<1Fvd@e0wL2Ar9Wq7($C8KZUaS zEJdoIM`LL9E{q{+t#^q7C6wBvmn;e)g3K3B=>NE6gFZfsz)RI`SL?+|f{rnAQmBTA zN>7On%@9>y8o%Y-eHBle3UUbYhRRWML6t755JuiwI{PVDa2-2dE^~y%NrC>w`KQa= z!mBzDRk>6DYO`@s_RtS>0sbc2{P)C*#7@wEsc;O~C#QgXator|cL@(LAa89h$de`I zZY)7>Dpoh&O-t(LX)LLG3)Rx`(`43N3R|z|&{bsX0b%6!5>Gs9EmoUvrXM%Q$XI!> zAn!R#>2k-hN*yXz_Z+A1_8dS%$5Hkl#j%=ix*IJh>tQG-Zo8ctTklbD8XL#1m44jR zQ>^x%rkncvV8%QIrX1e`UGCfsrM<~J;dOb#hM9|Z9Xov4iPv5E`){}pC83eS;#16& zsB+pnVYTME%0t(@{?lPJPh9`T`{*~9`j8Zg zFK+fuI1v#{3r9~P@OjrAZ>2AreM+*4yBL=EjZNi>8pVOBUan}?LufG!6@n?sO$f8Z zO>~NFm?g~y$z;hD3b({K%fKQfLqtS-W}MuuWU`2@NFfx5$DL&sM&o8bz6zENLHgP- zKwq=G7TBtN+ca!M)Dp3&_4{C=?A>5ATPzN|d21+|+gO=tism9sGi4^>=~qg% zTW7q+tIX_KqN7lozRKj?+C%cYc)r>vw7}<4M3m;wges0Vz}AJ(B8wAA##J{RN5LWo zc|)cu%%@OE&K8ssSs0@QdH5{V)sj0^M+8pDa3|>U`!FOaoEu_10%Abg%j3c6&5G-z>(^1{1wpIMfJyKmj?u=osyk=^S=LtHaO zX0_K#md7#P67Crr>#;dQu}mfw`m$z|gtXV{tAiWo;`e7>&2JNh$1phHH&)lz;J0=b zXc@m@*CWSM%e`KK-!OBR@>{Iqx65tR`HjHjo$_K zo|n;HgSc+@h{52qIKXntYcu*MDul;%PP zVGraG+ExgWaui(Pc(fp=OLJK*Ev&B~=g-n&hg>n*GGXZ;@ePsqaz+zg6TaiwGR<&L z&Kf=bZqjjauj0x=0ZJ&<=SX$o63#||hjQnT9P-L}LQgu0u_Seq@-%OU;64cb@t{ZENQz>InuxiBkJe&F=Y!|S=%j$h z8)>DZM=s6bl;5~C-BbBP{B9wmQ`vYs{-vo^I~B=o5Y$X3ir(aG3u{v0VYTrVgZHb78>Z@oi7vX9tZ%c!jV=v$9;TBr{e zd;HOFimmlj{_x#@611z`Fgb#sC7VGMZk+!C`>yyQSih6PH-u^*^lCLo$(M-iq>y?Z zr2N*2Ab^l58a&WPPIq!vk>@H?oLgt=^T73vV2PZ@et3OB9$z@QvcqR zPNm_TDp8yHPI%=?+%r`o&lKEJbjlMEl-ZV)1-^H9ItVooAroZ-z9`{XiDJiDG&zq0 z8791jeM%6+3!eot8dQa>q{eXpM}F*l++i^+I43ByURu7);kXP;PV%jDTU0YYRg063 z|HAlS`E|Q@jSTPJbyK=KxMZEnXZk0POx;Sx+pVzJ)HlT(xoJ@X~Q8VH|p>J`7bttu|8(8`)xW4sTa8LK*NT(m z-EX1`{E$%sY|ry86C7PsYXE*t3bDcS1DUGkdWhG#cJNyBDHEVKe9FXYVr>DhIq1u( zU~1+$XSA*a&W&rg&754qqy^s8Mz!8=J$cR6rCeFg z3oCp*cMH`7S+HP3)dhZ3vy^@j=83=9PR^xGiEeNL4l>9mlab1nS}1Xi`9x>c=yZ<< zo3J!-qB#goq?_q&8Ujewg(wO{oXF#~In6l%;ekLQlh3ELq__epQdEZ6;ny}Zy3BA9 z0&{S=qIykBEI+hs_wdNB-I)c+m^yNDX4|!gr#WW=yMKT8?i+WqzgQv(S8m*T&B?7B zui_Gdc{zc}S!Dl>+=q;CyHJJe7zrilB*Kl9%YZaON_Db(s3UgtiNT1MlM8c?MwfCq zkjOztrZEkxVUFh>hFPTKaEVgTXPe{g88XGgl-gm_=Fn_&G|8{p+i{f=Z1n0G{TVfU z47hC7Tg+p$YUeSveyL5B&)rNC{-@~t-MMSFZaaDHwy`3c!n{DgDV*}B^!3em7bmt} zbIrD`ClO>~=&>NK7q7y}+{e_K8`UgHKMqJ%DJolSqdx`wnX|MsJ1`l7C3FG18Vq_r z&%i;vb*|UQjlgaXPgsGnt+w-1&+Q2eHS{i0(O`vh&EhIn!#o!(j2MD40FHFiL8>Bb zhF-{k!2){^46#rMHbFjPB~^rK$x=)Y9HI0a3R0J_svtJ4OG6+9_c#f|m`E#j$|Xtx zl90BPDD5i_jUIU19`Hwrq!27I^Ncxj98<&2`DG`44}O)wmM(}yCDSskUhH3LCw0tazEf#+NdAQ00;lZ#4#pwsx0J~csDkg&TH-Rmi zJhVvB8d<@oSW&L1VLOEWl}o_7O%q^F6AG>>l?@QD(Wx1*M;YFZs{_zB$kmuZdb_QA zcw&k+98F%u0pC1SFswfEh9GkTy9&*x!XXF-`jI?|{RH2$1tdx#2S@onIopP68JMCP z$CqXbGyg&Bx{7EHTCcnkv(PLKkJu09uCNz3h~)7&e%u-~i}z!dV997%Z1{l zzSelDpPd;#QWU*b>+AOKKZ2}lr&u^Rc03R`KDKRCbb4$%FTea!i_I$*j|{(4+SJvv zeRHueY0Z#Fq}&omw2Oi7KSliOO8C|^nT2axHkEi@H2jxHU^!YsqSJrK8!@Sm^EHji z1Hv%C$!Epa3#S;Jm7P8ck(GGHrZhp}JPBdas!gKtvC--V55#8Na2V77@MgMtct582 z<+zlm+Hfg;#5PNUD*5D*A$hY`-hsR+pRx-sGzy04=72hxd6+rR5Gqv}iW6M?U>9dF zMW#`Rl<%0AeSyf)(mUV>BY96D>KUtGNO_}T2Q)0 zx@Y;d=sor~)cq_fUA)Ke59+#$!#nKnFfUqRgIuPa%x=ZJ;F=h5*SSQ_O!+Vg@)g16 zz#;usj8C>$ZROEeq*!*zt!g$KrAUxzk!%IWJ&^A##jTOL;_}K~$TjD28x@c85`TwT z90Kez;uJfK@-}aA%%Jk9_9Ee{zbBqU|B*{O 
zZE+aQPG1X-+jUQ7n{0+}vIla5agVX~j|NY?O!6mS-tR}OzJpG18Og$;bU``GBdT%~ zLWjD0sKBY645rd!RD=MbG)jL&{)bv@kFu-0lmG{$5Sj68TIAgz**5NzThUV+Hj#5) ze{-D5@L>jP$-&%yfUsbS!d8F^46B-tHYR+nFtc%VWoDo}ECxcCY#bSj4qUc=U&Jd; z^mXiIM{A!TPOyE49+(+4hWtHSj&0lizT=znVV9-6?VY68hUb4I#<7Mvg+Z!Z)uP!= z2%Hlx1Vd>}7)J;y^XfB&V%46)FBn3UYSF=dHaeGq0?6X3;u@e0@zqryXKt{1#S z6jdEjD^8VxQ4ZP8lu1EklHfrf1OQoF&1WemsM2b2nd@~@Iax2Rg~k21_mABeFdV(7SUyo4HA(*7J=vb!DClYG8ag%BoNw*) z_TY$(5f0^-Gx^TWfkeD4?)2_))E*9P@&pD4+Y()dfHBwaWY;_Tx^R>w)saFvwYkNV zHW(dml`ORn*-%Iv6Jvf}+$Wwu9Ha+(?t{Wtg(^}Lp@G3KAEi+UJ|s|7$H_q3g=^!x z=-T)&u1#r6XdYVJ3|!;>%oJ=jp+xJe+yn++T9CJ&mAi{P1+I?+i|Feo)eg`qypTzr z#MC=bR1V>7AC^ff&RFrzx+kXqV>+ZnH-V?V%=j+)U_b$<*J(Q z%0b#J&PJ1)01G%IPx<6YF00yngDfHo+kiBe3is8i(-LY(RK>QkB%qN_<#AQg%szD? z7{je`LomQz*4D_{UMZX^9vD3N2Vs9pOZP=Hy?rx7M^COR9vSZo->NU9PgvTzublq3 z&PX7Z--bo@qU`A2J-vO0C;FQHU0*KuhLWwJqx-z}ZH1og+wuo)-;fBlw#6-h-z@y1 zCDrm+z_JyK^p?I&*G}f#zGysZ2|)Hu^Y@A~;xNW;N`Ob&LE}UbfC}cnx^oW~tHTu< zv|+@gP`;P!Y$i{0I~s8(3OG?sm;&Aw)RZ_1JoT&DZ#WJ}l~_CR1Ii8uA`YGKNI*Sq z#Or7QE_?${0aeCnXwvjp>YKBn0yW2sPRlt53l#RWWBi>480mBMZrjn1D$nOTF4>P3uh+E zyUP8%EJ%e`pIL)2bSZJ~YIr@@;p@x2v*VlhQ&D3DKi2n>n`WqG~qk}jzD|4 ziq;n-l4x^K|C9myxb`vfL(=NdCs_k!AZ5apTonyR#U;NE=2d;l)*2E-OfA1~YjL7i@bL_Kz?)NjH(+2~!T9nqURJ6coEm z9*^mjZ=0M>*0SN?P4wwqHa2;3g4y1Oi>6x-<(w|6_{{D)^g1Kn21JW{$L5*sZi^To zR$X=+HV|a>?a=L40qy$pLKQAn1nAm&& z2`7Y^e92M~rqhd>A|ST|7Ef9(FUH%g;L62Y>9&h;CRy33bETW-6%~eL)PN~+|J}fl zaxtxr01h zpQ_fgQEosa{)} zYIEDIbU)7zWP(ZDD_$xfp+V4!K~+Tyu}{Q)oaZ^B9xb#*)yKEi|ny z@5(6ShXvJhmBb?TU}d=&ev)1Oc6#LVobKBD@5gq+eEu!9@Xu0z=wuF*&XOBM-@%gQ8lSiz5 zW@L9RWH`9((i7ehZtt3*&n{sfhvolz;cG&51I_HA9T)-}kdjXns|Pngj)a4!V57B= zjfUd=RCLQ!oZGx-Lkkq$WhG@ZE{qlDk~X@a7H^=``L1F$NxN+!*-uh!NmK+^3 z7)T`a%y^V5F^2h~avd&|i^>fc*_oYSd5DUqTsY7u@#ZRvA_-m;pR>dC^pcxlATA zX-<9-gCwo~7WT7nF5KdDwMO%ird%lI8eo`aL+wo+c<*kDc7{WlV8Rn>6&+G|xNS8* z{@21#0ss1j`(AHw8pDC64SRY=dJBUE{2Oe=cv_s@6O$MAE9_(WSc|WR?++R0JP&M; zb~cL8i}8k$uEaaxA<=XMa-RX4#=U-yvq+2OTUw+UJGeg!Mg5_?!_e~BiD3(oZ(S-u$@bNu$B+w!Ke!m*jLx@3Oqt=9Wa`la|MApENVcWqXh1o%F>! 
zrT5xg5;H%^+W-8R=#-qMy`R8@`NVDsMF_-We@+)Ic4;?Vd}0qBJ`|79TF;Q151bBh z1NQx>%NmDrLW(7&4yFQkXv;~!*qngng$1YfjssVwx zF&x6dqyYed8Y^n=#|)LYiE9WpY)rOSVj+WAdxK~Q#VYMdW9L-&X#3FUI=7>zIM5Lt z?Vfs}YqG6bGBB@~8Kma6$*%RIUFp$cd4DCpy|*u0Zta3=63VoD{=da;aRTSAhiPAr zYp^S+8(v;&hH6a}in9ZP)q?3cLd6T`T6obZIz))0D{}$01Qn@VI#TqE(drF1Cp!w5 zU4k-zk(@JzlNp+9D0cR=IndJWENROt3YhUY7;JG^gnE96WH#`KP zH`X7cIK|J00-pubLdR!8$~5Joa;y}EYWPD)Q5p|>?M$`|-5r840G|Zw3cR~8d}nm@ zk3Klqd*)1U=wv>9v$!Gq<o3lve-`vN*v-j>lb)Bik$T-?I{1fkZl2~2!JX?-r(q`?<^_4CM0xNe&n_@-8)c&{kz}NQ z&TUd9c{_O0La80y;Dqc!TB5}jXiivjg@FoO>m$nS@pBwN6qFzoH#(1Lpm{K0&n!-D*5lpWUEJKYVS3!DP2OYd#nGYZ z-tI$t_MIy0vlF`X63jWq=Uiynj01p)2GX%G&sc6@b}gAxkO_SrIdBs5Mi$oaY$?4s`Jgp^|a{7p{f0+*FYFp5<^th!h!emX){Aq$-bNer@4yzbyzK$UwKnxe+>=E#oA%A8%pDC^zHvvOFeO zSKqD!Bu{N#<{diJ^*r`SH1|K)4r#j%^l?}Td$+vAMjSAhp(V!k_0q#*$G_4M25C1B5DaB=xDMzu8ii#%H940K||c< z>)zXK4cMcHQ{CmUe5k7}+`L3X3}(27U``XZME-EpwSt;RR9cdkw|HEl-C%}0()wqi z%St_6*(R$g7fD(RBO?WPb-ig|s&@61Ao4@umRxf@;3uu(4h#?mJjc z81}8)=Ag|8TroN;ixzw}Wykr~0S#QeJS1a<)dDR|y5nNW2ODOH4c}Y(>Vo?YMjd03a z@1~#tNtstuMKlE`W&=LOi!zF+Qvm24B$Z9eIwZAPf4lmQ z?d%uT!O0%0eKLV<9SzjZH`ngq{S%k;FCqNFd3#9dx@r$w2ypCGokXBlD;~QzqWrMg z1^06f{fw!{&K7z)kk+@ESh>}n8cYM}bN2QZOsv(^E`R#i%gZ~=o;kMt80@4|7}!4% z&!F;o9DW~;XhL$&i}&yX?g4%>0Y!2U`ydZIP86cdI<^B`EU6-n01rF1W;iR>jMdZ{ z#u(vb7#-I~MjcOc@Q#Uwpk`W__?%&HAKn~D`ZCUeYYzR^l6FTUp4~fYbC5@tofwiN zzq$71;KVU@>I(*s*<#qf{o{9vS3x2nb)s*$meS zP{-YX0M@@H{9gx9fH^QQ9gMW&H_S6DFb8}qqG#41+WlR#X8u>=e{J-ae&gi7iNFyP zapOv}XV%;7!H)y{Z;=0OqQ7|dY?S{7ceUtu#pudxT)&>6>*7Qc!4I1yo819t4IsON zP0f*LODvvPESk0aomMN$FAWg*2>(9}r%I5(R`~x1Dt>Rp`|bk=y3TeTz<>Ou>nz*n z%DLnxl26Nct-XjpAAThH_=oYz#%eDn*&ytZ83>#Ups*xsZehQgPeZ57V2R=YE_6An zjgqlykgPUf{@^fLm{K}D6auHkg)CMqE)d2!laaooQuPjevZ&^&dPWdw^tAg;g{fXU zN*4Vpl8Hnzzp1TbFSORqe49Q*J_~%>gY53wtrUk_q|1#8IAP)URm?~_OhuH1h%%o_ znFR9!l%XOqo<3)H^H12~ayoIM%6oP$tkQ`?qX_NLQ9hy*=lGUW4QDPLFGc&>$toR~ z5WX-@J+C$o>e&^X{Quy8*`wdQhlh(e0YU z`2-rz3~?Z(a=;)ZQ!sda{Ny8bCRCh@Cipd+@r&}#^G#BYJn?f`wf|XOsvmJys_jg7 zVby|P2jYhVXF6MOG7y|WM#EgI+WQXP`!Z$pq38OmxB?e`x}^IPOWrStz@5DNFQ`oT zE`RxDJx8Qh@gobbcY&(O(z*Y+g36IRaimk_h~3@M364bg$yn-JyomWo*R=UK*OukK z0MFV~{!q{c)ww#(&J|StAeKqXLu--HuBeT6zR)d~66BC0IgU1`scd5LLgO`H^x$o>;Mdc;(+u&kREIFLV zgzX1jDUNO|VmKm(>7)oPxDJ;ZT{kx2dfG#kk10cwW*o8!m&`bQrjd5M5E*1waz_4u z|5X|J>i6t2O(&^ylm9l-%USycRUW^?Uw*BAczHotaXT1pRm3))M7?L!s6((&t97WF zOK{6n-PoIQoHCRjK*52kgXf<)0Aw{g;h?gI$h*bCHUzy91cY)4Ale6?sJFiu^il#D zaz{~beOL08pT$L%W1p5RK~rrT`>$NiYqE>iCX=r`%YI@BO6HoK>aw(fo&oj+PsZ_f zLGYLzF9@GlLg()x=La?8Ih|%~h#g4a;iQ^DDhE_~6_px<2&b#Rl;m`Q9^Wf}?MK8W z2Q-XaPAvO;ulyyRW55A60W~}aA$2h!C%i>NFGe~#-wO~b@SG{`1tK2BeCC99{ubG& zOzz-FGaB9noo36zP=lr-DWTd7u4Y&>luVGN^Jxw$(2(?2jV8XYHsD-ql)O0Yyhwu? 
z3$PgA+hYW}RB16{51cF#d%4G1ee37Z0PK)~~)0inQcJ^xwKOIY^Xp*vu2!udiw1N!9RHk)Fh451+P5;>k` z+U%f@CI@$<`_)(zMSm%wfp2z@REWG!%7b`$=c%rXyEeP~;tBPgr!OY{%RPs-?S5d_ z%nbu}r_JiYr(F;1+ImCTfjYgOWpr*AD8tgA(kYw;(gmH}C(2+)Blm`uefnfl=>#}9 zPS3>(&ya_iWlFB z!l8RSHpJD-H*5u&+tg34fg5IaJ+K>M2445TztklbJ#E4*^S=_F_qc7JL~cbEJQoGK zPy!Bt7uCqq!W#rS(2DFtIl^}`OwMq$&KRa*j56JY7?Pb4`B1W|p}g*Ievl?Z*@a<| ztkSsNC2h*MnFF2!Exx8grR7vW?PszZn+g15sWRkiUrunj57RVPn~bwcxS ztz*bNpG91gNa0)g7?@r>CS@cG&6 zd-kr&M?_!l^j+_|$?a!b-o$yub)x9=q=Z|3*#({WQHT(iA;AS1+6o>5;8UF7JVfa+ zaWm?QMW|XGFBOX_I!NHFhIsCK2-`5=%Tsy|&Wxhog`YU5LJrZ!Qc#bHz+W{Z0?)T-iAt~PT9m|cjO+s`OJrMZ#k0d_~`3C?ekS{m*>t@eZEg$@fiCP zpARy9VE#YZ%j`5{JBUn0+Ln|4_VSV=IEu>0GQc+(bZ8Zu4AP*W2pKEHVu?}!)G;-Y z3e+^Yk(47>AM9mc?ZX(xExFonx)X^|IAQeV*cQKkTOuFFN8*=81Nb$GHD*8f1vmv^ zr`W^xV?X2rWqSpx3j%yDU_()H6$%~WcnZP0`Npcxr((90(Tsx<*DDJr{dE~1PM z<@Sg1Ap**=Z$K`EHnb-2lnz61!-Xck6TTFX?GftjGNEe{YxbG>!fUG6D z-rI*_)oCe#umL92lAet4>RGsU`{~o{;?t*dV}AeGx4uGu7vH^SVuVeM#B;o7_ygfZ zP6al`TiAEt&kUk+ILQNnVxcUq4KE05Aq|_e9Va-TsI^o)pDGiYAf!nU=okvIl_pq_ zcAMfBU+uL2MnC?scldI(_XYg^FXX+#dFcW_*@^>+;jEd9Gx0}Rj1zj zsw?O0rg+Z2Gx%kCu@+jeySb9r(Z?L9a#U5p1@jgQKkX3#WrU=ZYNnHfj_SU_+M$d- zz6Hd|TbPeKDLo{dbgyMv?qWC*$ofmnqctL+s=oEoKo>a=Se<>4{ z)+!2hLyVBd52UioPuzRoqv$46mIpz1R^P2A! zE%(20|GH2x(R%%3*I&6clnU7F*6y^et=Hjj1#;daw~@@a(BHl2Zy#z3!h9wxr`p;< zq_^T6Cv*Z1zhRkD3e>wIj^m!C!wJ=RH~l1@LGMb`I|J;NIl<8s=OqkKzC1?mT8jVa zeaP|^7+s)*Iet=B{Z?H%VBm$=zq!Y}eqXGwNivzu&3!FCzjxRxUALWjnV#x222YP& ze&7AakKDt0+?@fN&4%9C(%k?P2xPrmuS7Rn6T#5B`;VWv7X`o>&bK};Uc?@NH!CC1 zN`lp{>fmS296mB#bm$&41uTPPa;Y{kDU7zHmNrs=mHGD;hx0g_jYexbjv_n6X!-mL z7WRNgvPtD&U_i1(Y?XG{Bu zL7@DzbCB`z<4zcomKQ#cY*(lADBE2}QW?DP5EEQTiy2tT66kuH@P`H@NX>)!hvW*2kD*~YG5Q|;QO5WC4~(g`!$0c2vb0r>e2u+i*@9*#COMLGZO0ssCO z{M)yLfBYJ%?~Gujgx7@zo(@e&{OGO3_u{CMiaNm*hx8UH@GK}k5<8Uhb$j_?TU`cd z{|9@LAQCY6=pmQDH<{dyD%dcF^Uj^;;<=STT`h2_k3eN)lPSCCkxNfG;zms;R8*GQ} z|6S0dx3gAyaNqBQKlSZ2N)O%N2!C{X^mbND&opXk|1RWbrFI&nNAK5a<++|7y`8nv zgZmniADy21b{eIJ?q7@i)YGH4bD{L$ems}!r-tO`eC;$!kKV5fm7kU9(c4)oJ-Ba8 z@>5SweLIcPL-(&nepaGKZ)dgipuOLT{H)YYqx9(gTCM!l)1$YuR(fz>L-M23Q{PUb z^w9llk)L{c^mfii5AM_W(3t+Y&~_T7NAK79(qxWmI@>5Tb-p*R-!F>(Mk4{g0JB`vq_pe2M>gmzj zxlnp=Kc2gG`v>=Pex9$LM(NS}b)oXJ58Wq0QF`e9)yU6E^yuxZ zmL9a%kp9u>S*e{y>CyYOTKTD`M{j4X^x(dR&^mf)t5AIu&{M6G^-%g|S(EY2CpOxs*+gU9=Xm3^a586Mc{H)YY zqx9(gTCM!l)1$YuR(fz>L-M23Q{PUb^w9llk)L{c^mfii5AM_W(3t+Y&~_T7NAK79 z(qxWmI@>5Tb z-p*R-!F>(Mk4{g0JB`vq_pe2M>gmzjxlnp=Kc2gG`v>=Pex9$LM(NS}b)oXJ58Wq0QF`e9)yU6E^yuxZmL9a%kp9u>S*e{y>CyYOTKTD`M{j4X^x(dR zx}_sc)xIdg%Uh$7Vn_vr;>a(xdllwenL>kKWE&>A`&s$&XG? 
zeLIcPL-(&me(LGb+qqDBaR2W_|D3O#M(NS}b)oXJ58Wq0QF`e9 z)yU6E^yuxZmL9a%kp9u>S*e{y>CyYOTKTD`M{j4X^x(dRJom!= zKluH!eH=b>ex9$LM(NS}b)oXJ58Wq0QF`e9)yU6E^yuxZmL9a% zkp9u>S*e{y>CyYOTKTD`M{j4X^x(dRhW8?W=V z(7NU2r%`(Jew|N# z&PR{l&RXfgeXEk6mFTH&r%`(7{tJ{0c28>&H zX>phpT@Q;q%EACg`ct%!kJM(1i-!HpQ`tgq~_uO~c zJ<<<+dE$$s6OS);;dPHcK6+rw(W9F`@x_S={Nl*bna43osJ+xR|F>d~h}zG# zPiteyhfm1O|FzgFK84DRS&S>HT~h63RO*rei$uVUzG5}bk!s@r0jwtDR2eUsrYdWw z8jq5ABE}4m5GaH%r;17wKH-igj$+#c-1BNn6W!6$ih5;DDy9oTZ=e8|(nN@wE|Xfc zxWvI!%L6E=s1_Yf^KzJK{VKf*WigoRy7kDRH+6mB%B%l~HJxs0IUNX|jz&+T;^R{_ z@1-ZNJ-~ik^S`>yEET^fyEn!6$zMNP8Y{14E_2T^~rq+(mirV;_#W9WDVQk3{~ zo>i7qm!^`pVo|GZ?e9(YdIKnPo20)v(UPle&WR7ca%&_a9(m;>kq8*M^ckqz1?YGV zSWd-lEh1Iav!He@B$}7e7SE!zq>3V>LS&E|vCrfJ;@y$R zD<2V$@MjrNO*}55pe-songly)!*Ng()KE2nY=B0f6g{t4i8A#H0}3vXL(Eo^Jk*8@ z4@lYII77u$FWdFr@4ff`eE)mjfAPeNuRHPLnRnDa^^SM2U67R$=5<0WdEADgxKk`W zhmXQIDwThieGRp5lT^c#BQ3ai#l&jdMME4%NxG^91Yhqr>0_5k=70A}CZqMUW)o_5TO{lE zB)9Yvx8Zf}9~Cc?EEf9OZZVooe=3>q(egd1^b_}SgXc#i??msvu&;XVaUBy~^FI)T z2^4Ck|M+*^`0i`;oe&UR0=}8hznkFqea+*>eJS?q=jiud)U?~gKJ2;2`d@-i+_?CA zjC&`(%d-!nmTbQu8w+z@1A#JCjl6ED5mh3qUL&!`>!LZMV+wkWV1%7YDN;p3j?_c- z2zi~hqL>fVo&~6;>E8QIQh;p?up{>bn^1-i_2WWe(DN|?b?C$>=)Xeni_p+NU8)gsRhLa zsq`JH%K8*3NUK0)87E5|Jd`TlQ4ze)E^zd_3-6w9GJ937F1(@f{^?+Jh16n270hJ!mqP{F4(5n)N7mYwaAc1~zGfUlu@~NP2u^mb1rkTxG z3skbi^2n((E>wOdeJeqQtJJa5K%0cNSQEexZe}w*d(a}`?K0@nULf)a{cImgjUHcdw$wyfKja9f#zzQW>O2iGmNU7k61lRNz@gGcam}@Iwu1(LGpyYq$ zLLK^N*M0K`0s6uOs3w>Cc*ovVkM4 z@3Q}`{hdzW3@~_7vmfDPqI&ZD|GBJRtknIVowdA@{IlM-~G+s{w{Fj2K7LZz#-3z^*xJhH zp2z`g`J`i$dspJrcdL`#|^QLW=zyHWjbOoi?czlPhVenH&sc>pru8tvzOwEDkR_WV1+i&$3TH z{gN5D)i(b3i6!qbrdS8wIR7K|GU#_(z91eK3w#~e%tX)@18paTXFX;C*arN!hlH1a z6EWb)h;Isf2-lA-z0SVPd*Kj1r+$OB4+%TiZ`mQVbBp-MlHWgun(u!l9>VYK^FUg_ zH+Dv}JdW#M#r2Hf2ngVX19-l4eIqage*#h?{X{NmjB*u#-TA`)$G zO+-ZRR5aR=8ZUNDr|Dhyw!!H=o>*=KR;L}T;F{D;}+gtA8x zPa@v+3?GBy{K!Av0p91h0 z740s90<#u^Rckk6K3MJ+J{l~`7+TVdP89k zGxGJG8QBirz8m{j6q+V9ZD?+es|RiE;*Zc?6o3lQlgJ(fx{UUqz_Q1P+y1$w<)57f zdIXB^i^4AaF|p_Q9EF{N0@J{v!jcT=k|>cb71HA$e~}Av)MB1=KTXuL-Cj8Y^_TDJ^awfC7m7jb>uJYpYEC}ji+L<@EQ2G8P$jy+tW^&+V&InbQ z4*-?|TlYFopfJ_4*2Po}eF8m+u9MGmcgq=z|hb$xT3X+jz zQ7MDqeu84p_0cL+KoxSiZAk9%%2vP!z%?YNco}aSt?k&)sEki*T#xa}`(41~&^T}6 zLN;b7h2b><^0;W~EQ0gwKF;^4c>GkhOq{k%a!J>=heqVh#Bc8l4q|>V z80eYq@1OR+U`xe3t>t9Lk$zf}cg+t%_P+$whGVEjPFx`9mPA2m!;7gvI4ISyiDWFy zYv2Pt$*o#|fEBXxt-sX)9%wkQaPL(G?imu2a0u|a^(MlEr4s#sd*x!KcaY$OVoaSV z+$BUf31#+jB>C2hc8|p4CxU_UwfC2^eWiFj89BNCkpNz$NH}+50uG*Rp z1fiQ2Wa)YAE2o4iqka&A3(CTif5t;&C3rw52P1}A4?vV^fN$CZm^yO6qqs0;a1ZhQ zCblvF#i2=D!p5C;mb&m0qQ=jrxYX|n25Ns#vOXH`pZ@xE|5fq$cr5kbsTeBML%!yJ z#`)R`?*Tlfeo#bsOgRvh;#K~M#~fgQPhh)5v~?9^ZOdUT5|mH~G^HU@z{CUa8SrSh z=IO@O5N(L8-GY#$%0CXycORWG3QxYU$BwH1lzbcpgFgP^uAud;3)(lA<8+_nQ zFc1tpf0BCEnJ=aLBgx(0U`PA?EsCSyj>Ue+4E@tDPxlY>q<~Ewt9`%49Z8Jq zycZwEz8R>&fT!wGI(Rqbe2Dq?Edbm5ro_}xB;kDxz2D|l@{~^`;jj8^SpO0QF#rpiAB{KCT;8+c+7CO1P zs~#>k9u(<^NC7*HK~uqs(ic(QD@i`j|Xu9(j znw+eJ_25rwx_YsasyYgM?IXSd&PAP@CirAaN+0-2xRrqQNS!56BDoh>ektOhSp)~W ziUe3pTeB!w>x|MD&VUB_L1-CJ*MRoY38Hy9030AyNQlEA864+`ZM2jIKOT#zdOnie z8;>6@XE|rxlHD|RImz2K>?cV7fHU|zUW zovCt5i7VVBKsF0DBuf`Yp=cXcy23HfHPBGc zKwI^tVt0=o4|D;rTJQh!FrGSTRAG(jqsl?*u-eev0geUM9NKqo-jDtuHIOcE^PT)EP`S0_zcSHQ_H$Kw}s(W~9W1(E;JRup#h*^s}3 zhVsEZB$iNa{nL{lq6?#oefSdh69a3QcF;he<<66Ek#P24HOWbiyi|4az=9LrVeT^q z`V(Pk*^5#xSkU%mqI9z=B`muDR z29g!kb~&RiAD?}FIeQ`=AE|w)jg3Z3A?j%ZEPKU<3A0><8Dn zIBpc6foK`&R6Rp*NVLAz&0y*n>Z2^h1J8 z^;a5(dxtuu@9W~17zVNxYz;Z)EUpaE7$n@m*5!U2dYAwR;> z2Pi99!L+e);b$%nb&(9tYJrf4CJNmcw-H>5RxoZB!i8jB*$0#~tXCeghCJ8@5sZm} 
zUrzXd4lfT8gqlgfgp?(Wb`p2ly>ewp?)NFFGHu_5K#0(lQXE@}gVtqX3Xv2HRQfYn zEZ3UFy>!lUtRm))v_~TC>!hxhRH2YcgzqZGVuv3wu;KoWj{g7Er|^H!^1wp$(?~dy zDohoE9}EP0{)9YgDFtSRpAk&`99X-F4IT*!#RTqZOhcQf(D z4c*INTFR!g(A!ELyz>?1yk|}{3eDiDSilD*2VsI4NhnP@9)`=csagz^yTm01Q~KQI zIfc}?{iGQyn-d9py=|@Zi1Vda8rDU>|!?{5ogVYjsgvK zw^uGgBus$%!yDP>t!BCl0PB%E$Y6$buTB$+U|=yqkTgSDn#U2zE~7pgtm_VsVE+Jk}p7Moq#P%7`e>Oc)< zWiZgao&6>j>uB#dbYN_U^PmYBM5|do)S_6UjZ{vY!1X$a9Yrp7iKge9f zS#=p-redtuC&J-`BM{7t_4a}{WaIu1Y}~&D9$-m$yHG{QsY~uuG5;6`Vv^fAt`XsP z6(K!E2X(SopqYTZD&WX+=)Jor=R9O~>yZHR;f3)9CIW3(bF@n>49PLC;%kR>YbC#E zgj7V>i`76-7~x`tW4O@jm9s-~yHECjbLj=!mdxO6I!Fd2&~hZ-IrrC5VV=ujo~v*?&j9iVWC>kWof9O3@i!} z;M%aQCj$U@j!a(1g$Q{QMG_)lXjh9I&$EaK8CY4tR4ekS2Ba{h3rqvFox6)qxEN$< zl*>{94D5qu^@1-l^U0mqBG6Lm9g@>txr3x^piauL7u8G$RgxCArr3+pl0}k6w}6+s zPQ~L>OHGM|&+I;|T&;m-?ED7`JANJr21%}LFZgNE>d`(rUhq!$TTi8$gMnh5eRP?o zc>s+?cyF|o$np)HzEgs800s~_+OSii+yp32!n6)?-5J8pjvGcw6R8%I(%KOeSjV1pEMhZZ?0ph z4u5EJy8i<*-*{`bML~Qw?08s9WLwDY5-kmo@M@l%jGcsv9gO8wRBRcOhuayj?KS0U ztXHmtSV_qYh1RoPnnl$@js~4pUqEZlIr*Z(;r+0Of|$d7L-wpX%CKhvt+_COWa+Dx zzuNKs414!KB_pi6oV6zsux@LAPb#$3mo5LWxBa?EWMe=)6-!-OHr?MlhQ%cj=K4WfBXh`KIHLL0$hu7yWOIodiIgqT%^j)O=2p{T^g&3+(Fg2! zMRXC4wkpsbaaE-C1Di!f#C})l=p;)bNvJpsB>Q9L67h{)te``l@x*$hOvljDWRkQ8y%|zw<_%%Pq^wQ zsA0kH1|I+|jE$tO89VT{4J_$sqrvVV1V=J36(81A#R&X!3^wm4wvBa8liO#qe}e@D^t2^C2> zfv*!DWfSIihHJsk*}kZg!-IYOU(i=Uwt78ch;aB%XUqV2KNPgH~AtJD3< zFVs#>_ZOy8(3-87KG+WG^2FB&O5)P}ab$16?xGb{83e}=&Lbs=%q_UH^ zU3lp$P!bonH>9(23@J@uSwEHmjv`6e+M0q?y4Mxe{W5G+!813Znj;eh85;y{bqq$p zN`fmkVU^@;R2i~hR&cpmN<3RWeckGAZ&`UOfcXe z8{>Mbrcc)a|9Qa_X#?^(*&M3BvK6S0H>vv@GP|orZe_#agWmGZvl6pkPLApl)$Vq0pS^%1~vyAD`86R!}gQ&!%wn9 zqN7R?yiW#hk(LnGLptIpgYSRmw}OG*e)3+DYi;)a#QH}EQZd$abs|xdRFj(>tIg9C zyLHEh{>r_h<&q_P(Kca=P_>Z^Xm%wBb#A9jHu%(dZe^la4f2$kAgGpuJV}p3R8tRI zD0fmcq62XF43xqZO1x7Y0n5g<;+3$Tx21OV{PCxHxLZdbpk}6g8e8V#+pfK~_6+%R zCW8Su23p!!{`%KnzUZTRH}X(UYn}k?gOGHm@9*K)q_taW~u$>=>;xuJ7(M;4)QnpSVspdiI=V- z)dcRqe(9gy+AeuJ6ZB3U_SUxkG>2bZBBSlX)k4)tHVshU$?qc-+P z_M9arRkew)aHeW= zZIo$qZE=C~0~wwcf6>%a(I=eRwVmH0W9^ph-L%7fpE}#r1&uZDQ~R^rF9*BRO0zD5 z9$=$O9v~ef8XkHFSF%`!v5&8|ah1_VF@!eNN$=v(Hr4KoWiW7LYtmK)mZzVrP|QWm z7q3pmbA)}Ia18NL9*}jFsk!F3*~yOAKEwx;J^xyUCuG^6+bf7|HY3IZePX1Z#nB~f zF-WbTXV}pa$}l;x;VhP;9)(<~7#ND*C@HXC33AtsA48%IYTxo(ZDtGW@v~n94VRhSlFM)u{!LcPvVWYv z1NTvkn~=3Vr8%2{9DNkC6cihBPGkXNi^wx=FA=U#edp#>w)WCpPLJLGkhRJBklpTa ziW?4D?PjNR!fZYvIn8#+8mnz$|2cmQ?fZdVPI0#d?D7d0t!V-#Qh+C*Behz#dlVl= z2GK_J)}X0?QHGUAkpv6QNKd4Vy!MmM7!-B4us3Ucg#P z*B_sM`#hWfYmbC0!YhAB{Ced#^MmuBg14(#*skryNVzxBgomR8Zmy~|K$F)BYn8$G zOGy!kMpVsY%TJgE(5}THV9R5#PO(5*L({>O3!8C`UQo^#CTOtQL+yE=X?^bbM6jc+ z@0;bldpbH?QNJsn4J9(6vEIp%bwl&>I$h|)U-CYDiH29#OTzs^5dW*f#G-T{NOuU+ z7{+36U%5CYa%Yi?0h%4pqo~qv4{gt-EGFx^Yp%L_*lMyQGad1C++lH7dV4EwizA+< zwpr8suh=AFrB<8~&~nsldc=-T7i7fY5oySk8t`Gzo54T;FyMcLBz62Ja=&4dcnHYP zKzj0F@7iUHJ^7*kC%)%3*l zZeqf&q`XPq#Pr^KH+j>0@51kM=H7)xlf2*W_j&%$^CuDRot>RIbLPyMGpEf+I`u0f zvEzgXN@dc(AaGw+**Vf38UDnkL(OsxzDMp4ZGCTv$-0QyBzypNHP!|Wr^indoR zY1AOm*d;cjr5zT6A@Ny9&nht}b#HL%XT=?mrcZkg-oJ|Mve*w-0 zz-qK!37dp7@T?Ha2hIX?fCh^W2juB70JVqhSB!HA77TiY~K~j<@&%@4{iqW3lJKA1E zoOmL;s{53vAx1{teF~e|9inkg!Y>QsOaZ4pEP9-&7^n0`qH#*6=`4G2w7-IJV@tx| z$WokH8P1nmQa-lPaGV@Q95E0as9|HV2Pb6{IXJS<|NY=Fo<6b4!>+>ob$9fRY~jL; z0-9ZZ@ZM*lhPC$@9^=d>?LSEv(ZRchf!evJ~2rP;#)F|6pIZ}?GQ{yK-)vcn*} zN=xN{DnOr&Pr}KxB(xxw1VqRX8(xVKk0yztGo>;#0wXT043Cg<#R%vP!tN0?ac&YZ zMz~4+)mbD%j5=#7!zwKcMJ>l?syn0r8#<<1m$HWrFaA)Sy?il?N&l1S-hY*tv<6;3 zapyoFiu?UzNBI3CB*BaQ*2O*BifG!|5&dX_$bO%NQDm97U}tf&mvq!VUp zz%at;@*JK{I+z-Fs1^vUs+6W}y)qnMOUNZ#9vQj`fjDZyX@p8$U`kTm2>#cPZ2sq{ 
zl-PT7fz2}>D);;Me((1Wt?>IR`r~l#wxV7V<5m5LFA9GjM)1azu|41NTHtN8a;;-= z4FExq-r)T;kaQE_L|}^Ctipwat+;Yv3;r)5v%0c!*GRRH{Qbahb6$nkhKwEIWNFn* zP6NVnl4VEMhk1QpRE>#`gN|Z{($4DH9(SSk z685my-G=rp?T5&yhOoy==`TCOB0)~@> zlQV!NvyHvS-0il;rX&}ni=+u9MfH41QfW%6%UeAJ-Jt&imBZPCvj0~hf4%B9An8&6 zBd{K_*&gf*$0AyZ<`QbtkP?G!lOx2ygIR>SBxI!#%|qP9se*j#cfa$~Dblx|m3GFI zk~+Xrf9s3^MXSHvJ^I*Rnf%ZL_j)(n>(f-Y*Qf7>-7>wjl)<&$igA0=@NEA$uJuFZ zd4y!)q^eQs&?M3X(3_gTNRdLjcfJ)DXKe|_j+x#43w=<; z&Wy7}Tz5Sa=~cxnLB2dZcNQX9Aji;n$-p}vDRw{216IYE9}IGWOFOT{puGlp0I z?EvWs^Q0vpuMks#(+QoaDSQzo0_j?zkZT3$h(mc~ZBlDGEf4%6vZZAXN0kf^ru-z0 ztg*eHID&2xiB2gny%C|>gY_XCQ(E9)ZeaB)v*ByYTyV6ua1r&ZH%7qbIu zPZT5vcV98A-Yz4dJ-^jpXqqn`z`X)EB|oq}q% zv->Cll$$hBiwGEYCv{Ix-EuCmXGi8F9x`;D5{BatVgRe4;-dp4kZz?&VfKZlHzfD!K-4|B^M3~_@>Y4K2i3)F25ic%VLwf1^E(0 z!Sqn#yG^=VTU``2c1#onKar=BEncV|;p#+S^!x#))O z$5>iX@l-j5eZ0U6KO*TR+#9D1#}n8QNVJJ1zfC2Uc4lZfiSI^J)L>@@gApRyEGLKU z>7yCB+WOPW0B97#iA3cv;gJ3SVZ`xOw!I3P9%xxfCdeSiP$dIMKwK~+QPMyY`%!@k zaA7F&$RcOebY^*^qaxfYomB@yrxi%55y?PDv7rzrBm-sFT_I_GWZg5H28#H2F`&GN z9VoH1=Y|NGj~Uth)bVmgbB3_$K6;VmQsn@pGDe7eY<_sMEU3Ym%FI^ z3#WVs+NrbKxt7Ym+^_ww?0(5?26(_od5T>>@HXmy{T+za(e^s!6_`hlU7nPM^6vfHk^S~K&#aP#-+nWT9(Usk@WbwY$q}=k{1w9RHvQ-Iz1_>d|F>Qq7(b-L+8=kj@|Czz3+Ct!}e?UR{Fjh zEdnllD(j;^yInV0j4JPgw|$>>SMS&Eb_thVzCXNnIa;^d0e;Ha@xTMh2`_{%ycfD! z@m^|U0_FH&i`m=2PwVTaptQGLS--Emf4e83-L|N9Pe8jZcpuiU-REAoBtm5+gkCWT68IC#V#z}r_2MObe;W}jSWG&8)Uomz7X$)M88TGDCd3~UeIqhV)Y!lcI|fHvgxd^`~j!D zuif?gw7atBXXTQpc2_v zyGQQR?rOk$`WcfZL}PE`1(26fW{8V3JieCRQE= zs;ZqRHy88es6*rn4gyK#RlZ<><`(ft`EcByFOM-zE8h6|hq=SoB*a*%u|{VL&MVd9 z-P*0)Apk|N(XVbk5obR{ZVN{iCXZiM8Aj?PK!o5tTjKUh6-QoqPIXQ}q0_1P-VYuq z2s7E`lWsq4t0B-I+t3|K@&yn1eQ^VI1$^KKcmh6Lze)V};RgbegD?8Lz%@ql>w5Zz z{XK#W`#}BcM3-ax%4K^6u5osKZ1=Jq+P7pobtuQT=u=wr)Cci^-ahbIOy6g;kI81h}m8rQc6siFs6R^bV+~H2UV6qMG>0< z0gmQGj*ll}e78p4frn&go&5}a9uUC~)?ZY4^nTT^+}^LA?D-b)z`NXq`eeo1eIwd` zT(*O9tna9DL}?Qovc3Yz+^)xbJ?{X|L-F1v@vea1ty2zy-EfuTr9*bZG`V!sD2DwJLp`H^+aA68xGHKmd?~~NNg%pZ+BL@L^ zvC_KE?!z`@!!ib)&TRwIj99mx8iaZg1C^Uk<2HxlH^hN^@4@n1j`^ixqMgcvXmHYDS z&}G0}ew_pKKqK*|FUC1AUnOB)mJr-(1B|(-#N1sZ=OP67ReA^fZz~3@{zYUl%O)O` zQbm60oj#b7NP!`Z{s>|(v~>lAGRCuz&gw`nMZu6z6^4^!+NUH$xycZAodQ|&YdRA= zUEFe~XOIhj5}X4h`NzUt3Od8iT#9mqE^7#~v-!M34a#sv>CF(l=QXh+NLnBf^eDlj`fH^(!6N=%vd;Y89AM5Q$wX_&vm!%` z|8ZvE9{)d?naBwLW?t;!{kK`6bwqpCLY5DE<{0Pf2ip{R(|F6sd14K`IlM9QuqQQo za1(=u6#hQsOQF?tdOe+HO=k>E1g5};Jrv3yvKf$$8AMc&m+NB43-o;ILB53Sw9;hF zV`#~~6!(!}p@my>QxdaH)2$_^?*=Rt5*Bzof>*yComA$0+$@&nn`Zt|hYE?k~E0spKucvn$zR z#Qa#4yGWNgP>^eaQla1v>x4gnDbuJ>A#yC?59Qe*EwyxMAS7Nw#d5NIQ4K-2ee~yTF^T?RgS)SD@~rvTO(H z-pZ#?U4^8IhFS6(om77Z_WE0UK0y5@)W4`-eV40=FH}F@-M`+d-g@`4YuI)^)m`dp z7%{)9%3X%`M<};CPbVtJ}s&Kx+W{8P1g|-dxIenCqs*6YxtS zWiK?GU8%B`?YEd9oko3r$x@UYnE0$juu4!fytGZ9%@+#_rE3K|Mr~^e2;0tu$PP%Y zO&U^?nx4Mc?apx5WM&16a?2|HZa3%uD$CHUG+$!s%DDLTsloiLocx&7J7}y=WBk|j zeER>RvFiVmu^uePnwGZzSQkWwIyx?XMr5dG%DJYsR}PxRncB`jqYv#ZUl?8P`bC!0 z0_^5zgJzmeA;}t&Yor`9R9`uyR$1N%T%IRoACz>T3)u$RFVG&!9ht}}cVok`%TfOh zbu3m494R}ww|&-{Jb ziz=t~Kr3>5u$mPbfZ)7D0;Cq$6+xJ1Zr%iB3>} z7s`t&6X8I77+i3>-@nsJ%_tZ;6z8a;4@w&qYpSKb#pT}AjLekOjI8B|q|Y|oL#9+$ z7tUL-$_=+7*QgIOl6@Ke;w*ct*kgCVR$Qi>Y1`>2#M*YR9*99B-3P>@a4w0Z3I&fx z1`Wfp9CW!HG)1hzQHTaTNcsX$E|6q->`aEaBWy}TqeMXkp#Vu42n7Qq3I>v)Pt=6+ z5wfr=E%p@_ZO_Ht@3j^AO3IjoFGUx271PRdNuiO zpHCWk)*{sy^$$H1_VW=vlc|4rN*$?$^GM!Y*;58qmY3ZV(hM%B!Lqh z#YG^V1~Nt$QuNJ(^x>RA$W_WYnSj`()2j?BiL2>M^Mv(eIJ}Z69FvEm1~kuEo{*J6 z6s&s_IB0kO(;Wq@R^?Q}jFpni-maW^K*9Ki3?l(b2LIf$+S8ASNo792|Gjw$^U{OK zv6DyD&Pa&0xG|%os5;*_+jRSawJ|miqI1->QvbS=PUd}jQh7!peX(5*EJ?iI!w+HG 
z)okEtv^@`4=nL^)pbw_+Q>iS!0ohJ(RRbugLjhw2R1eYP$%xgGzKw*#a55#ftxJZ- z4;j=XB^V=$+pQxCtI`WxV4Da0dW4-cbtZW%)aN8e1*vfd}@hhxe#@ zcKgU(rQ7-iTR#~@dw7qk_bgw;PUDZDq*vd7z2Fhl!+TV{FZnram1?3SO8E>sb}v=XQ5uItY_~7Hr0XOv-l*`OOy4!le!$(XY~54 z==S8gV$>c^h3Iz#*4aY5+j;`bf+ppbQ*$$_Ci9JHQKfn=PF6Tz+)5!;9yM4NT;7vh)y}JJGj!wQ&sYU5- zd`ja}dbR`4q~o^BLDRkM*fxY56mZ!OYzWwM2P??+QbJrB@t@(M65=XFmDpq9*g<+B zMLOh?DuvR=`DKKpOfKMhwbNTk$M$fp%r%t@vml+|Dd~hvCR9dE6$}L19)hB>T-sQb zj6`g#odq6f2G>F^4Pw(WJTw7*^vOy1hpZfur<0Ew8(%!R zFh?W>2G1>NyP&pmq1VecC3!Edm|mP=ZTVnKRZ8aQg+38?MW7&%WEGYKE?hTyRZh^C zG@xqqAkU&>*@lUwBZBisj!%lko`oO6r;>_1c453Am=%Ht;v$heMXF;UgHYO_kbvG= zDjq7zs{t~BiL`6dIVm?m9okkuK|JsEzJF;F{Fw@-Pny+UR9IS}1$`nG!6f+-_w);M zg!qYUusYEV9~Db1!S!i`*84YcO^Xsvf`*yRyg+k*OJ zPq8f>%(h!F1cD%Qdhhw3f+dwo%Ht=q>7-I-LP{+VPO*q=QW!RVO9m=f$lZ=02IwKA z4#L&|5E_QtiT!AgqZnx)U)VhsDENHBumR+QRXlF79Qef53*^#g-XMDg)e-(S9F}(0KLXREmBULKjrb%)GK@8!<-m8qyrn?$T%u#j-GPH zp&+7Wp>h}M70Piq@5ryYt#2-Hq6jJEu%9gzE_AHeRhLcS2PKt|zR01dtSeb1xr`yE z_3uPCav?&T9eHpEu;86DND&?>r3(BWEAbd5yI?m@^7c9++kF&g1FjzZwY;CWuen3$$e{*W; z{EpPrj^ON=xRixC_PU4X(Ot+*H`&QFXaE`UNkE1n)vn|a47Xr}aLW92eaXZe6(5PM z2w#ygCX^2yqwtbrX*FEX_lKD$y8JoaHBa_|dDXP>p5Xi_xM<=eKcabq))cXuocIxv z1BI1X5BZX-QuI?1zqzYUrBHOr$DS#}T_^;}f;~jKMv*;)0+Ns>S=vlI?vM|U=~!jy zK3ZkzM2T5cL>w7{b2z-g{#d>_Blp&2&sW zJ!hALe(>xI5{lvhB@MZgh|B5C^-c-aj>PJTOj4%s2?q*qWO z($NvNp@GuVdmxUDGNl(1nRTT}TJEfJTq=i3w>&s0?ut*2#c2eouE=4WJQm?q70MY1 zuVq9^QN}VlV}g>0;W3d;jL=SJc*2&z*`p>@ZeTm(`8b9IJ;>ytgsR}Ydq8^pfS-}~lHtxPGyMz75fpzkVk_t)Y1jonMEj8Jp1Tsh` z(O?k!8cvai{}dVw9DwCu@@3vbWCybclPaxVsgzSI%`I021)j&rd?gfwBqLw5V9yF$ zo+J#0ED#A8Urve~pB*63e|!;}O5R_O(RFesP3vC`GcO%e85wBQ8Zz|azO^9NNP}`b z`K-8JbILKVC?{PpmA+2wM1DUaAKT?`IOPXJp8ko$+f5%R*A(7y78ne^6GR?i8P7%9 zc!)qDB?tJ_2%qq4564qX0Vt*`GewI>auk_}RR(iWa5!R~!Z}iK%mL=YaWW=D20P`r zB_1UYD(K}wh~x?tkQSC}NV;6pneMS&+)403Ue-+5e8S%J0!c#78nQz$WH1oCQX{Jd zYN=lBEnj$PW^l&bL;e0C3;q5wd$lxSnMk6dwc<GH>?s2CpB zk#w7fM+aqSz&?{rJCTwN2`$WJ{zLNbprjz=9!MEt+yiKiDd9<{3tb6l__u@3s^9Y-Gfop3?xWfrZM z_kay&y~uSDDc{pq-mm`qPWg#A!wUsEHhqx%e($^MW7M$E7dI;JLk5No6eW_Huu4b( zmhlh}fRSHTqRdWW3OH< zMdl$1(Un>$Fvf=U1Y+Y-Ou$K`5Hb*hNpi?gCtR0;G!SS`YJuQ5Z_1*e~PrFO|15B)X7Y`5t^ z)~9lK<3*J>_Dp6!O1;7md{u+Kelpsfg%wqS_`eCt_m0$tJOy^jN=jP|DLO?;9^n*{ zNtBd$EEc4SA=^J>kh-d_N@;8C9Dxa#C{IKPqSzJdwIGy`H!i{{h;J05&>N^hI{RvA zhU_f1uWT!`<0+BuPo=2AIu+s_r14!|bu{XZv+Is4>l$qvFDKC&MMOqtd`&1&gTF>i zXR&8j3g#2SN@!d)4(z#wgEHwwMf#cWh`KQ}`=LQ}2KLv43Xwx$S9YM7qLN5*{5y}D zerALy!$D!M-4y3}l8JDV;|n*hnKrW}%Izj%SvbToCmdp%SkJTnfTAW$oj9de%qN$e zLAI@-V39xBmzhzV`J7{njk3wod|%7DuJu@koUKp8c@5-($wtCcW>lYug2 z9uneK7ZVLumx6aS5WP9_|zz;;nmLEkAU27W3eO_QPh9}zbmh^s&v!qDJq zTW&x;Q`JEu2-rsBC_>{fppo*$k(1YdLnHhUqkR&4afwv+Ebbri$?En#AMsf{!@;R` zi|3!w+WlRy*QYUp+_=5Jfe+$G`tsQpQHN*4ztN+>zfmWijodCcvXXxziDV3!%jn7r zuxR)@O7QUJ8x$B_`{4d*^vLa@0H#J!LKnE;8>rB^7V>&_fc$Q@52!QZo$ z-Xv;#;dw3nUG*Rpl@n(eU)SGRj~@(iFVoqNw_;pwXhLj5BSc*3A!^?+g@7aE(wEsX zWV{sRb#|hRD9^l%M6~z5Y7bir>2me7Q%)H^j}J!sTP92mR#&j=34izj{w84GA8PkQ zndZ?I9ZVDDd|~59Pe2`Mw?bYQDAfQa63NZUpz%wMd+$~ot$L5_v7=^76};S9hYw>H zMCsu#fUV(GHU;}%jhxTh$=0Ae2;bdF&U_;48xk1MvP*|AS!cStOQ@9gHZ0^Gg8Sz{ zREX$}o;COdjwtzDB=wMK@ zV0?;i>XyM_C3#kCQv4YbO`yM@a4*jec_uwr2`3OQp=<;snmF-nqy>?uP(*Ih{iTi< zD4o*j)aj$FOtI4M)1oNDP%8X9P!P%jT6YRNjr9OYR0Vm>MD$`C@W|s#33!DG-AvBq30*ybq8drBDx!d zL}-e=qbNLhCd%VAtru+eas+Vh!w4v_P|&YoGcr11VyDJ_ zPzXO(j3bL|)_vp1;!-hQ_>bc;Tm#aMK6-R+M&;j)OOH#if>}lV2L~P`{aU4LRx5dg z^xI`?qzt9>AdVx-uXS;sY}RMXp*0Xy9w%8U(16xQbfLaMxdr?l=Qb!Gr z5OH`qyP)L@WnwS|1M~@$<{b$rDZN`pKOH-16%fOVyNlC^84Mt?H4)5!u#}vVQxFkb_f^4@Y~?t; zL`kcBVV^(8)aCn1rO?saz8v$XIIb=Qiy|&A7<-?O#7K;td?eO_Hg1R3YMwK44A^#P 
zhK~gD3qy@y>r|kng3=i($z4{WiE{u_yGsp90)^?uVpw?tAexH=4@7fH#Oy~*h1@m& zQpqSU9)-1pwy+ew@cNU_T2uB8}E3o%*9}ALZ~RqjK0H zqRO$#*pINwN9s@B3e*Cgda5tW?fR49sz~Jx*b9Gz-8)jh9!ES`AMlq`e5BL9EC+oy zNO&f}Dqc+Y5CJF{H84wY*tf&@97$@37Lj)@MLfUG1Nx&64{jOvmh!$;mRab#A-(EPMdOUACBRYroz?HdlQx!3D` zB>%u*=CD9PNzLNYcB<>)uB*HEX3lWS<13Qt>h*arFPgO-D zkD-y%zU(3g;W`(BeWX3uzF7=N2)m>S&jpcbm*S$c4noXHc`n`7%jru=FfoP8rAu0Q z5ttVlh|5XzDS_9z2Ujp5cPw=0;iTMhBI_7Ws6ysgWVU-T)D3jZ8OXd)Hh5|KqwQP< zrjts-HVz_(30c#8EF<0>@18ts8L(4Yaa(M1NoG9EgvG_xv~m(628x? z5QmULnH=B{GZ5FqhfSJW948nOgHpOU%1Ht%Eo@wHzNWi0NWU*V!IA1eF~o4)7lxtB zRNRF$3=h1&M>6CYgnIT|mVff12}P^gE~V?3kUz|Q_|_TQimqSY{T_Hs>$`T|H5F@1 zk+8%APuU85?Bi*rArRSKWN>!sO7O=kP(aGO9Rca=hudVF%j#i$gQFjE&m$LaB^k^^ zDM;BPg=jxLAeBLahjbgswziB?JmLr~cfo3vA;4 zaVfb%k!HgW$wi_w=~bDvLpowAg}nAw;r(M)YA{uV!ggNp4_d) zZJ%@CWmR$E)hoiWhMyOb`w+$sw##+&nXl}0?spee(#0=9-o(%H)HbO zz#Z1v%9~D}Syxp!6C9*7y9$SQnc^9#VL}Qnwc9PzdN}trQi5Rtk~AyBLb`YeS`9cS zrO1o>Qpld@mrI$D@%5HiHwt5O4!J4pp=6(_A8`Wx?tVMjCp3B%8vDs`d0Be*;tcvbTJzwAU7bOH>1=QW zaJL8im+s7sr5GG(SR;QcnXC&Mf-Y5PI(VKuFXD1%=k_^4`N2aBbYfM?Yp-#S7?v>Z zR?1N!>mrv<&Q&bA`|~2!)cpWG1mEfYgHo}v(svYHseJCbXT!l&%Q2JE;K7Hx%)od0-UAe zd0b3_QP0wOk{tVF_0+$!E&=@xcl8W~}LA`W$xE z%9YJ~_hJ@GT2{BNk8_!-fHxNK6oDy;ba4j3Bt=XRemDWpM6&B5gA=*0C?S>HEEIXn zVNoE^9iz#n*JR^@S5W7dUupNu;`iF!{?54URlz#wzy4f^PsA-4fX2RCEU_}oXep4Q zPOt2W6AFt*7?FJ;gd4^gXD!ICsWtA=346;W86 zYIUA35BJl1;8|0Z=c_t`H5F~RV*UE{S8mwgEYWw~>Hhtlcixe*rn6Sz<60^46p`7I zNS8=T5gSjCFT$FP)zhSx5@5s9MUp8xi~tUDu6KZtGLAE4GNc1!!|sBR=JQG^jTO(> zwzmgURDAvNUV7nP@F_3lr>40?v_CvxIJA}L(u4j2_3;F5(?8VVu`L0GA< z^5tbjluQjaDpoT8$Rp>^J8I*CRcD=WsL#7>%Q-ydf)_71e`U)BMLUl<{D#}yq0W^}iaeW)fYY=N~su!dg zq!`g&i4`|N41_~0JOq-j8&|ht@zA*}@wjOdiZ<1+Xa1?B1E+2n(fB>DT25=M6Fl!m zta}f5ei&~4SXD@o1`DsSlRw%f116&?!RsQKvI|)&ak{JTdxHMlavvWdnH;?nv%BR! zYI}Om7d%Gl((4p^V03Q^?!}3dhHGT@L2po~+l(Y^$YUcZ)Wb2-H7kZP*xA`9Bf51- z5{N;nV&C}vxaQzC(7<4yckdtgi&S1%4(N9a#`F@Of*qg&d>jWBxF$&hy3Be_SIUOH z5(00p>748h(5?fCq~GZcMmJTP3e|!wZZpO_<{07}`k6lO89E;#;^IH~BtG7R2-w|? zLkk)**7d(LF_Qw270BOXOW+A2{e9mqaW}9F#y# z&?A!G5Rs~Me<1P(;xm$sXh9Hrq63pj#s=I<7)pnGQ%Hr|7M(@*CHoZnn4o&(357Or zhxm_~aoDOOX02I6yLL4359~H#&sx%OPPd!=DYi8%Kezw9!DfnigQEcw-r|}Mx~UE_ zV6rqjBvTZBy7bvfr`dxCLf@>}o|SUH!Fj3w+^zC?58G$<9tipUyB^wq2I)M1IPOvZ zghPjobwTgWT7?d)3c&8ScIC;GIU7@#TZvD=+QrSYa_wSUr-K$QO~YABk6yb9d1}bD zO9xCk->1lImQ{F7zHaW&#VhKbTVKDaXu`DPSn4-Z2bNAfV&rmO^?jqH^VL0{f`8Q8 z`SI+lF2b?W<({r44!fkUCuv*cDzjvYQOYrBNe5O7isQ)Yg5)Ys=+z!d6ke6X&ZNAu z-M@pV4PJ_@zOUd(!DTKI7lV4faiYoRT`A&xIN)Bdi^SNK-LRLluUEt-3LQKk8GhlF z;O&*lmEe7}F3l>+hbeu(z-H-6DR_m2vT#2fVl!=Nz6y!s<&!qa35beas<)bO@9dR;4?2%WFE} zUZvE8QYho5g$wu)5l3;GP$A~o3Dcn_i^>%r$r&%TYEHgKY;>6jt5`EblEA%n}}V7h$RIGXh*<}$ah>|G?1($gA$N8!!dL@X=HnjhjAF}TV5ZE z;M3CHlScL6`+S+1J{TrO&Zw#?%&+DLW))}nOEQC1!NSVQLZ~t^UlBNBD8?WDI80UI z5YBJ^VaV1C*@=7(sL=jH-qT<`iIN>k+tGzi`^QHbj^ZPQ{4G!ewA5G$=&xJl%{|wt z0np+^1$RUdb&zu3=kAKNU40VVq!;v1lsZ^8)HTozIF?Ov+a|4+bALNnH{ehqe~@1} zd~Kj;!v)FUAen4Syyd;O_+GCS&tDiE=khPf@%wX@_+4?;81tii4%-G8JlLto`&mW0 za?(S>Qvh9b8A!B=s6j^paJZ_A^=(|EBtKocRL70dCckct~^+- zk4Hw6T%2=2^wfjAe#2dc=rM}I2RcJA(*S;4!jV@B;~QNXp2bWrTeKl223d0B?LXYV zSTJm5G4c(#Ofp;)>N}cYq59 zh=tK0BOO60=&ZE-91P6v2v>Tfz+;7a3P$?E05G0Y5l<9@Q|uv*ofz^nr86f_7|@4| zF~-pq0-?iH=zxZ6HYkH^fZx;sM35YS{p*r6rpfGk~y zGvvnLfb6nCraR*Oa<})BMC+1X#4g&@>0VYBL7ymnXACk3;ucXr@AVOr%HbVI5fgX7$gD%BXNID2*(8Sx$nM9z@l@F3N| zwtGw1CB<@roctKcrPLWH> z&aCWx#+ia~Mz4?F3B!nAa%QYGYPIY;al3lgjy}|$KpH-NhVV1(m4xT%%68`&`Mi}s zooB9?%;!9-N;;e4JZp+z$2!ltlE`jzo(+`0>^uuaWgj`uvC2R`#(9p{o`_@4bAmF! 
z?e5Tykafy56vnk6@m(N0$ZK4;20E97&wd|oJ@bEr<@4EdZZ z%csca0eDXE*l-q5d1ARNFT}GqF|!LJtss29e8h3+96RX<675lY-?WG)DfsG zFRv`2-&OK=wfsFy{;r|l73K1GWnj{bd4Y*-jg2c>*K`Dy<5yervUMG;ZS80-()V=P zV=D?!aeC_-Jk173E75t2vJ{;wvl~~gYgvj18|>OBz$0G{U_b=uYJ8&wZoD?4Xs`-k zsFgra+kqLaZL62I)Dn>R>>ps54;X0R%@Vu-P&gQ>h)N)$68{aswFd!3ncvvf-rTw- zP(HY#vb3^t$S{=sLz_1I1d$@AwgXe$4rt5pUppXQkAG|M-%@-^7u>G_4oc;?R-(pQ zz|)GfevQgb7M~JWyHJW9cgrNqEon|`#m|OAI zj8-@H)>tQN*rVvc2mN-Yt->^kkTp9YjIlaNGOj}Suu>HGVSJpe+Xk<)0F0zRA0Xqh(R+5SF_aJ_`7 zj}EI))_(;3VWSpo+yrI25e!pX#9AUf72%z5NqD1mW@BjI{z@Sf#6;0aAEU76_OE7& zu+WT_3F|iYn`F=SJTy5_*|WI;HCrUUmr1;?llWZ$ytGJ`WwW4wvH%n{7h$|J!M5h2 zp=?y3-xZ}d7^xufx2_>AIw;ET0OM-}7M4mp6C0=bTqJQr>(Rzii^KxW3{jiC z28r669Ha@~vuT|82T^JN(NgO+hoin%gPYhC zx_`|G^$`alSnc&|*ItYMiC5b^g?j4a82$URS1N7teXDg}=n1C3>YwU0O3c~w65)Nc z`e=SyB$kM;+avDlf$-IiQi7T|1<}C1y;5z0%H|71dp0MuN7$ds{$D7N=xS~+{`a3} z8>0T>qt&tju-lN>xT8IrsLJNzM3WJ&O0-J+kJh72A+-O|4nh>))Nh~IfAp)-W<&t| z{}KoF{b;%We!Bmx6D>q`Y?8Hz8xZb@`e-MMrj)xy1^dLl&W z(m+ zUa^r1)~#tk7adKF0d%#xo!X}tlG!IUu4!yr+7g(xZdpt7^1!s_<&A6F8v{!L36-=r zH8up6ZIsnXY7fkDdJjx&MXO6YAQP7a8k^BvTVTB-BUeQ_a9S$~0Gi;^4g$C>(7Kks zR0JqD23nTV*q!f{#>7aRL{QN;sD?oE8VN#E>spMV2~A;C8=6~M0?Qf$>)IPvtZOL= z;Jgu7P(OF_%z1MIV`eN0EEqF;_Lv!S7Y#?vj;2<8+PJ>aZl`(m+LmUF40vo?x~5|z z0X%)e>~WJ(f6UnWY4vj#5zG_o=gycgXHH<^%-Ml4fmvf_&#fOfZ`zpIfm!or&zd=B z0w!h-aPnVpxkAoGTVtT1v14g-OFPE72s76XkXjl7O-t7|Vg{EtHm?UjOFsOaMQ0O28pu!-fsL=L5_$&2f8KOw5>e^w_?( zad|UHpz(<1jdE^E0?jLI;%aNe1ffbp>+*H08*#GR(A2!Vi7?*Y*KG+AT%d9FGQi&p z3!!h_nx!q6UlgxxZKFwdD9-^GliJZLkr!#cB^<|A?j=HXm4y;0a{1L@(XQ^ z?dw`Pnt_4HR|9AYw6MGt#DoqVlKp!MFAYdq>m4&1a{6m(-2ggi19a<`wlsspfdG(d zt0ZwCq@BjGp~+qoz1^>t$L0SW1obwSopsuax)O=|)k`;GMmidSTKP#Iq!_7ONRj2{ ztY)GBdl3*{0E3X&YsDg7i7}v+R*Vtcqcq_3i#NjW1}yU44@*F#EsYrRK7F=#pbbd@ zE81FD_v;_5?0+VaMWjmNtaLUk!>+d$e7016e>I-unRXv)rS;I#xT2$M<=U3grjFI9 zh0Tg}va6t` zmR6xn)h1~p@%#VYFCEMJS|TPNWZ*$}(W*<=7Fy{X4|NWaS5cU5)ZP(O7 z^e7aJ2!b4o2j*7clh5}399SHMSsbC2Ys0l_jiPJDpq(~|jwDtu#k-?}hpqSoQugfs z<30do4B9vqk^DM*r%mW}EeQRwN*r$VO@L*~4eMDVEKpupd6MCAk_y{HI(!B);NhPI zuMF}6$wdz90nm>ZAWA%lIKX1$>nuTrw^9&nxl{~QK|xdv#n4cwhH7vO8IFA0BXR59 zXjo0?;5ZI<7*4?9a1xG_^~w}wDvpEGF=I1v5Ig|K!8s5|=fRtBfwB+>v;%Pu!ojeB zEQUAK5{&c~+~Is8vaS6anOiO zl{2`uuqrl~RkI;%C>zFVSS=gQMzE2}2g-k74;;}4x7v7vH8j$umgOke8d*8g=`T!kR8MhMo!Mf>`=CZEoIBt za@N2a*$TFjHL+&4iXFyUU^iXO*05H#R{2=@gdNV>SUc-r>)3j>fgQm%vLo41Y!f@0 zZDz-?W7%=+cy@wvJv)(YVJESZ*(tDroyNAZ)7crg9po%_HamxHW9K5v_4(`qb|Jfn zUCb_Fm$L2bGIlw;0y){QVpp?kkoDj?c0IcRSu}29JJ`+a7Pb@F`fdYX48gs07yR9C zhrj%t>@M~%b~n3+-OK*X?qm0}-RuF}^!pHdm_5QCWskAP*%RzZ_7r=XJp~;1Ady~Dz-e&KxciDT$O7uSafc=Mk$Ub5pvrpKk>@)T``+|MR zzG7dqZ`il&JN7;91N@Qw#C~SKuwU74?05DD`;+ZO_Q4()Js9U)-eA-Q2?yc@n%8eLR_`@Km10)43lv-(~VFp3MV1hv)JqHEkB&M@pj(9*YWjy13!XqQddRM@>|dRIln&lhqV8RZUaVRlk~{W~y0gwi-}#)Lb=B9iZl`1!|!h zREyMNb)Z_J4pK|iGPPW-P%G6cb+B5k4pE1y!_*qJRvoU6P)Dkx)X{34Iz}CUCb*4H?JwTnU&Qa&8^VIq30(GIfNIg(JNIh6RL|v>N zs$8KiQJ1RA)a7b}+NiEjSE@~Fv${$>Ol?tDt83I&b**~1a-rI$wyPb=rRq9$y}Cg? zLfxnysUD?nQjbi9vt0$-@s$0~P)RWay)Kk^d)UE32>KW>p>RIa9>N)B* z^<4El^?daL^+NR`^ig;k z>VMP^)sNJV)lbw<)z8$=)i2a9)vwgA)o;{q)$i2r)gRO!)t}U#)nC+K)!)?L)j!lf z)xB!B+M_8N)3~N;nx<=p1}{y`(qgn&El!Kq5;T|Q);wCGmZW(#pO&npXsKG7mah4= z3@sCqShg0>a}x4by70 zTF4qBq~t*oMO&?{(OR{&+TmK8)~$LUS2JHxKqjscrl(tDbTHCB0qaCXqryZ}Ipq;2~(N5A%)=tq*)lSp4YNu;wXlH6? 
zX=iKaXxp@Nwez&|wF|ThwTrZiwM(>1we8wv+U436+LhW>+SS@M+O^ts+V$EE+Kt*x z+79hz?G|mPcB^)q)~SWGu(nI<(r(x8(C*ak(*C90t=*&DtNmNMPrF~+tv#SUs6C`T ztUaPVsy(JXu05eWsXe7Vtv#bXt39VZuf3qXsJ*1Uti7VWs=cPYuDzkXslBDWt-YhY ztG%b~(cafS(Eg)+sC}e;tbL+=s(q$?u6?0>sePq=t$m|?t9_?^ul=C?sQsk zs{N+@uKl6?sqNLewH{s3na*`p*K{5EY)oC~mL8+W>T!C!o}jyQx9-ss^(5V^`}AZz zMNie!^mN^?XXu%FmY%H#^c+1`&(jC!`Fep~s0Z~Ty;vWpm*|7^QoT$s*DLf&y-FXf zSL;Lcq53erMz7U}>m&4$`Y3(0UZ;=I$Lizs@%jXPqCQEVtk>&P^r`wZeY!qFpQ+E% z571}pbM(3TJbk{tKwqdY(ht-R(ht@T(HHB7>Pz&c`Z9gF-k>+?EA*9mlisYa(ht*H z^ws(ry;WbUAFj9Q?Rtm4PG7HY(2vkJ>PPBF>6`SU_09S*`my?P`tkY+`ic4${UrTl z{S^IF{WN{6e!6~!ex`nwezty&zD++@KTkhjzd*lGzevAWzeK-O->zS#U#?%FU#VZE zU#(xGU#nlIU$5Vw->BcD@6d17Z_#(^x9Yd)oq9+Q>$~(W{dWBh{Z9QZ{a^as`aSx+ z`oH!2^!xSQ`UCod`a}A|`XlZs_>5#D#Yi>MjC8|qWEh!7mXU1)j2t7^$TJ2Q`9^_JXatQSqu3Z| zlo*4IQlrc$H!6%uqska;R2xH#p~f(y#;7%h8zYR7#wcU7QD=-X#v0>{@x}yWqA|&s zY}6Z5jH$*nW4bZJm}$&14lrgLbBwvhJY&AGz*uN3G7dBjG7dHlF%}z#8cU3&#xi5M z(O@(hD~y#!lhJIfG7d9ZjMc^(qt#ez9B#B3?M8>O&RB13Fpe-b8b=yO8Jmowjm^d} z#<9k6#_`4p#)-xj<0Ru`;}qjm<1}Ndak_Daai(#Wakg=evCTNwIL|oWxWKs3xX8HJ zxWu^B*lt{ATy9)pTxncoTy0!qTx(osTyNZ9+-Tfn>@aRNZZUQmw;Hz@okqwA8@r4y z<96c?<4)r)<6p+z#y!Tp#=njGjQfq<#skKK#zV%##v{g~#$(3g#uLVq##6@A#xusV z#&gE=#tX)a#!JS_#w*6F#%spw#v8_)##_eQ#yiHl#(Tyd<9*`;<3GlS#z)4-#wW(7 z#%IRo#uvty##hGI#y7^d#&^c|#t+7i#!tr2#xKUN#&5>&#vjI?#$Kb_=rI+OncP%O z&D4<=%QS^)!G%B8j5FiS1k+`@O^=yqCLu7$XC|8|W~!NHrkj2#pXb>#2jRnnq|0Rs=}-^tIWY>wK>EbY7R4N%vy7}Il>%ijxtA^ zb>0CToE$DC`nRUmF89E)#f$kwdQr^_2v!cjpj|}4)bR7 z7IUY0t9hH*X@<K4(5}zF@v+zGS{^zGA*=zGl8|zG1#;zGc2`zGJ>?zGv<+ z-#0%n|6_h=eq?@Zeqw%VerA4deqnxTer0}beq(-XerJAf{$T!S{$&1a{$l=W{$~De z{$c)U?lrqjL}CagIPR~|1f?i5g%FmA5eViN@ghOEgj;w-BI4P+$Ul@UQbejq6Y0V) zGDN1x64@dkazw7k69YuPC=i7rD2ha}7${1_AWi7%GN|8c{2T zixFa^7$ruFIx$9!730KsF+ofelf-0EFQ$m8Vw#vPW{8<$mN-Dn7IVa0F;C1F3&cXP zNE|2*5(kSz#A0!%SR$5+Wn#H#5RGDmSSgxBvsfh#6D?x3SR-1+T5-5&6YZiytP|_S z262SgD2^0IiA~~Yu~{4=jupp=XNj}LIbxeQ zSDYu#7Z->N#YN&`af!H8Y!{b_%f%JqN^zCAT3jQp71xRD#SP*{ag*2~ZWgzQo#Iw; zo9GlF5f-~dm$+TrA?_4+iGPW^#XaI)@o#Y-V&-;<`^9eYfN}?7kRKKgDqYBG@`!jy zIUZT2P8JV~N5rGb&EhffxOhT5DV`Efi)X~M;yL9}@w|9JyeM80FN;^itKv1~KIMM# zx_CppDc%xqi+9Ak;ytlPxmUa|J`n#AABvB}$Kn(5srXEMF1`?7im$}i;v4a;_)dH; zeh@#3pTy7N7xAn3P5dtY5PyojqFeM>ip4B$sg`Ex$RuwfP!-CJY^7MKR+^P=`K=5q)5@~4t$>wdXv}J?T5Gs9!Ws#^*=VcI8e@&M##!U73D!hwk~P_?x29NA zt!dDx&9G)#v#bNG+14Cut~Jk^Z!NGET8pd$t%IzCtwXHE)}huCYpJ!&T5dI1jn)cl zrPX9LTdSv({T1tRt+A){)jx)+Xy{YqNEXb*y!q zb-Z|cOI>kEGI?dW@oo=0BooStAoo$_CZL`j`&a=+9F0d}NF0wARF0n4P zwp*83ms?j@S6WwDS6kOu*IL(E*IPGOH(EDYJFJ_nTdbYdt=4T;rxmio)-J2dy4||N z`hUc|2Y4ID(La0u2$HvVw+j`^HMZk8ZgCs}M*v4Bal!^!mMqDV+?2!$8cC2KKm#Bp zDo*dc_nz44y%#5On$w)#oWx0-?j&}4w`2c*yEAt<$bR4Fd!Fxm{-5;lc4lsOXUp8| z&TPSl?T^?WwLfNm+`iWSg#AhTQ}%WCr|r+!pS3?{f8M^{{(}8Q`%Cth?XTE3*k853 zW`EuOhW$bBwTYnU;@NfuUiHz7FHd@z+O=7b+%6fs=B90cv zh-1ZZ;&^ca?qNDnoFq;b_qJXoP7(JJr;7WE)5PiG3~{EwkqmLRxW70@JV1w!m%f#hkzj&BB5f@p|E zu_T&eS+qo3tcWW`M|4F`tco>pP#hBL;;?v>xJo=)JVrcLJWf1bJV88BJV`uRJViWJ zJWV`ZJVQKFJWD)VJV!iNJWo7dyg%^zUXT)d4=fvm5_2LWSi{eY-%i=5I2Juz#HSu-v4e?F!E%9yf9r0c9 zJ@I{UqxgaNq4<%wN&Hy+MEq3TEPf__E`A|?DSjn>Eq)_z5x*6`6TcU?ird5=#2>|< z#Gl1q#9zhV#NWj~#6QKq#J|OV#O>k^@n3PLxXZDegp+hq_%)=oGwk607n~>(j^ntF z=SW968E1pD(b?o|c8+qkI7d6jILA82ImbIEIQMc+bWU8nsd5y zhI6KKmUBPnZ0G*YInD!|F(>Qf9N!st@=n1iI)O9coa;Q$d64s9=RD^j&O@C^XUds& zW}I24~;1z7dw|YmpYd@mpl8NhdB>-9^qW! 
z9B>}#l%0xGb!txCnRDiy1*hRGI!jK|S$0}Z+gWk0bUIGg={c*;nsd-O!d7ATd=NZm3oo6}EcAn!r*Lj}veCGwu3!N7^ zFLqwyywrJ_^K$1E&MTc)Ij?qJ*Lk1we&++u2b~W&A9gc7EgB;{4Y6o%4I=R_8Y756&N*KRJJP{^I=A`J3~1=O4~L zoqsw1cK+ks?%d)0*SXWV%eCBun{-p|kehag-4S=xwOxT+W+eb}w-+buV)-clWywb06+L!o9*h;6BnVyA`+U*4(-~ z=gzweZo^%4m)xei?6%ytyW(EycHFMpb64Fp_n>>oU3U+=k8-baAMHNIeXRR9_wnu% z+$XwEa-Zxz#eJ&#H23N5Gu&so&vKvbKF58o`#ksg?hD)(x-W8H?7qZ(srxebixL|8)Q5{@eYJd%Jsw z`(O7?_b%Kil<<;X${X_1-mo{~je52xJjZiA4<|bn?v2{uZS*#Io4upFE#A@IG2XG> zao+LX3EsWD6TOqXlf8R;r+D}APWA5Vo#vhHo#CD7o#oxnJKMXzcaHY}Z_LYjInVdT zy}VcOieBJNc;|W#^d96r*gMaAi1$!$(wp+8y%}%TD|uVJ^Sy1}1>SaVhj*d3)7$0k z_Ac`FczeBl-o@S}-lg7U-sRqY?_u7kdS$QTRlS;5_vXBLZ^3JLi{6sg z^p?Gr*Y;MtE4_}_^?Kf_x8@!64teX|Vee7iRoPqQ_d@SQ-iy7LcrW!{=Dpl|h4)JDRo<(;*Lbh> zUgy2udxQ5z?`rQF?@ivDy|;L8_1@;a-Ft`kPVZgbyS?{#@Acm2z2EzQ_d)MN-iN)9 zcpvpX=6&3|*87C_N$*qMb>64F&v>8pKIeVjyWabP_eJkZ-j}_vcsF=o^}gnP-TQ|3 zP48RYx4rLp-}S!dec!v$`+@gE??>KE-jBVXct7=S_I~F5-1~+1OYc|Suf5-Rw|Kwx ze&_w(yVbkR`-AsK?@!*Jy}x*W_5SAl-TR04Pw!vezrFu>w|jSZ|Ml+l?vj>F$fQik zA(@uLazu_wTMFq&S9(%PB{On^+$cB6&GIO@MIJ4Wk;ls8GBMDraVjDPo6FBFVB$=kYh3{bJCaNGA|3VC<8em&y^3950Vd-=gEi2 zhssGgC8y<#oRuZHRh}=m$qVFmxkFwkcgkIIx4cO1k$dGnd9l1iUMeq>m&^U~Ve;Yf z5%LOoKt58IWkptHP1fa{oRaV`AqpN`E2< zugDwZSLN5_*X1|lH|4kFx2@}}Ps{Jf@5=AV@5>wI59AN!kK|4A$MPrgr}Ad`Gx>A* z3;9d=EBR~r8+nWTt^A$*y}VW4CjTJ+DE}n?EdL_^D*q<`F8?9_DgPz^E&n5Lmv_kj z$~)y<%2ElHR4Fy2(rQ?Zs8MArp&aEZPf4XzMr}|V)h4xB9i_IYqt!9$SaqB_UY(%s zrA|~Qsgu>c)hX&e>Qr@Kb(%U|ouSTDXQ}(Cv(^38IqCswOl4J0`D$F{RY4V1peEG0 z>VfJ(>cQ$f^$_(?HL0f5w3<<~s-(86^VK$Wf!eNis0-CjwM*?*7pXmJuiB?BR+p$t z)n)2(wO>6Ds9vOAtX`sCs$QmEu3n*Dsa~aCtzM&Et6ryGuil{EsIFGms5hxMtGB4Ps<)}P zt9Ph(s&}b(tM{n)s`sh)s}HCTst>6TtBND!I>T~Mz z>U#AB^+ok1^=0)Hb%Xk<`kMN>`iA5Z={ZRc#-K2i3exiP= zZdN~2KUcp{zf`|czgE9dx2WH$->KiLTh(pq59*KVPwLO=FY2%AZ|d*rAL^g#U+UlL zKk9aMhx)I&Q{9!p(WOi>lgbR?Lbc({NM>#{vmvuFvnjJVb5v$a z=IG2ZnPW4@Wsb+sKHn>IV&J$#oQjT>KI9vM#apwE5*cxRZ z<|EMEiXD*4Wh$&YA48n;`)QqpK0I`OrQ8`>z+-q@uhFd4hZ`ms*~T?#a80&RO&Zj6 z+qldP3X|I|NH!Lo3;Ovwi+$ML9xI5uG=R)w7i6o69dC{5@Q!j7n>UA>CUAGe<27T* z@D6IArU{02p!P#eJcf5t`WB_%8B5}}V#x4LO5dUeZk1Qs*zVbe5T#~Y^Qn4keq3>_-No)}xV6GMjgP#HQD?4?v)6F7VO zP3Ls`@aVoOcBVF)<$vHveh$s(#4{%G3@55lXk#xP z_T8;^TK23~1D$TYmfl-kI9PUPSGo=G#{StxD_t_VZPib)wT#cnkdq5*_BQANgA8rc zT(+BtOZqv~1i@{Vmn*fh7N({8$c35{IP5g7*Fw94B`p0+?rJ34jr4Al z&x(E~*E=J+j=h6zQ%8iU+3td%GeX&b5=*)6+WTmlEuT`3!p_Q9!^)^l= zN`rOkXf3UAM`rdLDIS?QOks_}%S#5dOP324&?q@gO1cm7S!Y* zM^zrg#UwrjDx4@s;OHceDx+l{R))6f{+h>Qcq`R#o`UTZESX?r7k{yB3O&Y&fyY$B zY~Ak*-@kBWS zMbyWPdK(b*&AZne``={X$g zFDhLzimYnjDzsZOUDL`cUDI?MA3JOLuGaFldFnTAJ*E~_96^drN8mcaQAg0K%{B%P zXOWAD4>3KjZuF-=TRR>XWXf{b>`%Q&TgVlS8iFX$Abn7W#zLk;9&H^LFd z-WKLyH!-I*19`fM&g_jeJ4W9RHS32L(#tw4*F0&^Zt17q=`9Tu=~;Vc znDLNS=wjDwh-cc%^?4fKsD7g>3k@0)JDQ8^=;0aTm_|_hj^;q{XeVTDh<0ZVyWsU} zH-gZz_MVWH8$l4}G#aJ7l)@0_ENTdXkAgYCb!+wXXav~wZcnot>2U+BBAUAz>&o<+ zPM==WvC;?4b6r2}%R|QUaMTbc>S3L23Jp1eKMgyv;VeG1p%f^pjJ*XqTE$pj?^PGP z3L2_{Qy*qbv!_CK9?K22rx!_u=53!J#uAG#9=bRa0Le8CYVQbNrLWXc<7`pqc)5V0 zXrr~mI2(cyd8meZ3}fg-RgyjIk3!20_spk3-_<04r<44hPV#p;$=~TDf2WiDolcg9 z7rLt}^^P$8X~5w{3Oe+{2!{bP2BhAhcYAd6Q|yBjteaqTrPElhBpI%ht4jzv zd9XthVt*;iOr)W$tM}^esn_bZtQ1G*Dq}iPk%tHBGSbo6;CEjLx9G!qp~bumnk zNe;3TPWb?Px*y6FB+}7FbccfK8kYDZTJ7$NZRTJm7oD#bFiln=!qFUrRAyaJnA07o zqgHJd*=0;Q!bt-Nn~>YG8B^;uVvdp+T8JHGFnlS-MhdAASM6KS#Wmlau7KfbrWhJC z1rITWI-3CqlCF!K(YQ&-1Q%kD0e*Ewza7y7qb}UwcWhF6IwkZxqQdd)paneD4o^+6(CmrPHNeAJoYT}nC9pvXp2l;vCpJ)Dg=AUQ&`7)s=%#M;8GR1R0S?o zflF23Rw;7+#k@Dr7gU)jvDJew97atiGjWmd0q^8bQbOz^Ikhz z4CL-IZe5&gMY7PFVGyxP&sGh%LC=~8c%==mp(&gYP8%1_$Sh40hxUV%0-yB01wccZ 
[binary patch data omitted]
zjd9FB*oL_N5OW4(qW)nBI}r{?+WlXV{?}pbAAc^hXsmxk;GH^s1=;Yi?`LW&>Sr`x zrVNdRZGgViNOUc*Z!6GPah-`a`~2z1CwnrJxG&s5_w0{iq@P@O5|iuB495E+`(v?z z?Lm5?KCD9Hl50(3Y%`u0wx>Hu9`msb*_Yra*iesb!}6ql0`K?dUObaJ%i?p4#w4~4 z>p}C1>Pem>)|1R9?SB>5PufG`=(x#qA?zgA4#it6H}P1c-+Vk2%>_R3NL z@%2weeEVa3KMseYe#S8wU+Y<a$`{@{!s~$=JiYq(2!P8(7aCrh1u)m^|!b)d=Ig3(J6=`(!qe zjBkIxf1i)WkIMMyJAE0v55(35#}DF@aeLr8VtHiqYt|R@aXieSu`r%G0AqW| z_<4PtB-%{MgyVB0>?x%G2>dWw>joO$NYaZ4$M!%n7veUA(Ox$aJzrohRA*Eto)KMj<3Z*Yeev=Zb6U}_{T zvk&jt1N=Cij-O>zBJPR#4fuo)qkYl3f9se$FG!5--)}<$I?K@B4%KxJ?8i8iaK@2( zFvk9j)blmv42KL~-XpSU~#tFJ_d@byc`x^b^SlIS=yWbqQq)$k^-#;<`WBx%plh+07jPr-k zo)eDz3L+NcJ_2_kn=!vF!TzNE3hil?u;0R4_<$WKrl`QDfoduzP!p)7LV>!#=hS=t zmt55E1Ji*S>a)NXfqzk5fu+C_^@ku6{3X>B{7vvPYA5(7O{aCu(>%Hjbfuy%;3@R& zJCDA4m(lm`27Dt-!8cnJe6>ZxcUu%y1yl>T9{x4}H3PN4|E~dc0PX_n1%gu^zUQLg zi!KVj>7wDQE{eMA@2R+=lW_kUMEo+pjbIss@v{h)hu(YpG46|ZcXE%9@(T&cGUx|A Z{pN4K`}L3fzn`ys@^Am|-~U?J_FwB5@0S1o diff --git a/tests/example_based/test_datasets_harmonization.py b/tests/example_based/test_datasets_harmonization.py index e9908c90..70d5efab 100644 --- a/tests/example_based/test_datasets_harmonization.py +++ b/tests/example_based/test_datasets_harmonization.py @@ -1,10 +1,9 @@ import pytest -import unittest import tensorflow as tf import numpy as np -from xplique.example_based.datasets_operations.tf_dataset_operations import are_dataset_first_elems_equal +from xplique.example_based.datasets_operations.tf_dataset_operations import are_dataset_first_elems_equal, is_batched from xplique.example_based.datasets_operations.harmonize import split_tf_dataset, harmonize_datasets @@ -13,7 +12,7 @@ def generate_tf_dataset(n_samples=100, n_features=10, n_labels=1, n_targets=None Utility function to generate TensorFlow datasets for testing. """ cases = np.random.random((n_samples, n_features, n_features)).astype(np.float32) - labels = np.random.randint(0, 2, size=(n_samples, n_labels)).astype(np.int64) + labels = np.random.randint(0, n_labels, size=(n_samples,)).astype(np.int64) if n_targets is not None: targets = np.random.random((n_samples, n_targets)).astype(np.float32) @@ -36,8 +35,8 @@ def test_split_tf_dataset_two_columns(): assert targets is None, "Targets dataset should be None for a 2-column dataset." for case_h, label_h, (case, label) in zip(cases, labels, dataset): - assert len(case.shape) == 3 - assert len(label.shape) == 2 + assert len(case_h.shape) == 3 and case_h.shape[1:] == (5, 5) + assert len(label_h.shape) == 1 assert np.allclose(case_h, case), "Cases should match the original dataset." assert np.allclose(label_h, label), "Labels should match the original dataset." @@ -51,25 +50,39 @@ def test_split_tf_dataset_three_columns(): assert targets is not None, "Targets dataset should not be None for a 3-column dataset." for case_h, label_h, target_h, (case, label, target) in zip(cases, labels, targets, dataset): - assert len(case.shape) == 3 - assert len(label.shape) == 2 - assert len(target.shape) == 2 + assert len(case_h.shape) == 3 and case_h.shape[1:] == (5, 5) + assert len(label_h.shape) == 1 + assert len(target_h.shape) == 2 and target_h.shape[1] == 2 assert np.allclose(case_h, case), "Cases should match the original dataset." assert np.allclose(label_h, label), "Labels should match the original dataset." assert np.allclose(target_h, target), "Targets should match the original dataset." 
def test_harmonize_datasets_with_tf_dataset(): - dataset = generate_tf_dataset(n_samples=100, n_features=5, n_labels=3) + nb_features = 5 + nb_labels = 3 + dataset = generate_tf_dataset(n_samples=100, n_features=nb_features, n_labels=nb_labels) batch_size = 10 + assert not is_batched(dataset), "Dataset should not be batched." + cases, labels, targets, batch_size_out = harmonize_datasets(dataset, batch_size=batch_size) + batched_dataset = dataset.batch(10) + + assert is_batched(cases), "Cases dataset should be batched." + assert is_batched(labels), "Labels dataset should be batched." assert cases is not None, "Cases dataset should not be None." assert labels is not None, "Labels dataset should not be None." assert targets is None, "Targets dataset should be None for a 2-column input dataset." assert batch_size_out == batch_size, "Output batch size should match the input batch size." + for case_h, label_h, (case, label) in zip(cases, labels, batched_dataset): + assert len(case_h.shape) == 3 and case_h.shape[1:] == (nb_features, nb_features) + assert len(label_h.shape) == 1 + assert np.allclose(case_h, case), "Cases should match the original dataset." + assert np.allclose(label_h, label), "Labels should match the original dataset." + def test_harmonize_datasets_with_tf_dataset_three_columns(): batch_size = 10 @@ -154,7 +167,6 @@ def test_inputs_combinations(): assert batch_size == 4 - def test_error_raising(): """ Test management of dataset init inputs @@ -174,67 +186,44 @@ def test_error_raising(): tf_shuffled = tf_dataset.shuffle(32, 0).batch(4) # Method initialization that should not work - test_raise_assertion_error = unittest.TestCase().assertRaises - # not input - test_raise_assertion_error(TypeError, harmonize_datasets) + with pytest.raises(TypeError): + harmonize_datasets() # shuffled - test_raise_assertion_error(AssertionError, harmonize_datasets, tf_shuffled,) + with pytest.raises(AssertionError): + harmonize_datasets(tf_shuffled) # mismatching types - test_raise_assertion_error(AssertionError, harmonize_datasets, tf_dataset, tf_tensor,) - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), - np_array, - ) - test_raise_assertion_error( - AssertionError, harmonize_datasets, tf_dataset_b3, too_short_np_array - ) - test_raise_assertion_error( - AssertionError, harmonize_datasets, tf_dataset, None, too_long_tf_dataset - ) + with pytest.raises(AssertionError): + harmonize_datasets(tf_dataset, tf_tensor) + with pytest.raises(AssertionError): + harmonize_datasets(tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), np_array) + with pytest.raises(AssertionError): + harmonize_datasets(tf_dataset_b3, too_short_np_array) + with pytest.raises(AssertionError): + harmonize_datasets(tf_dataset, None, too_long_tf_dataset) # not batched and no batch size provided - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - tf.data.Dataset.from_tensor_slices((tf_tensor, tf_tensor)), - tf_dataset, - ) + with pytest.raises(AssertionError): + harmonize_datasets(tf.data.Dataset.from_tensor_slices((tf_tensor, tf_tensor)), tf_dataset,) # not matching batch sizes - test_raise_assertion_error( - AssertionError, harmonize_datasets, tf_dataset_b3, tf_dataset_b5, - ) - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - too_long_tf_dataset_b10, - tf_dataset_b5, - ) + with pytest.raises(AssertionError): + harmonize_datasets(tf_dataset_b3, tf_dataset_b5,) + with pytest.raises(AssertionError): + 
harmonize_datasets(too_long_tf_dataset_b10, tf_dataset_b5,) # mismatching cardinality - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - tf_dataset_b5, - too_long_tf_dataset_b5, - ) + with pytest.raises(AssertionError): + harmonize_datasets(tf_dataset_b5, too_long_tf_dataset_b5,) # multiple datasets for labels or targets - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), - tf_dataset_b5, - ) - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5, tf_dataset_b5)), - None, - tf_dataset_b5, - ) + with pytest.raises(AssertionError): + harmonize_datasets(tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), tf_dataset_b5,) + with pytest.raises(AssertionError): + harmonize_datasets( + tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5, tf_dataset_b5)), + None, + tf_dataset_b5, + ) diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py index 912cf12e..9da17c68 100644 --- a/tests/example_based/test_projections.py +++ b/tests/example_based/test_projections.py @@ -196,11 +196,11 @@ def test_from_splitted_model(): assert model2(model1(x_train)).shape == (nb_samples, output_features) # test LatentSpaceProjection from splitted model - projection = LatentSpaceProjection.from_splitted_model(features_extractor=model1, mappable=True) + projection = LatentSpaceProjection(model=model1, latent_layer=None, mappable=True) projected_train_dataset = projection.project_dataset(train_dataset) # test HadamardProjection from splitted model - projection = HadamardProjection.from_splitted_model(features_extractor=model1, predictor=model2, mappable=True) + projection = HadamardProjection(features_extractor=model1, predictor=model2, mappable=True) projected_train_dataset = projection.project_dataset(train_dataset) diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index 7c890d63..08ffed65 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -33,11 +33,11 @@ def test_prototypes_global_explanations_basic(): method = method_class( cases_dataset=x_train, labels_dataset=y_train, - k=k, + nb_local_prototypes=k, batch_size=batch_size, case_returns=["examples", "distances", "labels", "indices"], distance="euclidean", - nb_prototypes=nb_prototypes, + nb_global_prototypes=nb_prototypes, gamma=gamma, ) @@ -122,23 +122,18 @@ def test_prototypes_global_sanity_check(): x_train, y_train = get_gaussian_data(nb_classes=nb_prototypes, nb_samples_class=5, n_dims=3) - print("DEBUG: test_prototypes_global_sanity_check: x_train", x_train) - for method_class in [MMDCritic, ProtoDash, ProtoGreedy]: - print("DEBUG: test_prototypes_global_sanity_check: method_class", method_class) # compute general prototypes method = method_class( cases_dataset=x_train, labels_dataset=y_train, - k=k, + nb_local_prototypes=k, batch_size=8, - nb_prototypes=nb_prototypes, + nb_global_prototypes=nb_prototypes, gamma=gamma, ) # extract prototypes prototypes_labels = method.get_global_prototypes()["prototypes_labels"] - print("DEBUG: test_prototypes_global_sanity_check: y_train", y_train) - print("DEBUG: test_prototypes_global_sanity_check: prototypes_labels", prototypes_labels) # check 1 assert len(tf.unique(prototypes_labels)[0]) == nb_prototypes @@ -175,11 +170,11 @@ def test_prototypes_with_projection(): method = method_class( cases_dataset=x_train, labels_dataset=y_train, - k=k, 
+ nb_local_prototypes=k, projection=weighted_projection, batch_size=batch_size, case_returns=["examples", "distances", "labels", "indices"], - nb_prototypes=nb_prototypes, + nb_global_prototypes=nb_prototypes, gamma=gamma, ) diff --git a/tests/example_based/test_tf_dataset_operation.py b/tests/example_based/test_tf_dataset_operation.py index e7190b9d..a8c92ee5 100644 --- a/tests/example_based/test_tf_dataset_operation.py +++ b/tests/example_based/test_tf_dataset_operation.py @@ -6,16 +6,25 @@ sys.path.append(os.getcwd()) -import unittest +import pytest import numpy as np import tensorflow as tf - from xplique.example_based.datasets_operations.tf_dataset_operations import * from xplique.example_based.datasets_operations.tf_dataset_operations import _almost_equal +def datasets_are_equal(dataset_1, dataset_2): + """ + Iterate over the datasets and compare the elements + """ + for elem_1, elem_2 in zip(dataset_1, dataset_2): + if not _almost_equal(elem_1, elem_2): + return False + return True + + def test_are_dataset_first_elems_equal(): """ Verify that the function is able to compare the first element of datasets @@ -53,22 +62,44 @@ def test_are_dataset_first_elems_equal(): ) -def test_is_not_shuffled(): +def test_is_shuffled(): """ Verify the function is able to detect dataset that do not provide stable order of elements """ + # test with non-shuffled datasets tf_dataset = tf.data.Dataset.from_tensor_slices( - tf.reshape(tf.range(90), (10, 3, 3)) + tf.reshape(tf.range(900), (100, 3, 3)) ) - tf_shuffled_once = tf_dataset.shuffle(3, reshuffle_each_iteration=False) zipped = tf.data.Dataset.zip((tf_dataset, tf_dataset)) + tf_mapped = tf_dataset.map(lambda x: x) + + assert not is_shuffled(tf_dataset) + assert not is_shuffled(tf_dataset.batch(3)) + assert not is_shuffled(zipped) + assert not is_shuffled(zipped.batch(3)) + assert not is_shuffled(tf_mapped) + + # test with shuffled datasets + tf_shuffled_once = tf_dataset.shuffle(3, reshuffle_each_iteration=False) + tf_shuffled_once_zipped = tf.data.Dataset.zip((tf_shuffled_once, tf_shuffled_once)) + tf_shuffled_once_mapped = tf_shuffled_once.map(lambda x: x) - assert is_not_shuffled(tf_dataset) - assert is_not_shuffled(tf_dataset.batch(3)) - assert is_not_shuffled(tf_shuffled_once) - assert is_not_shuffled(tf_shuffled_once.batch(3)) - assert is_not_shuffled(zipped) - assert is_not_shuffled(zipped.batch(3)) + assert not is_shuffled(tf_shuffled_once) + assert not is_shuffled(tf_shuffled_once.batch(3)) + assert not is_shuffled(tf_shuffled_once_zipped) + assert not is_shuffled(tf_shuffled_once_zipped.batch(3)) + assert not is_shuffled(tf_shuffled_once_mapped) + + # test with reshuffled datasets + tf_reshuffled = tf_dataset.shuffle(3, reshuffle_each_iteration=True) + tf_reshuffled_zipped = tf.data.Dataset.zip((tf_reshuffled, tf_reshuffled)) + tf_reshuffled_mapped = tf_reshuffled.map(lambda x: x) + + assert is_shuffled(tf_reshuffled) + assert is_shuffled(tf_reshuffled.batch(3)) + assert is_shuffled(tf_reshuffled_zipped) + assert is_shuffled(tf_reshuffled_zipped.batch(3)) + assert is_shuffled(tf_reshuffled_mapped) def test_batch_size_matches(): @@ -78,19 +109,36 @@ def test_batch_size_matches(): tf_dataset = tf.data.Dataset.from_tensor_slices( tf.reshape(tf.range(90), (10, 3, 3)) ) - tf_dataset_b1 = tf_dataset.batch(1) - tf_dataset_b2 = tf_dataset.batch(2) - tf_dataset_b5 = tf_dataset.batch(5) - tf_dataset_b25 = tf_dataset_b5.batch(2) - tf_dataset_b52 = tf_dataset_b2.batch(5) - tf_dataset_b32 = tf_dataset.batch(32) - - assert 
batch_size_matches(tf_dataset_b1, 1) - assert batch_size_matches(tf_dataset_b2, 2) - assert batch_size_matches(tf_dataset_b5, 5) - assert batch_size_matches(tf_dataset_b25, 2) - assert batch_size_matches(tf_dataset_b52, 5) - assert batch_size_matches(tf_dataset_b32, 10) + tf_b1 = tf_dataset.batch(1) + tf_b2 = tf_dataset.batch(2) + tf_b5 = tf_dataset.batch(5) + tf_b25 = tf_b5.batch(2) + tf_b52 = tf_b2.batch(5) + tf_b32 = tf_dataset.batch(32) + + tf_b5_shuffled = tf_b5.shuffle(3) + tf_b5_zipped = tf.data.Dataset.zip((tf_b5, tf_b5)) + tf_b5_mapped = tf_b5.map(lambda x: x) + + assert batch_size_matches(tf_b1, 1) + assert batch_size_matches(tf_b2, 2) + assert batch_size_matches(tf_b5, 5) + assert batch_size_matches(tf_b25, 2) + assert batch_size_matches(tf_b52, 5) + assert batch_size_matches(tf_b32, 10) + assert batch_size_matches(tf_b5_shuffled, 5) + assert batch_size_matches(tf_b5_zipped, 5) + assert batch_size_matches(tf_b5_mapped, 5) + + assert not batch_size_matches(tf_b1, 2) + assert not batch_size_matches(tf_b2, 1) + assert not batch_size_matches(tf_b5, 2) + assert not batch_size_matches(tf_b25, 5) + assert not batch_size_matches(tf_b52, 2) + assert not batch_size_matches(tf_b32, 5) + assert not batch_size_matches(tf_b5_shuffled, 2) + assert not batch_size_matches(tf_b5_zipped, 2) + assert not batch_size_matches(tf_b5_mapped, 2) def test_sanitize_dataset(): @@ -101,26 +149,27 @@ def test_sanitize_dataset(): np_array = np.array(tf_tensor) tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) tf_dataset_b4 = tf_dataset.batch(4) + tf_dataset_b4_mapped = tf_dataset_b4.map(lambda x: x).prefetch(2) - # test convertion + # test sanitize_dataset do not destroy the dataset assert sanitize_dataset(None, 1) is None - assert are_dataset_first_elems_equal(tf_dataset, tf_dataset) - assert are_dataset_first_elems_equal(tf_dataset_b4, tf_dataset_b4) - assert are_dataset_first_elems_equal( - sanitize_dataset(tf_tensor, 4, 3), tf_dataset_b4 - ) - assert are_dataset_first_elems_equal( - sanitize_dataset(np_array, 4, 3), tf_dataset_b4 - ) + assert datasets_are_equal(sanitize_dataset(tf_dataset_b4, 4), tf_dataset_b4) + assert datasets_are_equal(sanitize_dataset(tf_dataset_b4_mapped, 4), tf_dataset_b4) + + # test convertion to tf dataset + assert datasets_are_equal(sanitize_dataset(np_array, 4), tf_dataset_b4) + assert datasets_are_equal(sanitize_dataset(tf_tensor, 4), tf_dataset_b4) + assert datasets_are_equal(sanitize_dataset(tf_dataset, 4), tf_dataset_b4) # test catch assertion errors - test_raise_assertion_error = unittest.TestCase().assertRaises - test_raise_assertion_error( - AssertionError, sanitize_dataset, tf_dataset.shuffle(2).batch(4), 4 - ) - test_raise_assertion_error(AssertionError, sanitize_dataset, tf_dataset_b4, 3) - test_raise_assertion_error(AssertionError, sanitize_dataset, tf_dataset_b4, 4, 4) - test_raise_assertion_error(AssertionError, sanitize_dataset, np_array[:6], 4, 4) + with pytest.raises(AssertionError): + sanitize_dataset(tf_dataset.shuffle(2).batch(4), 4) + with pytest.raises(AssertionError): + sanitize_dataset(tf_dataset_b4, 3) + with pytest.raises(AssertionError): + sanitize_dataset(tf_dataset_b4, 4, 4) + with pytest.raises(AssertionError): + sanitize_dataset(np_array[:6], 4, 4) def test_dataset_gather(): diff --git a/tests/example_based/test_torch.py b/tests/example_based/test_torch.py index 61368559..3e48e21d 100644 --- a/tests/example_based/test_torch.py +++ b/tests/example_based/test_torch.py @@ -2,16 +2,20 @@ Test example-based methods with PyTorch models and 
datasets. """ -import unittest +import pytest import numpy as np import tensorflow as tf import torch from torch import nn -from torch.utils.data import TensorDataset, DataLoader, ConcatDataset - -from xplique.example_based import SimilarExamples -from xplique.example_based.projections import Projection, LatentSpaceProjection +import torch.nn.functional as F +from torch.utils.data import TensorDataset, DataLoader + +from xplique.example_based import ( + SimilarExamples, Cole, MMDCritic, ProtoDash, ProtoGreedy, + NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss, +) +from xplique.example_based.projections import Projection, LatentSpaceProjection, HadamardProjection from xplique.example_based.projections.commons import model_splitting from xplique.example_based.datasets_operations.tf_dataset_operations import are_dataset_first_elems_equal @@ -28,11 +32,13 @@ def get_setup(input_shape, nb_samples=10, nb_labels=10): x_train = torch.stack( [i * torch.ones(input_shape, dtype=torch.float32) for i in range(nb_samples)] ) + y_train = torch.arange(len(x_train), dtype=torch.int64) % nb_labels + train_targets = F.one_hot(y_train, num_classes=nb_labels).to(torch.float32) x_test = x_train[1:-1] # Exclude the first and last elements - y_train = torch.arange(len(x_train), dtype=torch.float32) % nb_labels + test_targets = train_targets[1:-1] # Exclude the first and last elements - return x_train, x_test, y_train + return x_train, x_test, y_train, train_targets, test_targets def create_cnn_model(input_shape, output_shape): @@ -184,57 +190,57 @@ def test_error_raising(): # Method initialization that should not work - test_raise_assertion_error = unittest.TestCase().assertRaises # not input - test_raise_assertion_error(TypeError, harmonize_datasets) + with pytest.raises(TypeError): + harmonize_datasets() # shuffled - test_raise_assertion_error(AssertionError, harmonize_datasets, torch_shuffled,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_shuffled,) # mismatching types - test_raise_assertion_error(AssertionError, harmonize_datasets, torch_dataloader_b3, torch_tensor,) - test_raise_assertion_error(AssertionError, harmonize_datasets, torch_tensor, tf_tensor,) - test_raise_assertion_error(AssertionError, harmonize_datasets, np_array, torch_tensor,) - test_raise_assertion_error(AssertionError, harmonize_datasets, np_array, torch_dataloader_b3,) - test_raise_assertion_error(AssertionError, harmonize_datasets, tf_dataset, torch_dataloader_b3,) - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_zipped2_dataloader_b5, tf_tensor, - ) + with pytest.raises(AssertionError): + harmonize_datasets(torch_dataloader_b3, torch_tensor,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_tensor, tf_tensor,) + with pytest.raises(AssertionError): + harmonize_datasets(np_array, torch_tensor,) + with pytest.raises(AssertionError): + harmonize_datasets(np_array, torch_dataloader_b3,) + with pytest.raises(AssertionError): + harmonize_datasets(tf_dataset, torch_dataloader_b3,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_zipped2_dataloader_b5, tf_tensor,) # labels or targets zipped - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_dataloader_b5, torch_zipped2_dataloader_b5, - ) - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_dataloader_b3, None, torch_zipped3_dataloader_b3, - ) + with pytest.raises(AssertionError): + harmonize_datasets(torch_dataloader_b5, 
torch_zipped2_dataloader_b5,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_dataloader_b3, None, torch_zipped3_dataloader_b3,) # not batched and no batch size provided - test_raise_assertion_error(AssertionError, harmonize_datasets, torch_dataloader,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_dataloader,) # not matching batch sizes - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_dataloader_b3, torch_dataloader_b5, - ) - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_zipped2_dataloader_b5, None, torch_dataloader_b3, - ) - test_raise_assertion_error( - AssertionError, - harmonize_datasets, - too_long_torch_dataloader_b10, - too_long_torch_dataloader_b10, - torch_dataloader_b5, - ) + with pytest.raises(AssertionError): + harmonize_datasets(torch_dataloader_b3, torch_dataloader_b5,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_zipped2_dataloader_b5, None, torch_dataloader_b3,) + + with pytest.raises(AssertionError): + harmonize_datasets( + too_long_torch_dataloader_b10, + too_long_torch_dataloader_b10, + torch_dataloader_b5, + ) # multiple datasets for labels or targets - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_zipped2_dataloader_b5, torch_dataloader_b5, - ) - test_raise_assertion_error( - AssertionError, harmonize_datasets, torch_zipped3_dataloader_b3, None, torch_dataloader_b3, - ) + with pytest.raises(AssertionError): + harmonize_datasets(torch_zipped2_dataloader_b5, torch_dataloader_b5,) + with pytest.raises(AssertionError): + harmonize_datasets(torch_zipped3_dataloader_b3, None, torch_dataloader_b3,) def test_torch_model_splitting(): @@ -288,7 +294,7 @@ def test_similar_examples_basic(): k = 3 batch_size = 4 - x_train, x_test, y_train = get_setup(input_shape) + x_train, x_test, y_train, _, _ = get_setup(input_shape) torch_dataset = TensorDataset(x_train, y_train) torch_dataloader = DataLoader(torch_dataset, batch_size=batch_size, shuffle=False) @@ -346,12 +352,12 @@ def test_similar_examples_with_splitting(): k = 3 batch_size = 4 - x_train, x_test, y_train = get_setup(input_shape, nb_samples, nb_labels) + x_train, x_test, y_train, _, _ = get_setup(input_shape, nb_samples, nb_labels) torch_dataset = TensorDataset(x_train, y_train) torch_dataloader = DataLoader(torch_dataset, batch_size=batch_size, shuffle=False) model = create_cnn_model(input_shape=torch_input_shape, output_shape=nb_labels) - projection = LatentSpaceProjection(model, "last_conv") + projection = LatentSpaceProjection(model, "last_conv", device=device) # Method initialization method = SimilarExamples( @@ -386,3 +392,58 @@ def test_similar_examples_with_splitting(): or almost_equal(np.array(labels[i, 1]), np.array(y_train[i])) assert almost_equal(np.array(labels[i, 2]), np.array(y_train[i]))\ or almost_equal(np.array(labels[i, 2]), np.array(y_train[i + 2])) + + +def test_all_methods_with_torch(): + # Setup + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + nb_samples = 13 + torch_input_shape = (3, 32, 32) + input_shape = (32, 32, 3) + nb_labels = 5 + batch_size = 4 + + x_train, x_test, y_train, train_targets, test_targets = get_setup(input_shape, nb_samples, nb_labels) + torch_dataset = TensorDataset(x_train, y_train) + torch_dataloader = DataLoader(torch_dataset, batch_size=batch_size, shuffle=False) + targets_dataloader = DataLoader(TensorDataset(train_targets), batch_size=batch_size, shuffle=False) + + model = 
create_cnn_model(input_shape=torch_input_shape, output_shape=nb_labels)
+    projection = HadamardProjection(model, "last_conv", device=device)
+
+    methods = [SimilarExamples, Cole, MMDCritic, ProtoDash, ProtoGreedy,
+               NaiveCounterFactuals, LabelAwareCounterFactuals, KLEORGlobalSim, KLEORSimMiss,]
+
+    for method_class in methods:
+        if method_class == Cole:
+            method = method_class(
+                cases_dataset=torch_dataloader,
+                targets_dataset=targets_dataloader,
+                case_returns="all",
+                model=model,
+                latent_layer="last_conv",
+                device=device,
+            )
+        else:
+            method = method_class(
+                cases_dataset=torch_dataloader,
+                targets_dataset=targets_dataloader,
+                projection=projection,
+                case_returns="all",
+            )
+
+        # Generate explanation
+        if method_class == LabelAwareCounterFactuals:
+            outputs = method.explain(x_test, cf_expected_classes=test_targets)
+        elif method_class in [NaiveCounterFactuals, KLEORGlobalSim, KLEORSimMiss]:
+            outputs = method.explain(x_test, targets=test_targets)
+        else:
+            outputs = method.explain(x_test, targets=None)
+
+        examples = outputs["examples"]
+        labels = outputs["labels"]
+
+        assert examples.shape == (len(x_test), 2) + input_shape
+        assert labels.shape == (len(x_test), 1)
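+
+        # Informative note: with `case_returns="all"`, the outputs dictionary is
+        # expected to also expose entries such as "distances" and "indices";
+        # only the "examples" and "labels" shapes are asserted above.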
diff --git a/tests/example_based/test_image_plot.py b/tests/plots/test_image_example_based_plot.py
similarity index 100%
rename from tests/example_based/test_image_plot.py
rename to tests/plots/test_image_example_based_plot.py
diff --git a/tests/utils.py b/tests/utils.py
index e7ae014e..1e32bc8a 100644
--- a/tests/utils.py
+++ b/tests/utils.py
@@ -272,63 +272,3 @@ def get_gaussian_data(nb_classes=3, nb_samples_class=20, n_dims=1):
         ], axis=0)
 
     return(X, y)
-
-def load_data(fname):
-    data_dir = Path('/home/mohamed-chafik.bakey/MMD-critic/data')
-    X, y = load_svmlight_file(str(data_dir / fname))
-    X = tf.constant(X.todense(), dtype=tf.float32)
-    y = tf.constant(np.array(y), dtype=tf.int64)
-    sort_indices = y.numpy().argsort()
-    X = tf.gather(X, sort_indices, axis=0)
-    y = tf.gather(y, sort_indices)
-    y -= 1
-    return X, y
-
-def plot(prototypes_sorted, prototype_weights_sorted, extension):
-
-    output_dir = Path('tests/example_based/tmp')
-    k = prototypes_sorted.shape[0]
-
-    # Visualize all prototypes
-    num_cols = 8
-    num_rows = ceil(k / num_cols)
-    fig, axes = plt.subplots(num_rows, num_cols, figsize=(6, num_rows * 1.25))
-    if prototype_weights_sorted is not None:
-        # Adjust the spacing between lines
-        plt.subplots_adjust(hspace=1)
-    for i, axis in enumerate(axes.ravel()):
-        if i >= k:
-            axis.axis('off')
-            continue
-        axis.imshow(prototypes_sorted[i].numpy().reshape(16, 16), cmap='gray')
-        if prototype_weights_sorted is not None:
-            axis.set_title("{:.2f}".format(prototype_weights_sorted[i].numpy()))
-        axis.axis('off')
-    # fig.suptitle(f'{k} Prototypes')
-    plt.savefig(output_dir / f'{k}_prototypes_{extension}.png')
-
-def plot_local_explanation(examples, x_test, extension):
-
-    output_dir = Path('tests/example_based/tmp')
-    k = examples.shape[1]
-
-    # Visualize
-    num_cols = k+1
-    num_rows = x_test.shape[0]
-    fig, axes = plt.subplots(num_rows, num_cols, figsize=(6, num_rows * 0.75))
-    # Adjust the spacing between lines
-    plt.subplots_adjust(hspace=1)
-    axes[0,0].set_title("x_test")
-    for i in range(examples.shape[0]):
-        axes[i,0].imshow(x_test[i].numpy().reshape(16, 16), cmap='gray')
-        axes[i,0].axis('off')
-        for j in range(examples.shape[1]):
-            axe = axes[i,j+1]
-            axe.imshow(examples[i,j].numpy().reshape(16, 16), cmap='gray')
-            # axe.set_title("{:.2f}".format(prototype_distances[i,j]))
-            if i == 0:
-                axe.set_title("prototype_{}".format(j + 1))
-            axe.axis('off')
-
-    fig.suptitle(f'{k}-nearst prototypes')
-    plt.savefig(output_dir / f'{k}_nearest_prototypes_{extension}.png')
\ No newline at end of file
diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py
index e935acb7..700ab60f 100644
--- a/xplique/example_based/base_example_method.py
+++ b/xplique/example_based/base_example_method.py
@@ -8,7 +8,7 @@
 import tensorflow as tf
 import numpy as np
 
-from ..types import Callable, Dict, List, Optional, Type, Union
+from ..types import Callable, Dict, List, Optional, Type, Union, DatasetOrTensor
 from ..commons import sanitize_inputs_targets
 from .datasets_operations.harmonize import harmonize_datasets
 
@@ -32,22 +32,22 @@ class BaseExampleMethod(ABC):
     ----------
     cases_dataset
         The dataset used to train the model, examples are extracted from this dataset.
-        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
-        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
-        the case for your dataset, otherwise, examples will not make sense.
+        All datasets (cases, labels, and targets) should be of the same type.
+        Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`,
+        `tf.Tensor`, `np.ndarray`, `torch.Tensor`.
+        For datasets with multiple columns, the first column is assumed to be the cases,
+        the second the labels, and the third the targets.
+        Warning: datasets tend to reshuffle at each iteration; make sure yours do not,
+        as elements are retrieved by their index in the dataset.
     labels_dataset
-        Labels associated to the examples in the dataset. Indices should match with cases_dataset.
-        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
-        Batch size and cardinality of other datasets should match `cases_dataset`.
-        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
-        the case for your dataset, otherwise, examples will not make sense.
+        Labels associated with the examples in the `cases_dataset`.
+        It should have the same type as `cases_dataset`.
     targets_dataset
-        Targets associated to the cases_dataset for dataset projection,
+        Targets associated with the `cases_dataset` for dataset projection,
         oftentimes the one-hot encoding of a model's predictions. See `projection` for details.
-        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
-        Batch size and cardinality of other datasets should match `cases_dataset`.
-        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
-        the case for your dataset, otherwise, examples will not make sense.
+        It should have the same type as `cases_dataset`.
+        It is not necessary for all projections.
+        Furthermore, projections which require it compute it internally by default.
     k
         The number of examples to retrieve per input.
     projection
@@ -70,28 +70,23 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
         String or list of string with the elements to return in `self.explain()`.
         See the returns property for details.
     batch_size
-        Number of sample treated simultaneously for projection and search.
-        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+        Number of samples treated simultaneously for projection and search.
+ Ignored if `cases_dataset` is a batched `tf.data.Dataset` or + a batched `torch.utils.data.DataLoader` is provided. """ # pylint: disable=too-many-instance-attributes _returns_possibilities = ["examples", "distances", "labels", "include_inputs"] def __init__( self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + cases_dataset: DatasetOrTensor, + labels_dataset: Optional[DatasetOrTensor] = None, + targets_dataset: Optional[DatasetOrTensor] = None, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, + batch_size: Optional[int] = None, ): - if projection is None: - warnings.warn( - "Example-based methods without projection will not explain the model."\ - + "To explain the model, consider using projections like the LatentSpaceProjection." - ) - # set attributes self.cases_dataset, self.labels_dataset, self.targets_dataset, self.batch_size =\ harmonize_datasets(cases_dataset, labels_dataset, targets_dataset, batch_size) @@ -104,6 +99,10 @@ def __init__( elif hasattr(projection, "__call__"): self.projection = Projection(get_weights=None, space_projection=projection) elif projection is None: + warnings.warn( + "Example-based methods without projection will not explain the model."\ + + "To explain the model, consider using projections like the LatentSpaceProjection." + ) self.projection = Projection(get_weights=None, space_projection=None) else: raise AttributeError( @@ -195,8 +194,10 @@ def explain( Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. targets - Targets associated to the cases_dataset for dataset projection. - See `projection` for details. + Targets associated to the `inputs` for projection. + Shape: (n, nb_classes) where n is the number of samples and + nb_classes is the number of classes. + It is used in the `projection`. But `projection` can compute it internally. Returns ------- @@ -261,8 +262,7 @@ def format_search_output( # include inputs inputs = tf.expand_dims(inputs, axis=1) examples = tf.concat([inputs, examples], axis=1) - if "examples" in self.returns: - return_dict["examples"] = examples + return_dict["examples"] = examples # add indices, distances, and labels if "indices" in self.returns: @@ -270,9 +270,9 @@ def format_search_output( if "distances" in self.returns: return_dict["distances"] = search_output["distances"] if "labels" in self.returns: - assert ( - examples_labels is not None - ), "The method cannot return labels without a label dataset." + assert (examples_labels is not None),\ + "The method cannot return labels without a label dataset. "\ + + "Either remove 'labels' from `case_returns` or provide a `labels_dataset`." return_dict["labels"] = examples_labels return return_dict diff --git a/xplique/example_based/counterfactuals.py b/xplique/example_based/counterfactuals.py index 0c6f1f04..a85f24e4 100644 --- a/xplique/example_based/counterfactuals.py +++ b/xplique/example_based/counterfactuals.py @@ -1,13 +1,11 @@ """ Implementation of both counterfactuals and semi factuals methods for classification tasks. 
""" -import warnings - import numpy as np import tensorflow as tf -from ..types import Callable, List, Optional, Union from ..commons import sanitize_inputs_targets +from ..types import Callable, List, Optional, Union, DatasetOrTensor from .base_example_method import BaseExampleMethod from .search_methods import ORDER, FilterKNN @@ -24,22 +22,21 @@ class NaiveCounterFactuals(BaseExampleMethod): ---------- cases_dataset The dataset used to train the model, examples are extracted from this dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + All datasets (cases, labels, and targets) should be of the same type. + Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`, + `tf.Tensor`, `np.ndarray`, `torch.Tensor`. + For datasets with multiple columns, the first column is assumed to be the cases. + While the second column is assumed to be the labels, and the third the targets. + Warning: datasets tend to reshuffle at each iteration, ensure the datasets are + not reshuffle as we use index in the dataset. targets_dataset - Targets are expected to be the one-hot encoding of - the model's predictions for the samples in cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Targets associated with the `cases_dataset` for dataset projection, + oftentimes the one-hot encoding of a model's predictions. See `projection` for detail. + They are also used to know the prediction of the model on the dataset. + It should have the same type as `cases_dataset`. labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Labels associated with the examples in the `cases_dataset`. + It should have the same type as `cases_dataset`. k The number of examples to retrieve per input. projection @@ -62,8 +59,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar String or list of string with the elements to return in `self.explain()`. See the base class returns property for more details. batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + Number of samples treated simultaneously for projection and search. + Ignored if `cases_dataset` is a batched `tf.data.Dataset` or + a batched `torch.utils.data.DataLoader` is provided. distance Distance function for examples search. 
It can be an integer, a string in
        {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable,
        by default "euclidean".
    """

    def __init__(
        self,
-        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-        targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-        labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None,
+        cases_dataset: DatasetOrTensor,
+        targets_dataset: DatasetOrTensor,
+        labels_dataset: Optional[DatasetOrTensor] = None,
         k: int = 1,
         projection: Union[Projection, Callable] = None,
         case_returns: Union[List[str], str] = "examples",
-        batch_size: Optional[int] = 32,
+        batch_size: Optional[int] = None,
         distance: Union[int, str, Callable] = "euclidean",
     ):
         super().__init__(
@@ -92,10 +90,6 @@ def __init__(
             batch_size=batch_size,
         )
 
-        # set distance function and order for the search method
-        self.distance = distance
-        self.order = ORDER.ASCENDING
-
         # initiate search_method
         self.search_method = self.search_method_class(
             cases_dataset=self.projected_cases_dataset,
@@ -105,7 +99,7 @@ def __init__(
             batch_size=self.batch_size,
             distance=distance,
             filter_fn=self.filter_fn,
-            order=self.order
+            order=ORDER.ASCENDING
         )
 
     @property
@@ -143,22 +137,21 @@ class LabelAwareCounterFactuals(BaseExampleMethod):
     ----------
     cases_dataset
         The dataset used to train the model, examples are extracted from this dataset.
-        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
-        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
-        the case for your dataset, otherwise, examples will not make sense.
+        All datasets (cases, labels, and targets) should be of the same type.
+        Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`,
+        `tf.Tensor`, `np.ndarray`, `torch.Tensor`.
+        For datasets with multiple columns, the first column is assumed to be the cases,
+        the second the labels, and the third the targets.
+        Warning: datasets tend to reshuffle at each iteration; make sure yours do not,
+        as elements are retrieved by their index in the dataset.
     targets_dataset
-        Targets are expected to be the one-hot encoding of the model's predictions
-        for the samples in cases_dataset.
-        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
-        Batch size and cardinality of other datasets should match `cases_dataset`.
-        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
-        the case for your dataset, otherwise, examples will not make sense.
+        Targets associated with the `cases_dataset` for dataset projection,
+        oftentimes the one-hot encoding of a model's predictions. See `projection` for details.
+        They are also used to know the predictions of the model on the dataset.
+        It should have the same type as `cases_dataset`.
     labels_dataset
-        Labels associated to the examples in the dataset. Indices should match with cases_dataset.
-        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
-        Batch size and cardinality of other datasets should match `cases_dataset`.
-        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
-        the case for your dataset, otherwise, examples will not make sense.
+        Labels associated with the examples in the `cases_dataset`.
+        It should have the same type as `cases_dataset`.
     k
         The number of examples to retrieve per input.
projection @@ -180,8 +173,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray): String or list of string with the elements to return in `self.explain()`. See the base class returns property for more details. batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + Number of samples treated simultaneously for projection and search. + Ignored if `cases_dataset` is a batched `tf.data.Dataset` or + a batched `torch.utils.data.DataLoader` is provided. distance Distance for the FilterKNN search method. Distance function for examples search. It can be an integer, a string in @@ -192,13 +186,13 @@ def custom_projection(inputs: tf.Tensor, np.ndarray): def __init__( self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + cases_dataset: DatasetOrTensor, + targets_dataset: DatasetOrTensor, + labels_dataset: Optional[DatasetOrTensor] = None, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, + batch_size: Optional[int] = None, distance: Union[int, str, Callable] = "euclidean", ): @@ -212,19 +206,6 @@ def __init__( batch_size=batch_size, ) - # raise a warning to specify that target in the explain method is not the same - # as the target used for the target dataset - warnings.warn( - "If your projection method requires the target, "\ - + "be aware that when using the explain method, "\ - + "the target provided is the class within one should search for the counterfactual."\ - + "\nThus, it is possible that the projection of the query is going wrong.") - self.warned = False - - # set distance function and order for the search method - self.distance = distance - self.order = ORDER.ASCENDING - # initiate search_method self.search_method = self.search_method_class( cases_dataset=self.projected_cases_dataset, @@ -234,7 +215,7 @@ def __init__( batch_size=self.batch_size, distance=distance, filter_fn=self.filter_fn, - order=self.order + order=ORDER.ASCENDING ) @property @@ -287,6 +268,10 @@ def explain( targets Tensor or Array. One-hot encoded labels or regression target (e.g {+1, -1}), one for each sample. If not provided, the model's predictions are used. + Targets associated to the `inputs` for projection. + Shape: (n, nb_classes) where n is the number of samples and + nb_classes is the number of classes. + It is used in the `projection`. But `projection` can compute it internally. cf_expected_classes Tensor or Array. One-hot encoding of the target class for the counterfactuals. @@ -297,13 +282,7 @@ def explain( The elements that can be returned are defined with the `_returns_possibilities` static attribute of the class. """ - if not self.warned: - warnings.warn( - "If your projection method requires the target, "\ - + "be aware that when using the explain method, the target provided "\ - + "is the class within one should search for the counterfactual."\ - + "\nThus, it is possible that the projection of the query is going wrong.") - self.warned = True + assert cf_expected_classes is not None, "cf_expected_classes should be provided." 
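+        # Informative note: `targets` (optional) is only used to project `inputs`,
+        # while `cf_expected_classes` defines the classes in which the
+        # counterfactuals are searched (see `filter_fn`).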
# project inputs into the search space projected_inputs = self.projection(inputs, targets) diff --git a/xplique/example_based/datasets_operations/convert_torch_to_tf.py b/xplique/example_based/datasets_operations/convert_torch_to_tf.py index 881f8b4c..2272edbd 100644 --- a/xplique/example_based/datasets_operations/convert_torch_to_tf.py +++ b/xplique/example_based/datasets_operations/convert_torch_to_tf.py @@ -9,12 +9,28 @@ from torch.utils.data import DataLoader -def convert_column_dataloader_to_tf_dataset(dataloader: torch.utils.data.DataLoader, - elements_shape: Tuple[int], - column_index: Optional[int] = None, - ) -> tf.data.Dataset: +def convert_column_dataloader_to_tf_dataset( + dataloader: torch.utils.data.DataLoader, + elements_shape: Tuple[int], + column_index: Optional[int] = None, + ) -> tf.data.Dataset: """ Converts a PyTorch torch.utils.data.DataLoader to a TensorFlow Dataset. + + Parameters + ---------- + dataloader + The DataLoader to convert. + elements_shape + The shape of the elements in the DataLoader. + column_index + The index of the column to convert. + If `None`, the entire DataLoader is converted. + + Returns + ------- + dataset + The converted dataset. """ # make generator from dataloader diff --git a/xplique/example_based/datasets_operations/harmonize.py b/xplique/example_based/datasets_operations/harmonize.py index 25beaee2..4a9760c7 100644 --- a/xplique/example_based/datasets_operations/harmonize.py +++ b/xplique/example_based/datasets_operations/harmonize.py @@ -4,17 +4,13 @@ import math -from typing import Optional, Tuple, TypeVar import numpy as np import tensorflow as tf +from ...types import Optional, Tuple, DatasetOrTensor from .tf_dataset_operations import sanitize_dataset, is_batched -DatasetTensor = TypeVar("DatasetTensor", - tf.Tensor, np.ndarray, "torch.Tensor", - tf.data.Dataset, "torch.utils.data.DataLoader") - def split_tf_dataset(cases_dataset: tf.data.Dataset, labels_dataset: Optional[tf.data.Dataset] = None, @@ -78,14 +74,14 @@ def split_tf_dataset(cases_dataset: tf.data.Dataset, def harmonize_datasets( - cases_dataset: DatasetTensor, - labels_dataset: Optional[DatasetTensor] = None, - targets_dataset: Optional[DatasetTensor] = None, + cases_dataset: DatasetOrTensor, + labels_dataset: Optional[DatasetOrTensor] = None, + targets_dataset: Optional[DatasetOrTensor] = None, batch_size: Optional[int] = None, - ) -> Tuple[DatasetTensor, DatasetTensor, DatasetTensor, int]: + ) -> Tuple[tf.data.Dataset, tf.data.Dataset, tf.data.Dataset, int]: """ - Harmonizes the provided datasets, ensuring they are either `tf.data.Dataset` or - `torch.utils.data.DataLoader`, and transforms them if necessary. + Harmonizes the provided datasets, transforming them to tf.data.Dataset if necessary. + Datasets are also checked in case they are shuffled or do not match in batch_size. If the datasets have multiple columns, the function will split them into cases, labels, and targets datasets based on the number of columns. @@ -93,28 +89,35 @@ def harmonize_datasets( Parameters ---------- - cases_dataset : DatasetTensor + cases_dataset The dataset used to train the model, examples are extracted from this dataset. - If the dataset has multiple columns, - the function will split it into cases, labels, and targets. - All datasets should be of the same type. - labels_dataset : Optional[DatasetTensor] + All datasets (cases, labels, and targets) should be of the same type. 
+        Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`,
+        `tf.Tensor`, `np.ndarray`, `torch.Tensor`.
+        For datasets with multiple columns, the first column is assumed to be the cases,
+        the second the labels, and the third the targets.
+        Warning: datasets tend to reshuffle at each iteration; make sure yours do not,
+        as elements are retrieved by their index in the dataset.
+    labels_dataset
         Labels associated with the examples in the `cases_dataset`.
-        All datasets should be of the same type.
-    targets_dataset : Optional[DatasetTensor]
-        Targets associated with the `cases_dataset` for dataset projection.
-        All datasets should be of the same type.
+        It should have the same type as `cases_dataset`.
+    targets_dataset
+        Targets associated with the `cases_dataset` for dataset projection,
+        oftentimes the one-hot encoding of a model's predictions. See `projection` for details.
+        It should have the same type as `cases_dataset`.
+        It is not necessary for all projections.
+        Furthermore, projections which require it compute it internally by default.
    batch_size : Optional[int]
        Number of samples treated simultaneously when using the datasets.
        It should match the batch size of the datasets if they are batched.

    Returns
    -------
-    cases_dataset : DatasetTensor
+    cases_dataset
        The harmonized dataset used to train the model.
-    labels_dataset : DatasetTensor
+    labels_dataset
        Harmonized labels associated with the `cases_dataset`.
-    targets_dataset : DatasetTensor
+    targets_dataset
        Harmonized targets associated with the `cases_dataset`.
    batch_size : int
        Number of samples treated simultaneously when using the datasets.
@@ -158,7 +161,7 @@
            batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy()
        else:
            assert batch_size is not None, (
-                "The dataset is not batched, hence the batch size should be provided."
+                "The dataset is not batched, hence a `batch_size` should be provided."
            )
            cases_dataset = cases_dataset.batch(batch_size)
            cardinality = cases_dataset.cardinality().numpy()
@@ -208,11 +211,6 @@

    # tensors will be converted to tf.data.Dataset via the sanitize function
    elif isinstance(cases_dataset, torch.utils.data.DataLoader):
-        if batch_size is not None:
-            assert cases_dataset.batch_size == batch_size, (
-                "The DataLoader batch size should match the provided batch size. "\
-                + f"Got {cases_dataset.batch_size} from DataLoader and {batch_size} specified."
-            )
        batch_size = cases_dataset.batch_size
        cardinality = len(cases_dataset)
        cases_dataset, labels_dataset, targets_dataset =\
diff --git a/xplique/example_based/datasets_operations/tf_dataset_operations.py b/xplique/example_based/datasets_operations/tf_dataset_operations.py
index f4300248..81743ad7 100644
--- a/xplique/example_based/datasets_operations/tf_dataset_operations.py
+++ b/xplique/example_based/datasets_operations/tf_dataset_operations.py
@@ -83,7 +83,7 @@ def is_batched(dataset: tf.data.Dataset) -> bool:
        return False

-def is_not_shuffled(dataset: Optional[tf.data.Dataset]) -> bool:
+def is_shuffled(dataset: Optional[tf.data.Dataset]) -> bool:
    """
    Test if the provided dataset reshuffles at each iteration.
    TensorFlow does not provide a clean way to verify it,
@@ -100,7 +100,11 @@ def is_not_shuffled(dataset: Optional[tf.data.Dataset]) -> bool:
    test_result
        Boolean value of the test.
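+
+    Examples
+    --------
+    Informative sketch (mirroring the accompanying tests; the check compares
+    first elements across iterations, so it is probabilistic):
+
+        reshuffled = tf.data.Dataset.range(100).shuffle(10, reshuffle_each_iteration=True)
+        is_shuffled(reshuffled)  # True (almost surely)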
""" - return are_dataset_first_elems_equal(dataset, dataset) + if are_dataset_first_elems_equal(dataset, dataset): + # test a second time to minimize the risk of false positive + return not are_dataset_first_elems_equal(dataset, dataset) + else: + return True def batch_size_matches(dataset: Optional[tf.data.Dataset], batch_size: int) -> bool: @@ -169,13 +173,16 @@ def sanitize_dataset( """ if dataset is not None: if isinstance(dataset, tf.data.Dataset): - assert is_not_shuffled(dataset), ( + assert not is_shuffled(dataset), ( "Datasets should not be shuffled, " + "the order of the element should stay the same at each iteration." ) - assert batch_size_matches( - dataset, batch_size - ), "The batch size should match between datasets." + if not is_batched(dataset): + dataset = dataset.batch(batch_size) + else: + assert batch_size_matches( + dataset, batch_size + ), "The batch size should match between datasets." elif isinstance(dataset, (tf.Tensor, np.ndarray)): dataset = tf.data.Dataset.from_tensor_slices(dataset).batch(batch_size) else: @@ -230,7 +237,7 @@ def dataset_gather(dataset: tf.data.Dataset, indices: tf.Tensor) -> tf.Tensor: Parameters ---------- dataset - Tensorflow dataset to verify or tensor to transform in `tf.data.Dataset` and verify. + The dataset from which to extract elements. indices Tensor of indices of elements to extract from the `dataset`. `indices` should be of dimensions (n, k, 2), diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index 9f3779be..78207ede 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -3,11 +3,13 @@ """ import warnings +import tensorflow as tf + from xplique.types import Optional from ...attributions.base import BlackBoxExplainer from ...attributions import Saliency -from ...types import Callable, Union, Optional +from ...types import Union, Optional from .base import Projection from .commons import model_splitting, target_free_classification_operator @@ -37,7 +39,6 @@ class AttributionProjection(Projection): the second to compute the attributions. By default, the model is not split. For such split, the `model` should be a `tf.keras.Model`. - Layer to target for the outputs (e.g logits or after softmax). If an `int` is provided it will be interpreted as a layer index. If a `string` is provided it will look for the layer name. @@ -54,7 +55,7 @@ class AttributionProjection(Projection): def __init__( self, - model: Callable, + model: Union[tf.keras.Model, 'torch.nn.Module'], attribution_method: BlackBoxExplainer = Saliency, latent_layer: Optional[Union[str, int]] = None, **attribution_kwargs diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index edf23d53..804c0c89 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -2,7 +2,6 @@ Base projection for similar examples in example based module """ -from abc import ABC import warnings import tensorflow as tf @@ -12,9 +11,9 @@ from ...types import Callable, Union, Optional -class Projection(ABC): +class Projection(): """ - Base class used by `NaturalExampleBasedExplainer` to project samples to a meaningful space + Base class used by `BaseExampleMethod` to project samples to a meaningful space for the model to explain. 
Projection have two parts a `space_projection` and `weights`, to apply a projection, @@ -101,6 +100,8 @@ def __init__(self, # set space_projection if space_projection is None: self.space_projection = lambda inputs: inputs + elif isinstance(space_projection, tf.types.experimental.PolymorphicFunction): + self.space_projection = space_projection elif hasattr(space_projection, "__call__"): self.mappable = False self.space_projection = space_projection @@ -220,7 +221,8 @@ def _loop_project_dataset( ) -> tf.data.Dataset: """ Apply the projection to a dataset without `Dataset.map`. - Because attribution methods create a `tf.data.Dataset` for batching, + Because some projections are not compatible with a `tf.data.Dataset.map`. + For example, the attribution methods, because they create a `tf.data.Dataset` for batching, however doing so inside a `Dataset.map` is not recommended. Parameters @@ -236,7 +238,7 @@ def _loop_project_dataset( The projected dataset. """ projected_cases_dataset = [] - batch_size = next(iter(cases_dataset)).shape[0] + batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() # iteratively project the dataset if targets_dataset is None: diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index 22b80326..e5a93ac6 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -21,11 +21,10 @@ def model_splitting( Parameters ---------- model - Model to be split. + Model to split. latent_layer Layer used to split the `model`. - Layer to target for the outputs (e.g logits or after softmax). If an `int` is provided it will be interpreted as a layer index. If a `string` is provided it will look for the layer name. @@ -64,11 +63,10 @@ def _tf_model_splitting(model: tf.keras.Model, Parameters ---------- model - Model to be split. + Model to split. latent_layer Layer used to split the `model`. - Layer to target for the outputs (e.g logits or after softmax). If an `int` is provided it will be interpreted as a layer index. If a `string` is provided it will look for the layer name. @@ -89,7 +87,7 @@ def _tf_model_splitting(model: tf.keras.Model, "Automatically splitting the provided TensorFlow model into two parts. "\ +"This splitting is not robust to all models. "\ +"It is recommended to split the model manually. "\ - +"Then the splitted parts can be provided through the `from_splitted_model` method.") + +"Then the splitted parts can be provided at the method initialization.") if latent_layer == "last_conv": latent_layer = next( @@ -104,18 +102,18 @@ def _tf_model_splitting(model: tf.keras.Model, second_input = tf.keras.Input(shape=latent_layer.output_shape[1:]) # Reconstruct the second part of the model - x = second_input + new_input = second_input layer_found = False for layer in model.layers: if layer_found: - x = layer(x) + new_input = layer(new_input) if layer == latent_layer: layer_found = True # Create the second part of the model (predictor) predictor = tf.keras.Model( inputs=second_input, - outputs=x, + outputs=new_input, name="predictor" ) @@ -134,11 +132,10 @@ def _torch_model_splitting( Parameters ---------- model - Model to be split. + Model to split. latent_layer Layer used to split the `model`. - Layer to target for the outputs (e.g logits or after softmax). If an `int` is provided it will be interpreted as a layer index. If a `string` is provided it will look for the layer name. 
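
The warnings above recommend splitting the model manually and passing the two parts at method initialization. A minimal sketch of such a manual split for a TensorFlow model follows; the toy architecture and the layer name `"latent"` are illustrative assumptions, and the reconstruction loop mirrors the strategy of `_tf_model_splitting` (which, as the warning says, is not robust to all models):

```python
import tensorflow as tf

# A toy classifier; any tf.keras.Model with an identifiable latent layer works similarly.
model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(8, 3, activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.Flatten(name="latent"),
    tf.keras.layers.Dense(10, activation="softmax"),
])

# First part: everything up to and including the latent layer.
latent = model.get_layer("latent").output
features_extractor = tf.keras.Model(model.input, latent, name="features_extractor")

# Second part: replay the remaining layers on a new input of the latent shape.
head_input = tf.keras.Input(shape=latent.shape[1:])
new_input = head_input
layer_found = False
for layer in model.layers:
    if layer_found:
        new_input = layer(new_input)
    if layer.name == "latent":
        layer_found = True
predictor = tf.keras.Model(head_input, new_input, name="predictor")
```

The two parts can then be given to the projections below through their `features_extractor` and `predictor` arguments.
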
@@ -165,7 +162,7 @@ def _torch_model_splitting(
         +"This splitting is based on `model.named_children()`. "\
         +"If the model cannot be reconstructed via sub-modules, errors are to be expected. "\
         +"It is recommended to split the model manually and wrap it with `TorchWrapper`. "\
-        +"Then the wrapped parts can be provided through the `from_splitted_model` method.")
+        +"Then the wrapped parts can be provided at the method initialization.")
 
     if device is None:
         warnings.warn(
@@ -190,16 +187,16 @@ def _torch_model_splitting(
             second_model.add_module(name, module)
 
     # Define forward function for the first model
-    def first_model_forward(x):
+    def first_model_forward(new_input):
         for module in first_model:
-            x = module(x)
-        return x
+            new_input = module(new_input)
+        return new_input
 
     # Define forward function for the second model
-    def second_model_forward(x):
+    def second_model_forward(new_input):
         for module in second_model:
-            x = module(x)
-        return x
+            new_input = module(new_input)
+        return new_input
 
     # Set the forward functions for the models
     first_model.forward = first_model_forward
@@ -246,12 +243,4 @@ def target_free_classification_operator(model: Callable,
     if targets is None:
         targets = tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1])
 
-    # this implementation did not pass the tests, the cond shapes were different if targets is None
-    # targets = tf.cond(
-    #     pred=tf.constant(targets is None, dtype=tf.bool),
-    #     true_fn=lambda: tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1]),
-    #     false_fn=lambda: targets,
-    # )
-
-    scores = tf.reduce_sum(predictions * targets, axis=-1)
-    return scores
+    return tf.reduce_sum(predictions * targets, axis=-1)
diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py
index 6255bcb2..fe71f465 100644
--- a/xplique/example_based/projections/hadamard.py
+++ b/xplique/example_based/projections/hadamard.py
@@ -7,7 +7,7 @@ from xplique.types import Optional
 
 from ...commons import get_gradient_functions
-from ...types import Callable, Union, Optional, OperatorSignature
+from ...types import Union, Optional, OperatorSignature
 
 from .base import Projection
 from .commons import model_splitting, target_free_classification_operator
@@ -32,12 +32,16 @@ class HadamardProjection(Projection):
     ----------
     model
         The model from which we want to obtain explanations.
+        It can be split manually outside of the projection and provided as two models:
+        the `features_extractor` and the `predictor`. In this case, `model` should be `None`.
+        It is recommended to split it manually.
     latent_layer
         Layer used to split the model, the first part will be used for projection and
         the second to compute the attributions. By default, the model is not split.
         For such split, the `model` should be a `tf.keras.Model`.
+        Ignored if `model` is `None`, i.e. if a split model is provided through
+        the `features_extractor` and the `predictor`.
 
-        Layer to target for the outputs (e.g logits or after softmax).
         If an `int` is provided it will be interpreted as a layer index.
         If a `string` is provided it will look for the layer name.
@@ -51,72 +55,55 @@ class HadamardProjection(Projection):
     device
         Device to use for the projection, if None, use the default device.
         Only used for PyTorch models. Ignored for TensorFlow models.
+    features_extractor
+        The feature extraction part of the model. Mapping inputs to the latent space.
+        Used to provide the first part of a split model.
+        It cannot be provided if a `model` is provided.
+        It should be provided with a `predictor`.
+    predictor
+        The prediction part of the model. Mapping the latent space to the outputs.
+        Used to provide the second part of a split model.
+        It cannot be provided if a `model` is provided.
+        It should be provided with a `features_extractor`.
+    mappable
+        If the model parts can be placed in a `tf.data.Dataset` mapping function.
+        It is not the case for wrapped PyTorch models.
+        If you encounter errors in the `project_dataset` method, you can set it to `False`.
+        Used only for a split model, thus if `model` is `None`.
     """
 
     def __init__(
         self,
-        model: Callable,
+        model: Optional[Union[tf.keras.Model, 'torch.nn.Module']] = None,
         latent_layer: Optional[Union[str, int]] = None,
         operator: Optional[OperatorSignature] = None,
         device: Union["torch.device", str] = None,
+        features_extractor: Optional[tf.keras.Model] = None,
+        predictor: Optional[tf.keras.Model] = None,
+        mappable: bool = True,
    ):
-        if latent_layer is None:
-            # no split
-            self.latent_layer = None
-            space_projection = None
-            self.predictor = model
+        if model is None:
+            assert features_extractor is not None and predictor is not None,\
+                "If no model is provided, the features_extractor and predictor should be provided."
+
+            assert isinstance(features_extractor, tf.keras.Model)\
+                   and isinstance(predictor, tf.keras.Model),\
+                "The features_extractor and predictor should be tf.keras.Model. "\
+                + "The xplique.wrappers.TorchWrapper can be used for PyTorch models."
         else:
-            # split the model if a latent_layer is provided
-            space_projection, self.predictor = model_splitting(model,
-                                                               latent_layer=latent_layer,
-                                                               device=device)
-
-        if operator is None:
-            warnings.warn("No operator provided, using standard classification operator. "\
-                          + "For non-classification tasks, please specify an operator.")
-            operator = target_free_classification_operator
-
-        # the weights are given by the gradient of the operator based on the predictor
-        gradients, _ = get_gradient_functions(self.predictor, operator)
-        get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets)
-
-        mappable = isinstance(model, tf.keras.Model)
-
-        # set methods
-        super().__init__(get_weights=get_weights,
-                         space_projection=space_projection,
-                         mappable=mappable,
-                         requires_targets=True)
-
-    @classmethod
-    def from_splitted_model(cls,
-                            features_extractor: tf.keras.Model,
-                            predictor: tf.keras.Model,
-                            operator: Optional[OperatorSignature] = None,
-                            mappable=True):
-        """
-        Create LatentSpaceProjection from a splitted model.
-        The projection will project the inputs in the latent space,
-        which corresponds to the output of the `features_extractor`.
-
-        Parameters
-        ----------
-        features_extractor
-            The feature extraction part of the model. Mapping inputs to the latent space.
-        predictor
-            The prediction part of the model. Mapping the latent space to the outputs.
-        operator
-            Operator to use to compute the explanation, if None use standard predictions.
-        mappable
-            If the model can be placed in a `tf.data.Dataset` mapping function.
-            It is not the case for wrapped PyTorch models.
-            If you encounter errors in the `project_dataset` method, you can set it to `False`.
-        """
-        assert isinstance(features_extractor, tf.keras.Model),\
-            f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\
-            f" instead. If you have a PyTorch model, you can use the `TorchWrapper`."
-        assert isinstance(predictor, tf.keras.Model),\
-            f"predictor should be a tf.keras.Model, got {type(predictor)}"\
-            f" instead. If you have a PyTorch model, you can use the `TorchWrapper`."
+            assert features_extractor is None and predictor is None,\
+                "If a model is provided, the features_extractor and predictor cannot be provided."
+
+            if latent_layer is None:
+                # no split
+                self.latent_layer = None
+                features_extractor = None
+                predictor = model
+            else:
+                # split the model if a latent_layer is provided
+                features_extractor, predictor = model_splitting(model,
+                                                                latent_layer=latent_layer,
+                                                                device=device)
+
+            mappable = isinstance(model, tf.keras.Model)
 
         if operator is None:
             warnings.warn("No operator provided, using standard classification operator. "\
@@ -127,12 +114,10 @@ def from_splitted_model(
         gradients, _ = get_gradient_functions(predictor, operator)
         get_weights = lambda inputs, targets: gradients(predictor, inputs, targets)
 
-        new_instance = cls.__new__(cls)
-        super(HadamardProjection, cls).__init__(
-            new_instance,
+        # set methods
+        super().__init__(
             get_weights=get_weights,
             space_projection=features_extractor,
             mappable=mappable,
             requires_targets=True
         )
-        return new_instance
diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py
index ee94778f..0d7a8db8 100644
--- a/xplique/example_based/projections/latent_space.py
+++ b/xplique/example_based/projections/latent_space.py
@@ -19,10 +19,12 @@ class LatentSpaceProjection(Projection):
     ----------
     model
         The model from which we want to obtain explanations.
+        It will be split if a `latent_layer` is provided.
+        Otherwise, it should be a `tf.keras.Model`.
+        It is recommended to split it manually and provide the first part of the model directly.
     latent_layer
         Layer used to split the `model`.
 
-        Layer to target for the outputs (e.g logits or after softmax).
         If an `int` is provided it will be interpreted as a layer index.
         If a `string` is provided it will look for the layer name.
@@ -31,47 +33,29 @@ class LatentSpaceProjection(Projection):
     device
         Device to use for the projection, if None, use the default device.
         Only used for PyTorch models. Ignored for TensorFlow models.
+    mappable
+        Used only if no `latent_layer` is provided, i.e. if the model is already split.
+        If the model can be placed in a `tf.data.Dataset` mapping function.
+        It is not the case for wrapped PyTorch models.
+        If you encounter errors in the `project_dataset` method, you can set it to `False`.
     """
 
     def __init__(self,
                  model: Union[tf.keras.Model, 'torch.nn.Module'],
                  latent_layer: Union[str, int] = -1,
                  device: Union["torch.device", str] = None,
+                 mappable: bool = True,
                  ):
-        features_extractor, _ = model_splitting(model, latent_layer=latent_layer, device=device)
-
-        mappable = isinstance(model, tf.keras.Model)
-        super().__init__(space_projection=features_extractor,
-                         mappable=mappable,
-                         requires_targets=False)
-
-    @classmethod
-    def from_splitted_model(cls,
-                            features_extractor: tf.keras.Model,
-                            mappable=True):
-        """
-        Create LatentSpaceProjection from a splitted model.
-        The projection will project the inputs in the latent space,
-        which corresponds to the output of the `features_extractor`.
-
-        Parameters
-        ----------
-        features_extractor
-            The feature extraction part of the model. Mapping inputs to the latent space.
-        mappable
-            If the model can be placed in a `tf.data.Dataset` mapping function.
-            It is not the case for wrapped PyTorch models.
-            If you encounter errors in the `project_dataset` method, you can set it to `False`.
- """ - assert isinstance(features_extractor, tf.keras.Model),\ - f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ - f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." - - new_instance = cls.__new__(cls) - super(LatentSpaceProjection, cls).__init__( - new_instance, + if latent_layer is None: + assert isinstance(model, tf.keras.Model),\ + "If no latent_layer is provided, the model should be a tf.keras.Model." + features_extractor = model + else: + features_extractor, _ = model_splitting(model, latent_layer=latent_layer, device=device) + mappable = isinstance(model, tf.keras.Model) + + super().__init__( space_projection=features_extractor, mappable=mappable, requires_targets=False ) - return new_instance diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 8b753774..5e058c0d 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -7,7 +7,7 @@ import tensorflow as tf import numpy as np -from ..types import Callable, Dict, List, Optional, Type, Union +from ..types import Callable, Dict, List, Optional, Type, Union, DatasetOrTensor from .datasets_operations.tf_dataset_operations import dataset_gather @@ -24,31 +24,36 @@ class Prototypes(BaseExampleMethod, ABC): Parameters ---------- cases_dataset - The dataset used to train the model, examples are extracted from the dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + The dataset used to train the model, examples are extracted from this dataset. + All datasets (cases, labels, and targets) should be of the same type. + Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`, + `tf.Tensor`, `np.ndarray`, `torch.Tensor`. + For datasets with multiple columns, the first column is assumed to be the cases. + While the second column is assumed to be the labels, and the third the targets. + Warning: datasets tend to reshuffle at each iteration, ensure the datasets are + not reshuffle as we use index in the dataset. labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Labels associated with the examples in the `cases_dataset`. + It should have the same type as `cases_dataset`. targets_dataset - Targets associated to the cases_dataset for dataset projection. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other dataset should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. - k - For decision explanations, the number of closest prototypes to return. Used in `explain`. - Default is 1, which means that only the closest prototype is returned. + Targets associated with the `cases_dataset` for dataset projection, + oftentimes the one-hot encoding of a model's predictions. 
See `projection` for detail.
+        It should have the same type as `cases_dataset`.
+        It is not necessary for all projections.
+        Furthermore, projections that require it compute it internally by default.
+    nb_global_prototypes
+        Number of prototypes to select to explain the dataset or the model.
+        They define the number of elements returned by the `get_global_prototypes` method.
+        They have a large impact on the computation time of the method.
+    nb_local_prototypes
+        Number of prototypes to select to explain the decision of the model on given inputs.
+        They define the number of elements returned by the `explain` method.
+        (Calling this method does not make sense if `projection` is `None`.)
     projection
         Projection or Callable that project samples from the input space to the search space.
         The search space should be a space where distance make sense for the model.
         The output of the projection should be a two dimensional tensor. (nb_samples, nb_features).
-        `projection` should not be `None`, otherwise,
-        all examples could be computed only with the `search_method`.
+        If `projection` is `None`, the model is not explained and prototypes represent the dataset.
 
         Example of Callable:
         ```
@@ -66,17 +71,14 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
         See `self.set_returns()` for detail.
         In the case of prototypes, the indices returned by local search are
         the indices of the prototypes in the list of prototypes.
-        To obtain the indices of the prototypes in the dataset, use `self.prototypes_indices`.
+        To obtain the indices of the prototypes in the dataset, use `get_global_prototypes`.
     batch_size
-        Number of sample treated simultaneously for projection and search.
+        Number of samples treated simultaneously for projection and search.
         Ignored if `tf.data.Dataset` are provided (these are supposed to be batched).
     distance
         Distance function for examples search. It can be an integer, a string in
         {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable.
         By default a distance function based on the kernel_fn is used.
-    nb_prototypes : int
-        For general explanations, the number of prototypes to select.
-        If `class_wise` is True, it will correspond to the number of prototypes per class.
     kernel_fn : Callable, optional
         Kernel function, by default the rbf kernel.
         This function must only use TensorFlow operations.
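
As a usage sketch for this revised signature, the snippet below instantiates one of the concrete `Prototypes` subclasses with a custom TensorFlow-only kernel. It assumes `MMDCritic` is exposed under `xplique.example_based` as the README method table suggests, and that `x_train`/`y_train` are in-memory arrays; both are assumptions made for illustration:

```python
import tensorflow as tf
from xplique.example_based import MMDCritic  # assumed concrete Prototypes subclass

def rbf_kernel(x1, x2, gamma=0.1):
    # Pairwise RBF kernel built from TensorFlow operations only,
    # as required by `kernel_fn`.
    squared_dist = tf.reduce_sum(
        tf.square(x1[:, None, :] - x2[None, :, :]), axis=-1)
    return tf.exp(-gamma * squared_dist)

explainer = MMDCritic(
    cases_dataset=x_train,        # e.g. a (n, features) np.ndarray
    labels_dataset=y_train,
    nb_global_prototypes=20,      # size of the global prototype set
    nb_local_prototypes=3,        # k returned by `explain`
    kernel_fn=rbf_kernel,
    case_returns=["examples", "distances"],
)
prototypes = explainer.get_global_prototypes()
```

A `projection` would additionally be needed for `explain` to make sense, as the docstring above notes.
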
@@ -87,15 +89,15 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar def __init__( self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, + cases_dataset: DatasetOrTensor, + labels_dataset: Optional[DatasetOrTensor] = None, + targets_dataset: Optional[DatasetOrTensor] = None, + nb_global_prototypes: int = 1, + nb_local_prototypes: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, + batch_size: Optional[int] = None, distance: Optional[Union[int, str, Callable]] = None, - nb_prototypes: int = 1, kernel_fn: callable = None, gamma: float = None ): @@ -104,20 +106,17 @@ def __init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, targets_dataset=targets_dataset, - k=k, + k=nb_local_prototypes, projection=projection, case_returns=case_returns, batch_size=batch_size, ) - # set prototypes parameters - self.nb_prototypes = nb_prototypes - # initiate search_method and search global prototypes self.global_prototypes_search_method = self.search_method_class( cases_dataset=self.projected_cases_dataset, batch_size=self.batch_size, - nb_prototypes=self.nb_prototypes, + nb_prototypes=nb_global_prototypes, kernel_fn=kernel_fn, gamma=gamma ) @@ -202,9 +201,6 @@ def format_search_output( inputs Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). - # targets - # Targets associated to the cases_dataset for dataset projection. - # See `projection` for details. Returns ------- @@ -232,8 +228,7 @@ def format_search_output( # include inputs inputs = tf.expand_dims(inputs, axis=1) examples = tf.concat([inputs, examples], axis=1) - if "examples" in self.returns: - return_dict["examples"] = examples + return_dict["examples"] = examples # add indices, distances, and labels if "indices" in self.returns: @@ -249,7 +244,7 @@ def format_search_output( self.prototypes_labels is not None ), "The method cannot return labels without a label dataset." - # (n * k) + # (n * k,) labels = tf.gather(params=self.prototypes_labels, indices=flatten_indices) # (n, k) return_dict["labels"] = tf.reshape(labels, (inputs.shape[0], self.k)) diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 8c1ada13..4ad07867 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -19,9 +19,11 @@ class ORDER(Enum): ASCENDING = 1 DESCENDING = 2 -def _sanitize_returns(returns: Optional[Union[List[str], str]] = None, - possibilities: List[str] = None, - default: Union[List[str], str] = None): +def _sanitize_returns( + returns: Optional[Union[List[str], str]] = None, + possibilities: List[str] = None, + default: Union[List[str], str] = None + ) -> List[str]: """ It cleans the `returns` parameter. Results is either a sublist of possibilities or a value among possibilities. @@ -76,13 +78,14 @@ class BaseSearchMethod(ABC): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. k - The number of examples to retrieve. + The number of examples to retrieve at each call. search_returns String or list of string with the elements to return in `self.find_examples()`. 
- It should be a subset of `self._returns_possibilities`. + It should be a subset of `self._returns_possibilities` or `"all"`. + See self.returns setter for more detail. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the cases_dataset in the case of a `tf.data.Dataset`. """ _returns_possibilities = ["examples", "indices", "distances", "include_inputs"] @@ -95,8 +98,8 @@ def __init__( ): # set batch size - if hasattr(cases_dataset, "_batch_size"): - self.batch_size = tf.cast(cases_dataset._batch_size, tf.int32) + if isinstance(cases_dataset, tf.data.Dataset): + self.batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() else: self.batch_size = batch_size diff --git a/xplique/example_based/search_methods/common.py b/xplique/example_based/search_methods/common.py index 109adab2..5f2a23d4 100644 --- a/xplique/example_based/search_methods/common.py +++ b/xplique/example_based/search_methods/common.py @@ -1,6 +1,7 @@ """ Common functions for search methods. """ +# pylint: disable=invalid-name import numpy as np import tensorflow as tf @@ -43,7 +44,7 @@ def _euclidean_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor: tf.Tensor Euclidean distance between the two vectors. """ - return tf.norm(x1 - x2, axis=-1) + return tf.norm(x1 - x2, ord="euclidean", axis=-1) def _cosine_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor: diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index 38269572..7698b561 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -43,7 +43,7 @@ class BaseKLEORSearch(FilterKNN, ABC): String or list of string with the elements to return in `self.find_examples()`. It should be a subset of `self._returns_possibilities`. batch_size - Number of sample treated simultaneously. + Number of samples treated simultaneously. distance Distance function for examples search. It can be an integer, a string in {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, @@ -203,6 +203,7 @@ def kneighbors(self, The n NUNs times the k-SF. """ # pylint: disable=signature-differs + # pylint: disable=duplicate-code # get the Nearest Unlike Neighbors and their distance to the related input nuns, nuns_indices, nuns_input_distances = self._get_nuns(inputs, targets) @@ -303,7 +304,7 @@ class KLEORSimMissSearch(BaseKLEORSearch): def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, - nuns_input_distances: tf.Tensor) -> Tuple: + nuns_input_distances: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """ No additional filtering for the KLEORSimMiss method. """ @@ -325,9 +326,25 @@ class KLEORGlobalSimSearch(BaseKLEORSearch): def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, - nuns_input_distances: tf.Tensor) -> Tuple: + nuns_input_distances: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """ Filter the distances to keep only the SF that are 'between' the input and its NUN. + + Parameters + ---------- + nun_sf_distances + Distances between the SF and the NUN. + input_sf_distances + Distances between the SF and the input. + nuns_input_distances + Distances between the input and the NUN. + + Returns + ------- + nun_sf_distances + Filtered distances between the SF and the NUN. 
+ input_sf_distances + Filtered distances between the SF and the input. """ # filter non acceptable cases, i.e. cases for which the distance to the input is greater # than the distance between the input and its nun diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index 1d42b569..e842ebd1 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -31,8 +31,8 @@ class BaseKNN(BaseSearchMethod): String or list of string with the elements to return in `self.find_examples()`. It should be a subset of `self._returns_possibilities`. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the `cases_dataset` in the case of a `tf.data.Dataset`. order The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. @@ -88,7 +88,8 @@ def kneighbors(self, Where, n represent the number of inputs and k the number of corresponding examples. The index of each element is encoded by two values, the batch index and the index of the element in the batch. - Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`. + Those indices can be used through: + `xplique.example_based.datasets_operations.tf_dataset_operation.dataset_gather`. """ raise NotImplementedError @@ -123,11 +124,37 @@ def find_examples(self, return return_dict - def _build_return_dict(self, inputs, examples_distances, examples_indices) -> dict: + def _build_return_dict(self, + inputs: Union[tf.Tensor, np.ndarray], + examples_distances: tf.Tensor, + examples_indices: tf.Tensor + ) -> dict: """ Build the return dict based on the `self.returns` values. It builds the return dict with the value in the subset of ['examples', 'include_inputs', 'indices', 'distances'] which is commonly shared. + + Parameters + ---------- + inputs + Tensor or Array. Input samples to be explained. + Assumed to have been already projected. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + examples_distances + Tensor of distances between the knn and the inputs with dimension (n, k). + The n inputs times their k-nearest neighbors. + examples_indices + Tensor of indices of the knn in `self.cases_dataset` with dimension (n, k, 2). + Where, n represent the number of inputs and k the number of corresponding examples. + The index of each element is encoded by two values, + the batch index and the index of the element in the batch. + Those indices can be used through: + `xplique.example_based.datasets_operations.tf_dataset_operation.dataset_gather`. + + Returns + ------- + return_dict + Dictionary containing the elements to return which are specified in `self.returns`. """ # Set values in return dict return_dict = {} @@ -163,8 +190,8 @@ class KNN(BaseKNN): String or list of string with the elements to return in `self.find_examples()`. It should be a subset of `self._returns_possibilities`. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the `cases_dataset` in the case of a `tf.data.Dataset`. order The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. 
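
To make the `(n, k, 2)` index convention above concrete, here is a hypothetical toy illustration of how such index pairs map back to dataset elements via `dataset_gather`; the dataset content and index values are made up, and the returned shape follows the element shape:

```python
import tensorflow as tf
from xplique.example_based.datasets_operations.tf_dataset_operations import dataset_gather

# 100 scalar cases, batched by 10.
cases = tf.data.Dataset.from_tensor_slices(tf.range(100)).batch(10)

# Indices as returned by `kneighbors`: each pair is
# (batch index, index of the element within that batch).
indices = tf.constant([
    [[0, 3], [2, 7]],  # neighbors of input 0 -> elements 3 and 27
    [[9, 1], [4, 0]],  # neighbors of input 1 -> elements 91 and 40
])

examples = dataset_gather(cases, indices)  # -> values [[3, 27], [91, 40]]
```
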
@@ -213,6 +240,7 @@ def _crossed_distances_fn(self, x1, x2) -> tf.Tensor: distances Tensor of distances between the inputs and the cases with dimension (n, m). """ + # pylint: disable=invalid-name n = x1.shape[0] m = x2.shape[0] x2 = tf.expand_dims(x2, axis=0) @@ -255,7 +283,8 @@ def kneighbors(self, Where, n represent the number of inputs and k the number of corresponding examples. The index of each element is encoded by two values, the batch index and the index of the element in the batch. - Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`. + Those indices can be used through: + `xplique.example_based.datasets_operations.tf_dataset_operation.dataset_gather`. """ nb_inputs = tf.shape(inputs)[0] @@ -329,8 +358,8 @@ class FilterKNN(BaseKNN): String or list of string with the elements to return in `self.find_examples()`. It should be a subset of `self._returns_possibilities`. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the `cases_dataset` in the case of a `tf.data.Dataset`. order The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. @@ -357,6 +386,7 @@ def __init__( order: ORDER = ORDER.ASCENDING, filter_fn: Optional[Callable] = None, ): + # pylint: disable=invalid-name super().__init__( cases_dataset=cases_dataset, k=k, @@ -412,6 +442,7 @@ def _crossed_distances_fn(self, x1, x2, mask): distances Tensor of distances between the inputs and the cases with dimension (n, m). """ + # pylint: disable=invalid-name n = x1.shape[0] m = x2.shape[0] x2 = tf.expand_dims(x2, axis=0) @@ -457,7 +488,8 @@ def kneighbors(self, Where, n represent the number of inputs and k the number of corresponding examples. The index of each element is encoded by two values, the batch index and the index of the element in the batch. - Those indices can be used through `xplique.commons.tf_dataset_operation.dataset_gather`. + Those indices can be used through: + `xplique.example_based.datasets_operations.tf_dataset_operation.dataset_gather`. """ nb_inputs = tf.shape(inputs)[0] diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py index 1e5d4b5a..778b79d9 100644 --- a/xplique/example_based/search_methods/mmd_critic_search.py +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -24,8 +24,8 @@ class MMDCriticSearch(ProtoGreedySearch): The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the `cases_dataset` in the case of a `tf.data.Dataset`. nb_prototypes : int Number of prototypes to find. 
kernel_fn : Callable, optional @@ -102,8 +102,7 @@ def _compute_batch_objectives(self, # (bc,) objectives = sum1 - sum2 - # (bc, |S|+1) - 1/(|S|+1) - objectives_weights = tf.fill(dims=(nb_candidates, extended_nb_selected), - value=1.0 / tf.cast(extended_nb_selected, tf.float32)) + # (bc, |S|+1) - ones (the weights are normalized later) + objectives_weights = tf.ones((nb_candidates, extended_nb_selected), dtype=tf.float32) return objectives, objectives_weights diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index 6df09b51..ba26f838 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -95,8 +95,8 @@ class ProtoDashSearch(ProtoGreedySearch): The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the `cases_dataset` in the case of a `tf.data.Dataset`. nb_prototypes : int Number of prototypes to find. kernel_fn : Callable, optional diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 01ff76cb..69a87ea7 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -27,8 +27,8 @@ class ProtoGreedySearch(): The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. batch_size - Number of sample treated simultaneously. - It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. + Number of samples treated simultaneously. + It should match the batch size of the `cases_dataset` in the case of a `tf.data.Dataset`. nb_prototypes : int Number of prototypes to find. kernel_fn : Callable, optional @@ -51,9 +51,10 @@ def __init__( kernel_fn: callable = None, gamma: float = None ): + # pylint: disable=duplicate-code # set batch size - if hasattr(cases_dataset, "_batch_size"): - self.batch_size = tf.cast(cases_dataset._batch_size, tf.int32) + if isinstance(cases_dataset, tf.data.Dataset): + self.batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() else: self.batch_size = batch_size @@ -98,6 +99,7 @@ def _get_distance_fn(self, distance: Optional[Union[int, str, Callable]]) -> Cal Callable Distance function for examples search. 
""" + # pylint: disable=invalid-name if distance is None: def kernel_induced_distance(x1, x2): def dist(x): @@ -106,7 +108,7 @@ def dist(x): self.kernel_fn(x1, x1) - 2 * self.kernel_fn(x1, x) + self.kernel_fn(x, x) ) distance = tf.map_fn(dist, x2) - return tf.squeeze(distance) + return tf.squeeze(distance, axis=[1, 2]) return kernel_induced_distance return get_distance_function(distance) @@ -486,6 +488,7 @@ def find_global_prototypes(self, nb_prototypes: int): best_weights = objectives_weights[objectives_argmax] # update the selected prototypes + # pylint: disable=unknown-option-value # pylint: disable=possibly-used-before-assignment last_selected = best_case[tf.newaxis, :] mask_of_selected[best_batch_index, best_index].assign(True) diff --git a/xplique/example_based/semifactuals.py b/xplique/example_based/semifactuals.py index 327eb1c4..1df6f0bb 100644 --- a/xplique/example_based/semifactuals.py +++ b/xplique/example_based/semifactuals.py @@ -4,12 +4,12 @@ import numpy as np import tensorflow as tf -from ..types import Callable, List, Optional, Union, Dict +from ..types import Callable, List, Optional, Union, Dict, DatasetOrTensor from .datasets_operations.tf_dataset_operations import dataset_gather from .base_example_method import BaseExampleMethod -from .search_methods import ORDER, KLEORSimMissSearch, KLEORGlobalSimSearch +from .search_methods import KLEORSimMissSearch, KLEORGlobalSimSearch from .projections import Projection from .search_methods.base import _sanitize_returns @@ -33,22 +33,21 @@ class KLEORBase(BaseExampleMethod): ---------- cases_dataset The dataset used to train the model, examples are extracted from this dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + All datasets (cases, labels, and targets) should be of the same type. + Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`, + `tf.Tensor`, `np.ndarray`, `torch.Tensor`. + For datasets with multiple columns, the first column is assumed to be the cases. + While the second column is assumed to be the labels, and the third the targets. + Warning: datasets tend to reshuffle at each iteration, ensure the datasets are + not reshuffle as we use index in the dataset. targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions - for the samples in cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Targets associated with the `cases_dataset` for dataset projection, + oftentimes the one-hot encoding of a model's predictions. See `projection` for detail. + They are also used to know the prediction of the model on the dataset. + It should have the same type as `cases_dataset`. labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. 
- Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Labels associated with the examples in the `cases_dataset`. + It should have the same type as `cases_dataset`. k The number of examples to retrieve per input. projection @@ -71,8 +70,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar String or list of string with the elements to return in `self.explain()`. See the base class returns property for more details. batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + Number of samples treated simultaneously for projection and search. + Ignored if `cases_dataset` is a batched `tf.data.Dataset` or + a batched `torch.utils.data.DataLoader` is provided. distance Distance for the FilterKNN search method. Distance function for examples search. It can be an integer, a string in @@ -88,13 +88,13 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar def __init__( self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - targets_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + cases_dataset: DatasetOrTensor, + targets_dataset: DatasetOrTensor, + labels_dataset: Optional[DatasetOrTensor] = None, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, + batch_size: Optional[int] = None, distance: Union[int, str, Callable] = "euclidean", ): @@ -108,10 +108,6 @@ def __init__( batch_size=batch_size, ) - # set distance function and order for the search method - self.distance = distance - self.order = ORDER.ASCENDING - # initiate search_method self.search_method = self.search_method_class( cases_dataset=self.projected_cases_dataset, @@ -119,7 +115,7 @@ def __init__( k=self.k, search_returns=self._search_returns, batch_size=self.batch_size, - distance=self.distance, + distance=distance, ) @property @@ -138,14 +134,14 @@ def returns(self, returns: Union[List[str], str]): self._returns = _sanitize_returns(returns, self._returns_possibilities, default) self._search_returns = ["indices", "distances"] - if isinstance(self._returns, list) and ("nuns" in self._returns): + if "nuns" in self._returns: self._search_returns.append("nuns_indices") - elif isinstance(self._returns, list) and ("nuns_indices" in self._returns): + elif "nuns_indices" in self._returns: self._search_returns.append("nuns_indices") - elif isinstance(self._returns, list) and ("nuns_labels" in self._returns): + elif "nuns_labels" in self._returns: self._search_returns.append("nuns_indices") - if isinstance(self._returns, list) and ("dist_to_nuns" in self._returns): + if "dist_to_nuns" in self._returns: self._search_returns.append("dist_to_nuns") try: diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index fcacd6ac..8d0fb756 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -2,10 +2,9 @@ Base model for example-based """ import tensorflow as tf -import numpy as np from ..attributions.base import BlackBoxExplainer -from ..types import Callable, List, Optional, Type, Union +from ..types import Callable, List, Optional, Type, Union, DatasetOrTensor from .search_methods import KNN, 
BaseSearchMethod, ORDER from .projections import Projection, AttributionProjection, HadamardProjection @@ -22,22 +21,22 @@ class SimilarExamples(BaseExampleMethod): ---------- cases_dataset The dataset used to train the model, examples are extracted from this dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + All datasets (cases, labels, and targets) should be of the same type. + Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`, + `tf.Tensor`, `np.ndarray`, `torch.Tensor`. + For datasets with multiple columns, the first column is assumed to be the cases. + While the second column is assumed to be the labels, and the third the targets. + Warning: datasets tend to reshuffle at each iteration, ensure the datasets are + not reshuffle as we use index in the dataset. labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Labels associated with the examples in the `cases_dataset`. + It should have the same type as `cases_dataset`. targets_dataset - Targets associated to the cases_dataset for dataset projection, + Targets associated with the `cases_dataset` for dataset projection, oftentimes the one-hot encoding of a model's predictions. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + It should have the same type as `cases_dataset`. + It is not be necessary for all projections. + Furthermore, projections which requires it compute it internally by default. k The number of examples to retrieve per input. projection @@ -60,8 +59,9 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar String or list of string with the elements to return in `self.explain()`. See the base class returns property for more details. batch_size - Number of sample treated simultaneously for projection and search. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). + Number of samples treated simultaneously for projection and search. + Ignored if `cases_dataset` is a batched `tf.data.Dataset` or + a batched `torch.utils.data.DataLoader` is provided. distance Distance for the knn search method. 
It can be an integer, a string in {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, @@ -69,13 +69,13 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar """ def __init__( self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, + cases_dataset: DatasetOrTensor, + labels_dataset: Optional[DatasetOrTensor] = None, + targets_dataset: Optional[DatasetOrTensor] = None, k: int = 1, projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", - batch_size: Optional[int] = 32, + batch_size: Optional[int] = None, distance: Union[int, str, Callable] = "euclidean", ): super().__init__( @@ -88,16 +88,13 @@ def __init__( batch_size=batch_size, ) - # set distance function - self.distance = distance - # initiate search_method self.search_method = self.search_method_class( cases_dataset=self.projected_cases_dataset, search_returns=self._search_returns, k=self.k, batch_size=self.batch_size, - distance=self.distance, + distance=distance, order=ORDER.ASCENDING, ) @@ -122,22 +119,22 @@ class Cole(SimilarExamples): ---------- cases_dataset The dataset used to train the model, examples are extracted from this dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + All datasets (cases, labels, and targets) should be of the same type. + Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`, + `tf.Tensor`, `np.ndarray`, `torch.Tensor`. + For datasets with multiple columns, the first column is assumed to be the cases. + While the second column is assumed to be the labels, and the third the targets. + Warning: datasets tend to reshuffle at each iteration, ensure the datasets are + not reshuffle as we use index in the dataset. labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + Labels associated with the examples in the `cases_dataset`. + It should have the same type as `cases_dataset`. targets_dataset - Targets associated to the cases_dataset for dataset projection, + Targets associated with the `cases_dataset` for dataset projection, oftentimes the one-hot encoding of a model's predictions. See `projection` for detail. - `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. - Batch size and cardinality of other datasets should match `cases_dataset`. - Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not - the case for your dataset, otherwise, examples will not make sense. + It should have the same type as `cases_dataset`. + It is not be necessary for all projections. + Furthermore, projections which requires it compute it internally by default. k The number of examples to retrieve per input. 
    distance
@@ -148,14 +145,14 @@ class Cole(SimilarExamples):
         String or list of string with the elements to return in `self.explain()`.
         See the base class returns property for details.
     batch_size
-        Number of sample treated simultaneously for projection and search.
-        Ignored if `tf.data.Dataset` are provided (those are supposed to be batched).
+        Number of samples treated simultaneously for projection and search.
+        Ignored if `cases_dataset` is a batched `tf.data.Dataset` or
+        a batched `torch.utils.data.DataLoader`.
     latent_layer
         Layer used to split the model, the first part will be used for projection and
         the second to compute the attributions. By default, the model is not split.
         For such split, the `model` should be a `tf.keras.Model`.
 
-        Layer to target for the outputs (e.g logits or after softmax).
         If an `int` is provided it will be interpreted as a layer index.
         If a `string` is provided it will look for the layer name.
@@ -165,20 +162,22 @@ class Cole(SimilarExamples):
     attribution_method
         Class of the attribution method to use for projection.
         It should inherit from `xplique.attributions.base.BlackBoxExplainer`.
-        By default, it computes the gradient to make the Hadamard product in the latent space.
+        It can also be `"gradient"` to compute the Hadamard product between the latent space and the gradient.
+        It was deemed the best method in the original paper, and we optimized it for speed.
+        By default, it is set to `"gradient"`.
     attribution_kwargs
         Parameters to be passed for the construction of the `attribution_method`.
     """
 
     def __init__(
         self,
-        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
-        model: tf.keras.Model,
-        targets_dataset: Union[tf.Tensor, np.ndarray],
-        labels_dataset: Optional[Union[tf.Tensor, np.ndarray]] = None,
+        cases_dataset: DatasetOrTensor,
+        model: Union[tf.keras.Model, 'torch.nn.Module'],
+        labels_dataset: Optional[DatasetOrTensor] = None,
+        targets_dataset: Optional[DatasetOrTensor] = None,
         k: int = 1,
         distance: Union[str, Callable] = "euclidean",
         case_returns: Optional[Union[List[str], str]] = "examples",
-        batch_size: Optional[int] = 32,
+        batch_size: Optional[int] = None,
         latent_layer: Optional[Union[str, int]] = None,
         attribution_method: Union[str, Type[BlackBoxExplainer]] = "gradient",
         **attribution_kwargs,
diff --git a/xplique/plots/image.py b/xplique/plots/image.py
index d822d528..aafccfc6 100644
--- a/xplique/plots/image.py
+++ b/xplique/plots/image.py
@@ -247,16 +247,14 @@ def plot_examples(
     ---------
     examples
         Represente the k nearest neighbours of the input. (n, k+1, h, w, c)
-    weights
-        Features weight of the examples.
     distances
         Distance between input data and examples.
     labels
         Labels of the examples.
     labels_test
         Corresponding to labels of the dataset test.
-    attribution_kwargs
-        Additionnal parameters passed to `xplique.plots.plot_attribution()`.
+    predicted_labels
+        Predicted labels of the examples.
img_size: Size of each subplots (in inch), considering we keep aspect ratio """ diff --git a/xplique/types/__init__.py b/xplique/types/__init__.py index ba01d0c2..1f04d319 100644 --- a/xplique/types/__init__.py +++ b/xplique/types/__init__.py @@ -3,4 +3,4 @@ """ from typing import Union, Tuple, List, Callable, Dict, Optional, Any, Type -from .custom_type import OperatorSignature +from .custom_type import OperatorSignature, DatasetOrTensor diff --git a/xplique/types/custom_type.py b/xplique/types/custom_type.py index 0562621a..4a27e8ce 100644 --- a/xplique/types/custom_type.py +++ b/xplique/types/custom_type.py @@ -1,7 +1,13 @@ """ Module for custom types or signature """ -from typing import Callable +from typing import Callable, TypeVar + +import numpy as np import tensorflow as tf OperatorSignature = Callable[[tf.keras.Model, tf.Tensor, tf.Tensor], float] + +DatasetOrTensor = TypeVar("DatasetOrTensor", + tf.Tensor, np.ndarray, "torch.Tensor", + tf.data.Dataset, "torch.utils.data.DataLoader") From b684ad376ca26f2ea2c473812747fe49b18d1221 Mon Sep 17 00:00:00 2001 From: POCHE Date: Thu, 3 Oct 2024 14:17:57 +0200 Subject: [PATCH 131/138] example based: small fix --- xplique/example_based/projections/base.py | 2 +- xplique/example_based/search_methods/proto_greedy_search.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 804c0c89..b9afee38 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -100,7 +100,7 @@ def __init__(self, # set space_projection if space_projection is None: self.space_projection = lambda inputs: inputs - elif isinstance(space_projection, tf.types.experimental.PolymorphicFunction): + elif isinstance(space_projection, tf.python.eager.def_function.Function): self.space_projection = space_projection elif hasattr(space_projection, "__call__"): self.mappable = False diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 69a87ea7..1159aabb 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -64,7 +64,7 @@ def __init__( if kernel_fn is None: # define kernel fn to default rbf kernel self.__set_default_kernel_fn(self.cases_dataset, gamma) - elif isinstance(kernel_fn, tf.types.experimental.PolymorphicFunction): + elif isinstance(kernel_fn, tf.python.eager.def_function.Function): # the kernel_fn was decorated with a tf.function self.kernel_fn = kernel_fn elif hasattr(kernel_fn, "__call__"): From a876d7ea996372beb677c63967784bc2bd71d694 Mon Sep 17 00:00:00 2001 From: POCHE Date: Thu, 3 Oct 2024 17:56:38 +0200 Subject: [PATCH 132/138] example based: small fix --- xplique/example_based/projections/base.py | 13 +++++++------ .../search_methods/proto_greedy_search.py | 3 --- 2 files changed, 7 insertions(+), 9 deletions(-) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index b9afee38..c9dcd2d1 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -55,15 +55,17 @@ def get_weights_example(projected_inputs: Union(tf.Tensor, np.ndarray), device Device to use for the projection, if None, use the default device. mappable - If True, the projection can be applied to a dataset through `Dataset.map`. 
+ If True, the projection can be applied to a `tf.data.Dataset` through `Dataset.map`. Otherwise, the dataset projection will be done through a loop. + This is not supported for wrapped PyTorch models. + If you encounter errors in the `project_dataset` method, you can set it to `False`. """ def __init__(self, get_weights: Optional[Union[Callable, tf.Tensor, np.ndarray]] = None, space_projection: Optional[Callable] = None, device: Optional[str] = None, - mappable: bool = True, + mappable: bool = False, requires_targets: bool = False): if get_weights is not None or space_projection is not None: warnings.warn( @@ -71,7 +73,6 @@ def __init__(self, + "should not be `None`. Otherwise the projection is an identity function." ) - self.mappable = mappable self.requires_targets = requires_targets # set get_weights @@ -82,6 +83,7 @@ def __init__(self, # weights is a tensor if isinstance(get_weights, np.ndarray): weights = tf.convert_to_tensor(get_weights, dtype=tf.float32) + mappable = False else: weights = get_weights @@ -100,16 +102,15 @@ def __init__(self, # set space_projection if space_projection is None: self.space_projection = lambda inputs: inputs - elif isinstance(space_projection, tf.python.eager.def_function.Function): - self.space_projection = space_projection elif hasattr(space_projection, "__call__"): - self.mappable = False self.space_projection = space_projection else: raise TypeError( f"`space_projection` should be a `Callable`, not a {type(space_projection)}" ) + self.mappable = mappable + # set device self.device = get_device(device) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 1159aabb..988f8656 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -64,9 +64,6 @@ def __init__( if kernel_fn is None: # define kernel fn to default rbf kernel self.__set_default_kernel_fn(self.cases_dataset, gamma) - elif isinstance(kernel_fn, tf.python.eager.def_function.Function): - # the kernel_fn was decorated with a tf.function - self.kernel_fn = kernel_fn elif hasattr(kernel_fn, "__call__"): # the kernel_fn is a callable the output is converted to a tensor for consistency self.kernel_fn = lambda x1, x2: tf.convert_to_tensor(kernel_fn(x1, x2)) From 3bbf406ccdac53067b8f9144a9fc967bc06a4ddc Mon Sep 17 00:00:00 2001 From: POCHE Date: Fri, 4 Oct 2024 15:52:19 +0200 Subject: [PATCH 133/138] example based: small fix --- xplique/example_based/datasets_operations/__init__.py | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 xplique/example_based/datasets_operations/__init__.py diff --git a/xplique/example_based/datasets_operations/__init__.py b/xplique/example_based/datasets_operations/__init__.py new file mode 100644 index 00000000..e69de29b From f316e9f2166628e083a0dbbd3d1ceca9839664f2 Mon Sep 17 00:00:00 2001 From: POCHE Date: Mon, 7 Oct 2024 16:24:41 +0200 Subject: [PATCH 134/138] example based docs: add prototypes tuto link --- README.md | 6 +++--- TUTORIALS.md | 4 ++-- docs/api/example_based/api_example_based.md | 10 +++++----- docs/api/example_based/prototypes/api_prototypes.md | 6 +++--- docs/api/example_based/prototypes/mmd_critic.md | 6 ++---- docs/api/example_based/prototypes/proto_dash.md | 10 ++++------ docs/api/example_based/prototypes/proto_greedy.md | 10 ++++------ docs/index.md | 6 +++--- docs/tutorials.md | 4 ++-- 9 files changed, 28 insertions(+), 34 deletions(-) diff --git a/README.md
b/README.md index b4d4b719..39582f56 100644 --- a/README.md +++ b/README.md @@ -390,9 +390,9 @@ Even though we are only at the early stages, we have also recently added an [Exa | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | |||| -| `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | **TODO** | -| `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | **TODO** | -| `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | **TODO** | +| `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | +| `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | +| `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | diff --git a/TUTORIALS.md b/TUTORIALS.md index e3681cd6..e72aaf95 100644 --- a/TUTORIALS.md +++ b/TUTORIALS.md @@ -21,7 +21,7 @@ Here is the lists of the available tutorial for now: | Concept Activation Vectors | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1iuEz46ZjgG97vTBH8p-vod3y14UETvVE) | | Feature Visualization | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1st43K9AH-UL4eZM1S4QdyrOi7Epa5K8v) | | Example-Based Methods | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | -| Prototypes | **TODO** | +| Prototypes | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | ## Attributions @@ -82,4 +82,4 @@ Here is the lists of the available tutorial for now: | **Tutorial Name** | Notebook | | :------------------------------------- | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | | Example-Based Methods: Getting started | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | -| Prototypes: Getting started | **TODO** | \ No newline at end of file +| Example-based: Prototypes | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | \ No newline at end of file diff --git a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md index 3affa7cd..6fbb3b8a 100644 --- a/docs/api/example_based/api_example_based.md +++ b/docs/api/example_based/api_example_based.md @@ -1,7 +1,7 @@ # API: 
Example-based - [**Example-based Methods**: Getting started](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) -- [**TODO: Add the Getting Started on Prototypes**]() +- [**Example-based: Prototypes**](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) ## Context ## @@ -61,9 +61,9 @@ We can broadly categorize example-based methods into four families: similar exam | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | |||| - | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | **TODO** | - | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | **TODO** | - | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | **TODO** | + | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | + | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | + | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | ### Parameters ### @@ -94,7 +94,7 @@ We can broadly categorize example-based methods into four families: similar exam Returns the relevant examples to explain the (inputs, targets). Projects inputs using `self.projection` and finds examples using the `self.search_method`. - **inputs** (`Union[tf.Tensor, np.ndarray]`): Input samples to be explained. Shape: (n, ...) where n is the number of samples. -- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the `cases_dataset` for dataset projection. Shape: (n, nb_classes) where n is the number of samples and nb_classes is the number of classes. +- **targets** (`Optional[Union[tf.Tensor, np.ndarray]]`): Targets associated with the `inputs` for projection. Shape: (n, nb_classes) where n is the number of samples and nb_classes is the number of classes. Not used by all projections. Used by contrastive methods to know the predicted classes of the provided samples. **Returns:** Dictionary with elements listed in `self.returns`.
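To make the `explain` contract above concrete, here is a minimal usage sketch. Only `Cole`, its keyword arguments, and the returns dictionary come from this patch series; the toy model, shapes, and the assumption that `Cole` is exported from `xplique.example_based` (and that `"distances"` is a valid `case_returns` entry) are illustrative, not guaranteed:

```python
import numpy as np
import tensorflow as tf

from xplique.example_based import Cole  # assumed export path

# toy classifier and case dataset (placeholders, not from the patch)
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(8, 8, 1)),
    tf.keras.layers.Dense(10, activation="softmax"),
])
x_train = np.random.rand(64, 8, 8, 1).astype(np.float32)
y_train = tf.one_hot(np.random.randint(10, size=64), depth=10)

explainer = Cole(
    cases_dataset=x_train,
    model=model,
    targets_dataset=y_train,
    k=3,
    case_returns=["examples", "distances"],
    attribution_method="gradient",  # default per the docstring above
)

x_test = np.random.rand(4, 8, 8, 1).astype(np.float32)
y_test = tf.one_hot(np.random.randint(10, size=4), depth=10)

outputs = explainer.explain(x_test, targets=y_test)
examples = outputs["examples"]    # the k nearest cases per input
distances = outputs["distances"]  # distances to those cases
```

The keys of `outputs` follow whatever was passed to `case_returns`, per the base class documentation referenced above.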
diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md index 61751ffa..b7fc8e04 100644 --- a/docs/api/example_based/prototypes/api_prototypes.md +++ b/docs/api/example_based/prototypes/api_prototypes.md @@ -34,9 +34,9 @@ local_prototypes_dict = explainer(inputs) | Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* | |:-------------------------------------- | :----------------------: | :---------------: | :---------------------: | - | [ProtoGreedy](../proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | - | [ProtoDash](../proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | - | [MMDCritic](../mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | + | [ProtoGreedy](../proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | + | [ProtoDash](../proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | + | [MMDCritic](../mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ | !!!info Prototypes, share a common API with other example-based methods. Thus, to understand some parameters, we recommend reading the [dedicated documentation](../../api_example_based/). 
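As a companion to the table above, a sketch of the shared prototype API. The `local_prototypes_dict = explainer(inputs)` call is quoted from these docs; the constructor arguments (`nb_prototypes`, `gamma`, `batch_size`) are assumptions borrowed from the test variables appearing later in this series, so check the class signature before relying on them:

```python
import numpy as np

from xplique.example_based import MMDCritic  # assumed export path

# toy case dataset (placeholder)
x_train = np.random.rand(200, 3).astype(np.float32)

explainer = MMDCritic(
    cases_dataset=x_train,
    k=2,              # prototypes returned per query sample
    nb_prototypes=5,  # size of the global prototype set (assumed kwarg)
    gamma=0.026,      # RBF kernel bandwidth, value borrowed from the tests
    batch_size=8,
)

# local explanation: the prototypes closest to each input
inputs = np.random.rand(4, 3).astype(np.float32)
local_prototypes_dict = explainer(inputs)
```

`ProtoGreedy` and `ProtoDash` should be interchangeable here, since the three classes share this API.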
diff --git a/docs/api/example_based/prototypes/mmd_critic.md b/docs/api/example_based/prototypes/mmd_critic.md index d80786cb..e4f9d33a 100644 --- a/docs/api/example_based/prototypes/mmd_critic.md +++ b/docs/api/example_based/prototypes/mmd_critic.md @@ -2,7 +2,7 @@ -[View colab tutorial](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) | +[View colab tutorial](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | [View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) | @@ -68,9 +68,7 @@ local_prototypes = mmd.explain(test_samples) ## Notebooks -- [**Prototypes**: Getting started](https://colab.research.google.com/drive -/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2) -- [**MMDCritic**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) +- [**Example-based: Prototypes**](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) {{xplique.example_based.prototypes.MMDCritic}} diff --git a/docs/api/example_based/prototypes/proto_dash.md b/docs/api/example_based/prototypes/proto_dash.md index 334c941a..83d78573 100644 --- a/docs/api/example_based/prototypes/proto_dash.md +++ b/docs/api/example_based/prototypes/proto_dash.md @@ -2,7 +2,7 @@ -[View colab tutorial](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) | +[View colab tutorial](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | [View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) | @@ -12,13 +12,13 @@ !!! quote Our work notably generalizes the recent work - by [Kim et al. (2016)](../mmd_critic/)) where in addition to selecting prototypes, we + by [Kim et al. (2016)](../mmd_critic/) where in addition to selecting prototypes, we also associate non-negative weights which are indicative of their importance. This extension provides a single coherent framework under which both prototypes and criticisms (i.e. outliers) can be found. Furthermore, our framework works for any symmetric positive definite kernel thus addressing one of the key open - questions laid out in [Kim et al. (2016)](../mmd_critic/)). + questions laid out in [Kim et al. (2016)](../mmd_critic/). 
-- [Efficient Data Representation by Selecting Prototypes with Importance Weights (2019).](https://arxiv.org/abs/1707.01212) @@ -71,9 +71,7 @@ local_prototypes = protodash.explain(test_samples) ## Notebooks -- [**Prototypes**: Getting started](https://colab.research.google.com/drive -/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2) -- [**ProtoDash**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) +- [**Example-based: Prototypes**](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) {{xplique.example_based.prototypes.ProtoDash}} diff --git a/docs/api/example_based/prototypes/proto_greedy.md b/docs/api/example_based/prototypes/proto_greedy.md index e46d9eba..0cb861d4 100644 --- a/docs/api/example_based/prototypes/proto_greedy.md +++ b/docs/api/example_based/prototypes/proto_greedy.md @@ -2,7 +2,7 @@ -[View colab tutorial](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) | +[View colab tutorial](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | [View source](https://github.com/deel-ai/xplique/blob/antonin/example-based-merge/xplique/example_based/search_methods/proto_greedy_search.py) | @@ -12,13 +12,13 @@ !!! quote Our work notably generalizes the recent work - by [Kim et al. (2016)](../mmd_critic/)) where in addition to selecting prototypes, we + by [Kim et al. (2016)](../mmd_critic/) where in addition to selecting prototypes, we also associate non-negative weights which are indicative of their importance. This extension provides a single coherent framework under which both prototypes and criticisms (i.e. outliers) can be found. Furthermore, our framework works for any symmetric positive definite kernel thus addressing one of the key open - questions laid out in Kim et al. (2016). + questions laid out in [Kim et al. (2016)](../mmd_critic/). 
-- [Efficient Data Representation by Selecting Prototypes with Importance Weights (2019).](https://arxiv.org/abs/1707.01212) @@ -72,9 +72,7 @@ local_prototypes = protogreedy.explain(test_samples) ## Notebooks -- [**Prototypes**: Getting started](https://colab.research.google.com/drive -/1XproaVxXjO9nrBSyyy7BuKJ1vy21iHs2) -- [**ProtoGreedy**: Going Further](https://colab.research.google.com/drive/1nsB7xdQbU0zeYQ1-aB_D-M67-RAnvt4X) +- [**Example-based: Prototypes**](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) {{xplique.example_based.prototypes.ProtoGreedy}} diff --git a/docs/index.md b/docs/index.md index eb6062ca..0e78c08a 100644 --- a/docs/index.md +++ b/docs/index.md @@ -360,9 +360,9 @@ Even though we are only at the early stages, we have also recently added an [Exa | `KLEORSimMiss` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | | `KLEORGlobalSim` | Semi Factuals | [KLEOR](../semifactuals/kleor/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | |||| - | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | **TODO** | - | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | **TODO** | - | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | **TODO** | + | `ProtoGreedy` | Prototypes | [ProtoGreedy](../prototypes/proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | + | `ProtoDash` | Prototypes | [ProtoDash](../prototypes/proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | + | `MMDCritic` | Prototypes | [MMDCritic](../prototypes/mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | ## 👍 Contributing diff --git a/docs/tutorials.md b/docs/tutorials.md index 0e3e9429..7dda10c1 100644 --- a/docs/tutorials.md +++ b/docs/tutorials.md @@ -21,7 +21,7 @@ Here is the lists of the availables tutorial for now: | Concept Activation Vectors | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1iuEz46ZjgG97vTBH8p-vod3y14UETvVE) | | Feature Visualization | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1st43K9AH-UL4eZM1S4QdyrOi7Epa5K8v) | | Example-Based Methods | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | -| Prototypes | **TODO** | +| Prototypes | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | ## Attributions @@ -87,4 +87,4 @@ Here is the lists of the availables tutorial for now: | **Tutorial Name** | Notebook | | :------------------------------------- | :-----------------------------------------------------------------------------------------------------------------------------------------------------: | | Example-Based Methods: Getting started | [![Open In 
Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1gA7mhWhWzdKholZWkTvAg4FzFnzS8NHF) | -| Prototypes: Getting started | **TODO** | \ No newline at end of file +| Example-based: Prototypes | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1OI3oa884GwGbXlzn3Y9NH-1j4cSaQb0w) | \ No newline at end of file From 9c7e3eef5897d07f32f7048576c82329b1bcba88 Mon Sep 17 00:00:00 2001 From: POCHE Date: Mon, 7 Oct 2024 16:25:07 +0200 Subject: [PATCH 135/138] example based: fix pylint issues --- .../convert_torch_to_tf.py | 36 +++++++++++-------- .../datasets_operations/harmonize.py | 10 +++--- .../tf_dataset_operations.py | 5 ++- 3 files changed, 29 insertions(+), 22 deletions(-) diff --git a/xplique/example_based/datasets_operations/convert_torch_to_tf.py b/xplique/example_based/datasets_operations/convert_torch_to_tf.py index 2272edbd..9f07fbb6 100644 --- a/xplique/example_based/datasets_operations/convert_torch_to_tf.py +++ b/xplique/example_based/datasets_operations/convert_torch_to_tf.py @@ -1,12 +1,10 @@ """ Set of functions to convert `torch.utils.data.DataLoader` and `torch.Tensor` to `tf.data.Dataset` """ -from typing import Optional, Tuple, Union +from typing import Optional, Tuple -import numpy as np import tensorflow as tf import torch -from torch.utils.data import DataLoader def convert_column_dataloader_to_tf_dataset( @@ -41,12 +39,11 @@ def generator(): else: def generator(): for elements in dataloader: - tf_elements = tf.cast(elements[column_index].numpy(), tf.float32) yield tf.cast(elements[column_index].numpy(), tf.float32) # create tf dataset from generator dataset = tf.data.Dataset.from_generator( - lambda: generator(), + generator, output_signature=tf.TensorSpec(shape=elements_shape, dtype=tf.float32), ) @@ -86,21 +83,22 @@ def split_and_convert_column_dataloader( targets_dataset Targets associated with the `cases_dataset`. """ + # pylint: disable=too-many-branches first_cases = next(iter(cases_dataset)) - if not (isinstance(first_cases, tuple) or isinstance(first_cases, list)): + if not isinstance(first_cases, (tuple, list)): # the cases dataset only has one column # manage cases dataset cases_shape = (None,) + first_cases.shape[1:] new_cases_dataset = convert_column_dataloader_to_tf_dataset(cases_dataset, cases_shape) - + else: # manage cases dataset cases_shape = (None,) + first_cases[0].shape[1:] new_cases_dataset = convert_column_dataloader_to_tf_dataset( cases_dataset, cases_shape, column_index=0) - + if len(first_cases) >= 2: # the cases dataset has two columns assert labels_dataset is None, ( @@ -136,16 +134,20 @@ def split_and_convert_column_dataloader( pass elif isinstance(labels_dataset, torch.utils.data.DataLoader): first_labels = next(iter(labels_dataset)) - if isinstance(first_labels, tuple) or isinstance(first_labels, list): + if isinstance(first_labels, (tuple, list)): assert len(first_labels) == 1, ( "The `labels_dataset` should only have one column. " + f"{len(first_labels)} were detected." 
) labels_shape = (None,) + first_labels[0].shape[1:] - labels_dataset = convert_column_dataloader_to_tf_dataset(labels_dataset, labels_shape, column_index=0) + labels_dataset = convert_column_dataloader_to_tf_dataset( + labels_dataset, labels_shape, column_index=0 + ) else: labels_shape = (None,) + first_labels.shape[1:] - labels_dataset = convert_column_dataloader_to_tf_dataset(labels_dataset, labels_shape) + labels_dataset = convert_column_dataloader_to_tf_dataset( + labels_dataset, labels_shape + ) else: raise AttributeError( "The `labels_dataset` should be a PyTorch DataLoader or a TensorFlow Dataset. " @@ -153,23 +155,27 @@ def split_and_convert_column_dataloader( ) else: labels_dataset = None - + # manage targets datasets if targets_dataset is not None: if isinstance(targets_dataset, tf.data.Dataset): pass elif isinstance(targets_dataset, torch.utils.data.DataLoader): first_targets = next(iter(targets_dataset)) - if isinstance(first_targets, tuple) or isinstance(first_targets, list): + if isinstance(first_targets, (tuple, list)): assert len(first_targets) == 1, ( "The `targets_dataset` should only have one column. " + f"{len(first_targets)} were detected." ) targets_shape = (None,) + first_targets[0].shape[1:] - targets_dataset = convert_column_dataloader_to_tf_dataset(targets_dataset, targets_shape, column_index=0) + targets_dataset = convert_column_dataloader_to_tf_dataset( + targets_dataset, targets_shape, column_index=0 + ) else: targets_shape = (None,) + first_targets.shape[1:] - targets_dataset = convert_column_dataloader_to_tf_dataset(targets_dataset, targets_shape) + targets_dataset = convert_column_dataloader_to_tf_dataset( + targets_dataset, targets_shape + ) else: raise AttributeError( "The `labels_dataset` should be a PyTorch DataLoader or a TensorFlow Dataset. " diff --git a/xplique/example_based/datasets_operations/harmonize.py b/xplique/example_based/datasets_operations/harmonize.py index 4a9760c7..01581b17 100644 --- a/xplique/example_based/datasets_operations/harmonize.py +++ b/xplique/example_based/datasets_operations/harmonize.py @@ -69,7 +69,7 @@ def split_tf_dataset(cases_dataset: tf.data.Dataset, "`cases_dataset` cannot have more than 3 columns, " + f"{len(cases_dataset.element_spec)} were detected." ) - + return cases_dataset, labels_dataset, targets_dataset @@ -122,6 +122,8 @@ def harmonize_datasets( batch_size : int Number of samples treated simultaneously when using the datasets. """ + # pylint: disable=too-many-statements + # pylint: disable=too-many-branches # Ensure the datasets are of the same type if labels_dataset is not None: if isinstance(cases_dataset, tf.data.Dataset): @@ -171,7 +173,7 @@ def harmonize_datasets( # split dataset if `cases_dataset` has multiple columns cases_dataset, labels_dataset, targets_dataset =\ split_tf_dataset(cases_dataset, labels_dataset, targets_dataset) - elif isinstance(cases_dataset, np.ndarray) or isinstance(cases_dataset, tf.Tensor): + elif isinstance(cases_dataset, (np.ndarray, tf.Tensor)): # compute batch size and cardinality if batch_size is None: # no batching, one batch encompass all the dataset @@ -187,8 +189,8 @@ def harmonize_datasets( + f"But got {type(cases_dataset)} instead." 
# try to import torch and torch.utils.data.DataLoader to treat possible input types try: + # pylint: disable=import-outside-toplevel import torch - from torch.utils.data import DataLoader from .convert_torch_to_tf import split_and_convert_column_dataloader except ImportError as exc: raise AttributeError(error_message) from exc @@ -222,7 +224,7 @@ def harmonize_datasets( cases_dataset = sanitize_dataset(cases_dataset, batch_size, cardinality) labels_dataset = sanitize_dataset(labels_dataset, batch_size, cardinality) targets_dataset = sanitize_dataset(targets_dataset, batch_size, cardinality) - + # Prefetch datasets cases_dataset = cases_dataset.prefetch(tf.data.AUTOTUNE) if labels_dataset is not None: diff --git a/xplique/example_based/datasets_operations/tf_dataset_operations.py b/xplique/example_based/datasets_operations/tf_dataset_operations.py index 81743ad7..b975dca7 100644 --- a/xplique/example_based/datasets_operations/tf_dataset_operations.py +++ b/xplique/example_based/datasets_operations/tf_dataset_operations.py @@ -103,8 +103,7 @@ def is_shuffled(dataset: Optional[tf.data.Dataset]) -> bool: if are_dataset_first_elems_equal(dataset, dataset): # test a second time to minimize the risk of false positive return not are_dataset_first_elems_equal(dataset, dataset) - else: - return True + return True def batch_size_matches(dataset: Optional[tf.data.Dataset], batch_size: int) -> bool: @@ -129,7 +128,7 @@ def batch_size_matches(dataset: Optional[tf.data.Dataset], batch_size: int) -> b if dataset is None: # ignored return True - + if not is_batched(dataset): return False From eaa9d49b25c38e1a8d859a56cc641b082e4fc318 Mon Sep 17 00:00:00 2001 From: POCHE Date: Mon, 7 Oct 2024 16:25:50 +0200 Subject: [PATCH 136/138] setup: ignore pylint too-many-positional-arguments for backward compatibility --- setup.cfg | 1 + 1 file changed, 1 insertion(+) diff --git a/setup.cfg b/setup.cfg index 4c6b64dd..db76b0de 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,6 +16,7 @@ disable = E1120, # see pylint#3613 E1101, # pylint misses members set dynamically C3001, # lambda function as variable + R0917, # too-many-positional-arguments - TODO: fix this when breaking backward compatibility [pylint.FORMAT] max-line-length = 100 From aac96c253ad394ddebce29c41947ab8e3c3ef678 Mon Sep 17 00:00:00 2001 From: POCHE Date: Mon, 7 Oct 2024 16:26:23 +0200 Subject: [PATCH 137/138] example based: small fixes --- tests/example_based/test_prototypes.py | 7 +++---- xplique/example_based/projections/base.py | 2 +- xplique/example_based/search_methods/knn.py | 3 ++- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index 08ffed65..fc044dbd 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -21,9 +21,9 @@ def test_prototypes_global_explanations_basic(): # Setup k = 2 nb_prototypes = 5 - nb_classes = 2 + nb_classes = 3 gamma = 0.026 - batch_size = 8 # TODO: test avec batch_size plus petite que nb_prototypes + batch_size = 8 x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20, n_dims=3) x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=6, n_dims=3) @@ -114,7 +114,6 @@ def test_prototypes_global_sanity_check(): for n requested prototypes, there should be 1 prototype per gaussian.
""" - # TODO: the two first prototypes seem to always come from the same class, I should investigate # Setup k = 2 nb_prototypes = 3 @@ -148,7 +147,7 @@ def test_prototypes_with_projection(): nb_prototypes = 10 nb_classes = 2 gamma = 0.026 - batch_size = 8 # TODO: test avec batch_size plus petite que nb_prototypes + batch_size = 8 x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20, n_dims=3) x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=6, n_dims=3) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index c9dcd2d1..1d3a1345 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -67,7 +67,7 @@ def __init__(self, device: Optional[str] = None, mappable: bool = False, requires_targets: bool = False): - if get_weights is not None or space_projection is not None: + if get_weights is None and space_projection is None: warnings.warn( "At least one of `get_weights` and `space_projection`" + "should not be `None`. Otherwise the projection is an identity function." diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index e842ebd1..8f688217 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -399,8 +399,9 @@ def __init__( if hasattr(distance, "__call__"): self.distance_fn = distance else: + base_distance_fn = get_distance_function(distance) self.distance_fn = lambda x1, x2, m:\ - tf.where(m, get_distance_function(distance)(x1, x2), self.fill_value) + tf.where(m, base_distance_fn(x1, x2), self.fill_value) if filter_fn is None: filter_fn = lambda x, z, y, t: tf.ones((tf.shape(x)[0], tf.shape(z)[0]), dtype=tf.bool) From 3f4b4064ba11dc4cf82ffe86ac509754a8c732ad Mon Sep 17 00:00:00 2001 From: POCHE Date: Mon, 7 Oct 2024 16:52:52 +0200 Subject: [PATCH 138/138] example based tests: small fix --- tests/example_based/test_prototypes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py index fc044dbd..b163c97b 100644 --- a/tests/example_based/test_prototypes.py +++ b/tests/example_based/test_prototypes.py @@ -96,7 +96,7 @@ def test_prototypes_global_explanations_basic(): # for each sample for i in range(x_test.shape[0]): # check first closest prototype label is the same as the sample label - assert tf.reduce_all(tf.equal(labels[i], y_test[i])) + assert tf.reduce_all(tf.equal(labels[i, 0], y_test[i])) for j in range(k): # check prototypes are in the dataset and correspond to the index
zloW{E=f=jHrb<|9nP36et!a(I4K4q%W&zhJ5_B|ts- zgCqx;yf$49z2T~tjsGffdsB2rk5jRp7tR#CTYvB(Ioz4(mamRR&wsO14DvXM0c%@< z92Oym4D=2Vu1J0pg?YY26@RO0Cf|qsX^CPj(<1KJm~&7r6k_PmGwE;@t<=NflWp74 zU_$}-6P-d5llf>@<8|1MuU*}W29rgOcgpOVXaqZv?)Re?8lrAU;k?OXWiuk0qD{5b zIkgw!L;U~+%gBoN7WdyorIcWE~>9}_iKJwVRXLI9O< z&8V8B2Egpg4Lx-Lt|dEvhz%@F$E~5I8s}rYF`c zJmc7Jl-Xgm*l!nE`shO%1oaCNKSYJ9=PfhoHiJF^-C{|PS;Av!xfp0Xj`S&M7E3^E z4D>ZVwYVg9iR&H+Mquk=QbOVW;JOVVO}-zM!q`tSe;%VUUd_9B169Kv-k!a9u2?bS$cg|gCP8?;A z48?I!xM%_<0Fw+J^XI)JaYz-BmPH&IJ z-W!W0pPu|0s>|Mo%CgCqe|D=uy!H_)208zNMiBEzU*qpI4E{5yO?^?ZAf_xypXK{} z5Zo{g`3B%OhN#m59Cp;X;r_xDY^K93Ooy37?BOfUUUL{X3y2YeEN$lqF9BPb+6yU| z6r3-Nvn&w$7Jw7QyCONw;S^>9wM5{zLkBR$jc0mpg0%xFf-q6N0!FM~5JePv=>*Mx zd*{b)*}wVdk@cr$<-~AxdcYG;8?26!2)euj)#T`wEfX_K@(o{j&pjW#W!Ki5UY|2f z9h?~o+vz+HHg-p1)x^H(g{yXv9vHxnUt&%Igz=GSq?A=*DmqNbipwU`TmX6~u+4B_ z*4*473AKbI0*etE&fx*}BA? zJ0|;30o{vLiHfcu%@v)ONZ>H!1u$lWs&#vYAii+kc=S)_`7fz;g^p33d4ooerzOcwa!)rM-CRmJT5HGOu)X&s#V)Lz8&+fi< z--4K{#`>ubt$lFI!5jZ%ZpewQk)^H2Z~VX;8c$U2y%xnVn05RZVvY$jtR@Y!L|&A@ zOM)<8;1q+^M7o}g-HN~B?@b#9em?l~#*}#5leFRr;s@B?98==&f=+%OI$7hf${~Ip zfqYZg2joElDMFGZ{5UK$&IP2It%eLG^sG8Xi7sW!|7Sya88nx+Fhg-%{vhJ|L5R7g3; zjGI*Df+bS-aa%hdm}gbT5WYb7_w!giRBja(bcTSoQSAZPaKULbnXTSvx_?BPB#;TP z{K&1UUJw-4kP|ab*5-(69P1^6WKPu*Y7JdmDYR{vRh$|o>6Ie*MUsMdNTSKZAu(Ho z`HXRN=#Bd#2~9Ye%!my=b4<$95u9TpKyBASDVib}kb z(`5n=zyjh{A^k05l={I?DNm4#AA%q>RNcdvU@**3f2^4@wH@HjAVkwip{SsD!6Bji zBaq0BPEHBh{~&>$ovclG(O(-}lrr9z;&!c5WJsw2XpG?z03tLYXh=F12OENHJ8-&- ziXe2$LVK?aG+3r;spLL+;F~mk|9C|rtIhO4ex7G{etiCcW5)*harPj+=fivUo-jRr zaNA?%jbFwruGsLyeLv6a1F6?+IPvJUJmXt&`Scav{Ige+jP*H&j9)-++!EuP--|Qv ztK!#++4*+tD*=xF8ggg1z$--m0n#<9BZ{gZ>dgWJ%g4=F!L3a+T;yTb@?1$#kF${< zr&hvwj`HPkLLoMb-9}Xw#BB$ngu09I>MlS#(H8=_lzJCK?79$!VqYC=3DpC7kVmHx z(*TL!Lr8nu@v?{QE@Mo*GZu@28Q~$jH#XLI0gMP}OfV>Kv{?-RsGfgMIN-E~<H*#u7FQES%b*Il zI@WWwe3@JvK1{8Am|6klf#aY7E(HN=327nHeoVi`fCCY`gK!LK!U$CT29!lnL<^TN z%)EH2@xxD>jh_{xCfRLPLXH0!GM-sBhs2d3#pIUdO&25!(2b4Hy*e1WC*)VGR7yj~GDbGR9AATv#n9(Q2(Lc&_y00;CD@rMY{-xL!?0-WOGkk2B{ z7De`gpY%4I2VPzyi^#Pr@@v3t_d5KEYZNmWInLp49c1n~=IgOt)MoX^?JY6I-(TDe zwd1q?)^Ukbv?qVkisR2AuEUD9Q+5EWy>Xa-(f!>jy;l50`~VDqNY`C>BW@ZPUg<$` zR(wjLez$9}Y6;CmouUwKLBgM_8Ii@qL=h8kvq3GXeCk1~Qezga0j+9)dw|)ZGhH_| zS9`IDp^nb6;!_uQgVp5s4e1@?onn__wtj-=1Lhm#_ytI;Ut@vOL$Wxg(JY<1uL?@P z!aPO~J%H3|f)r{PY*knh;9Mg+>dn=!JrN2$_R}loLm@HtUHPi)`BOsi`$!(z_y17I z*Nm~Lig;Bbyr=rZs*L*&hdDBGCAdT(>wCw#H%;VCsQWxU^><)Oi$l_ z@ZsxT``Q9u8cN4844nTajAIYR;f0k_0&byRq>%*Kc>?<|#cVCyMvB9}rIJ1+6 zvtw2tmY7-?OH65_&ih6i>pO*T?Q5@l_~7j{wtrk5?&nq?VijXc%IoD}#D>b6hp1^G z4n&Z*A<&wcZC-7{y4a9N7exR^f%r9@9p_)PTLVZa<6#5#rBen0 z73q$R_B1yorul-_M=lilJTip)3~9}uTVt;7rBUWdzKw>Lfz z2nK9edm~as&dDJ}xQh561*bg~Zunx;Jd`MR0|K0`W<&3E%#MPxGeG;unbPcRm{Dve z4N!($+oJijpZk}=fC^LR1wT-`7O`M;nKEma97}drcoQ0SAfDcM_?#c{uTG~2l7VfIM0{_1g7U(+B$w8u73^p*pJhU@3Dp? z3$;G%xWm=kqF^r?n^B~wvpx;JG}TpJa?5%`@1i@`!63pJ2mluoL7zKX1^PY{RJF6usv>RoPkl?GlTobxbRC3iLZhWJ1K!^RzgH@Pb(gK2)Oa5&eC4qjBAQ`nmfR2{i3PIsfT7o@?e!>uzWBg z><{=1waue0Tkwdu;-3?nMqa+!K7*M;MhaZ2olnC8<)0~U~VOMz&qXm8Ldq$K^kz8>#>DJipPw5mt|vx;gvz^m>{WZ(fmS31$n!^brY8xLLE*!ld@`UlT{D$&YO5=9ztw5dE6$H9=cKna-!e+#Bxd&EUW8 zg41I*CA%P5Lptmm?DIx4h3cw!?@C1#TX@0dy`lL){%HXJ5)mvqO}gQ5IMN~C-#kzA zkxMur{6xgt@q}!eo3Lm8334AsM0kz#7-xYvnMslDl@adXgWe*9ADO7E%fC9*@U>xgK3K073%~TW>O5UN)g{3tl}ATvT*Xdsey?) 
zLx!=868M_aTZ5)o(=-#oE?gM#q7zcB3>JAuR!c)cn^OgNLV)|%Y-yg$tz9~+#W<=A zhUhjJyxb(#@129@&!M4=csgLF?YsV(Yu0yc;l;y;7sp2YNw3rRmovL}9h=;;I1b8) zPY3;h?I&!f-hR?{XWO=ZgX#3<#gmp3#YKn5>ui|Cg!!u3xdWoo_`2=N>8X9FaR7f; zYtK{EXam?q4`Jv3H`^j${Lf*#Z!FigmR3$_!aZ859j>fgt3A2CT)PU$9j%-S+2&DY z#bS`6NHJUiUHU2_A6=Q&Bo3}h%5@D-LgG8H`VSn(rrDuzKalGW<03k2s?K>_3@Ai{ zw4!LTzY!mrCJKIt7Zm07HG%8(1A8%eVrf^5tU&<>@|xQCr4F$%i1%b>FWHh^`n>Q ztPQiX8^%T~rLOqX$EOFr)?cpl9Zp4ilA$zI-42JmF>C9VtO;_L-{Od zQpC2?Cc`P;dxW|1v z7LhbeRP-KOh)q#?V7Rh^Eltg`dco)hD8dpxfHE$1W`X#fsE|uCZKAz;Z>e_cxs^M$ zm2;<}(js`x9r|9igz@#*piW$rD7Z?;4eMZ#=n+9jFTz;V*iYU1CcJZxqTY?=v{PG7 z_u=(BZ^d#tK3TibtDd|b3&Mi{<+b?j^e&rh6Iau+Nc-xB%zj!eaqn_h{}i24wLQLC zVX}6&w?4PDKrij|shgmtCDnE{7SDeA3b$81d>p@#McaK7eh0mu-Ldo5J6`vuH`5nD zOK*D(uA02`MH4=C3cYV|q{H-upP0azhc$!7LzYk#CA~=V@*1M@NlVDzd=pj>DpC>n zsTx?%w{*R}#F{GS6*PX^q9wfpwr{AN(VU1V*2qFA72Rp`E+%`O?<5^w#bdfW<*`^j zlU1AYw%%|gpAB~P#`|2_svTRmc3e9T2KqbCgUQW{Q)6R8lM`YimaXhOo=ezacpN|f z?QGBMUk_#fLop>9|KAU@J$K6VR315@zat?UPs#Ey8QFYxu)%U`;QHII3*28^=r}{} z_8wZ8J>7#1H<*iA9mov_SO zWu;qNlUOP!X=QA|Zj^|gqqZ)zRzV*PI}oi&I5<91uKTz-mCu4>0;$!lSX9z;UY%7Jv(K%(ZHs)0eSTT}1k3(eH;^M3Ce5qGo{lQoY`$Nx#=+Rb$eYV zr&T|1%Cf<1vu@pHk&RY^afD76@gI$mwnIgH2ZxBormP+;Xe7xqgaZg&CA$O)a*Vx$ z^m%|KWLZCF_$=qS2*Oj^Cw)(K(BI_x2}Li4kur85B3 z+k;cRXAfzL9;SJ@vQ*o8ZY8Qs6++jG5PWC{_J0-oU!oS!i%bN)IE(43a?+LfN-?o@ zg&8*#%%gf0KBZKV2Qw!3(9SNPeu-;YHgCX$%&x(ukhiwPhrBV4S0;g`J!6R$`iHmD zRE8<~PjdzY&?S;@9`7&;p!YAHC}3rNhkYQ#!th4>bla5l;p~4QK&IIGYqS|#>)JPY z8A~DF=f?DX-MXff>6nkk*P6D#^RRnYC(de0Pj0tf-iC-rG<%HaFa&haJ_)rX9dj6N z%^{*o_R7kDw#_2#)0jsSMQs|hJdIg+l%l3hqux6a1okwI;np;w<51f)(riiU>db_B zW`=Rfc6h1nP6wN%}jF-aTyNYSVHyl?=dlB zOU7X#gM3N|qixQ-aYPuk$>IGpzuz)%wVDlvvhnIQmzkbKpNpo)w!NAVW~0ND9bK}E z_PNpamGzqW6bc0_BD(I-D;RiDkf4#@J1xm_Oniys(qFu=oqk_fgWm>EDzdjPW7FJPM@xq5H3q{tsTi2Kl=(=kMT|5&am7jGJ9Gkhmj|st=9Z zYc4N1cIezTGvuYWJl70*H@Td&xKy_gcZd4B73c2W?zPh)f65oKTbx@5yEjaq&*xx~ z?Hn#e5)r4xyL%Y?=56;8*RWKKPHOu7a3h&;qzeER)c_9;oo12b4Rfg&%mr^ibLzB6 z8Q%$I(=^9HuHYuceYssut25Rc^-Zo%4sLZ?>>*#uADUh(uG#6gI&%5a@J_$QiF+yy z^Zj&52jvBEAMCedT|}L;I;L3HF{>va%c_Hl$kQ_rGk`ElY0zCjEoY@r3vEvI?yMt<#0aWnmNj~j zL<}X!WGux>yb4CXDB^|SJ0T{E;=XP{lfSBB7#Y)!g^eCf=4;#M;)dOYn2|Z^)TIAo z_fmV{5M0H&`)k!T5WU@MHR)<2x|QYttp?IN^Q60w$NCI(fD4|cYk8#}E8c$&KFheY z43RXKds9{TwRzQ3R`x#-a~WH=rYb9b(_`P9<1&vn>g_-vVg_Cme;l zvu+M-yK$Ts>0ydZQoJ=Y2cu?3REaSOz+@zF7N|tD8o;aa&aNTa&HFr~PNioiUFzIv z+CMm(DNt2#$xa+Ae-F@*I(ML$grn7ldIK)I({9wpR?K%V_0%X!FuL+4t2(+a_j1G!8ZpF%N}s>< zEWhl4tJ|dKk!F3Hl=jFGk#|KAw|EIT>ZB9Dj8RWg-8S)e#C3u+$Xus8 z-QANO@T6Nv>cX`uik$vp&0|p!u^Iwl7&ZEfF(O;lJ0Xol3Dj2HyZ41c&vg7tDD)rY zG=Q?N@ci$MG;s3K&(Oz98RdXE_O(R!dR$_VxiONBC@qlCpx=0WQ=8@PfX(cZowi#469ksXj z&LElUa*u%&X3iqI#vqx zT~_f3M2Yq?8wBklpMSN@W3>1CeIU%Jq&ubTAg$#T25*o}^TD|oJb-kGht1DCZ2ofG0b@c7ra;%4 zSxjQKvNb(tJhK2RcokOgf;qgy*ihaQs?SF7G&(jRA#BuGszTFRJXw#ElFspYyxGt` zogF?Y_h_OF=7$ zPnSkYLt&-F;c*%@(eyU>OA;ik-F~;Wu$M;5s%rJ+XuDavv!Kb_SQeO~B*YEg#jaTf zpT%ZW<<{@=oee?O?{d&$SA2^+vvegUr-7fjm-@V3))X6UcsuLXYUteORGpizEa+J1 zgJar!h1rH8-TR3vMtUN@NYfXO;q$CVWG`N+$2`6O?F@YaJ#x0RMtme5=bWl7D!u?b zoP>p|!7K)e|6K|agK>yFK`J8=(u69GU5?C}q z>t(D+w1ECiaGV8Uv7-4PYB}mmhbo>2ys*c2*7Tuuev|Ez70WLb`=&g5ys5dl$!>|w zniUJ?4o`Z5Zn0kcZ|KD==foaR^Ju8i*%}g6s+H;ux@v$QMF8Ckp|mB&DoXhVh$qP( z9ailn&v?O4fJ)53VWS?b1Ls)yzCw({My&#>XNr9YpP-T`^lDUPPy^Kk_8nI>$oDDA zl=4>!fS@|nt%XT#ND(Bp-Gw<_)Ym#&fGJR3(@$SKL$@1v=9Rlwo)HS|o^+|3v3R=v z%k@>MR8_s-*SdIdtIxl!aKX2E45Yn024+O4mY|*tb)9Ix)Z7z-4?ruD1I3TdB47pb z5(~^g7g2vpE-H$F!ffP!LvSPOXULNuxd9x;LL!}t`2_$jqVA8U^Z=#uK-e#iKvkXG z_PvD7zU`zIlfFq(n`Hs{%wQh>$ntrF?&qIGUrwm^b02E2|KYT+zplQX?~zXXdcIft z{OglmZvoaqqIG4-TVVBlR 
z7f9L=XG&4Jkv0||)quxHGuMh~d@QOK-8M156~cKasDrFKF3B?%RA1vIHxMMn=}9mf zOO%WpqXqgeSc5ByR6wkF8m18(mjx+FSOta-uniL<-*qg^er$C*?)12B=Sd0O?sDG| z`WKh4s<+DL!i1Dmgh0yd^9L)+gFb(~QtqyGyK3DP%EY|PX8Yra%P51TmdAqUOZ=Cs zRlZVQt^4gU#HgWBj!e9Uw4W%j^`Ae@{Z{!reCFRy^OThK*i`B-no1OZOG@XX4@cqs_d-@{IP;vcYl=ES z6;mxO4LfOZmB)%#y5d!Fg@?42UU(EBd*KC+@;>8qNoTht@Ffws5n(D67xI5` zvdFkg;6Wt1n_fbUGspJ;sOtQo#wKvwknK=F`={V_Kh->AO$}(H8ig$o2^WqDDn3Xt zK1bAWRWSRn?R+5=`fmGwW1%_XDj{|?vLWtn2kpn(ox%a@?bk8fC#vs3w!)yjSE#Jm zLr~>xqb;ZXJ_$TgL@tLOs)j!N3iJ>j^Oy1Z!dVn}o!MjyU6T7?FLq6EXjz!F~w4w#Qy42G0P>B&mLnf_4L|Ui-ryCX{Ay<0H#?mH}0NH zH!=XzZLku8|HGq(&OlZ~ZLGO~*m}FL8csQ2#Q1>GCH6TiHU-sNUjpT`#o>uBi#PFV zDaX}J^lrV4Y06h|K=Rssqe;D!01JMs{5OXPkjqD->=?z{H)PQGyi za_t^_j9DVDxdt?Blb@aPn|y)~+KY_ju_(JkrrhznfdL6DzN)2q|S*8c-r30Dqho!2z+U8bLDk=A2*;DRl z;M5uyJW@83KUk*Pg9tEd$7hF5f&7A5l;Z}4QwrP-w5hk~Fq1)}FWA)~Bu9^%JXMs!)ZVV(CXwiW@lCiQ-#dJC>OPMHO7Jv+aP-3(? z-cGs~Mhx5jTx0-!CZbZ}3L_cr7x3@n03-WQIu<_b~k3qzJ-}6h(iuh_9FP{0YT&3K5Xf^b>jJ@DF;M%9O zImn#Ul=q{krx$?ZxXlVCN^3JCg*7)>l_;`>1`XN~nkoSBy!6Yls6k0Bw3-d3@hiBY zd%v3d?JB#dgWGVqkhrb66YC%TCO7|J+~6=r`cL7 zby3_I7F$7@1ZJyXnW%GB=yqWkFn8)Uff*DlH*>ev-Jkntomd(OX1ZLWH@_Bq_w9*vKOhM8E=y zhn%~tc0N-ycoA1}sHPZDfX?=&lrNer`iM2?0~B!}+V_I+HW=+DtG;v{FWmj}e!z8` z-smpvOE`n|9B~RJ>Kfhc#_c^{Kf{I$HgiqDra0!Cp51!yZkHAK=iwx;=(#_F-sVm&XW5iq3 zKN_8rC8+30$5s!8eCp^ZsVSoq-^ZHav4P*(Nprl@%OlnM<^M>WIyf)2-ZEuycFtmb zu{L|PVueE4`zVjpqi0@!=%m4smHjBhCQCQuKN3Hb``A|Lm$Dv+5z0prq8owh5Jto3 zM;bcPEEQ;yngkX3%}C(m$l9EUenCvkP>dNJxE~9i#mR;Z5p+Ou3QGwViTL5kmQC&l zP=hdeh}%{~bH6kRWs2RA`yE7Ifze}CoMZxFO8;N&mJ01gj2-RH*lKXXp_Ye;#)BHG zwVV1tntVzVA?3i2PVNSj-VIZl2+Keurdn(^p|ujkW@x1=_Ypl1^RjBM;e2^ReiUQ< zIPl&5$Wz*=X6|OIc(jLF(>>#vBvx%w%~2u-mRV?WvIizXCt!c>0lU$qcq7qPccQO< z<{&NAQcpHeMWHW{Nuq1ddlS7g`UWVN!;2PUUr=KZLXer`j0Up+wWYKRY^rQmL8C)7 zY1hI~D8`hMilvk1z&L!9qA!@Nx}4+r!JXYx?r5*SfH<8%>6vUNZatT(bc4dh!^7 zSp;_*vz3K00=qdSJBntX?mD0WhQ%4(_%HY}{1|&XlQdfhV}M zOlgA9N*S#$X|tO%i~muNjOcBpuGmHSA9OJgDC^z4a(xYVI&$0R+C|G}^m%t&DQ{gj zgU4Xx9E7i_zr*OS12sY_VU2*r2~3JqPyzMDiDY zZ=kuIv97gTDcT!0W<5a&H9(}q)CwladCMo(IHXUp0XbY&tCGjawoXE?SL)KFJZRuRi9 z%w)81$ODf;xF6|K0m$x-)TL~JE#Dkx5uSZp`4jVbI(whKmR>RMThexq)gN?2oXdjW zW$WvX?gcE>pYLf)-ON+Ev;VhjfsU8jyhekb=k_mW#-HXYrBiKrYx|Nny_Mc*Za0)kNH;k`c2B^7aj1^eXqA`S|?pyw3$-*=X9r#=7c3fJ~Q?Ap*MEOXB6T9 zIB%8VycI!|U=O#d8m)>_<#qBGQ?v#3sGN*cbRwR{v?Vx2Hey$7EY`lx3Z}Ui95!Y4 z1~+yCqGhL)K{o>SH5L}ez!`j38O|wU~yv|3s!*K5A5h)OY zMn%;llzTp@|ET7N{72{zq>*RKSh1H;6cOAS$jFV5;DNR!z<%+x+m#XM9+jV++JEqd z(wUmhsnmag=|lfZtISbiH9_VE?GCn{bi~c0F!>C0DjNYE{Lpc@4-6Ov#wv+TTBnER zEgrc`8;narp)c&o!-Lzj=kuM@x_ZTHTCeq+XOC>wMgxuqI3MbB`T42tIjSEuPcn(P z><)=?m0glf^S!F~$yfUU`D!gYS??zE&&u`SVoty-4zCNclQ1~ROdCe5Jdtf`qg}BH z`T5yS^2Y$J0O`&e7?(`dqsF9Z5i{J2ND3x2Tp0p+aE(LqyD?Ve@9rjYyErgJQl@j7 zCiMhYcS2KXvR^O_D1}oK4WKwqdvffjkzVk5 zW(JTh&^)jDJ0Fqw+=$iF#LGKiu(8;jv;>3E+az*nng1Lb*Qy8OwfC7?_(66 zO!6pHl?DP`BYQ%j`(N<{a^HhIbwnzgc-R^`7mwUHN0g6k@0=EyG#Xp{zW>G`J9>G% zQf(_oHg6G^yf;4usxjKDlld<~#(o9Vb&@#4kUWX7JlBg%qlCGI^9?OXk;4~EJP z^6G4yg1^ARk5#l-YN7KH<0gJgaM`61sE78+Hj=+VY6QtuCPGBiS?l~R`y{ln;9t?%j`%hJ`8>}vat|C|wZ)HDx zRh%SdlD;kW`2nTd=lyGKJl-vf@O@)3pkl{`H z-~XfEDJ>MnYDBvRsm*8gJE;l)sT1`I<8VsHZ}9s_oA`ZOZrjZBUC47Kes5sB^q6lI z5679VZbHrl5@WGuy#*bNK|jO@$T?6TjV5>@Dx;(w>Y1Jj}Abh9}`hYS+xS$U5?z2|P)|RXUe>EUa4JHAUp}UP$ zURzCD;cA{h#&YfwaWgi5u}t0Us4ac?O$T(m)3Oh|7l{Ak-aIe?mWf|bEKaiI-Dvqf zDZuab;=S+C{rung{*}nJ!Wjs5s7t&7hL5CLYY~)ARG*r(xQzB9+se*x48mZ*PfJy< z@IuDPjrCG1iSWOM{*H(@;<@YN;*DG%O0D0Y+e& zR*x>(^aqIey%Gtl@YdaD3f` zBp|v;{c<8UwV3-v1efkswz&Wk=YAldjD8>pkJgjD0Zx)av(Y86pW>jVlT8Yn6;3h} zPt#t2a||^E<|(#Rx$ekyig+v6Mi&zeUB7$x`Ud>LxWl;%o{M^LnK+S)U5azoa*2{! 
z$uvUaAf|;%UtToMtI`P(nIL7gmck|_C-Sh6vOL&Jm#^sQmS#ksV-~lt|JZ?X2%CNb){UEzg zE0vL|0YM;>ArgKl09Et6BwSW+NhvqoiVltG+yNIoo!V-Rb`IH=0IrBYp~ZnsxAcgS znJYHl(v$nfOu1fb?4H}!FC7#kJ-PU88k)mLIdWxDq>3$68 z6)E=2~8AB%|VR*3{Nh z(Kh-|9G?7~E2&6Gl+A`Jr_=e;=pHR;XzRj=rIVgfo5xRSCFlsg@j@z%iB^UXlVJpaY6YvH1@yQEVoQW$6-dp4d!F4uEJ%XZg~` z1J`X>zyG?m=imRpI}dzG)ZG*c-PEYFnVd?)E~12R?Aqn9nT^(64F;FVru)*h>o@M- zzkc0;_=6Yjc;LaE7v6u;T>QJdP~5N1#gsYPhdKIYd|kz zCrCo8>io%0qC-{i7)*8o4fxT&9)0&E{8xYUsQ4&n6W=>}^k^Laj;d>8YWq-|mUn5= z=1{IsJ;j45+KJL4#2{iy5Ge;vJVXV9C}PIWKc51drh+D*x^K-o@mOv{1AecIF5<6$ zz&{;4OuspKOi+(kvSCyfXk&t;-J({37RLZ+)x9j*s-)wj#el$(Q8C->P8K9Pf+64q zLNOhcpOnTyxKqKH5^*cBUw5n9^M}R$?2pC{b5k<8JD>mY^Uv3av$8)vd>Hem zjYDzmT|-U9ewWO@C%4OUu{&a)M#2UrN(CAYxX4nqaEgQlse4TLqm2-@YK>zBU_?sV zq~?0>glpKd=!?WoNERqmDN zpdF}D2zo70x4;v^+b%KmxVhL4>h5K&%pCwyW|S>tNVpU2)Y{;s(+ zc>D{$86>?8R4?UI1UZr#M866%GI<)1`TU5F5pCTSpgrwLWS-Kqa@d4(;j9qP!MA;^=9I|E1i(N*egJ`Ue zb5DE_2LPT6^%-duV+lj05O5Y%)e6+LaAPwW^<;7NP6)0#N{`TFZ9QqmPN4Wa(Gal= zL)*=T4A*hnyvWesK)DR;lC5K3N*&3hEr98%tRs`Z679>lL&RGcDNNtw7Q&W<1B{p% zKwF&kalq7v+A^8w)cFMcL}8UpTxryV7gLMR{zq+}+6P&xMIWZ24?$qd&OBNAaNZ;? zGfgFE0*FjtRO)3=GT7M=+(k*Uj2p6NW?mm#+tajNi}F9pcFgvEHzuewUxU? z{d*SO&=!j-5ie;pP?L047#T~*UPzY~^Jv0+unuGNM+d?cq`$bF4e77Z0ZOl-&|*Q) zL%Sm`H@SUKT_+u(6smEGI8IpAX3|saAh_!w*hIeyMqTqAzbdZiH`puioX`I{E{xV^ zu%!5Gtm;={REA{`54u#tomvjUu%0z^X&%~)wio3nFI~=E+Tin_ z%@^}?@S`%fn2y)N5U=MXeDTQFOrn8FN}fsAC9+A*pGm4ZXLKOhj9zr1e07pE5-(ZK zMbgwzfvO*vG;PD2P(~SV7p9H?B9qp|#Ii0-BByTAK30IT7UU43=!c^lP6+xVtmkl_ z7sC&}YQdL7p>Ll<6S-*bm*&5x&0}HFzkoS(nJ1>xU{j|P>uF#9-^DxNENel;EgPq8 zmohDI2*PH>GvyseddZkhPe2}``}VTy-b(;pny*X+4rGTdI(to>O3xV4D3RF7P0(Co zmWv{+&8T+`%RwYP91s>?t|)I$RM!JuMakSV-QL{00!OQv-k=?JtEIj=(cUxum@6KR zc7az*B|+%&1gsA}U=4V>gz$OnCPT2YD>}VuH`y0jf3UYdh&l^oU&x4fFBJbQmgC9* z<{gltjOlffi5M`Ei!0a;W7rj)7v{fOt`=W3xYD{~df2NUJJQJG0lTM;jsvhtTC_5x zde~}Q+&dNQG(_CL`(f@G@wp!k-86C__X*q6d+@s-i{)s`LpbX{c8uCLB9*|$fLIr- z9Vs7xPvRk|yma9SJC2tbhA7tmf(CB#tflK5qVRH6&JZ&)p$UNg(l33 zWrbxJV)AiTZ5clm00!HAwL5Ea;M7mdC~zkMcED#0K4L@)N7GoCgMdV#83-o(#GcaH z+P2zSu{74x8xN<9Up02s)%Wx^)O47hM)l(Q+92Lg5)8+C0 z64FMsYD9#a&Yg&n`;)Rj(QJb9Ma3bt;d6lXe!ke|!-eLCeSWp6(9e2ARcm02dL$Kh zAEWvwNFqW+h`{%hY=robVVry_@XwJ;0^CYix)E$+M5CKbxQkcF6r_e=hpQ=GiJ1hl zr+-^+3x#eA2^T+nZ}OAehlTfGEuNk4LB5w;wa4K%GjlFO!ODZV{XeyZQzzTP*T5ED zi$V-O|9jM;+y!`k7ta;(4g};DmPbTIAdf!j0>~q9UAUiA?+X*|BSd-H`$XU?7db<{ z@cOJ(@#sVXfKmly*~0yowKB)K6)^%q1(Tp1$%cSS3uXk|edG=;)IC8xqD&dx@<9OX zI8R%HxFE{!dg7E|hEbyUAW}}B5qa-1x7(vdN;-jXrJs z{^fWne)fyT+&fd+>%gfMRjY6hRb$XfCzD1}B!jDbBK?XE9xgs@xR4z}=h2MSMV>Se zV4zkb$*2x3OOmkwdQz+?F#af!8n;o3n`~Bb0zZs~!MYBiYV4=9tHa{oDU5CP$59$BX)$I?gbpQk`=11KYNLQN+dG4ePFs z%U{cd&bj^z+r%GW28PZ&?}9bBVtE^f4!u_NyNr&5{QMJX3>+FIU;qWz2HJhEv|k(* z54xPlO}aHd;lu=wKJ~GjGpY*#xvC~@TD(VxzYaoT^ zgIYa2kcp;*aOMg4G zc|EdEXoG4gQ<-n$z;4yqs$zAkL}9cBn?>mLkFfDjqQ| z$|h%HM8kXFNTcFC$oEY892L5<5jwO>5egOisvBs5p`xXfiBX4Tj`o%jvrvVqRX+m? 
z!ga5T5D~vm1&f>pAd>q_D@ISapjQ4eV+K7=hKww0jWwXWVFH=D)ZYp0$dMv;MB})H zj3lLlYQ3?nHIhgslUX-~F_COIR@qphpA^P~XOChq2*6OETXET{RwdJTz-ir3eK!Pi zGk)4WO2A1*jME7ej&+Z^D;xEsS~&SFW3uA~EA&od2~?1SnQByCGKv9W3xFXKMf_iV z^4YJ9d=A99V4J33ARbn%?(SWI$7xOR0X(==DqWjXF-l2XUaE|(g4`fDwpWc)756kq zs1TC?f`F=^kdqh*5Sr&EQ%7PUkyzAX9cV7}N`a1Yr(>#@&r!V5B^=;7Ue1+0sLIC* zu^z>KrW+Rxs`Wo|plJ}LT3L{xMbJCR{2yTd4P$0g(iedB$I3^Y4|38PFu`;J$~0p` zC}cMP+Cg3slqG5G#LKX5GF2eW5^0?EtjOO$;VgC;SK^eHwKA&*hcg45h(cPk8sEn5 zk_dv@1-q^dUyF2g^;oDPW$`d8l{3LQO)e|{e_8plGRG5W`6dmQQ^_Y0*a?3YI9##iMHgteS>k8-b9 zND1UIA_o!o4XDqsR1COc@>c2PTs1xB2k@xiF_){J@*bzpCV_ERM#qywb zPs2{)ecPeC6v*iWezaQ6VQHcH2AUGbsI1J19WqX}*=k0;Rs)zq`iz53h4htb0#zaz zvIUgONWKRb1|+i%a|M890Yo{0unYtNvFyUDEjn$Z=R2}Zg;=UUN(~dMv&jsKRS}~b zssX{HTU*XuAN~zvELALJ0ViwF6Hs;%Xw;xjtpswG`uGiCCfC3QI)n68Df*c8Qb$J^ z(~GU0-29{SQP=`(@pR}~*qR<1&-7#c^{a~yOEI$u(&~WGGho%*{o%TVp>KFDlkj=m zs6_+3><9rB+L@#e$R%hlg%FAv%Y105xeK7cOWj#~l1HcuG8;@mOW{#PVbX4%`;(^Syr#naz(y7i84m)xkB>uk_-!x&o!#&$^BCz0R`LBI7OKNRB`nbwE# zY=}0rXO4 zh@@;L7?RwXQV?3EBRsP0K#GB!T9r3OdeWF6S_w6wAw8MlU%M`%KB)o}wm_z>VVEdJ zJ}El_OE%3hSpE90)s3nk4(+RWqKC(E**_UyU(TZ!zGeARgWacJG(8*$v@9%NxbQde zmRD))?(ANwO61iu7WH4qc+mShTpbTG9&~Vh=wIkfKXb)P<|D{XYp)}J4EV7bK+9m4 zmDt+JCdhf{;+P;D7*y2ID&F< zxj%G?C3v_)MUho|LZQ~q>X&lAzey}x#$Lr<6_BzB7%4p0+gtp;MS#)e@;nAf9)syT z2EY~8)7q?|F=%7hB_(kJ5@Sj-@Iwbz8=sgdc?$L@Y+?o`w5wx~(2j*n$OU`55u-_h zmr&uUJOTo=TbX)b=P&{&V|4nyGD8K>mnu-I;Bb|7Nw=)a{Yy$LMTc)!!GksKP^hKr z9Dq>2nR^AB>3VH^s$TrL_;v1?DZTdj;hXa>#V8mpy})7$p~``ZllukG3awzk&Q_51 z+fuBRR>5kx0#ll;=F-H~aE)ipG)5qWLF^c~p)tu?Apk4{42BF0iu`~?4>ExR!c-tp z#z5FH;Hp>xPJt4UE&rL|xk)&qe>**1?qC6a^oNsCawP9+Ki}bg0uT5<>L(Y)v8qXx z5`3;og&X~Z7f6SG3bmgoZV7B4c?8Tf-x7FW8TF&~QUE4Jz8@7Z#aC48_A`4aJ96_e zkvHc6tUYr2Zi;tum;`Ng|iv4&be-Ci%Pry%9g$aHHN1^8Y`x+<9!u%t!E1m|f zzZv+eL3lM`Lr9OajXeF@rJS2E>L*&T~9z*DD)FIiuAii~tmIXuJ#w`9; z-&lQP*a%2W4@#oK77&yqiy!wvZf1XF(9BA7*bmiw4 zA`t%w_w%mybysmO7r0SUa8dlG<`C-6!cUX`0KV{V!Lv|)9&Vu-@H z$m39ol$}N#SEr0l6fzNkOz3EI^v9%8ojww!j*5&vqKR`121iB$e!TGdI)<G&di9qA}tTP!`q~Kk4Q(W=IdtD9~GD;U`W(609vV9U&g40wjd8lfQ{U6X2+DoC!gdx#N$8yvDozS z$GJ!1BJ`7=kiLRHOZ-9{*CF8r`kf%Yy5yJ>m@o>kp@N~8&hgHIAB}w$MaG3x=p_Fc zQdh+X6P`29%jNyzK)zcXm#)Tn?lTXsEhr-=If94{Ljwd@Rv|MJy&?Ann-UR# zoHUe?2nu~D^h=-rL-BMxcQstrG#<6m&&AA1-wotf)@Y`0QH!MDyVck*0BLkr4`*26 zyK!9XL|FnrnJ9epOKi^i5Ao+(R!;5`uVM`TfH8Q0$DkHJI8%HZ_kNFiUpj{Q zMx(Bj+vNX(geySLkmsIf+eEs8Ooxn_`yt$6H>mGWX{)~B&oV_Xmx*{B@)8$UqW#6s zqlg3oj)K^s;Ld{+xbXR;JFE+LCPisHF8=Zt{)XlFJjPa%+r*Xp`&8`<+Kc>T{0#`V zGG0U-$*>zmg8gB%pkDm>&-G$i?(3L7l36^6fr;;s={tBeq7@iCg665chybXZawpv) z_~*aG@t?q#F#dvaTEB|@<#~y{$_SLA7g>puxFke!*sl!WF-KPnG!FtZuL1tNst1OZ zJ*YXwGh`)wHx&A?kY~pDRAF{0Hh>Lt|H*Ae(bRX|K(nVL_t)HCOS~-u+eOd9+~W&Bl{eRQbjK8{?chP1 z)g13mgfq9?!r(nwO6A{`J>nJoQWMoj$8a@LS&(d;QV9H1zPrR>MaH zd9L=*JE~Dsio9(Q=}Z)Z2`6mEQw_5Qo=24Es0_zdjYbQ1tg<>1rKdPZ74(>vrNt@> z%1a~YQneX}VqpvY3#qN%j-f?6cb>6$SHm>pEp(&5>xNAm4jkOLe4yX>71mhO8g?#P zxNFD4S)IMcqkN|ypyC4uj~?vmH%dnMUORw~KNCGoArdtV&-FU6nP-=fwyx-aD-OZq zObj(|D0Be(c@~U@wTbj{R8Q^K{CM@d5ioI+UaZ8&Gc$2BPF1L8a@``WUO6BlnGJZ$(pB(?*sP`Scx0x} z#6Z^4cFJJ`nudm&@E6ben(2a`#$%!K*ib0;%$1@(78;6`hhn+s z=r5k&Hof|Ab6;Qc;9yffKGnake|3L9eX<`H`}#$M-x>SvzJ2G1VuR(em}t;GmiyZv zeL4m`J0d-aHFntLly*S>tcU&)qC{5YcI*cR(yh=wq+9i@Td@OS-2ru>ZaY~6AK?uY zS4BPrtqymMAKuIoQTkU|fnxb9>GBvZ2Xxig!~d46)`Rcym^6R|M|xhAO;v!xv@ zUZ?W2GU-y0p!MFVB&@_IHR*R%(%94duDclQqcj(aywUG+>1qC5@2OO=Q+Z*_rv9!D zt9Tvq_$|`p!B7=}jEQDi0Mh682myZBpP$tGBJvG=yMuHa`E=*4hzoB?FCGZBr z;hMTk9sZwtwr(^W98%vPJ-%VsdOG-E*+4`3s;o$g{Y-C<>G#i5&L!LLwvwc5tYib4x> 
z_ZC_xN)GYbc@11|x5;agw!8^WD%hI{lOGqMiksjFufNA5SYJ!V!%v3LEfuYrkKSRA~)yq>*+x>w<|V-!J=;mU55cO3f(CQa89n05tGA-tUylcO^{uPU>}IZ z1;Nvd;~8X2WxZ@?c~M{;CwWvyAe+NT?5coKmGYu8kf#de)#(Ub0Pd5YTAqI5H=)pf zgQruj4uwAbG_(QtzC|#p6QJRwwYwPYFrgh}-=g9K)(o^bMK-XE*vNzir_?;XD`UX0 zQx=J8D+ZB0fwN4_V)2dKE^%(|-VHkEr+(+p(nAW$kKt1%Vt;+XmBRck(vu?=J5)ch~K^p`^Z z7i4M#T&gY*UzFoyO5)>4X3EhuWSNvOxpfJ!VA*m8#h0_ut`&2c?2|eP>P+$^LSJXk z#223g=j&I<2XhuiXf@gcZd z$rGnl6xK?}S81sh4A8);RJ}rKLHV!08VWsn8x@uR%FCh9gIZ%UPTM0 z$e2Z(R%W6WD4WyC82MdKd%tGlKKfV>8FN9#BE)LP0YOCJk)TfG7m%+oXV!#)@y|LL zHt*z9Fxcd4#?!7urUC*6&m(Qm8D|*ghY|9kZPP_>(Ujn{8lDfMuC;?wk6-&Xqfs{P z_WO{)3zmv2w%Cs%+xWr@Ior7N1ZpiKc!E_g3|%-elD?UXHn$bp9H2FU3|wO}L#eO` z5jprE9 z#6}noTlV_=qoL5K-*?3(reFH@Yo;gRWKg=)W+J5sjqYT+DRko`U15HBV*DrT7W1PM z_o2bGbphVzh}2B^bMTBdX6n$xMux}=D!>ABtOlJy<;A$B&sy;qufcXAK}`p|l~e=> z7?C>Sqq4ZY<+duOHcqZ5jHcK%V)%#cglni_@2C=8UZI{SQk*hhtBAJq z@1KwKL8D=0?T!bQI}rp!sruP(=YBPO-G&9LXIyvYaF6VH@WS&K8o(u~Pd&Kv;)NbW zlx;R|Yi^cUIdjv-{c~onS$81HvR#?q&-qqS8Gagg=5D9yMc*4Szs;CmvOV0uoiPfN zCfJVdyAaT0)&vQXnAKtS$(sbA1u}nls|PF>uBD8EsvlJGJfp+znz6w;g42q~F@xSP zJ5zK?*dUGXD-gp!{mJJ~r|UCv((65eJSg$Q+#2yzZq?-Z0si&0>&g7LiTh}KC2+I7 z@(Nh85$r+8guwjs2^^LPpSB(2nFj2Gjs{ZP4$4t#aAhdf&kf8-rVF%n^>i&$Hj9mKD>GsZGk)|l)AU$uI_tqRpd*s-#xIhx zwY1(~Tj4xSdFX`0i;*PKk|Nv2P{hSxQv#s{&61lZjEVuVpxTidpdN$a^@Cc}GSQ}h zTZ(|XV5KY`vLg&aOU@S{CDCO>X)5CdJA^F6Sw@F#(``Qg17&3o_@9RTtwHSh$zxM>FtE<2)2ZeUC+@Qh zsCkR33~YnPhwQ;Axr=bSl#OS?Q)ZX^_mqiUf%zwoOvVCdKu3P&LcvTI~6graNi+^$%%V^I_qBj-e3Y+Bac zwN$wpd=RE(snk-_#kt#mrc95=XS{`nSp5tP7VVD$XZ&}et7b7rJZcwFEi*4f0yzi9 zaY{aT90Q1gKG4}87_8Q2dQNEYtX3g_?P_XBfW&{weT`2PL)o@0ce)btpvJH8)= zeq-B4yZ8BsrU~k)R0+HqvC>gk56t`&HS7cUuOITiT^c zH_0|^t!8=BeUe+;KLo7w_vMSU_dD=@{0`s0{-pcp8Sbm?IiS72R%;RDA^&@}B|_3B zHD*VLc$`=fna-AwCv~8N1i-*nn}KXr907>9jWawDr`3SOWSmpb-m2n`$VkO`J7Y!g zA>*SOcM$d@ReN^B9`&Q_=L~_TjB)|yzjLH<@p8ZKjMa_1&(yhG`prAV4?kSDK_}a= zEC1cJdFh8AZn)a)ap|twhBE_U{mQV{RKc?he<^bZ0$2i3j0qR5}&?4Rv_!6;%C4f%Qfk{+mQ-xeh*<>v! z;wuM5-WFu0qi-2A&k}_d_xb~ua7S9hzRe{ml z%=4z=`PBOt@O{{Vc>hR#C)RNzVSxEAK{|OKW_d~CG#!TQ%v5cVECvk;vw*i&#St>) zDm5bA)S79gF2-R`ywZul`&&$Oqb)ntjqsZe2Us8PVCHG(tT34mG)(HL+uY- zO=BKQD~PGciX1Zr_>#jLsn&F2oY*0WQAJ0LDpYcqDCIkm-^k8DvV`F*M;aXQN)o?3 zSgkXqfZ3$mskmjfHVm{4Y!M5{X|t|GC!VAO?onGw6%UdTn7L>@`)^Q9hzpfh;^=_$ zR+L~GEjTzRs>3&eV;Hggu-V2|8cIwbu1)IgooQfD6Z#1A7h)Tz=PGm-}sswJf;%u7f*l ziW&NSUH%UkGcayatT{;geoRehj&VYBj8Y$Qy2gMpzq)>$Nc$+y8b-L5f=)0U4rUGL zu@Z0uOMv)e)klzVgL+${)SR7 zxXaWcpzw^i-6bZi>Q_}1hMX&`)#fUF4`$HE4x3$LWG1=4iS@s+ptm6z=kR(cp7$*a zrs&n;eMIt#`WDM!9`Fhd@jZAQTHpcUs64s1>mgkA%VApv; zFI-bw8=NMGQL&Z#1{+ccu0t;LKQ**A81rxa;7Pp$)e1oXJInU( zs9o*1nr)6o&#ikcZLO2~qGyH`j?tmcV;Ut>;r*m(R_D+{l=-qj=k%0?sr`O;R;mE? z7#%5qEQ~3-uy^VNrjo%vnfsyy!?6}w*gey&(^_taWn9c6KQrT}c+9C_=_5~4}(dw5= zXSjlO5x2oA^zD_c?TOaPHcL%S?&;R*wsr_e^21qOow@hAX1(15oh4(>T8Y|tL1~T_ zVTO9j1}G$mFmYPvN?WVP-5G>c*eeM?I%OEZ$fh$RoP$|Cxg;e$jiKs~lwg3uD*&$~ z#osBH6W&5R$}2saJh{W(jb86%9#QStEgp~OzUcLe6>)FwYvMc)=3RzQAMHUTip~O8 ztEekO2tXLHoc! zdz}A>M@(~*iFrzH)aMgtD08>YHHkC4IEK-hpPoO0T+|Cu|7(`CNi462=M<;(Bm5e? z(#SFRiYsP%2yysNU5)Zp;_Q3}-a}W0!j<$iUpb{&`JYRN>hIG&OMd>xP}}+<>46jP z(PMra9_e@fQ@R1)LGS-DU7;Z+`2O#emx&+4N88Md#EW2WT>|t=m6RQ!Xx3H9%tB1? 
z<%w)RJ-sxUnT@BPOQg#oieoyi)1EEo{P}Y7w9Tb=xmpVq-0k!(MXlz(i9Mlf@$qv= zjL!$ee9PKRfSB*VtsSoPD!RJ_*Wgm-6;KT4ylguDH+PYYsnScQkgjMjARDEhQB`<;h-4w5qhKYzKi< z7SItrV-FJtHn1Uk#68js^}Dk z`#3m39!e9~!_!q7(N;A_v$Zo8XbOq0I2}-{;*gmOvzEBvKtz?P!J{XDIWwd51}s>( z)v^YUm$!PbPVtToxQ{!@o+vMJoato7srX?bt~&FYYtFppCqHXSv|Zb`V8PZi&e?(> z%jedvyH?zmySHjnPuIHKi(->~!{Dqf?rjUt*h+h4BLAiwm3P6Oazc-9WZwcp9_0N4 z?IChbu{{FQ!VqtS2GrWpS#bVC7uxB-h2(LB*eJ6~3rdn(g`!UZjyDB}7|B(LH$k~W zwRa?h8;B$P^L7HignwdoJf8d0(Yx-tc5LjByy65ZG0HnbC%z#*is#nF#rNVlTO1n= z+RNjPleQV7-a#2-glqz-4_U_92z#R=AnqZ4!v@@GU@E7E7II4C2oWo8L9mWNFTh?j z92vXN=0=A;# zESZYNa8gjABo&C-V6D*q)J!tsE5Lf=Ej|Fm(MKQc?>-`$azA;o`@_M;fa7<+a|9X> zy1FA(4Q7k`kCl~w1czxuRiwKc?>v(G3Ep^7UDE0F&i6STiPnHpR$bO*4GxuWsSvY* zwyv@&z>TmE(!RV(+uL0IMFh`G}7W0^|B$LR_nSt}}I4A~XMD?%+xnGn3-h-al6+~5wt z-h*IeL|@VOc*u5Br8 zE?a)U=R43_+E_F6wkP-BumKEWd&%^u$!D*qpI)NVmrSd#K&9r$jM9~NuZ-hg?qwkA zBuI?F*oe)evH2v6;6`Rl5PLes_JpeDi z#TP7HxO&S4xRcQ#4{QJ-E2q1>D%L)Y>-hAz4>zCQuvJ$$<%lt-mmQ!t*~-An66U%tNTP$;|{@BvYE?)5V2uAI zEep0s40t&2yq4+1cN!yKP#^1_i_f1{Vl`t`SxeiE;ZP`SY%fJgAjNDg?VTT=-&=|o z5qB?bQ|LuQdx;hA#fv?{u)TTu?7Q(*^mv!C?#=U}hF?r?KI1(6-n5oo;j*%@AyHEJ zu6aKHyyBPPm&(g0{Zh|-kP-J%JMdE_Z4fUychH`|x`}Fto~v?5u;(7A4U`lnniW!) z8s};y`EAF1dg($K2QDPFaL^QvZLGMF-uPXmVtB_^+a?#ipu>8q~%ymH6c=iI4Wa_H{d1*SXCKI;zCimyH) zp8Dju^nayu@1AwT4QGAtd-1al9z5&acVT4}`eFeSuNPjaDtNkhhXY|FQP-EDFDOyM zUK{Li@XoAkrjFQ#v@Rx;H7OhALJB*{V!_VXmWo}NfuOY_@ z@pgbCIwY(d3llnutwd_5>a&PlLG8@uj=l2A@t>6(5}V&S_tsoge7)|XYcI;|yE2`* z>dI%uy}4t6+1xI6CVIEtBX;GkSMEG_#_En4z!b+i&7G z`%vYl1bo}1M0uWZ91H}>OjEc0S|u8z6NQe3Zc-gR4kKbn@e%}~ur5<%qS1w2D@4R!@>~??of-|pv?nq!@m^#%xy1!dPKa^c!1CyA3v@!Mxh*bar;T zg{b)4GtXUh<^o@+X~E9B?%A>Pt}?x~9*PF9O2=Z-LNZa{`S8eLB-S&5^!5D3g)^nR9<~TLJFT>H;oOcB zBn8M7($4GwgC*h|Co#)9*%EPLtD?g>d9z_&WdrO14@?{^>*xf=w9x8i18+8P#qJ~y zmODJTA9*hH`Br(wTF>fu+#`oi{Ms9rYZ0IPH7x_)Pr*@%Hie~?TFrFg_pM2AOq78N zgjGWz1~mkM1IY%fNHBuHO%No6mIUm-td*VdRwN3l>`);u*06*W7dFqy-i85=+(<{WHk`9Re%`8^2G44V zpEG>Z=LdE!z3GB}O4~xq!}!xF2bk45qOD*cbV0zPkXU=c`b17L9VKcyxdeR;5!qw6 zCfs!+a*bfyz()Dc-#-o)-yNaQ1(w|x!-f1*L$AFb0Y8c%Ho@z^S$2lU*vo5#^ZPJ|Je>9 zo+j``i(fO24Zn>nW~74y!leTA!NtCTTWFeN(H@DU{(gHI3O!eCOK_0UHTJ@nB< z*MH~W!SDR}gAe}v=QrN?jyl(x&lDs7zr3{P`*?9cut$e+p!PQ;9gS4Fk;r};@xs_Jx{Uo06S540f^rqeM;^jebs{^|N2_XR zCRWuVluS#GE!0Wn`o^(oDz8vikzr^A9OkWA*#vhFSH8&VEe^7@0lHbYO}OMCK=i#M_g!V4_Lt~O^L!c9wSs2xieq8&?J zV^z#lL@-)*9@z9!0H0mzBD~v}NqrHj2~jEJk1|^}a*Uf@MCTX68?g?+o;@2X0#sa? 
z6?mL6gk=!v$y5PMK^2E;+;qA(QTEPU7;_63K`!c+Vtq_=BeI|FqQieW55ZP~a&A(e0$a zHX3t_qw%_K-^Cj@?%%NCx=`HLDtmmn>p_JQZ0OnRdozB(cj7pms^jtM4qv=l{2`YP z1crCaUw%=qyt_K?^Jy=g2>5(A#vgRfeofi#v)g^$XZ;ob`;o6T^pIS)SQk&WRL`xc zB!VzIA$O}oT0_Jl8vcy)y8rdYBE3T5bJlv?>Q48=U=M`jS8yVPqe=Gz7 ze2g(XV}sb?$7b^6v(IO2x{il`&^78Eoa4YbHaXU(le~C0SgE$OW*4nlPtvmuM!}LZ zH{u(|mgzVWbq;2aA=6O=uj1-QKrgZqGo~`(qOQ!@ zqVaGj*4Q_^l7w!X{*Pm&_0vaSmM5}jZzHifdk|uEwrgy&Dmf*I^fnhIHz2@2wqBJj zTOz$)g{*+WU^Eb3i{{rP$JVH?RwvS3oJKJ!%x*vZ${Bc`AaLt9K%#njAyI4H>G|OQ@uU~@WY&OSBi#*wTeT7|chk##^m1nIOuVpQLe45k zqGI^x)kFvmPnI_ltIisCkOeD72Y+lTa6zHU9*)Uj_?tS(GQrC@I}o}YZ%;<>A^Jj9 za8+59SH?Y2NYDoxHhe&Gl)I%T?mI(xHrR4sSQC$9^A?Z!8kD!1&g+T?!{&glB);tO z_&0q~Pdv8*K-~EoHr%pd!~QdT@gDJSEpgxY_BHX`b?2SfF0N@A#P=>c@rLN%E(@Q$ z$>-aayEnLCuGQ+6=L((E=UOnF!+Qd+oR_Co`m^K?SnIAyn7O`J%O4Q9`792{6rNc-mc@FbZIw7T%`tJHRT0P@x(J zjktsmpaxbH&nRze8Xhai`&>m-O-L5XL>slKs-k0efTIwVn4pNyVZ@TBdL@1ORA(wl zU=`7+0PqX<3*wtt*fsDX!M+G!YWifjvFF?j8?iso{@WzAlf6g0U+h6cL=WL&PWuGsndT)U zL^t)H$_uy<)H_@qj?_1+9vXi+p4Zex8=6~K1p*cc8;Uxvri&=(D{hb_6@a1Dr9%b* zvZldW2)B4l7wz=QtIxdg<{QuCKk?aD+`MNN|CyQl)Q50IJolO$)QMh$|FS=c*WaV} z-NAqE*fVR^9{iatrscjO#7zyk^|Dhh9uzin?g=L%F35LTj(C(J{{T$%mDoe=oID3W zbXLbXFFLBQ1Cx*TQllUFfG#>m2Yj;r`uL3&t74a*ec}CFvv@@qUjc=z`hDU`T{ZUP zpt>h(DS?z0sBOw{tBK|DxUTwBlyXGe0ByKYR`3}Ix4}tq19E;9N+A*84daV87&vo` zf>EU+=|(GbcEw=P>28d#)Vpj(z3v#wIlYo=eji^8yKf2N5qkLtu=~!%e6Bu5F^YHS z-sJXa35PU!YWoau{-Ic3{0-fE{2e#a`|%xH3*RA{@EspP_nRn3NT@kNf&;vQA;^^C z${?1k%+7q7;xZO>6vG%q@;`{T55%7d$#;DEgzh@d8N@iy_b#8@w$qPY)8w&h%700G zgwJqWkckQ0chTl>=8wRUx+Nq$b{@LAXJ-GQozoqSg&*HCM<18 zhvjlh9}sJOpv!7lmWSiFg2$(G+~!k&^+n~dHUa+_bV02bqB&S!Wa8~r7;owa%m2W(HETE*(DBVN>oHz>*NA5>;6^V@9!HQA<&Y>u*v&E^O{IQ@zQ;f2-la!jtOa!s0#VZ>t9iZ{DWH5HJ1-z(e=(kLFy!i+x1? zF=`^LUb1VZ?yo6k@undX^sjOaRz09VHPIJX6 z*B~@O{ZKYE^|z@X9|nn+E7?2t!v*Sdh4a-n3pY6$$A;Byg*OLgn2(N4>w<0;UA@d) zhki1r7HKI?{&p-|bPKpFX|*8UvTOn|-!kt|R&kX#S$^33>Nk7F-icM4>+tv2Q|s$L z@{#&_Sw95GI^#>gUF6hqG?Jd-)_(wxFASx4&-tg_2A*CG7UMI!@d*8D#+qA#<~)n128n2akOABCxuHq zv7vKw_lK3w6?T8vasN9tYM#U44hGq-Vg5ki@qX(=S?j~m$OVG;6n*~tv94MnJLF*% zbBvW$eqFY_?AOhHYke8mDc2ze`oXrV)>`cvEr9KX@B3bx)!z%P_HyX)m&}JS?=y`& z>~ne8)IMVH;Q)b^T|#0SiwTCFZf$}=W_=K>lloG_BT26-^bPZ&LIUf|wMG*GJf>LM$^O+Ay9L6A}ak>qWr9k9Q3j{C%Uut+F>UJg{NwOvL zyy^90cLF~gM)KV+n+LvA`LIN)3X3)iPBk$6 zZv>TYw5eE16Qxbf9tbV*~oJw6)4)~6Yv6Zs7g02BK!odBRfKoU=}g~jk_0*T`! 
zxQtx5#U36buvsP`wOH5~DOCtyHMLD_vN%%Y^L*ctN&pNAvT=a%eg(R>9g)Z*?=ftM zQ^%ekKkTx3Esh%^FNy>`6x(oy$3cLe5A^4N4REYDZW77F_;E|7Bo9z;^0=Xf$hqin zhjGU^!wJkVV(oyqSu@P2d=TrwMCU!{`H@JZuss_6%GA@nJegdsr~4epjvt*g?grrV zT*2oW=pB!hI4zKFTkeH*!o}4gDXExfW9?Kjnkg1(E|S2+I%O5n3MucAJY7p?AafZ=hNbK}2G{yOe>Gx?^Y|GDJ1dcv@$IwkCOtKUIx zxw(*Vht`o)Mz({57bf#>!45!NaU&O-rq*2)TH)$|9_^cg282Son*z2*Z=UBvn z0)}DOm5Za2vEXDL8$TBDsVNzN zE)OgYUs`nsF$tKXS zX1@fD1ZZSNrrd&op7Q(*fx(Psz+n9B69yAz--3bPhQa&JevxjzFAmrkKOPm9M5Bef zCcxlzNbDAAR8Yq8z5uBT1PP^Yh8EL!q1bdJ_0G zu3oT3P-Ke5t?g3y|NV~AHh7DUkAaLJm=7ntNYD|!0FxqUhyH}gG={5 ztncA`0~b?sr3on*XzLc8>2|cnVJzS>=3Y;1#}6Df)0kH%7b^FS);M`9Un3z(YT)6= zv>iBxQp>;xwbX?gwaE6Qv!gOvxd{c5kxR%WbZXe~ps%4N88bpOkAhsqi%v=BeizFbsIIw3~;i zx3H&mBJVcs*Wuz}r~X2=40NAX5Vls77GVhMAt!z0lH|T4waaAWaUlQxQ zVq5%>W!0gsRom8r7MaE{K5CF-(=^<$I+F~Rqb`S0LGxkwWO%J((qI&sDb1Qf@=t## zB-Kxq=TC(o9FU!c``w!ncV&m@!e$s9^s=p^te9r!2yYEN0`;)W&<;ez;S8ewyhk`` zZt;==XT04VcmM(mhchJV!X!ejE=bvwb_)w0m@1B z>A^^$fNyg!a$Yog-rG>;s?$N5KYLL!d0{e%PhcmG4Tv>}1%~P(h0ML+{ zGoT>>=ax%M17nRjwYdRANDZaMUmA=oT&5Wibeb>+)Z;tFrG?w6?4I_fkCAA$MSQhH zQ;=@yZ;HF{hyEWODSR@#G#XtRR$KAorouPWA%$;zDSAyLa!oY0BOKnbb?dELw(y*0 zuEF`P0Y0={j4Fc+(qe=Ib$voM@&s0hc+!HOYy-}LUhsq$VT41iJZK#da)JE}D(>Vki zy_qwII%{|yrd-BAjQ7A@paaM4CT_u96}sTPSVJ-64C^EXgY|rr7AY_$6&r83cY2VI z8%L>_$O&Qu9aQFWO8)5IKEzWp%!jg31ex?O$fS3-k@I*NVA314Q)R_q42%RR7S2F4 z9$m;$C8E`EAaR&WI}1?N%u+Q~FVy>g<>>s?3$xnW6JdwiUR6DUxKL1T#6gYY76N_(0dP3CIn%bq~tpW@D+y8~)bd~xC3Zz-itcY6w> z{xHG?9PUp_ULglo-0kLC^#y1ax}kSo4yy+OGlP~9Y>JyGg8Co}Anbj_rFtlpo6Bxg z!&fR-!GqwOa4rBE!*s%RuKGb?OrwYym!M6ArxLzFkYP#p7CK!m4e%RU=xFP4Hdi`4 zUyL<2L|X$Y5OoF9@vdpLUDYXH;V)5lur=1uq^^J5R~L+g{K#GutSh|fo9>T=szcF0 zy-$6?UmKfM5%st$7eOBu9}2*Bw6Z5LEpUf_MxY8d&cPXJUsE9DbX5f!{hWtx*wXZv zm%vtjw$Y{@i$8;_ImS!IK3r89DRr@7#GgSqMC&qVnjgZ`0i#xpO1U&n@GncbE(Px! zDc6H?v!vW0vOWJq%FWYHQQv@3#dlF3Z7OC-yGbK%wL54$BX3k0)*C1{3;pZ0%FVFK zrH$vTb_dn=ci+|Zz58>kTp#*uO1ra-?^xro+CY8WY!;XoV;rqUy?Rv2wHt^yD&y!; zms#Uz7r3xon^9wxOB&A_Kb3MNxLj?Vu6||?5-zIS*d$Nej0@DI@n-~|Hya;B{?h?G z?=ntMm&o%$HP`r{_54C(qxw&C2+vc-2K6C%-l>)t8?EQ-@ceJ)96TRXOL-Si95Rko z+pOmo6GG0i^nBJ?k*t|RPZBzufkq^1sIx;5WTC^tjObr?|# zf7*@5Ex5;ENu%+H_4Qii6d&#%Y(ugSU@05+d5&ojXA4_MDR-oXj@lr;Xy_=S{AsVgnGRl{C$y16>! 
zR)=CgQ5dx{lg6Fkod=j7Yt-*BD#0%!@-j@dtY4yZpfCa~B{CjaS?MkP12o=b5}Wu1 z$6i&ajuniUuVUu0b#+%>SyvT=`4Z-zd-4MFOpI@~;a5jALT(Le+GD6jbOSsQ{$c-q z7X6rx9VVuCZMduR$ZdrUf%POUTJ5}1p7B3B#{V?ejP3IyJd)Wd@sbK(;19KdKVB^V zVct1*frLylKgtkFZx=owztrdW>-Xb+v5WAe`VaLZb1{0^Xh1UQb60)g#IP6HgZ=E9e`7;nAMNsv84+Z zt~p{|JhtJ`53ETpTKKVPGiFSyt8JVy%?vNCtLtoDlg$=Yy1xoBxRb0J_&NZatbap@Mz zd@(xE{)tK?QxbrXVc}rQLhrZXkEMQ@`8z&zmIsh%iD?-E&h|qTdToGbjS|IRXRw2t{{I)PI*#!nb zByUC+O>_hNu|9P0Q~fD&S8`V*sWM5mG*b8l`qC6p5HA+KrdCK_u&%7WfdBn2>;oU& z&AHl)55lFzKm{ z$15G~53zk1)(7HmWX@&pAsiEw+{$nYUx*~HzaEf1C6e5^ zlhEB8jXq|c@mMs9iRIjm-T4?t2A6x|GVr;3u;N^>5eR^4i1yUEZ#3XEj>c+1a1A$( z=_ra{64BHRQT&KQc_n9G@9bG-1;P&Y!^|^ot}eWynhXC@_=Oq$_%k2B?0)=HtKYu= z*kkV}?O|N!VePY_;HNy;<%HX!39Wk0?Xi19hd0K!HWxqnOC2sXqE*VX?xhe-Ud-+Zp#2VPdWMtTQ9mloU8kNZ5HXjGJ#8*w9{>V#mGPnl6U4C>x??zD5vT!^Gu@DimqF@)@Zn6A;Db(D z3iu{;#5UO1ywKSOM%%fK!ZS4WD^nHuTUqBEidp8aLRWA@&q3PDK0$*)k5# z&@92rn}>KBujXr7D15F#RO@^TP2G)_ne>ckX~bHEqzZlDMj}$@79K_;L)jX{G6=fq z72%_?H*yd{7CQp7J1&eYLZ>AVrUG+dJEqcPjR$cZ$T=ZqJ~*^X5KFq*$)1}!=g!-= zoPEdbXKJ63j~!Z9_hfzjs^f#ECm1(-Z`*Rt9e13)`Oc*6H?pB{mn8GZI_J{i3#W*l21!U#WY;DiXa*)2N7szt`E@a6KG{q>0A=Orl<2Q zBAq3m=pSr@26#Nl*mhCwL4Hr*sR@wtEL_yLygq$7qJ910_J(k(d1)A_HG@-t z@P(O6mv%Mw^jzR*t*bj?{>SR;YU^em5(`X%<)Oyc%V3#IF<(~}x!sV!E4x_3a8s z2-JJba0ere%{gmZ=kukzj69t9Fm6m)`sZ;Y&hlXx(EKCUnqlqCjtop z>@7(Dj2M&JgB|j^*)k19Jcdr8wA6dBp#phnpIQgx?csuF#7g8n$@j?e??KYOk>MWB zN=pgwoUTt_7E9*CIa+R z-~80t`ntM${B5tR8;o2%1;!RWochfl;l#(4f6=qgPF(ZIk6s2T#FCi{NFjs-1I;8l zIqU>k5QuM?ZPg}=i^^d|ukKbLWGjQH8m**)trE39*NwaLyLop$y~esra2m!t^%iYx z4KsE8d^j2#!9*uugWSe~sEN52qIReIdAl(GptVS@BaJO_MXJrT;s58U8JetfOWN1z-@ab(cscp_pBZ=)jFz6D^SiEvxSt!~D zz3XTIdI&izZxomayw^SR8vBoSs_NCPH=cai8K)lIzAEr9@o-0d+p5#1ubKjS54N9l zk~86-eaLNtM_+K_h39vLyum*-RVU`Ro^#}cy{OapY#H=w#i~4CEAD2s2ml2YA0wNP zv#}wF!(3_oGIkon1Wp^54VSx96YfG%Zv&I3x@irv1Lc|_7`3+PToXY?YFQ7V?WhCG zmT{DjX)>39mgay+m_RNCn2*u3gyF!a+NQtzrC9v!b37HGbyI;TGQGZj`QpNJah!E= zjw!}+w|Evt0~t0RDT56H1kVR%VQ+yK2BSe?=@x;qxEv^r7%N?`+qs4G;$kS2TVw&M zpZ92k4~W^YS%PQnjT$!ny+F7CHt6Xs$xir5&lkMvvVb%=A3Jm-Mag=HcC;i2tyCn} zg{VLWgjTd{8yS>CvaH$m=6H-a=Pq$W6H)K!w#Jomk+AFCK-gNb56w z-Cg}t0JZSTP;b}J@+VS2GQ29;d_gb$`a9Z19%AhwIJf`e>5leU3+R@6z`3!#aLG$`a3bt&zT=>nQMGDmXL5K>Z};MJ z63NxCu`Y`N~a zRqv=zWFX(KXGh1^4*qi7{8T}=4OtB`JbCFn2eEA4bo${u$LUf~zrpKi6dsZ@ zFiY^G_h6P_4UJzp3Yk2xdWKD}Efm4Qd!89@v5&p#hu2=aeDUJKU#`7&&*H`KG_WR& zeJG!=D%y(|`!HImO>}iPtQnl#6OeWaNkSTXEYSpi854U+40_Lw0eNLUofGDhun|jP z8iyk{o8U`o)F)Tcqso1GWIraf=$V74j=W@CAOHt0+ORRpD|;zogx)=()9y)La>8)fjHDKlS&e(UT75m5$M!mQ|+Q{ zdy$6yV^ywQQ-BYjBD}0VN}Z&klPjdi2+%lSH=Sy2;hi>Y^X)uOh?rZ}CCT%oyVgLy397)h@LPCd0%1W?$F;F|YFd>yq(wu# zAJL5VID|H#jPzK|Xrj$&R5c*mN%*uB`si~0$s&TY<(0{~RdjUKQ%KfNNDj7JF6Kg`Oyan9e8?xJVLR&7r2PK|yn4x( z?VrF=X}h@B!Pd%NX{bYf;f9(OQe#h=ta1FRn@M!Cp)X|$|G&;j~Ix&;B<#LyD_WGy1( zx{v7StBA@49?03gh9+AtQLUG_!n zCGhAW+g?HunZt6|_+cI`%6ee6BAX%_GS^|(0KNigfvsh2tOZ@e8kux@R5V|kkvo@%0o^gU zt6}K(aekb7CE0rxtgCUX;z>ZCtk^adOJDIkAnnQ;Q3zd;v7KH^BuGGuHA$i~<@&KF z5Cddeq(1^)3VOI@Xgsy%4|HxS`ZIn%EZYpr2%z1LUiEQ76x6EeO>r$+aKw7R{>wFA zw&}>ePB`jwk45S1O&B|vHEJ>^B{tRurPVGD0fM4y^`;CYjiGU&v8A>xKC@IG5_*o* zD&mjcsHJTf(mT?WOmIb%<~#4BtH-d9wB1IbejB>jr_ivRK5LSk$?~>kUzGJ>$@wNP zk;zBUa*WkmN!coShpQ3e1<~Oct+R=&L%E86Rj z8)E4^h5e8RDvc}FnM&4zHHW2zu6Z-s!05OK0!N*$oMdlG_QwVy9H^~U?Nf&^kbBv* zoIQm1mUbtd6ZHJ~(5@{S#QJy08kB{hne_48rQNRGY?HM$<4X=bugZx}=Fb|ltV8fr z8b>&w(pF@XIANrw;wkO*fmO(HlGs2Nj7^aYq(w)I^M`e)_e5ae#J$kiXK#e15n@Y86W%D^1s93ay;ZXVJ0-!YGrRdO9p zc~F=)}3jxP(wq;Qrco&Up7E*MCdjb2%a4%T3eu=f=2iuhoNM?Zm363QhF5+yFp(OI+ zprw~8kCHaclRlg@@x01vOXpP>TaMJa{a8K-^dVaqdgGPlV;^CK=J5!d_X?Nhg|oNa 
z8;xq#K88-{&3moPIW}-?qZa+O`!Y!mRYv4sel-FYS@Z?cS-UU7oJf#!M97{JfW^LO zK1HaP@Vo#CjQ<6s%YV1K1<^#$Xa3TbqmOm9?qNNSOXAn;<=1M4n90w-Pk97ut+j5y$p=_3< zFNj)#5H)$9oD~pz7OPXZB69E=rNkOipVCV@4I6p&MlJJWpGquRKWTmra_umu15oDg zT0TWe0f9mJ@+pH!1-wMbuV8ZWhAc~iXOw^(B?Dkb^tOlNS5{?4k-wpHxg3ocrhMM(!cR%;0bLBdmKRj zj7*V3M*rY!FW$D|nHMBqRS_g!Rp8u*%mO2|6_5dNOqvO@`v=EBYpWK9GDe<9qm^<( zBtU8lDp*?c(m$LnDSvcLVmlCmE$c#CE^gmi%LiCU3=^0{%YHf~%ma8QK$cCDYKF+N z;FpQpY_)8(F-i}t6DWa6ilq4^3(ur~4E>7!1*e2X5N!oHSDH0NThc$A!-d74a!hh0 z2Tdi+ty8(xKN$IzuSkHbMt5*HX3>W-{$FS&v}%73ozmgErrJPY4h^XBu&$wIlZbH> zS`bNrNbbrgR%t#1)beRQRv)*7x0{M0St?pRi$WVj(uRQBX|gxCxIPzI zoKk82gx3{@X+nRnKFi{<@pxI*%A}(O_P6o_FLPZV^tK}B3nDzGP2-Yq#}oPkPi)eQ zDJeNDKpCXZ`RgdgSoS@-wh56?aSLsD8o z98;f5po5}qG@_!pM8ZWulWAEH6pf^z-4){ibKTo-7ryxR+n~r++~#sUbBk4nEl@Gj zMYjLIHBmKenB(-qw1$A>17~$~1PhrY7+*Gf2O=EBA7lXx@K#u;o*8?YpWv-bY8RtH?Y8TOzPs0c<$pNp8Tff zIg@vTp*JPwqmD`GfuE+u!P)`EpwFRj!0JX6k~047PbRnNuyu~#LJDc=HqZy3v4DAK zL;z_;R0Q)2sQ^!ta-4%e9=kq;;$;gA0Mut;7ker6SBo^r4el|QCf6XD40%$-r^Hx? z(GJcylVnBQOP!#tV1~55!SWH%;j-jdB}!@CRV)%N{p!V(w>`>Ww1aCLzT zKg@=!Ee8iH3wDpO#^H)3bV^H6=y-%cg%et2?7<>wY6*9l9HxShL7Lr8{SyX=aaw#g z&RZ5NC-J)C5@5ly$k&z~1wf1J^NeN#u-kHAza=Qu_OYjse{D^%o$WR(fyf9cJV7+Y zeG0GyqsD!zB+12w3r`P88&uX7*$x=8Y)AF`wX9Zihr}`Jy0>FY7Az-^3a})mTCkif zp+;n9C9XL1ZL?vi8x%1y0JtB5lZVj?xpm~2>oe<HrO<*TEJwmf)zqu4ijtEZTrgi=n+{D=wIt6YFPEpq1ZAZG$#K++O|U=Dq1iy-Lht_F%qXY1^Ajc!Ia_20hkiqIEXLtRQ@y9aovE}iXO|NRWdg+Zmyjg zMr{HgRGA@yfJc^gte%_~ z8T7Jo7%qnUvGbv0{Y}mB-Zpj)zz0mjY^gZ@5%p z)4+ULdzM4bm$h@~g#~P~JB4GNz-_#Eh+V~eSrQ`7UXk2!A3^M2*`15bEq7=_Pu;7R zGxsfPf0jAZt;<>+zfPrX(02;{OLW)XQOW{aqenKv5NECb*a1Cj_Wptm8*^41JLl|7 zo10`qd82=zU+ACa|FNRp0qcu(rWEH7+xy-dFD1zO5T55!Z2rP?OY9@iq9}0< zQ9hA?h@pTm8)2c!Vdjz?C!;QH(To(td{2f0G!cOvaxNb*v{DF0B`Ja@ov)HG%TIS1LxJEb%MS*;SCJl|k06uw;X^NeyN`@}&VcpvR9wAbRbF%E z#f$}N#Dxu7Q=CS8T9WTY+>E`^ZcmLa(Up#kt)S$VpdF;97(HKhT=l_o9 zXJLK8wBH94G1cKyh!LIu9c^ze{JQY#X4PJJxf!|j>YCANV@!R&kdCQW)vK{WT793D z2yIi{fRh99h%NgDZb#w?;s}UsjR&@FIB#Hng)E@|fHmF)9PlREKbSc6g&>+kJdAF_ zP7Q|~!J`qiNZ(_AgJIQ;d&eM`Gz+p{PLyTbY{Y;p+LM+0sT{a>;t~Cq{GtGbGznW^6Q>@7;wP2#9B0u^ zpAsrdDrDK*+ot!n-5>V}NQL+&DtV(2ECdms(1KN|ACqJMm--QxZp7_=P(ey7g(>@? zB(y#h1h$!_)eo8%o^`rbb7eQ@A0mLP$D+1J?`uoy*?y>i6(f_8#Yg@1@@^fA5j< z_r~|zLVxTm^4=qb8%ym0Lmb9GI9^OdalGn5)YGry)=@l*9!Ly3>-u28ryd;pSP>3p zyNu^x-ZR_B5c$EPN9H{T|K2_Ce7gAF~_mFXp@8PTLy;Ue^y~q4G*1CP~@hRZHreF5Eh|Y(1+s%Ii zYvWWI;{cGR2ucY8pOJ%Q*ZlB-0Rl&T9dd3AN@*Xa>Ujs?*>D-HxE9* z1GqOw-|Owey+c#Eg-dX6slGRV8264zv8WSeQ)hL+&ep! 
z`@k8vw@KeSbtCRwn#ygx0QWA^_qLqJxV_s_*_M4H-Z^VFA@|y7vy)vK@eLh?OwQ?S zvm}xoi7h?pB0lZWPvgr@zL-yYBzn<^v+wYYeA=L&y84g!AfF<(QJ3l(2@S71k56an zr{TpXoX@8-CD!$b(RgP*w%$?o0; zuByZOE5CLf;65A>rz~Io?!jEEkEhdn@v6Ic^}litUUm3)n|!`qcC9v){m_pm ze;SArD7^On49|_r5s~dWHh_!R8@fqVTUl_kK{?+({@$cqGjC_<4mrR(;R3q)-Xh?7lt|Z=|B>6ieHHxg33zzfR zuzuFtH)sA*&i;w1(Uq%@K5he_t<%pAJABRBllTn5#HTehw<0a8+xDw>+{|b9q((n^+nt~L z5})0zpMCsOx9_@_&z?+;KJ?Z6V^8zh?>SsUQ|LW_1#50xsb1HktzLC&*cmDuCN0(SDX-p|vv3`Rw=jbYUoWJf61h7=4{j zkJC@vc0T%RK0Pj!n}?^~O}Fpi)4BSo@2orinNR12a>w9l!<3pPy7>~rjc|WPiwY4@KZirsh`$<=)s@y>B>;`E+?G zcM6_%-~1E-JGwzXopsAM{)C^o|CmH4rlJ@2J$`fjd&=m9{j8iW*!S%6j=!^f`TO?u zd)053DgS=2@}y6o_Qn56rSPx#8yCCHzUDvI-QotmLqkoT)grI1`}5D~E# z;mmxfTBVLu$Ef4giRu(}x;j&xtP7Vv^>g(Lx8Ha|eTv76uBY&^#<<(~vl<5NdP;o;ypn-@ zwoW`TVWs&+<1cCnR@dX|+|*MzNnB}s9~o-CC}p>lmF-4WjJ>K}-7RJBLRmLrvwz9> zt0K*RO5I*o7CFX@dyO{~ssH2Z^C&yhxXgH7O;ekt?5?u1NzBq7oaM=`Jg#m<*|c%3 z@gvo&E|9X!18w)W7c!>AN zatDvA7g2U4;_v=m%~cd8_Gnobha0<$KdSkP8`$IO7bx3qTw;7%RjV_k?EbQ{ zt;R*hw^U4RL|OH?dNuX9x@X^FpZYiwvOb`)xV2mDQ9qEno>E_w*i>A7DDnyQS@odG zsjsVlQva-esJKBsuD%%hy1L5me+BXI$vF^KBTTt*QlN9 zMsfzZy~tqz;@r-Xt`PKO29;x}#*y z3;YPF7#@%QRq`3)>3g-=bxfcRm!@fN~c!}KFZ-uM)zPvK* z`{y72_-Fs}%U}PFyhZl=q1^o}c{jPi$Di6QT+x@Tl3#f6E4fF%{*8YU-snrA+*K^O z?dlyj+)Tcb-MG>!`Jroe-gwI=K7Ge$S?87D-V{o%S$7io*7HbS$iuC5;^t8&oP62` z&pPLPIeCGTOR>%*dFJ7zhmpAObS5P`ks^M<;$Hh*Q2@BTb0SWUc{&kZD2;;zBsSy2Qcm?e&e!xwzU9%HeJyf!?Q)1G762{zHJlj2IC4`5hBwTz`aC#4NiA9h zZdtGZM_}|-W;qEvMr-7(stqGYbuaxTjUV^HbRw z`*L%k)rY7DqFE~Va|`f2m-!W^Gqqs0W+)qpWUCN*u%#9^t6^Lb?OW)cr<&4<2Ao5~ z$Ss6pX;X7Y4^E@Fl|b$z1ZW;ahDEv6ASlU||iAAFoF8KS+X{-z>)mYhGRaJOL z|CsM<2>799kNNz{jCdOYfxltHUT6DGur5Y;;Dq+r5GBPH-XoH0I!nH|<4ZR{+)_#oII7;6jE zA9=fQqYd~g-hTUvBJf=k+NNINC?g+Y`_Q)2ItwAmX+}n0W2oT8;vvP%xnejH(jH=l zGYByTEys{E7bb}n_?3&IE812h3VkSTfWp?d2i;mPNI*oe{*pkn6IQKCt}5Km&)=|* zpkU)+!~UIYMJsUBYrx}S*-5_koJ4*knY@Xs0cymE2AFS`@O$7GNduyPXferHAe&yn{B%_)qosEZ>?JOUuZQui% zk%Qyr1DKE_u|nST!5Br7e&!pK&=#b#i22=)2hE*5d~gUrw@k|`5&7JMKS!p&1=%~~ zy8L%y!gH5!gz-!m@oN*fb^tBZ@GAZ9Y3utg>zi1i|6Qow_|2+S7p_{RD*37ZpX%D4 zv%+5{;gTEjluFDPatYu-j`;_62WjAr7!2Ah1Gz$AW&*r`K@roI083~X4YMO!8RdK- zgi~~SE5Zn83JW~`f`40;1UHS6!YBH>lmRman6aUMAxsHE&}3*qF1bu#$w~uGmn=Nk2h#M3u`AiX zFcLnDIQ7T0tzudl{?jLX~40sQEU<}i*GJ~Fs*ik9zdymHTw>-bmgj zJ^nWPdmn6{Z2V>}4j+Dpuo|l&IbjClR?$OVV&svi3abqLRg%;Ix!{uQZ#UBF?xQ^% zL!wuQa#)ry@5;?Ye`le;&T2%7jhPPR_WFZWp>QlwTQ|M22^j-r03C?WI2<#a2xkze zsEkD8)iu-V8)me$;s6Lr-{#45B>Fuap2Q&js#GS5fpx_BgMW(<#=Ikr&koMa=a$`= zTzbW^#~%A_{QYX_jZZEu+`aTM>-Poxed@9smvVl#EIoDUjZ3X3Lrc}tWj7Xny6nbM zSv)>%X%ciwL9=AQvf%CUdo2ia-rj31(+KR47Rk20?7bG``jNNahNXmAR+sFvvKzow z2yBhs1Bj;LIcXC$Qhm*Z4PxziP}NT!dMNpJ@}VE0LYogD58A4C?E~-uc2|HO-1fo1 zq4ULkpYQ*J`+k4fzK>dy`&qAo6wOf22!GP7`}Kaz$Zce8$c&g}@BU+bqDmhld}sfD z)<{kSIe}5}BH`5!4o=Y~M=b!A){rvG-uFNXEHh7^9qyS}q%ZHg))R~DhpwKi*+ZJ_aJ)s{((QQ5@im-h98Qb8J#H7WJi=bu+E;zeIltuEw~@;c!80OmJqyoRWVH?Cx7Q9BdpOG={;;It1tGv{ zZyHqREMEK^s!DF&oO})aSc?j0n2{;R>{DTLMxkH%3U5k@Qdt-5fGr5oa$Q`ktYPCN z)u!LOs_>wGxA5jS3rN;Sy#5Y89#;PGz-Q>4>Z`TIdDll+y-KYdQRT@)ayIvT|T@M6`&vJj(S`AVU2|FxB6kX z?h3273s+gqqgB51meu}=rF^`l_N`rzmcZx*5%`a*o~quPm5+2^cDlHCz+L)#a>ppGeAz4MTp4zv1P!nw)#=`+p#!ruyiRo@0y6GhSA zQhArrsQXK=AmNoQG{vT9HL9ATK2!a*u;qEqS>a9fxk6rDVpTR3zJlc0_=<3W|+_^{3 zx$@eRPua2KFkGNjGjc@QkREY`s=e?lCMukp%69I{9+uAa02_`-k1Xiv#3LsA zK-T94_<{-t>C3VwrE^Rfc$!$&Aw&k;7A60s3GeJs4#^+0XNa^QyE&bW!j=a&aIQQa zuMDI|79W2MDqRL3o(CXy4q>k5Eks(0o&`A4bhWR77I`SQ_!#`??LQXE+o9aD<56bG zp#*ayurTMZLh*W@hZeZITR(c#9{@sszU##b>x`ZjV}}+Er^P7_|TYs4oyi z47%W-qETEO?F|M(6#?}?r~)MlRZ>j7B433r6bw{^@U+4k#dYESU?>m>1^)|wxH2QU 
zShIfV$1Kgi!n!z12hstbw^aNxy8|fAKEMIvzd`}@L45#y@Zqm;nwQ;Vx-sx1Hvap7 zZ*gE;lW~QnxPl-!E5rZDZ=Io=CG;#vLbg_f49v2QRO(C^YbQVj!LqXsHR6&2L!eA9 zh8fDbhH^;d9YKoI+;ox?CuTH^8;?0mPj#YxI%dxz8!{t_@W5cW-4S+s!pJtx%t9)> zP$B(NuO<{%ceJ*w>1b^|e0{H)GqZQL8Lhp64oP+?QJQ*Aor+`&_k z@$>c>r)aZCqDKP)Bx{#1bTur!-_eqio0_8Zq*tuiv|?pnOD4TMoo<{!U#?%7>07aK(_zWkEv;!3 zxOobCW1SyBZ+x_&mz|cR73bWtYw`K^2**<J?}l{Y z2qTZPo=_XoR|xzR(W_E85pWg^H~=P^o>DnNRaF8^!J(`(4AHxgStPs6`}~!f%!Van zT-lB?>*VWpuCsQL-xn56*gPinMd+wS>+Qb4a}GGmxnvSj#8g4Iq%Ure`<$nO`&$@& zvPiza62yp;rY8ugd9ArqT_^6m?qtV#pT(ATr~3WQlTO~Det+jV=k5ZIPJl}USyMzp z%dMGEfP`Ns7Hpds>0U)*JVVG1FS0H}g}iB&TJ zlgrMe$i5H`g=)g#uC3?nQeGQK_+jnu_PX>VD<}42kz8AI1b20njl&zSI7i%%>Ej52 zr{0ePRe*{;IS?$BPfmJZumwAS&3@?Z_&W89vN1-l59J%jZ`l1oz7ykcBVP&pwdctc zA8|opc*m7w(hLx0qm4|J7YGZ4AvOnwvSt{R5a7x5kDsWOuWi`T!CBF>^eP!5dy-_G zLbJtnRFthi0HFgPqA~j{1G17sxuog3MBf8M$NYxf*Xp#shC5q!WkznDAc={Kq`kh+3>MSn`VFe1&-Lb8#P{ z4~XY^+Q2Vv79<;xNg>&uMiJ2TX9_RfK6CC+XL9ZkwYyO2y*1}(`Hd=xr0dY8A3Ibn z?5{+l#5CMnL8k&>{zemWJ?b)Q6G+gF^EK#{cxP7c8?AJ*921;d>@A*I?CofcJfjz8 zj%knb*q=21@JLe$cFrPjnp4n>tWX{5zEtYCE3aC2!j)H@q?dW&gVJ@!RoSbqT7Sa! zw2kI1V-TEhor#|p#^J>{u;wG|c~E*za_ED?gGVempn^y9?_(G;b{%?{N&pZEX7Edx z&r0V1_3*$6MCxb4J){Y5f|di$;ysEKV3l*nN$ak06{=gzbuF$t&OB!qy!!m>w_kCB zpRyzWS)ac3Z0QqW3(N@WKFR3}^T}o-snH7QjyRP_&TB|&pfct|sEn;iwfhvrrk1N9 zssm`$4`$+$?rX5EDK4ozJIsE7qQ$xEoHOr0$J8^fYu25#1KpzX)PMG^pFYd)KjDh) z>j5j^JET^%M70peDKDWHXb1v?!UaU7iHjAu}GuTq8 zJTQ-eaxkM(73(f{CY`r#s#L#sU3R?Rf95B#(@}5iM;>&mJfvxN;Rk`U_GJwix_APTlKJ0RkPwCJJugCjJM7+XfzmZ9P^12B79)U4tNlf?mQ z`q68*$61zWX901y4%qF1jFkn)fzQ9R=jC~sBMjhVB_nK0v|wy#5aWdQ)gEE8G(dX} zh2Dg5e&MdhczsTZ*XLxk6UH~uDcnoy&s-SfoCi-YFr3uV2y=VGHB`X>t%cn(n}YOa zg~pElCjx z96bl48E>C3+tTb=3TqJ^i+zH%Ycc8yyj$hOp;ad&K^;&FV0dr;=ts6zV2XMPVTtAh zzTd7F@-2q9!PW(a)ZScmy`7jYYLxXfyv>@$0s>tw>$8bnHf{A;R<_OpgE*v$ngi6& zI1@@}mt?olyY3)Vrsy}d>3v!;jW2s{N_bJMo1_sSkvgH@ge*C4N=qg~OS4W;GT<;p zv@m(0rd}09!L4IVS#?jeSy-gyUNZLxfB`)vq9cfq0CW&GA?Ujupo1z)Z73kH*Agqj zo_XM-aM1+(3x9d~>EzQK2wT;fcI@kVZGIbPk04H;8O>dZfdZ0|;5o9sNv=1q96j($ z)~4hq41n*(N^C1K+zG9@=#EcsNH#RQVk3Br=gTEU#&{Vs^dMj`u;K~Ck{-kn z)_M?%V$0MZlin3w-h_9<4!+wxQH6qq2LvG>!@TiJ%QApsb&0GgQNFrtSyK&=TFaWk zWM}pI%2i=`VMfQF3+3zq#&2Sx(N&%T2|()qWdrFP`BdYJ=`n4L@Bt z={M4YKHy+kZ>Ac*hwby^gE2z-3Z+BVX7bTK_J8Ya$U~|nGKGN*ddmf2N3&%dm41%j z0XqmTHn0Y0H2R z@dSNf!*Re!i-D|-lSg(6e8^2#BRyr-J(LZEa|V_wZMj^)-5~V-+)v}-oCDaLO^}fR z9+#GJyv5ED>&o;aU3*gV*4Mq^y?Wh->n*OjZm05G5NR4$6Ekflq)I6yC9HQ=_GFOf9@YyD6J})mzV0$+TDAaKp|U4J zC8w=G&cPO?Rk`+AAQNu$!DJCz-(0n!@J02Xs_-GTVH}?z^^q%s7M;5uDiK52a@DYd zoVOD*6NSc8VN2EZ$d3ALR0|E;ggR$f8hV_C?F!3ucC-y}P*vVus8H&|sKM&sxO)C! 
z`w-zh3j^wlg$?YYwhJxsq5oUad zHy2#~4b?bvXF;b^ zH~X^7DK>8n=ORh+KIg4M(bgkTbV+z**7EIF@abuh?Bzo_=NVWcmks4MY(tS55RGIj z!Z{yQm6SwM{*Y1-icwTDLshq>nKiyjL6O6h@r%^J3X`teWa%!i7`%a6htThwicT++{YUKAEXcx8VfZF3aAK!6ZmTV-oN@_8JoqQKb1rU`2S zky54;oRkJLP9^~Vx2&vdnch{pg2xiXIm#0?2>suV+k96#QPT-lI~$8+JAia4Y?6;TT-9|=9kYOMFvzkrhKTc*9^mArtAPk`$b=M*;`b^w z@ze9?wk`lO8LGi&;hn^ehjt{I>VgeE-=xn>2j@PWsC}K^udh?5E<5qWWsM=Px9pGO zuQXcU{S#zXw?QI2jL3WEWA!sq#us60l{cO;zHR)#c-dhZ`Qu1q+5wmIKo)XFHEfEv zAUK~hl1`6A&YOme2Mt0QBA^Ti!FLl%Y+)A6kL&sIF?@_(dDU?h6~guh442)#yv)yCbt)~EyV2IdH7CNFc=S8p%NekYXuVdK7V?k9Nb zkq71BNGA6P9)3MNy7>&Lv41%#*^DkO-ij)ATrH(;%j9;T)F;vBtM5dgKbIN3#bL0| zH-bj}Je9q6U+x~zpL;^N6EK@CxcqoJe?kjIcqeQCDzt>MpJL1R;mLie+^6u>63U$( z=*rE(sd>)nEI4OA3eE}To(DZUJDqz0WzI|IpMQbRp5KIL&xf)PvyvR1yb#J=iIy&V z3EJ%|fJEDV24m_gLb=^|d22fN9EvPX=bzinm!DgTmv@J?S6dUb1mrP)jO{L0x97M z;YV-0Y1ilOy`Mksi{x&(6Ik359y#%ox82TLpNeEp8_GRD2a@i&i#`Yx&%N+4UVreB z?|cuBbC2c1kB3p_8@#up2*m=C6kT^ib|6-w8(^ z-TmD2FT4Z~;-T#K!XtB5oSx>BM;=7uk3ES-?*v`XE!~8xt2g7-+jo88UY6Yv9=Yef zFFnA^^CGzm@5l2kx1hF|dBH^&U&`Ac|A9;NDY?HdW04xD??GZbRQ{9HG!34b(?z%c>q=J{h!Z+8=9XesQ zD^U|%)R|lotgUhPzBa9{E|~EiQdgI5I)CM%7tNSeSGU;L@2snvc3X1!{%QDkZ)Zox zi&NKF#phq62|=t+KIP+D`D~R>^RRd<;PD1RA%4c<9)Bzx4tTwVBWEvL)_Zh4hCc${BCT|RPjW*R9fDl5+Gs~ah2_jJ00thO zFZ9KMh)yBQ70mz>cq`snz5a+pGwBt{+09L}Gc%gfl_!}&U(kuqfbY{^@dv$b=fi;t zk1PELcct6w{)4B=gF_hhZuh%?GuN0&*n7nEc^@Zym2FPZT!VtO1hCx=p2-9Lh73}N zATat2_Gu6}=uw|vp@NmJ&wb8S8B{AC^jEpR+wW*_WWF2q{@`iT1D*>*f?+nuY!qx#*(u)jJ{Q&->C z-qF=aB_0@p7nLOj~ z_C;g2pt==*QEs={=JPsyuHWxl8N*F)@IP=*`m+ArKbof5B{4Gch*ZdwTj&rD{P~c? zm=7_75YGN}Tx~N_*=Zty$;YR0pNli%G`yEA3y{21w0%xw#>&x@$O%bI&LJ8Gp7yae zu?y9OVNHaC65v0f=MbcerXNSqYV}s2G=Se1_JRjfH+h3*!WSqF;`d`?WVp;On5c}C zB~7%U^h%;^I2ccDLkXue>PYMH96Hi+8%jml1#BqY6KyEzZj`MR8mWg<`Q#C_3Ku=bgrC6>p3qO6p{rAH?Rh^avp{c$JLR)+_KFU1uz_ zIBBRA@)4dJ5er|$9Q9MVgVN+l4A;GJ62}O2lQjRN27rg^gmvVKT05_!rK6qV5K);l zI$*lyUiHszzc=W3x#=W_B`tW<;r010R(;4(=XMuHKT7n%oRs%7MbA>)mD6*6Cj05l z6ua6xRafDMuDcp+WLIaRtGVh~15lf|I&Qc@ zAM5x%;kPxQq@1y)cy5=+NR)B@yN{@UHFtrZPs7ooC6#U52WfY%4jU4(vDArtFI$VW zX}A$f=UOoye;wc*5v8Oh7o8hw%*oI_HeSq z%W)hy>#8-&afmqQ!VmGhc9xviMtaV=3Ps^^mYtuK1a45HW1!m%^q1-yyu4bb-^Dm`z zBexu<&s_gp6r2Mo2o2Cek=%;a_&z+GYidVT^J6ICb>NOWoJ-W>ds-y7XsJj(irk~C zc%|)K$wxvo0W#04($m4>y{XDY&#m%V*eze9@(B?GI7|It z?LnWF&Ch5SZ@vE7s=wCguU-1u({@SR&#vVDiW3~xi(e}BY4lI{glAFh1dzr9{3lqx| z^T3<|E&Y0_;;#DYn!?^z$BNpfmRi+%_uXgIHaT0h{`9rVzRE!B(iw17Otv;xgev=B z%c%9`AWfi@GiYlmwsX`Z)u$NYl_pxy7K^Jltp4HZjT7Z47&3MguA4OTucDlpy)?Nc z!SJF!oUKGpqtN9JV%9zQb9XVbOD2wumEtcs7d^CQ&FjuqXH)GNci*jAYg?LXS2)5)yGRnF@>twQ?d}b6ZYkrv2vSrDywc>c879Q_8|Pg$n3c_ zSoX5AQ@PoAhIj;sLoiwuZRqGjC?e5!LfOf5E?o4a9Nk=~&Y|3Fs(|PLF*qL=2v?8^ z<7c)flC2-gwuh-S@`(zpH4HnJ_nYj&CNYL<@<3-iwP){@AgRpBoQ|C(nngL#GVlBZAX~c)o;R`9@9=lxw-lkWAAMCS*=J6vZ@w+K(n@%Oz;|Xj6)pkVNO2 zfR{El4%^sBy(W~I^f?%F^Ha+z>Q~ZfFjC46^>AY&uL$*KC=Sq|8q$l5IE$Lkrs$GLt^ zoVbaz3(|J#G+$lUNt`x!9QAVBG;Pzg&DkbtE`#5H-tO{%MN1^|{eGXL5WoWacHX>s z^XARWo4*MJfEvV$eQLc$#t z;|D)dV0LG*nu?K~oyt-U3V3ckTlGiV=*&L>7;rRxm+SU-8AJSd4ZCH3!J)>~BaHXNY@~tKNpk&+)L;Nb6(~Ngm z1ss;S{q%P~uBva;i}=MML*LuztH;+Fop!6C?{55rZoY^vUZj0^lz&Njux0*Eaie%0 zcKDYFcOhy6vqQeP#KZXmFse!l1hg7W7tmiSzTj$tzmeBjpja?M-djkW?<~kY98o(5 z!E$T?YkG3Tz8(UF$$6CRkTuFW!0{Gkdk!KNhb|a}tj#*JdDL#iNfcNF$YbN{u<9q$ z7@_%!uzlxB`*G7NCp$upEtkbLTZT?0?K`hxi^F0Itk|~+)d8|zhtZila$E{> zx7xEfP+j=WnGq~a4*leLp)TyM%bVl_WP zz0DVKTXV5GlBZ82gZKog!_g`n8j%w!x3C7$6oXF;-WNTc`El?IwKk9=;@4vN zur9!c2-pzuC~PoE+S}5Yn$5c*8T6 z1B09Py+wNIfd^hf%Ek1-5zA*F5rdG# zJ2)}Yq$LWZB`D`&&ST`kh%ir0nu{5Q2!0k*&ygUiOa1^@5-D_=6iP2%=`fTM=|bmY zUL}x*CC?~@I?+iCFrXAt)fej~U%SOy4W|(QB*UJpi+~q#s@^^UUdKG5PnDt>tCI^p 
z>~N9%1Z2{O^vrrV3%C>^E9~={v*4TB>(NW!I#wvI-&Dwt-gQ(vsp8W-2W8!Z;JWKj zZeVct^p>8(Z^>zi9`$>1yU?xf#p!%8LdtX!0AW9R2WJVg$?=XV9Z_Zt z|I<#IC%zIK(Tn^a=^f*;y*}Y1AeWLdqqNr07{#*b36y-nLEPyBLnsItK1ksuHG=e9 zpr#EFj2bI2t$a6jqbnp|?)G#ttjHNld`bZRiSa|-D=TW~X zDH_itC_8lRMfHLF8T@%C(B`>nARSe$DLT_YPoqSy(i{S)^nn1#>iY;xDi^XaJ;frO z3b5fA^Dr4}n(l}7g0&xRiD?}f- z;VS6F+6o$zmhh#5F)-ozR4&Sd|Nr_$hG+6oSDB)X{mQ(dQ#Z>w;wk97`knKJ#_rs# zfv~9mtu%(D4`s1L=+?v;p2Y~yHtiJYGn_d70s#=<1Fvd@e0wL2Ar9Wq7($C8KZUaS zEJdoIM`LL9E{q{+t#^q7C6wBvmn;e)g3K3B=>NE6gFZfsz)RI`SL?+|f{rnAQmBTA zN>7On%@9>y8o%Y-eHBle3UUbYhRRWML6t755JuiwI{PVDa2-2dE^~y%NrC>w`KQa= z!mBzDRk>6DYO`@s_RtS>0sbc2{P)C*#7@wEsc;O~C#QgXator|cL@(LAa89h$de`I zZY)7>Dpoh&O-t(LX)LLG3)Rx`(`43N3R|z|&{bsX0b%6!5>Gs9EmoUvrXM%Q$XI!> zAn!R#>2k-hN*yXz_Z+A1_8dS%$5Hkl#j%=ix*IJh>tQG-Zo8ctTklbD8XL#1m44jR zQ>^x%rkncvV8%QIrX1e`UGCfsrM<~J;dOb#hM9|Z9Xov4iPv5E`){}pC83eS;#16& zsB+pnVYTME%0t(@{?lPJPh9`T`{*~9`j8Zg zFK+fuI1v#{3r9~P@OjrAZ>2AreM+*4yBL=EjZNi>8pVOBUan}?LufG!6@n?sO$f8Z zO>~NFm?g~y$z;hD3b({K%fKQfLqtS-W}MuuWU`2@NFfx5$DL&sM&o8bz6zENLHgP- zKwq=G7TBtN+ca!M)Dp3&_4{C=?A>5ATPzN|d21+|+gO=tism9sGi4^>=~qg% zTW7q+tIX_KqN7lozRKj?+C%cYc)r>vw7}<4M3m;wges0Vz}AJ(B8wAA##J{RN5LWo zc|)cu%%@OE&K8ssSs0@QdH5{V)sj0^M+8pDa3|>U`!FOaoEu_10%Abg%j3c6&5G-z>(^1{1wpIMfJyKmj?u=osyk=^S=LtHaO zX0_K#md7#P67Crr>#;dQu}mfw`m$z|gtXV{tAiWo;`e7>&2JNh$1phHH&)lz;J0=b zXc@m@*CWSM%e`KK-!OBR@>{Iqx65tR`HjHjo$_K zo|n;HgSc+@h{52qIKXntYcu*MDul;%PP zVGraG+ExgWaui(Pc(fp=OLJK*Ev&B~=g-n&hg>n*GGXZ;@ePsqaz+zg6TaiwGR<&L z&Kf=bZqjjauj0x=0ZJ&<=SX$o63#||hjQnT9P-L}LQgu0u_Seq@-%OU;64cb@t{ZENQz>InuxiBkJe&F=Y!|S=%j$h z8)>DZM=s6bl;5~C-BbBP{B9wmQ`vYs{-vo^I~B=o5Y$X3ir(aG3u{v0VYTrVgZHb78>Z@oi7vX9tZ%c!jV=v$9;TBr{e zd;HOFimmlj{_x#@611z`Fgb#sC7VGMZk+!C`>yyQSih6PH-u^*^lCLo$(M-iq>y?Z zr2N*2Ab^l58a&WPPIq!vk>@H?oLgt=^T73vV2PZ@et3OB9$z@QvcqR zPNm_TDp8yHPI%=?+%r`o&lKEJbjlMEl-ZV)1-^H9ItVooAroZ-z9`{XiDJiDG&zq0 z8791jeM%6+3!eot8dQa>q{eXpM}F*l++i^+I43ByURu7);kXP;PV%jDTU0YYRg063 z|HAlS`E|Q@jSTPJbyK=KxMZEnXZk0POx;Sx+pVzJ)HlT(xoJ@X~Q8VH|p>J`7bttu|8(8`)xW4sTa8LK*NT(m z-EX1`{E$%sY|ry86C7PsYXE*t3bDcS1DUGkdWhG#cJNyBDHEVKe9FXYVr>DhIq1u( zU~1+$XSA*a&W&rg&754qqy^s8Mz!8=J$cR6rCeFg z3oCp*cMH`7S+HP3)dhZ3vy^@j=83=9PR^xGiEeNL4l>9mlab1nS}1Xi`9x>c=yZ<< zo3J!-qB#goq?_q&8Ujewg(wO{oXF#~In6l%;ekLQlh3ELq__epQdEZ6;ny}Zy3BA9 z0&{S=qIykBEI+hs_wdNB-I)c+m^yNDX4|!gr#WW=yMKT8?i+WqzgQv(S8m*T&B?7B zui_Gdc{zc}S!Dl>+=q;CyHJJe7zrilB*Kl9%YZaON_Db(s3UgtiNT1MlM8c?MwfCq zkjOztrZEkxVUFh>hFPTKaEVgTXPe{g88XGgl-gm_=Fn_&G|8{p+i{f=Z1n0G{TVfU z47hC7Tg+p$YUeSveyL5B&)rNC{-@~t-MMSFZaaDHwy`3c!n{DgDV*}B^!3em7bmt} zbIrD`ClO>~=&>NK7q7y}+{e_K8`UgHKMqJ%DJolSqdx`wnX|MsJ1`l7C3FG18Vq_r z&%i;vb*|UQjlgaXPgsGnt+w-1&+Q2eHS{i0(O`vh&EhIn!#o!(j2MD40FHFiL8>Bb zhF-{k!2){^46#rMHbFjPB~^rK$x=)Y9HI0a3R0J_svtJ4OG6+9_c#f|m`E#j$|Xtx zl90BPDD5i_jUIU19`Hwrq!27I^Ncxj98<&2`DG`44}O)wmM(}yCDSskUhH3LCw0tazEf#+NdAQ00;lZ#4#pwsx0J~csDkg&TH-Rmi zJhVvB8d<@oSW&L1VLOEWl}o_7O%q^F6AG>>l?@QD(Wx1*M;YFZs{_zB$kmuZdb_QA zcw&k+98F%u0pC1SFswfEh9GkTy9&*x!XXF-`jI?|{RH2$1tdx#2S@onIopP68JMCP z$CqXbGyg&Bx{7EHTCcnkv(PLKkJu09uCNz3h~)7&e%u-~i}z!dV997%Z1{l zzSelDpPd;#QWU*b>+AOKKZ2}lr&u^Rc03R`KDKRCbb4$%FTea!i_I$*j|{(4+SJvv zeRHueY0Z#Fq}&omw2Oi7KSliOO8C|^nT2axHkEi@H2jxHU^!YsqSJrK8!@Sm^EHji z1Hv%C$!Epa3#S;Jm7P8ck(GGHrZhp}JPBdas!gKtvC--V55#8Na2V77@MgMtct582 z<+zlm+Hfg;#5PNUD*5D*A$hY`-hsR+pRx-sGzy04=72hxd6+rR5Gqv}iW6M?U>9dF zMW#`Rl<%0AeSyf)(mUV>BY96D>KUtGNO_}T2Q)0 zx@Y;d=sor~)cq_fUA)Ke59+#$!#nKnFfUqRgIuPa%x=ZJ;F=h5*SSQ_O!+Vg@)g16 zz#;usj8C>$ZROEeq*!*zt!g$KrAUxzk!%IWJ&^A##jTOL;_}K~$TjD28x@c85`TwT z90Kez;uJfK@-}aA%%Jk9_9Ee{zbBqU|B*{O 
zZE+aQPG1X-+jUQ7n{0+}vIla5agVX~j|NY?O!6mS-tR}OzJpG18Og$;bU``GBdT%~ zLWjD0sKBY645rd!RD=MbG)jL&{)bv@kFu-0lmG{$5Sj68TIAgz**5NzThUV+Hj#5) ze{-D5@L>jP$-&%yfUsbS!d8F^46B-tHYR+nFtc%VWoDo}ECxcCY#bSj4qUc=U&Jd; z^mXiIM{A!TPOyE49+(+4hWtHSj&0lizT=znVV9-6?VY68hUb4I#<7Mvg+Z!Z)uP!= z2%Hlx1Vd>}7)J;y^XfB&V%46)FBn3UYSF=dHaeGq0?6X3;u@e0@zqryXKt{1#S z6jdEjD^8VxQ4ZP8lu1EklHfrf1OQoF&1WemsM2b2nd@~@Iax2Rg~k21_mABeFdV(7SUyo4HA(*7J=vb!DClYG8ag%BoNw*) z_TY$(5f0^-Gx^TWfkeD4?)2_))E*9P@&pD4+Y()dfHBwaWY;_Tx^R>w)saFvwYkNV zHW(dml`ORn*-%Iv6Jvf}+$Wwu9Ha+(?t{Wtg(^}Lp@G3KAEi+UJ|s|7$H_q3g=^!x z=-T)&u1#r6XdYVJ3|!;>%oJ=jp+xJe+yn++T9CJ&mAi{P1+I?+i|Feo)eg`qypTzr z#MC=bR1V>7AC^ff&RFrzx+kXqV>+ZnH-V?V%=j+)U_b$<*J(Q z%0b#J&PJ1)01G%IPx<6YF00yngDfHo+kiBe3is8i(-LY(RK>QkB%qN_<#AQg%szD? z7{je`LomQz*4D_{UMZX^9vD3N2Vs9pOZP=Hy?rx7M^COR9vSZo->NU9PgvTzublq3 z&PX7Z--bo@qU`A2J-vO0C;FQHU0*KuhLWwJqx-z}ZH1og+wuo)-;fBlw#6-h-z@y1 zCDrm+z_JyK^p?I&*G}f#zGysZ2|)Hu^Y@A~;xNW;N`Ob&LE}UbfC}cnx^oW~tHTu< zv|+@gP`;P!Y$i{0I~s8(3OG?sm;&Aw)RZ_1JoT&DZ#WJ}l~_CR1Ii8uA`YGKNI*Sq z#Or7QE_?${0aeCnXwvjp>YKBn0yW2sPRlt53l#RWWBi>480mBMZrjn1D$nOTF4>P3uh+E zyUP8%EJ%e`pIL)2bSZJ~YIr@@;p@x2v*VlhQ&D3DKi2n>n`WqG~qk}jzD|4 ziq;n-l4x^K|C9myxb`vfL(=NdCs_k!AZ5apTonyR#U;NE=2d;l)*2E-OfA1~YjL7i@bL_Kz?)NjH(+2~!T9nqURJ6coEm z9*^mjZ=0M>*0SN?P4wwqHa2;3g4y1Oi>6x-<(w|6_{{D)^g1Kn21JW{$L5*sZi^To zR$X=+HV|a>?a=L40qy$pLKQAn1nAm&& z2`7Y^e92M~rqhd>A|ST|7Ef9(FUH%g;L62Y>9&h;CRy33bETW-6%~eL)PN~+|J}fl zaxtxr01h zpQ_fgQEosa{)} zYIEDIbU)7zWP(ZDD_$xfp+V4!K~+Tyu}{Q)oaZ^B9xb#*)yKEi|ny z@5(6ShXvJhmBb?TU}d=&ev)1Oc6#LVobKBD@5gq+eEu!9@Xu0z=wuF*&XOBM-@%gQ8lSiz5 zW@L9RWH`9((i7ehZtt3*&n{sfhvolz;cG&51I_HA9T)-}kdjXns|Pngj)a4!V57B= zjfUd=RCLQ!oZGx-Lkkq$WhG@ZE{qlDk~X@a7H^=``L1F$NxN+!*-uh!NmK+^3 z7)T`a%y^V5F^2h~avd&|i^>fc*_oYSd5DUqTsY7u@#ZRvA_-m;pR>dC^pcxlATA zX-<9-gCwo~7WT7nF5KdDwMO%ird%lI8eo`aL+wo+c<*kDc7{WlV8Rn>6&+G|xNS8* z{@21#0ss1j`(AHw8pDC64SRY=dJBUE{2Oe=cv_s@6O$MAE9_(WSc|WR?++R0JP&M; zb~cL8i}8k$uEaaxA<=XMa-RX4#=U-yvq+2OTUw+UJGeg!Mg5_?!_e~BiD3(oZ(S-u$@bNu$B+w!Ke!m*jLx@3Oqt=9Wa`la|MApENVcWqXh1o%F>! 
[GIT binary patch data omitted]
zjd9FB*oL_N5OW4(qW)nBI}r{?+WlXV{?}pbAAc^hXsmxk;GH^s1=;Yi?`LW&>Sr`x zrVNdRZGgViNOUc*Z!6GPah-`a`~2z1CwnrJxG&s5_w0{iq@P@O5|iuB495E+`(v?z z?Lm5?KCD9Hl50(3Y%`u0wx>Hu9`msb*_Yra*iesb!}6ql0`K?dUObaJ%i?p4#w4~4 z>p}C1>Pem>)|1R9?SB>5PufG`=(x#qA?zgA4#it6H}P1c-+Vk2%>_R3NL z@%2weeEVa3KMseYe#S8wU+Y<a$`{@{!s~$=JiYq(2!P8(7aCrh1u)m^|!b)d=Ig3(J6=`(!qe zjBkIxf1i)WkIMMyJAE0v55(35#}DF@aeLr8VtHiqYt|R@aXieSu`r%G0AqW| z_<4PtB-%{MgyVB0>?x%G2>dWw>joO$NYaZ4$M!%n7veUA(Ox$aJzrohRA*Eto)KMj<3Z*Yeev=Zb6U}_{T zvk&jt1N=Cij-O>zBJPR#4fuo)qkYl3f9se$FG!5--)}<$I?K@B4%KxJ?8i8iaK@2( zFvk9j)blmv42KL~-XpSU~#tFJ_d@byc`x^b^SlIS=yWbqQq)$k^-#;<`WBx%plh+07jPr-k zo)eDz3L+NcJ_2_kn=!vF!TzNE3hil?u;0R4_<$WKrl`QDfoduzP!p)7LV>!#=hS=t zmt55E1Ji*S>a)NXfqzk5fu+C_^@ku6{3X>B{7vvPYA5(7O{aCu(>%Hjbfuy%;3@R& zJCDA4m(lm`27Dt-!8cnJe6>ZxcUu%y1yl>T9{x4}H3PN4|E~dc0PX_n1%gu^zUQLg zi!KVj>7wDQE{eMA@2R+=lW_kUMEo+pjbIss@v{h)hu(YpG46|ZcXE%9@(T&cGUx|A Z{pN4K`}L3fzn`ys@^Am|-~U?J_FwB5@0S1o literal 0 HcmV?d00001 diff --git a/tests/example_based/test_image_plot.py b/tests/example_based/test_image_plot.py index 25908f44..9bc994a0 100644 --- a/tests/example_based/test_image_plot.py +++ b/tests/example_based/test_image_plot.py @@ -95,6 +95,3 @@ def test_plot_cole_spliting(): predicted_labels=predicted_labels, **outputs ) - - -# test_plot_cole_spliting() diff --git a/xplique/concepts/craft_torch.py b/xplique/concepts/craft_torch.py index f429cce4..e0e23520 100644 --- a/xplique/concepts/craft_torch.py +++ b/xplique/concepts/craft_torch.py @@ -1,6 +1,7 @@ """ CRAFT Module for Pytorch """ +# pylint: disable=no-member from typing import Callable, Optional, Tuple from math import ceil diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index c03c1665..1272ec65 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -114,6 +114,11 @@ def __init__( # set properties self.k = k + if self.labels_dataset is None\ + and ("labels" in case_returns or case_returns in ["all", "labels"]): + raise AttributeError( + "The method cannot return labels without a label dataset." + ) self.returns = case_returns # temporary value for the search method @@ -305,7 +310,7 @@ def explain( projected_inputs = self.projection(inputs, targets) # look for relevant elements in the search space - search_output = self.search_method(projected_inputs, targets) + search_output = self.search_method.find_examples(projected_inputs, targets) # manage returned elements return self.format_search_output(search_output, inputs) diff --git a/xplique/example_based/counterfactuals.py b/xplique/example_based/counterfactuals.py index 8b486dbf..086f8f52 100644 --- a/xplique/example_based/counterfactuals.py +++ b/xplique/example_based/counterfactuals.py @@ -69,6 +69,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ + # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -288,6 +289,10 @@ def explain( The elements that can be returned are defined with the `_returns_possibilities` static attribute of the class. 
""" + # pylint: disable=arguments-renamed + # pylint: disable=fixme + # TODO: remove pylint disable the issue is fixed + # project inputs into the search space projected_inputs = self.projection(inputs) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index ad8ad878..ef43731e 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -3,8 +3,6 @@ """ import warnings -import tensorflow as tf -import numpy as np from xplique.types import Optional from ...attributions.base import BlackBoxExplainer @@ -73,11 +71,11 @@ def __init__( space_projection, self.predictor = model_splitting(model, latent_layer) # change default operator - if not "operator" in attribution_kwargs or attribution_kwargs["operator"] is None: + if "operator" not in attribution_kwargs or attribution_kwargs["operator"] is None: warnings.warn("No operator provided, using standard classification operator. "\ + "For non-classification tasks, please specify an operator.") attribution_kwargs["operator"] = target_free_classification_operator - + # compute attributions get_weights = self.attribution_method(self.predictor, **attribution_kwargs) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 9d076bb4..501f4d3f 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -83,10 +83,9 @@ def __init__(self, weights = get_weights # define a function that returns the weights - def get_weights(inputs, _ = None): - nweights = tf.expand_dims(weights, axis=0) - return tf.repeat(nweights, tf.shape(inputs)[0], axis=0) - self.get_weights = get_weights + self.get_weights = lambda inputs, _: tf.repeat(tf.expand_dims(weights, axis=0), + tf.shape(inputs)[0], + axis=0) elif hasattr(get_weights, "__call__"): # weights is a function self.get_weights = get_weights @@ -94,7 +93,7 @@ def get_weights(inputs, _ = None): raise TypeError( f"`get_weights` should be `Callable` or a Tensor, not a {type(get_weights)}" ) - + # set space_projection if space_projection is None: self.space_projection = lambda inputs: inputs @@ -170,8 +169,7 @@ def project_dataset( """ if self.mappable: return self._map_project_dataset(cases_dataset, targets_dataset) - else: - return self._loop_project_dataset(cases_dataset, targets_dataset) + return self._loop_project_dataset(cases_dataset, targets_dataset) def _map_project_dataset( self, @@ -200,10 +198,8 @@ def _map_project_dataset( # in case targets are provided, we zip the datasets and project them together projected_cases_dataset = tf.data.Dataset.zip( (cases_dataset, targets_dataset) - ).map( - lambda x, y: self.project(x, y) - ) - + ).map(self.project) + return projected_cases_dataset def _loop_project_dataset( @@ -228,6 +224,7 @@ def _loop_project_dataset( projected_dataset The projected dataset. 
""" + # pylint: disable=fixme # TODO see if a warning is needed projected_cases_dataset = [] @@ -245,9 +242,9 @@ def _loop_project_dataset( if batch_size is None: batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this projected_cases_dataset.append(self.project(inputs, targets)) - + projected_cases_dataset = tf.concat(projected_cases_dataset, axis=0) projected_cases_dataset = tf.data.Dataset.from_tensor_slices(projected_cases_dataset) projected_cases_dataset = projected_cases_dataset.batch(batch_size) - + return projected_cases_dataset diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index ea859690..eafafe84 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -9,10 +9,11 @@ from ...types import Callable, Union, Optional, Tuple -def model_splitting(model: Union[tf.keras.Model, 'torch.nn.Module'], - latent_layer: Union[str, int], - device: Union["torch.device", str] = None, - ) -> Tuple[Union[tf.keras.Model, 'torch.nn.Module'], Union[tf.keras.Model, 'torch.nn.Module']]: +def model_splitting( + model: Union[tf.keras.Model, 'torch.nn.Module'], + latent_layer: Union[str, int], + device: Union["torch.device", str] = None, + ) -> Tuple[Union[tf.keras.Model, 'torch.nn.Module'], Union[tf.keras.Model, 'torch.nn.Module']]: """ Split the model into two parts, before and after the `latent_layer`. The parts will respectively be called `features_extractor` and `predictor`. @@ -45,15 +46,12 @@ def model_splitting(model: Union[tf.keras.Model, 'torch.nn.Module'], """ if isinstance(model, tf.keras.Model): return _tf_model_splitting(model, latent_layer) - else: - try: - return _torch_model_splitting(model, latent_layer, device) - except ImportError as exc: - raise AttributeError( - exc.__str__()+"\n\n"\ - +f"Unknown model type, should be either `tf.keras.Model` or `torch.nn.Module`."\ - +f"But got {type(model)} instead.") - + try: + return _torch_model_splitting(model, latent_layer, device) + except ImportError as exc: + raise AttributeError( + "Unknown model type, should be either `tf.keras.Model` or `torch.nn.Module`. "\ + +f"But got {type(model)} instead.") from exc def _tf_model_splitting(model: tf.keras.Model, @@ -92,7 +90,7 @@ def _tf_model_splitting(model: tf.keras.Model, +"This splitting is not robust to all models. "\ +"It is recommended to split the model manually. "\ +"Then the splitted parts can be provided through the `from_splitted_model` method.") - + if latent_layer == "last_conv": latent_layer = next( layer for layer in model.layers[::-1] if hasattr(layer, "filters") @@ -104,7 +102,7 @@ def _tf_model_splitting(model: tf.keras.Model, model.input, latent_layer.output, name="features_extractor" ) second_input = tf.keras.Input(shape=latent_layer.output_shape[1:]) - + # Reconstruct the second part of the model x = second_input layer_found = False @@ -113,7 +111,7 @@ def _tf_model_splitting(model: tf.keras.Model, x = layer(x) if layer == latent_layer: layer_found = True - + # Create the second part of the model (predictor) predictor = tf.keras.Model( inputs=second_input, @@ -124,97 +122,101 @@ def _tf_model_splitting(model: tf.keras.Model, return features_extractor, predictor -def _torch_model_splitting(model: 'torch.nn.Module', - latent_layer: Union[str, int], - device: Union["torch.device", str] = None, - ) -> Tuple['torch.nn.Module', 'torch.nn.Module']: - """ - Split the model into two parts, before and after the `latent_layer`. 
- The parts will respectively be called `features_extractor` and `predictor`. - - Parameters - ---------- - model - Model to be split. - latent_layer - Layer used to split the `model`. - - Layer to target for the outputs (e.g logits or after softmax). - If an `int` is provided it will be interpreted as a layer index. - If a `string` is provided it will look for the layer name. +def _torch_model_splitting( + model: 'torch.nn.Module', + latent_layer: Union[str, int], + device: Union["torch.device", str] = None, + ) -> Tuple['torch.nn.Module', 'torch.nn.Module']: # pylint: disable=import-outside-toplevel + """ + Split the model into two parts, before and after the `latent_layer`. + The parts will respectively be called `features_extractor` and `predictor`. + + Parameters + ---------- + model + Model to be split. + latent_layer + Layer used to split the `model`. + + Layer to target for the outputs (e.g logits or after softmax). + If an `int` is provided it will be interpreted as a layer index. + If a `string` is provided it will look for the layer name. + + To separate after the last convolution, `"last_conv"` can be used. + Otherwise, `-1` could be used for the last layer before softmax. + Device to use for the projection, if None, use the default device. - To separate after the last convolution, `"last_conv"` can be used. - Otherwise, `-1` could be used for the last layer before softmax. - Device to use for the projection, if None, use the default device. - - Returns - ------- - features_extractor - Model used to project the inputs. - predictor - Model used to compute the attributions. - latent_layer - Layer used to split the `model`. - """ - import torch - import torch.nn as nn - from ...wrappers import TorchWrapper + Returns + ------- + features_extractor + Model used to project the inputs. + predictor + Model used to compute the attributions. + latent_layer + Layer used to split the `model`. + """ + # pylint: disable=import-outside-toplevel + import torch + from torch import nn + from ...wrappers import TorchWrapper + warnings.warn( + "Automatically splitting the provided PyTorch model into two parts. "\ + +"This splitting is based on `model.named_children()`. "\ + +"If the model cannot be reconstructed via sub-modules, errors are to be expected. "\ + +"It is recommended to split the model manually and wrap it with `TorchWrapper`. "\ + +"Then the wrapped parts can be provided through the `from_splitted_model` method.") + + if device is None: warnings.warn( - "Automatically splitting the provided PyTorch model into two parts. "\ - +"This splitting is based on `model.named_children()`. "\ - +"If the model cannot be reconstructed via sub-modules, errors are to be expected. "\ - +"It is recommended to split the model manually and wrap it with `TorchWrapper`. 
"\ - +"Then the wrapped parts can be provided through the `from_splitted_model` method.") - - if device is None: - warnings.warn("No device provided for the projection, using 'cuda' if available, else 'cpu'.") - device = "cuda" if torch.cuda.is_available() else "cpu" - - first_model = nn.Sequential() - second_model = nn.Sequential() - split_flag = False - - if isinstance(latent_layer, int) and latent_layer < 0: - latent_layer = len(list(model.children())) + latent_layer - - for layer_index, (name, module) in enumerate(model.named_children()): - if name == latent_layer or layer_index == latent_layer: - split_flag = True - - if not split_flag: - first_model.add_module(name, module) - else: - second_model.add_module(name, module) - - # Define forward function for the first model - def first_model_forward(x): - for module in first_model: - x = module(x) - return x - - # Define forward function for the second model - def second_model_forward(x): - for module in second_model: - x = module(x) - return x - - # Set the forward functions for the models - first_model.forward = first_model_forward - second_model.forward = second_model_forward - - # Wrap models to obtain tensorflow ones - first_model.eval() - wrapped_first_model = TorchWrapper(first_model, device=device) - second_model.eval() - wrapped_second_model = TorchWrapper(second_model, device=device) - - return wrapped_first_model, wrapped_second_model + "No device provided for the projection, using 'cuda' if available, else 'cpu'." + ) + device = "cuda" if torch.cuda.is_available() else "cpu" + + first_model = nn.Sequential() + second_model = nn.Sequential() + split_flag = False + + if isinstance(latent_layer, int) and latent_layer < 0: + latent_layer = len(list(model.children())) + latent_layer + + for layer_index, (name, module) in enumerate(model.named_children()): + if latent_layer in [layer_index, name]: + split_flag = True + + if not split_flag: + first_model.add_module(name, module) + else: + second_model.add_module(name, module) + + # Define forward function for the first model + def first_model_forward(x): + for module in first_model: + x = module(x) + return x + + # Define forward function for the second model + def second_model_forward(x): + for module in second_model: + x = module(x) + return x + + # Set the forward functions for the models + first_model.forward = first_model_forward + second_model.forward = second_model_forward + + # Wrap models to obtain tensorflow ones + first_model.eval() + wrapped_first_model = TorchWrapper(first_model, device=device) + second_model.eval() + wrapped_second_model = TorchWrapper(second_model, device=device) + + return wrapped_first_model, wrapped_second_model def target_free_classification_operator(model: Callable, inputs: tf.Tensor, - targets: Optional[tf.Tensor] = None) -> tf.Tensor: # TODO: test, and use in attribution projection + targets: Optional[tf.Tensor] = None) -> tf.Tensor: """ Compute predictions scores, only for the label class, for a batch of samples. It has the same behavior as `Tasks.CLASSIFICATION` operator @@ -237,6 +239,8 @@ def target_free_classification_operator(model: Callable, scores Predictions scores computed, only for the label class. 
""" + # pylint: disable=fixme + # TODO: test, and use in attribution projection predictions = model(inputs) targets = tf.cond( diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index 5eca3c65..d4c61815 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -4,7 +4,6 @@ import warnings import tensorflow as tf -import numpy as np from xplique.types import Optional from ...commons import get_gradient_functions @@ -51,7 +50,8 @@ class HadamardProjection(Projection): Device to use for the projection, if None, use the default device. Only used for PyTorch models. Ignored for TensorFlow models. """ - # TODO: make a larger description of the operator arg. + # pylint: disable=fixme + # TODO: make a larger description of the operator arg. def __init__( self, model: Callable, @@ -69,15 +69,16 @@ def __init__( space_projection, self.predictor = model_splitting(model, latent_layer=latent_layer, device=device) - + if operator is None: warnings.warn("No operator provided, using standard classification operator. "\ + "For non-classification tasks, please specify an operator.") operator = target_free_classification_operator - + # the weights are given by the gradient of the operator based on the predictor gradients, _ = get_gradient_functions(self.predictor, operator) - get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets) # TODO check usage of gpu + # TODO check usage of gpu + get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets) mappable = isinstance(model, tf.keras.Model) @@ -108,17 +109,19 @@ def from_splitted_model(cls, It is not the case for wrapped PyTorch models. If you encounter errors in the `project_dataset` method, you can set it to `False`. """ + # pylint: disable=fixme assert isinstance(features_extractor, tf.keras.Model),\ f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." assert isinstance(predictor, tf.keras.Model),\ f"predictor should be a tf.keras.Model, got {type(predictor)}"\ f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." - + # the weights are given by the gradient of the operator based on the predictor gradients, _ = get_gradient_functions(predictor, operator) - get_weights = lambda inputs, targets: gradients(predictor, inputs, targets) # TODO check usage of gpu + # TODO check usage of gpu + get_weights = lambda inputs, targets: gradients(predictor, inputs, targets) super().__init__(get_weights=get_weights, space_projection=features_extractor, - mappable=mappable) \ No newline at end of file + mappable=mappable) diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py index a2d7ca6d..a92961e9 100644 --- a/xplique/example_based/projections/latent_space.py +++ b/xplique/example_based/projections/latent_space.py @@ -4,8 +4,7 @@ import tensorflow as tf -from ...commons import find_layer -from ...types import Callable, Union +from ...types import Union from .base import Projection from .commons import model_splitting @@ -47,7 +46,7 @@ def __init__(self, @classmethod def from_splitted_model(cls, features_extractor: tf.keras.Model, - mappable=True): # TODO: test + mappable=True): """ Create LatentSpaceProjection from a splitted model. 
The projection will project the inputs in the latent space, @@ -62,8 +61,9 @@ def from_splitted_model(cls, It is not the case for wrapped PyTorch models. If you encounter errors in the `project_dataset` method, you can set it to `False`. """ + # pylint: disable=fixme + # TODO: test assert isinstance(features_extractor, tf.keras.Model),\ f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." super().__init__(space_projection=features_extractor, mappable=mappable) - diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index ac1c9e7a..b43cf467 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -83,6 +83,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ # pylint: disable=too-many-arguments + # pylint: disable=duplicate-code def __init__( self, diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index 77dd768b..db9fe5af 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -64,9 +64,9 @@ def _sanitize_returns(returns: Optional[Union[List[str], str]] = None, class BaseSearchMethod(ABC): """ - Base class for the example-based search methods. This class is abstract. It should be inherited by - the search methods that are used to find examples in a dataset. It also defines the interface for the - search methods. + Base class for the example-based search methods. This class is abstract. + It should be inherited by the search methods that are used to find examples in a dataset. + It also defines the interface for the search methods. Parameters ---------- @@ -84,6 +84,7 @@ class BaseSearchMethod(ABC): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. """ + # pylint: disable=duplicate-code _returns_possibilities = ["examples", "indices", "distances", "include_inputs"] def __init__( @@ -92,8 +93,8 @@ def __init__( k: int = 1, search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, - ): # pylint: disable=R0801 - + ): + # set batch size if hasattr(cases_dataset, "_batch_size"): self.batch_size = tf.cast(cases_dataset._batch_size, tf.int32) @@ -144,7 +145,9 @@ def returns(self, returns: Union[List[str], str]): self._returns = _sanitize_returns(returns, self._returns_possibilities, default) @abstractmethod - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: + def find_examples(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: """ Search the samples to return as examples. Called by the explain methods. 
It may also return the indices corresponding to the samples,
@@ -165,6 +168,8 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[
         """
         raise NotImplementedError()
 
-    def __call__(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict:
+    def __call__(self,
+                 inputs: Union[tf.Tensor, np.ndarray],
+                 targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict:
         """find_samples() alias"""
         return self.find_examples(inputs, targets)
diff --git a/xplique/example_based/search_methods/common.py b/xplique/example_based/search_methods/common.py
index 3daa89ee..109adab2 100644
--- a/xplique/example_based/search_methods/common.py
+++ b/xplique/example_based/search_methods/common.py
@@ -5,7 +5,7 @@
 import numpy as np
 import tensorflow as tf
 
-from ...types import Callable, List, Union, Optional, Tuple
+from ...types import Callable, Union
 
 
 def _manhattan_distance(x1: tf.Tensor, x2: tf.Tensor) -> tf.Tensor:
@@ -112,6 +112,7 @@ def _minkowski_distance(x1: tf.Tensor, x2: tf.Tensor, p: int) -> tf.Tensor:
     "euclidean": _euclidean_distance,
     "cosine": _cosine_distance,
     "chebyshev": _chebyshev_distance,
+    "inf": _chebyshev_distance,
 }
 
 
@@ -129,16 +130,15 @@ def get_distance_function(distance: Union[int, str, Callable] = "euclidean",) ->
     # set distance function
     if hasattr(distance, "__call__"):
         return distance
-    elif isinstance(distance, str) and distance in _distances:
+    if isinstance(distance, str) and distance in _distances:
         return _distances[distance]
-    elif isinstance(distance, int):
+    if isinstance(distance, int):
         return lambda x1, x2: _minkowski_distance(x1, x2, p=distance)
-    elif distance == np.inf or (isinstance(distance, str) and distance == "inf"):
-        return lambda x1, x2: _chebyshev_distance(x1, x2)
-    else:
-        raise AttributeError(
-            "The distance parameter is expected to be either a Callable, "\
-            + f" an integer, 'inf', or a string in {_distances.keys()}. "\
-            + f"But a {type(distance)} was received, with value {distance}."
-        )
+    if distance == np.inf:
+        return _chebyshev_distance
+    raise AttributeError(
+        "The distance parameter is expected to be either a Callable, "\
+        + f"an integer, 'inf', or a string in {_distances.keys()}. "\
+        + f"But a {type(distance)} was received, with value {distance}."
+    )
diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py
index ed6b92a3..08726515 100644
--- a/xplique/example_based/search_methods/kleor.py
+++ b/xplique/example_based/search_methods/kleor.py
@@ -14,12 +14,14 @@
 
 class BaseKLEORSearch(FilterKNN, ABC):
     """
-    Base class for the KLEOR search methods. In those methods, one should first retrieve the Nearest Unlike Neighbor
-    (NUN) which is the closest example to the query that has a different prediction than the query.
-    Then, the method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
+    Base class for the KLEOR search methods.
+    In those methods, one should first retrieve the Nearest Unlike Neighbor (NUN)
+    which is the closest example to the query that has a different prediction than the query.
+    Then, the method searches for the K-Nearest Neighbors (KNN)
+    of the NUN that have the same prediction as the query.
 
-    Depending on the KLEOR method some additional condition for the search are added. See the specific KLEOR method for
-    more details.
+    Depending on the KLEOR method, some additional conditions for the search are added.
+    See the specific KLEOR method for more details.
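A usage sketch for the `get_distance_function` hunk above (flat sample vectors assumed): strings, integers (Minkowski `p`), `np.inf`, and callables all resolve to a distance function.

```
import numpy as np
import tensorflow as tf
from xplique.example_based.search_methods.common import get_distance_function

x1 = tf.constant([0.0, 0.0])
x2 = tf.constant([3.0, 4.0])

# "euclidean" -> 5.0, 1 (manhattan) -> 7.0, np.inf (chebyshev) -> 4.0
for distance in ["euclidean", 1, np.inf, lambda a, b: tf.norm(a - b)]:
    distance_fn = get_distance_function(distance)
    print(distance, float(distance_fn(x1, x2)))
```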
Parameters ---------- @@ -29,7 +31,8 @@ class BaseKLEORSearch(FilterKNN, ABC): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + Targets are expected to be the one-hot encoding of the model's predictions + for the samples in cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -46,6 +49,7 @@ class BaseKLEORSearch(FilterKNN, ABC): {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ + # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -54,7 +58,7 @@ def __init__( search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", - ): # pylint: disable=R0801 + ): super().__init__( cases_dataset = cases_dataset, targets_dataset=targets_dataset, @@ -78,7 +82,9 @@ def __init__( filter_fn=self._filter_fn_nun, ) - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: + def find_examples(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: """ Search the samples to return as examples. Called by the explain methods. It may also return the indices corresponding to the samples, @@ -99,7 +105,8 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ Dictionary containing the elements to return which are specified in `self.returns`. """ # compute neighbors - examples_distances, examples_indices, nuns, nuns_indices, nuns_sf_distances = self.kneighbors(inputs, targets) + examples_distances, examples_indices, nuns, nuns_indices, nuns_sf_distances =\ + self.kneighbors(inputs, targets) # build return dict return_dict = self._build_return_dict(inputs, examples_distances, examples_indices) @@ -118,7 +125,8 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ def _filter_fn(self, _, __, targets, cases_targets) -> tf.Tensor: """ - Filter function to mask the cases for which the prediction is the same as the predicted label on the inputs. + Filter function to mask the cases + for which the prediction is the same as the predicted label on the inputs. """ # get the labels predicted by the model # (n, ) @@ -143,7 +151,9 @@ def _filter_fn_nun(self, _, __, targets, cases_targets) -> tf.Tensor: mask = tf.not_equal(tf.expand_dims(predicted_labels, axis=1), label_targets) #(n, bs) return mask - def _get_nuns(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: + def _get_nuns(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: """ Get the Nearest Unlike Neighbors and their distance to the related input. 
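As a reading aid for `_get_nuns` and the semi-factual search, a brute-force NumPy illustration of the KLEOR idea (toy helper, not the class API): first find the NUN, then the like-labelled neighbours of that NUN. KLEORGlobalSim would additionally discard candidates farther from the query than the NUN.

```
import numpy as np

def kleor_sim_miss(query, query_label, cases, labels, k=2):
    d_query = np.linalg.norm(cases - query, axis=1)

    # 1) Nearest Unlike Neighbor: closest case with a different label
    unlike = labels != query_label
    nun = cases[unlike][np.argmin(d_query[unlike])]

    # 2) semi-factuals: nearest like-labelled cases to the NUN
    like = labels == query_label
    d_nun = np.linalg.norm(cases[like] - nun, axis=1)
    return cases[like][np.argsort(d_nun)[:k]]

cases = np.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0], [3.0, 0.0]])
labels = np.array([0, 0, 1, 1])
print(kleor_sim_miss(np.array([0.5, 0.0]), 0, cases, labels))
```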
""" @@ -152,7 +162,9 @@ def _get_nuns(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tens nuns = dataset_gather(self.cases_dataset, nuns_indices) return nuns, nuns_indices, nuns_distances - def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: + def kneighbors(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the k SF to each tensor of `inputs` in `self.cases_dataset`. Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. @@ -190,14 +202,17 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Ten Tensor of distances between the SFs and the NUN with dimension (n, k). The n NUNs times the k-SF. """ + # pylint: disable=signature-differs # get the Nearest Unlike Neighbors and their distance to the related input nuns, nuns_indices, nuns_input_distances = self._get_nuns(inputs, targets) - + # initialize the search for the KLEOR semi-factual methods - sf_indices, input_sf_distances, nun_sf_distances, batch_indices = self._initialize_search(inputs) + sf_indices, input_sf_distances, nun_sf_distances, batch_indices =\ + self._initialize_search(inputs) # iterate on batches - for batch_index, (cases, cases_targets) in enumerate(zip(self.cases_dataset, self.targets_dataset)): + for batch_index, (cases, cases_targets) in\ + enumerate(zip(self.cases_dataset, self.targets_dataset)): # add new elements # (n, current_bs, 2) indices = batch_indices[:, : tf.shape(cases)[0]] @@ -222,8 +237,14 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Ten # (n, k+curent_bs, 2) concatenated_indices = tf.concat([sf_indices, new_indices], axis=1) # (n, k+curent_bs) - concatenated_nun_sf_distances = tf.concat([nun_sf_distances, b_nun_sf_distances], axis=1) - concatenated_input_sf_distances = tf.concat([input_sf_distances, b_input_sf_distances], axis=1) + concatenated_nun_sf_distances = tf.concat( + [nun_sf_distances, b_nun_sf_distances], + axis=1 + ) + concatenated_input_sf_distances = tf.concat( + [input_sf_distances, b_input_sf_distances], + axis=1 + ) # sort according to the smallest distances between sf and nun # (n, k) @@ -243,7 +264,9 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Union[tf.Ten return input_sf_distances, sf_indices, nuns, nuns_indices, nun_sf_distances - def _initialize_search(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Variable, tf.Variable, tf.Variable, tf.Tensor]: + def _initialize_search(self, + inputs: Union[tf.Tensor, np.ndarray] + ) -> Tuple[tf.Variable, tf.Variable, tf.Variable, tf.Tensor]: """ Initialize the search for the KLEOR semi-factual methods. """ @@ -260,19 +283,27 @@ def _initialize_search(self, inputs: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.V return sf_indices, input_sf_distances, nun_sf_distances, batch_indices @abstractmethod - def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: + def _additional_filtering(self, + nun_sf_distances: tf.Tensor, + input_sf_distances: tf.Tensor, + nuns_input_distances: tf.Tensor) -> Tuple[tf.Tensor, tf.Tensor]: """ Additional filtering to apply to the distances. 
""" raise NotImplementedError - + class KLEORSimMissSearch(BaseKLEORSearch): """ - The KLEORSimMiss method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of - the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the - method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. + The KLEORSimMiss method search for Semi-Factuals examples + by searching for the Nearest Unlike Neighbor (NUN) of the query. + The NUN is the closest example to the query that has a different prediction than the query. + Then, the method search for the K-Nearest Neighbors (KNN) + of the NUN that have the same prediction as the query. """ - def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple: + def _additional_filtering(self, + nun_sf_distances: tf.Tensor, + input_sf_distances: tf.Tensor, + nuns_input_distances: tf.Tensor) -> Tuple: """ No additional filtering for the KLEORSimMiss method. """ @@ -280,14 +311,21 @@ def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: class KLEORGlobalSimSearch(BaseKLEORSearch): """ - The KLEORGlobalSim method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of - the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the - method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query. - - In addition, for a SF candidate to be considered, the SF should be closer to the query than the NUN - (i.e. the SF should be 'between' the input and its NUN). This condition is added to the search. + The KLEORGlobalSim method search for Semi-Factuals examples + by searching for the Nearest Unlike Neighbor (NUN) of the query. + The NUN is the closest example to the query that has a different prediction than the query. + Then, the method search for the K-Nearest Neighbors (KNN) + of the NUN that have the same prediction as the query. + + In addition, for a SF candidate to be considered, + the SF should be closer to the query than the NUN + (i.e. the SF should be 'between' the input and its NUN). + This condition is added to the search. """ - def _additional_filtering(self, nun_sf_distances: tf.Tensor, input_sf_distances: tf.Tensor, nuns_input_distances: tf.Tensor) -> Tuple: + def _additional_filtering(self, + nun_sf_distances: tf.Tensor, + input_sf_distances: tf.Tensor, + nuns_input_distances: tf.Tensor) -> Tuple: """ Filter the distances to keep only the SF that are 'between' the input and its NUN. """ diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index fe9b50fc..f1141623 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -14,7 +14,8 @@ class BaseKNN(BaseSearchMethod): """ - Base class for the KNN search methods. It is an abstract class that should be inherited by a specific KNN method. + Base class for the KNN search methods. + It is an abstract class that should be inherited by a specific KNN method. Parameters ---------- @@ -32,9 +33,10 @@ class BaseKNN(BaseSearchMethod): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. order - The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. 
- ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are - the best. + The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. + Default is `ORDER.ASCENDING`. + ASCENDING means that the smallest distances are the best, + DESCENDING means that the biggest distances are the best. """ def __init__( self, @@ -51,13 +53,17 @@ def __init__( batch_size=batch_size, ) # set order - assert isinstance(order, ORDER), f"order should be an instance of ORDER and not {type(order)}" + assert isinstance(order, ORDER),\ + f"order should be an instance of ORDER and not {type(order)}" self.order = order # fill value self.fill_value = np.inf if self.order == ORDER.ASCENDING else -np.inf - + @abstractmethod - def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> Tuple[tf.Tensor, tf.Tensor]: + def kneighbors(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None + ) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the k-nearest neighbors to each tensor of `inputs` in `self.cases_dataset`. Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. @@ -85,7 +91,10 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni """ raise NotImplementedError - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> dict: + def find_examples(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None + ) -> dict: """ Search the samples to return as examples. Called by the explain methods. It may also return the indices corresponding to the samples, @@ -115,8 +124,9 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[ def _build_return_dict(self, inputs, examples_distances, examples_indices) -> dict: """ - Build the return dict based on the `self.returns` values. It builds the return dict with the value in the - subset of ['examples', 'include_inputs', 'indices', 'distances'] which is commonly shared. + Build the return dict based on the `self.returns` values. + It builds the return dict with the value in the subset of + ['examples', 'include_inputs', 'indices', 'distances'] which is commonly shared. """ # Set values in return dict return_dict = {} @@ -155,14 +165,16 @@ class KNN(BaseKNN): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. order - The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. - ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are - the best. + The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. + Default is `ORDER.ASCENDING`. + ASCENDING means that the smallest distances are the best, + DESCENDING means that the biggest distances are the best. distance Distance function for examples search. It can be an integer, a string in {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". 
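A sketch of the batched merge that `order` and `fill_value` support (simplified to distances only; the real method also tracks indices): keep an (n, k) buffer filled with `+inf` for `ORDER.ASCENDING`, concatenate each incoming batch, and re-select the k best.

```
import tensorflow as tf

k = 3
best = tf.fill((2, k), float("inf"))  # ASCENDING -> +inf placeholders

for batch_distances in [tf.random.uniform((2, 5)), tf.random.uniform((2, 4))]:
    # (n, k + current_bs): previous best merged with the new batch
    candidates = tf.concat([best, batch_distances], axis=1)
    # keep the k smallest distances seen so far
    best = tf.sort(candidates, axis=1)[:, :k]

print(best.numpy())  # (2, 3) running k-nearest distances
```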
""" + # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -171,7 +183,7 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, - ): # pylint: disable=R0801 + ): super().__init__( cases_dataset=cases_dataset, k=k, @@ -216,7 +228,10 @@ def compute_distance(args): distances = tf.vectorized_map(compute_distance, args) return distances - def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf.Tensor, tf.Tensor]: + def kneighbors(self, + inputs: Union[tf.Tensor, np.ndarray], + _ = None, + ) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the k-neareast neighbors to each tensor of `inputs` in `self.cases_dataset`. Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. @@ -254,7 +269,6 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], _ = None) -> Tuple[tf batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1)) # iterate on batches - # for batch_index, (cases, cases_targets) in enumerate(zip(self.cases_dataset, self.targets_dataset)): for batch_index, cases in enumerate(self.cases_dataset): # add new elements # (n, current_bs, 2) @@ -291,9 +305,9 @@ class FilterKNN(BaseKNN): """ KNN method to search examples. Based on `sklearn.neighbors.NearestNeighbors`. The kneighbors method is implemented in a batched way to handle large datasets. - In addition, a filter function is used to select the elements to compute the distances, thus reducing the - computational cost of the distance computation (worth if the computation of the filter is low and the matrix - of distances is sparse). + In addition, a filter function is used to select the elements to compute the distances, + thus reducing the computational cost of the distance computation + (worth if the computation of the filter is low and the matrix of distances is sparse). Parameters ---------- @@ -303,7 +317,8 @@ class FilterKNN(BaseKNN): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + Targets are expected to be the one-hot encoding of the model's predictions + for the samples in cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -317,18 +332,21 @@ class FilterKNN(BaseKNN): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. order - The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. Default is `ORDER.ASCENDING`. - ASCENDING means that the smallest distances are the best, DESCENDING means that the biggest distances are - the best. + The order of the distances, either `ORDER.ASCENDING` or `ORDER.DESCENDING`. + Default is `ORDER.ASCENDING`. + ASCENDING means that the smallest distances are the best, + DESCENDING means that the biggest distances are the best. distance Distance function for examples search. It can be an integer, a string in {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". 
filter_fn - A Callable that takes as inputs the inputs, their targets, the cases and their targets and - returns a boolean mask of shape (n, m) where n is the number of inputs and m the number of cases. + A Callable that takes as inputs the inputs, their targets, + the cases and their targets and returns a boolean mask of shape (n, m) + where n is the number of inputs and m the number of cases. This boolean mask is used to choose between which inputs and cases to compute the distances. """ + # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -339,7 +357,8 @@ def __init__( distance: Union[int, str, Callable] = "euclidean", order: ORDER = ORDER.ASCENDING, filter_fn: Optional[Callable] = None, - ): # pylint: disable=R0801 + ): + # pylint: disable=fixme super().__init__( cases_dataset=cases_dataset, k=k, @@ -380,7 +399,8 @@ def _crossed_distances_fn(self, x1, x2, mask): x2 Tensor. Cases samples of shape (m, ...). mask - Tensor. Boolean mask of shape (n, m). It is used to filter the elements for which the distance is computed. + Tensor. Boolean mask of shape (n, m). + It is used to filter the elements for which the distance is computed. Returns ------- @@ -402,13 +422,16 @@ def compute_distance(args): distances = tf.vectorized_map(compute_distance, args) return distances - def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Union[tf.Tensor, np.ndarray]] = None) -> Tuple[tf.Tensor, tf.Tensor]: + def kneighbors(self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None + ) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the k-neareast neighbors to each tensor of `inputs` in `self.cases_dataset`. Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. - In addition, a filter function is used to select the elements to compute the distances, thus reducing the - computational cost of the distance computation (worth if the computation of the filter is low and the matrix - of distances is sparse). + In addition, a filter function is used to select the elements to compute the distances, + thus reducing the computational cost of the distance computation + (worth if the computation of the filter is low and the matrix of distances is sparse). 
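For the `filter_fn` contract documented above, a minimal example (hypothetical function matching the documented signature) that keeps only the cases sharing the input's predicted label:

```
import tensorflow as tf

def same_class_filter(inputs, targets, cases, cases_targets):
    # targets: (n, nb_classes), cases_targets: (m, nb_classes), both one-hot
    input_labels = tf.argmax(targets, axis=-1)       # (n,)
    case_labels = tf.argmax(cases_targets, axis=-1)  # (m,)
    # (n, m) boolean mask: True where the distance should be computed
    return tf.equal(input_labels[:, None], case_labels[None, :])
```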
Parameters ---------- @@ -443,7 +466,8 @@ def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], targets: Optional[Uni batch_indices = tf.tile(batch_indices, multiples=(nb_inputs, 1)) # iterate on batches - for batch_index, (cases, cases_targets) in enumerate(zip(self.cases_dataset, self.targets_dataset)): + for batch_index, (cases, cases_targets) in\ + enumerate(zip(self.cases_dataset, self.targets_dataset)): # add new elements # (n, current_bs, 2) indices = batch_indices[:, : tf.shape(cases)[0]] diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py index fae99771..324a6318 100644 --- a/xplique/example_based/search_methods/mmd_critic_search.py +++ b/xplique/example_based/search_methods/mmd_critic_search.py @@ -2,14 +2,11 @@ MMDCritic search method in example-based module """ -import numpy as np import tensorflow as tf -from ...commons import dataset_gather -from ...types import Callable, List, Union, Optional, Tuple +from ...types import Tuple from .proto_greedy_search import ProtoGreedySearch -from ..projections import Projection class MMDCriticSearch(ProtoGreedySearch): @@ -52,9 +49,17 @@ class MMDCriticSearch(ProtoGreedySearch): Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ - def compute_objectives(self, selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel): + def compute_objectives(self, + selection_indices: tf.Tensor, + selection_cases: tf.Tensor, + selection_weights: tf.Tensor, + selection_selection_kernel: tf.Tensor, + candidates_indices: tf.Tensor, + candidates_selection_kernel: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: """ - Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. + Compute the objective function and corresponding weights + for a given set of selected prototypes and a candidate. Here, we have a special case of protogreedy where we give equal weights to all prototypes, the objective here is simplified to speed up processing @@ -63,8 +68,9 @@ def compute_objectives(self, selection_indices, selection_cases, selection_weigh ≡ Find argmax_{c} F(S ∪ c) ≡ - Find argmax_{c} (sum1 - sum2) where: sum1 = (2 / n) * ∑[i=1 to n] κ(x_i, c) - sum2 = 1/(|S|+1) [2 * ∑[j=1 to |S|] * κ(x_j, c) + κ(c, c)] + Find argmax_{c} (sum1 - sum2) + where: sum1 = (2 / n) * ∑[i=1 to n] κ(x_i, c) + sum2 = 1/(|S|+1) [2 * ∑[j=1 to |S|] * κ(x_j, c) + κ(c, c)] Parameters ---------- @@ -87,7 +93,7 @@ def compute_objectives(self, selection_indices, selection_cases, selection_weigh Tensor that contains the computed objective values for each candidate. objectives_weights Tensor that contains the computed objective weights for each candidate. 
- """ + """ nb_candidates = candidates_indices.shape[0] nb_selection = selection_indices.shape[0] @@ -102,6 +108,7 @@ def compute_objectives(self, selection_indices, selection_cases, selection_weigh sum2 /= (nb_selection + 1) objectives = sum1 - sum2 - objectives_weights = tf.ones(shape=(nb_candidates, nb_selection+1), dtype=tf.float32) / tf.cast(nb_selection+1, dtype=tf.float32) + objectives_weights = tf.ones(shape=(nb_candidates, nb_selection+1), dtype=tf.float32) + objectives_weights /= tf.cast(nb_selection+1, dtype=tf.float32) return objectives, objectives_weights diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index 21a8ae2a..3e31bfcf 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -6,11 +6,10 @@ from scipy.optimize import minimize import tensorflow as tf -from ...commons import dataset_gather from ...types import Callable, List, Union, Optional, Tuple from .proto_greedy_search import ProtoGreedySearch -from ..projections import Projection + class Optimizer(): """ @@ -29,9 +28,9 @@ class Optimizer(): """ def __init__( - self, - initial_weights: Union[tf.Tensor, np.ndarray], - min_weight: float = 0, + self, + initial_weights: Union[tf.Tensor, np.ndarray], + min_weight: float = 0, max_weight: float = 10000 ): self.initial_weights = initial_weights @@ -59,11 +58,13 @@ def optimize(self, u, K): best_objective : Tensor The value of the objective function corresponding to the best_weights. """ + # pylint: disable=invalid-name u = u.numpy() K = K.numpy() - result = minimize(self.objective_fn, self.initial_weights, args=(u, K), method='SLSQP', bounds=self.bounds, options={'disp': False}) + result = minimize(self.objective_fn, self.initial_weights, args=(u, K), + method='SLSQP', bounds=self.bounds, options={'disp': False}) # Get the best weights best_weights = result.x @@ -71,7 +72,8 @@ def optimize(self, u, K): # Get the best objective best_objective = -result.fun - best_objective = tf.expand_dims(tf.convert_to_tensor(best_objective, dtype=tf.float32), axis=0) + best_objective = tf.expand_dims(tf.convert_to_tensor(best_objective, dtype=tf.float32), + axis=0) assert tf.reduce_all(best_weights >= 0) @@ -121,6 +123,7 @@ class ProtoDashSearch(ProtoGreedySearch): Exact method is based on a scipy optimization, while the other is based on a tensorflow inverse operation. 
""" + # pylint: disable=duplicate-code def __init__( self, @@ -131,28 +134,35 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, - kernel_type: str = 'local', + kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None, exact_selection_weights_update: bool = False, - ): # pylint: disable=R0801 - + ): + self.exact_selection_weights_update = exact_selection_weights_update super().__init__( - cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - k=k, - search_returns=search_returns, - batch_size=batch_size, - distance=distance, - nb_prototypes=nb_prototypes, - kernel_type=kernel_type, + cases_dataset=cases_dataset, + labels_dataset=labels_dataset, + k=k, + search_returns=search_returns, + batch_size=batch_size, + distance=distance, + nb_prototypes=nb_prototypes, + kernel_type=kernel_type, kernel_fn=kernel_fn, gamma=gamma ) - def update_selection_weights(self, selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective): + def update_selection_weights(self, + selection_indices: tf.Tensor, + selection_weights: tf.Tensor, + selection_selection_kernel: tf.Tensor, + best_indice: tf.Tensor, + best_weights: tf.Tensor, + best_objective: tf.Tensor + ) -> tf.Tensor: """ Update the selection weights based on the given parameters. Pursuant to Lemma IV.4: @@ -172,7 +182,8 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti best_indice : int The index of the selected prototype with the highest objective function value. best_weights : Tensor - The weights corresponding to the optimal solution of the objective function for each candidate. + The weights corresponding to the optimal solution + of the objective function for each candidate. best_objective : float The computed objective function value. @@ -181,15 +192,17 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti selection_weights : Tensor Updated weights corresponding to the selected prototypes. 
""" + # pylint: disable=invalid-name if best_objective <= 0: selection_weights = tf.concat([selection_weights, [0]], axis=0) - else: + else: u = tf.expand_dims(tf.gather(self.col_means, selection_indices), axis=1) K = selection_selection_kernel if self.exact_selection_weights_update: - initial_weights = tf.concat([selection_weights, [best_objective / tf.gather(self.diag, best_indice)]], axis=0) + best_objective_diag = best_objective / tf.gather(self.diag, best_indice) + initial_weights = tf.concat([selection_weights, [best_objective_diag]], axis=0) opt = Optimizer(initial_weights) selection_weights, _ = opt.optimize(u, K) selection_weights = tf.squeeze(selection_weights, axis=0) @@ -197,14 +210,22 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti # We added epsilon to the diagonal of K to ensure that K is invertible K_inv = tf.linalg.inv(K + ProtoDashSearch.EPSILON * tf.eye(K.shape[-1])) selection_weights = tf.linalg.matmul(K_inv, u) - selection_weights = tf.maximum(selection_weights, 0) + selection_weights = tf.maximum(selection_weights, 0) selection_weights = tf.squeeze(selection_weights, axis=1) return selection_weights - def compute_objectives(self, selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel): + def compute_objectives(self, + selection_indices: tf.Tensor, + selection_cases: tf.Tensor, + selection_weights: tf.Tensor, + selection_selection_kernel: tf.Tensor, + candidates_indices: tf.Tensor, + candidates_selection_kernel: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: """ - Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. + Compute the objective function and corresponding weights + for a given set of selected prototypes and a candidate. Calculate the gradient of l(w) = w^T * μ_p - 1/2 * w^T * K * w w.r.t w, on the optimal weight point ζ^(S) g = ∇l(ζ^(S)) = μ_p - K * ζ^(S) @@ -231,12 +252,13 @@ def compute_objectives(self, selection_indices, selection_cases, selection_weigh Tensor that contains the computed objective values for each candidate. objectives_weights Tensor that contains the computed objective weights for each candidate. - """ - + """ + # pylint: disable=invalid-name + u = tf.gather(self.col_means, candidates_indices) if selection_indices.shape[0] == 0: - # S = ∅ and ζ^(∅) = 0, g = ∇l(ζ^(∅)) = μ_p + # S = ∅ and ζ^(∅) = 0, g = ∇l(ζ^(∅)) = μ_p objectives = u else: u = tf.expand_dims(u, axis=1) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 0625e8fe..f439039e 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -5,7 +5,7 @@ import numpy as np import tensorflow as tf -from ...commons import dataset_gather, sanitize_dataset +from ...commons import sanitize_dataset from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod @@ -14,7 +14,33 @@ # from ..projections import Projection -def rbf_kernel(X, Y=None, gamma=None): +@tf.function +def rbf_kernel(X: tf.Tensor, + Y: Optional[tf.Tensor] = None, + gamma: Optional[float] = None + ) -> tf.Tensor: + """ + Compute the rbf kernel matrix between two sets of samples. + + Parameters + ---------- + X + The first set of samples. + Y + The second set of samples, by default None. + If None, it is set to X. 
+ gamma + The spread of the rbf kernel, by default None. + If None, it is set to 1.0 / n_features. + + Returns + ------- + Tensor + The rbf kernel matrix. + """ + # pylint: disable=invalid-name + # pylint: disable=invalid-unary-operand-type + # (for `X - Y`, pylint sees that Y might be `None`, but it is not the case) if Y is None: Y = X @@ -70,6 +96,7 @@ class ProtoGreedySearch(BaseSearchMethod): gamma : float, optional Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ + # pylint: disable=duplicate-code # Avoid zero division during procedure. (the value is not important, as if the denominator is # zero, then the nominator will also be zero). @@ -84,14 +111,17 @@ def __init__( batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, - kernel_type: str = 'local', + kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None - ): # pylint: disable=R0801 + ): super().__init__( cases_dataset, k, search_returns, batch_size ) + # pylint: disable=fixme + # TODO: see if leave the choice between local and global kernels to the user + # by forcing a global kernel, we can simplify the code self.labels_dataset = sanitize_dataset(labels_dataset, self.batch_size) if kernel_type not in ['local', 'global']: @@ -100,19 +130,19 @@ def __init__( + " ['local', 'global'] "\ +f"but {kernel_type} was received."\ ) - + self.kernel_type = kernel_type # set default kernel function (rbf_kernel) or raise error if kernel_fn is not callable if kernel_fn is None: # define rbf kernel function - kernel_fn = lambda x, y: rbf_kernel(x,y,gamma) + kernel_fn = lambda x, y: rbf_kernel(x,y,gamma) elif not hasattr(kernel_fn, "__call__"): raise AttributeError( "The kernel_fn parameter is expected to be a Callable"\ +f"but {kernel_fn} was received."\ ) - + # define custom kernel function depending on the kernel type def custom_kernel_fn(x1, x2, y1=None, y2=None): if self.kernel_type == 'global': @@ -120,7 +150,7 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None): if isinstance(kernel_matrix, np.ndarray): kernel_matrix = tf.convert_to_tensor(kernel_matrix) else: - # In the case of a local kernel, calculations are limited to within the class. + # In the case of a local kernel, calculations are limited to within the class. # Across different classes, the kernel values are set to 0. 
                kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32)
                y_intersect = np.intersect1d(y1, y2)
@@ -128,7 +158,10 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None):
                     y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0]
                     y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0]
                     sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices))
-                    kernel_matrix[tf.reshape(y1_indices, (-1, 1)), tf.reshape(y2_indices, (1, -1))] = sub_matrix
+
+                    y1_indices_flatten = tf.reshape(y1_indices, (-1, 1))
+                    y2_indices_flatten = tf.reshape(y2_indices, (1, -1))
+                    kernel_matrix[y1_indices_flatten, y2_indices_flatten] = sub_matrix
                 kernel_matrix = tf.convert_to_tensor(kernel_matrix)
             return kernel_matrix
@@ -139,27 +172,27 @@ def custom_kernel_fn(x1, x2, y1=None, y2=None):
         def kernel_induced_distance(x1, x2):
             x1 = tf.expand_dims(x1, axis=0)
             x2 = tf.expand_dims(x2, axis=0)
-            distance = tf.squeeze(tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2)))
-            return distance
+            distance = tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2))
+            return tf.squeeze(distance)
         self.distance_fn = kernel_induced_distance
     else:
         self.distance_fn = get_distance_function(distance)
-        
-        # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset.
-        # We take advantage of the symmetry of this matrix to traverse only its lower triangle.
+
+        # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset
+        # We take advantage of the symmetry of this matrix to traverse only its lower triangle
         col_sums = []
         diag = []
         row_sums = []
-        
+
         for batch_col_index, (batch_col_cases, batch_col_labels) in enumerate(
             zip(self.cases_dataset, self.labels_dataset)
         ):
             # elements should be tabular data
             assert len(batch_col_cases.shape) == 2,\
-                "Prototypes' searches expects 2D data, (nb_samples, nb_features),"+\
-                f"but got {batch_col_cases.shape}"+\
-                "Please verify your projection if you provided a custom one."+\
-                "If you use a splitted model, make sure the output of the first part of the model is flattened."
+                "Prototypes' search expects 2D data, (nb_samples, nb_features), but got "+\
+                f"{batch_col_cases.shape}. Please verify your projection "+\
+                "if you provided a custom one. If you use a splitted model, "+\
+                "make sure the output of the first part of the model is flattened."
batch_col_sums = tf.zeros((batch_col_cases.shape[0])) @@ -168,15 +201,16 @@ def kernel_induced_distance(x1, x2): ): if batch_row_index < batch_col_index: continue - - batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases, batch_row_labels, batch_col_labels) + + batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases, + batch_row_labels, batch_col_labels) batch_col_sums = batch_col_sums + tf.reduce_sum(batch_kernel, axis=0) - if batch_col_index == batch_row_index: + if batch_col_index == batch_row_index: if batch_col_index != 0: - batch_col_sums = batch_col_sums + row_sums[batch_row_index] - + batch_col_sums = batch_col_sums + row_sums[batch_row_index] + diag.append(tf.linalg.diag_part(batch_kernel)) if batch_col_index == 0: @@ -186,7 +220,7 @@ def kernel_induced_distance(x1, x2): row_sums.append(tf.reduce_sum(batch_kernel, axis=1)) else: row_sums[batch_row_index] += tf.reduce_sum(batch_kernel, axis=1) - + col_sums.append(batch_col_sums) self.col_sums = tf.concat(col_sums, axis=0) @@ -196,7 +230,8 @@ def kernel_induced_distance(x1, x2): self.nb_features = batch_col_cases.shape[1] # compute the prototypes in the latent space - self.prototypes_indices, self.prototypes, self.prototypes_labels, self.prototypes_weights = self.find_prototypes(nb_prototypes) + self.prototypes_indices, self.prototypes, self.prototypes_labels, self.prototypes_weights =\ + self.find_prototypes(nb_prototypes) self.knn = KNN( cases_dataset=self.prototypes, @@ -206,7 +241,14 @@ def kernel_induced_distance(x1, x2): distance=self.distance_fn ) - def compute_objectives(self, selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel): + def compute_objectives(self, + selection_indices: tf.Tensor, + selection_cases: tf.Tensor, + selection_weights: tf.Tensor, + selection_selection_kernel: tf.Tensor, + candidates_indices: tf.Tensor, + candidates_selection_kernel: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the objective and its weights for each candidate. @@ -231,41 +273,69 @@ def compute_objectives(self, selection_indices, selection_cases, selection_weigh Tensor that contains the computed objective values for each candidate. objectives_weights Tensor that contains the computed objective weights for each candidate. 
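Returning to `kernel_induced_distance` defined earlier in this hunk, the identity it implements, d(x1, x2) = sqrt(k(x1, x1) - 2 k(x1, x2) + k(x2, x2)), can be checked numerically; for a normalized kernel such as the rbf (k(x, x) = 1) it reduces to sqrt(2 - 2 k(x1, x2)).

```
import tensorflow as tf

def rbf(x, y, gamma=0.5):
    # scalar rbf kernel between two flat vectors
    return tf.exp(-gamma * tf.reduce_sum(tf.square(x - y)))

x1 = tf.constant([0.0, 0.0])
x2 = tf.constant([1.0, 2.0])

# kernel-induced distance vs the normalized-kernel shortcut
d = tf.sqrt(rbf(x1, x1) - 2.0 * rbf(x1, x2) + rbf(x2, x2))
print(float(d), float(tf.sqrt(2.0 - 2.0 * rbf(x1, x2))))  # equal values
```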
- """ + """ + # pylint: disable=invalid-name nb_candidates = candidates_indices.shape[0] nb_selection = selection_cases.shape[0] - repeated_selection_indices = tf.tile(tf.expand_dims(selection_indices, 0), [nb_candidates, 1]) - repeated_selection_candidates_indices = tf.concat([repeated_selection_indices, tf.expand_dims(candidates_indices, 1)], axis=1) + repeated_selection_indices = tf.tile(tf.expand_dims(selection_indices, 0), + [nb_candidates, 1]) + repeated_selection_candidates_indices = tf.concat([repeated_selection_indices, + tf.expand_dims(candidates_indices, 1)], + axis=1) u = tf.expand_dims(tf.gather(self.col_means, repeated_selection_candidates_indices), axis=2) + candidates_diag = tf.gather(self.diag, candidates_indices) + candidates_diag = tf.expand_dims(tf.expand_dims(candidates_diag, axis=-1), axis=-1) + if nb_selection == 0: - K = tf.expand_dims(tf.expand_dims(tf.gather(self.diag, candidates_indices), axis=-1), axis=-1) + K = candidates_diag else: - repeated_selection_selection_kernel = tf.tile(tf.expand_dims(selection_selection_kernel, 0), [nb_candidates, 1, 1]) - repeated_selection_selection_kernel = tf.pad(repeated_selection_selection_kernel, [[0, 0], [0, 1], [0, 1]]) + repeated_selection_selection_kernel = tf.tile( + tf.expand_dims(selection_selection_kernel, 0), + [nb_candidates, 1, 1] + ) + repeated_selection_selection_kernel = tf.pad( + repeated_selection_selection_kernel, + [[0, 0], [0, 1], [0, 1]] + ) - candidates_diag = tf.expand_dims(tf.expand_dims(tf.gather(self.diag, candidates_indices), axis=-1), axis=-1) - candidates_diag = tf.pad(candidates_diag, [[0, 0], [nb_selection, 0], [nb_selection, 0]]) + candidates_diag = tf.pad( + candidates_diag, + [[0, 0], [nb_selection, 0], [nb_selection, 0]] + ) candidates_selection_kernel = tf.expand_dims(candidates_selection_kernel, axis=-1) - candidates_selection_kernel = tf.pad(candidates_selection_kernel, [[0, 0], [0, 1], [nb_selection, 0]]) + candidates_selection_kernel = tf.pad( + candidates_selection_kernel, + [[0, 0], [0, 1], [nb_selection, 0]] + ) - K = repeated_selection_selection_kernel + candidates_diag + candidates_selection_kernel + tf.transpose(candidates_selection_kernel, [0, 2, 1]) + K = repeated_selection_selection_kernel + candidates_diag\ + + candidates_selection_kernel + tf.transpose(candidates_selection_kernel, [0, 2, 1]) - # Compute the objective weights for each candidate in the batch + # Compute the objective weights for each candidate in the batch K_inv = tf.linalg.inv(K + ProtoGreedySearch.EPSILON * tf.eye(K.shape[-1])) objectives_weights = tf.matmul(K_inv, u) objectives_weights = tf.maximum(objectives_weights, 0) - + # Compute the objective for each candidate in the batch - objectives = tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), u) - 0.5 * tf.matmul(tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), K), objectives_weights) - objectives = tf.squeeze(objectives, axis=[1,2]) + k_objectives = tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), K) + u_objectives = tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), u) + objectives = u_objectives - 0.5 * tf.matmul(k_objectives, objectives_weights) + objectives = tf.squeeze(objectives, axis=[1, 2]) return objectives, objectives_weights - def update_selection_weights(self, selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective): + def update_selection_weights(self, + selection_indices: tf.Tensor, + selection_weights: tf.Tensor, + selection_selection_kernel: tf.Tensor, + best_indice: 
tf.Tensor, + best_weights: tf.Tensor, + best_objective: tf.Tensor + ) -> tf.Tensor: """ Update the selection weights based on the optimization results. @@ -280,7 +350,8 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti best_indice : int The index of the selected prototype with the highest objective function value. best_weights : Tensor - The weights corresponding to the optimal solution of the objective function for each candidate. + The weights corresponding to the optimal solution + of the objective function for each candidate. best_objective : float The computed objective function value. @@ -293,7 +364,7 @@ def update_selection_weights(self, selection_indices, selection_weights, selecti selection_weights = best_weights return selection_weights - + def find_prototypes(self, nb_prototypes): """ Search for prototypes and their corresponding weights. @@ -318,43 +389,45 @@ def find_prototypes(self, nb_prototypes): # Tensors to store selected indices and their corresponding cases, labels and weights. selection_indices = tf.constant([], dtype=tf.int32) selection_cases = tf.zeros((0, self.nb_features), dtype=tf.float32) - selection_labels = tf.constant([], dtype=tf.int32) - selection_weights = tf.constant([], dtype=tf.float32) + selection_labels = tf.constant([], dtype=tf.int32) + selection_weights = tf.constant([], dtype=tf.float32) # Tensor to store the all_candidates-selection kernel of the previous iteration. all_candidates_selection_kernel = tf.zeros((self.n, 0), dtype=tf.float32) # Tensor to store the selection-selection kernel. selection_selection_kernel = None - + k = 0 while k < nb_prototypes: - + nb_selection = selection_cases.shape[0] # Tensor to store the all_candidates-last_selected kernel if nb_selection !=0: all_candidates_last_selected_kernel = tf.zeros((self.n), dtype=tf.float32) - best_objective = None + best_objective = None best_indice = None - best_case = None - best_label = None + best_case = None + best_label = None best_weights = None - + for batch_index, (cases, labels) in enumerate( zip(self.cases_dataset, self.labels_dataset) ): batch_inside_indices = tf.range(cases.shape[0], dtype=tf.int32) batch_indices = batch_index * self.batch_size + batch_inside_indices - + # Filter the batch to keep only candidate indices. 
if nb_selection == 0: candidates_indices = batch_indices else: - candidates_indices = tf.convert_to_tensor(np.setdiff1d(batch_indices, selection_indices)) + candidates_indices = tf.convert_to_tensor( + np.setdiff1d(batch_indices, selection_indices) + ) nb_candidates = candidates_indices.shape[0] - if nb_candidates == 0: + if nb_candidates == 0: continue candidates_inside_indices = candidates_indices % self.batch_size @@ -365,41 +438,64 @@ def find_prototypes(self, nb_prototypes): if nb_selection == 0: candidates_selection_kernel = None else: - candidates_last_selected_kernel = self.kernel_fn(candidates_cases, selection_cases[-1:, :], candidates_labels, selection_labels[-1:]) - candidates_selection_kernel = tf.concat([tf.gather(all_candidates_selection_kernel, candidates_indices, axis=0), candidates_last_selected_kernel], axis=1) - all_candidates_last_selected_kernel = tf.tensor_scatter_nd_update(all_candidates_last_selected_kernel, tf.expand_dims(candidates_indices, axis=1), tf.squeeze(candidates_last_selected_kernel, axis=1)) - + candidates_last_selected_kernel = self.kernel_fn( + candidates_cases, selection_cases[-1:, :], + candidates_labels, selection_labels[-1:] + ) + candidates_selection_kernel = tf.concat( + [tf.gather(all_candidates_selection_kernel, candidates_indices, axis=0), + candidates_last_selected_kernel], + axis=1 + ) + all_candidates_last_selected_kernel = tf.tensor_scatter_nd_update( + all_candidates_last_selected_kernel, + tf.expand_dims(candidates_indices, axis=1), + tf.squeeze(candidates_last_selected_kernel, axis=1) + ) + # Compute the objectives for the batch - objectives, objectives_weights = self.compute_objectives(selection_indices, selection_cases, selection_weights, selection_selection_kernel, candidates_indices, candidates_selection_kernel) - - # Select the best objective in the batch + objectives, objectives_weights = self.compute_objectives( + selection_indices, selection_cases, selection_weights, + selection_selection_kernel, candidates_indices, candidates_selection_kernel + ) + + # Select the best objective in the batch objectives_argmax = tf.argmax(objectives) - - if (best_objective is None) or (tf.gather(objectives, objectives_argmax) > best_objective): - best_objective = tf.gather(objectives, objectives_argmax) + + if (best_objective is None)\ + or (tf.gather(objectives, objectives_argmax) > best_objective): + best_objective = tf.gather(objectives, objectives_argmax) best_indice = tf.squeeze(tf.gather(candidates_indices, objectives_argmax)) best_case = tf.gather(candidates_cases, objectives_argmax) best_label = tf.gather(candidates_labels, objectives_argmax) if objectives_weights is not None: - best_weights = tf.squeeze(tf.gather(objectives_weights, objectives_argmax)) + best_weights = tf.squeeze(tf.gather(objectives_weights, objectives_argmax)) # Update the all_candidates-selection kernel if nb_selection != 0: - all_candidates_selection_kernel = tf.concat([all_candidates_selection_kernel, tf.expand_dims(all_candidates_last_selected_kernel, axis=1)], axis=1) - + all_candidates_selection_kernel = tf.concat( + [all_candidates_selection_kernel, + tf.expand_dims(all_candidates_last_selected_kernel, axis=1)], + axis=1) + # Update the selection-selection kernel if nb_selection == 0: selection_selection_kernel = tf.gather(self.diag, [[best_indice]]) - else: + else: selection_selection_kernel = tf.pad(selection_selection_kernel, [[0, 1], [0, 1]]) - best_candidate_selection_kernel = tf.gather(all_candidates_selection_kernel, [best_indice], axis=0) - 
best_candidate_selection_kernel = tf.pad(best_candidate_selection_kernel, [[nb_selection, 0], [0, 1]]) + best_candidate_selection_kernel = tf.gather(all_candidates_selection_kernel, + [best_indice], axis=0) + best_candidate_selection_kernel = tf.pad(best_candidate_selection_kernel, + [[nb_selection, 0], [0, 1]]) best_candidate_diag = tf.expand_dims(tf.gather(self.diag, [best_indice]), axis=-1) - best_candidate_diag = tf.pad(best_candidate_diag, [[nb_selection, 0], [nb_selection, 0]]) + best_candidate_diag = tf.pad(best_candidate_diag, + [[nb_selection, 0], [nb_selection, 0]]) - selection_selection_kernel = selection_selection_kernel + best_candidate_diag + best_candidate_selection_kernel + tf.transpose(best_candidate_selection_kernel) + selection_selection_kernel = selection_selection_kernel + best_candidate_diag\ + + best_candidate_selection_kernel\ + + tf.transpose(best_candidate_selection_kernel) # Update selection indices, cases and labels selection_indices = tf.concat([selection_indices, [best_indice]], axis=0) @@ -407,8 +503,10 @@ def find_prototypes(self, nb_prototypes): selection_labels = tf.concat([selection_labels, [best_label]], axis=0) # Update selection weights - selection_weights = self.update_selection_weights(selection_indices, selection_weights, selection_selection_kernel, best_indice, best_weights, best_objective) - + selection_weights = self.update_selection_weights( + selection_indices, selection_weights, selection_selection_kernel, + best_indice, best_weights, best_objective) + k += 1 prototypes_indices = selection_indices @@ -420,8 +518,11 @@ def find_prototypes(self, nb_prototypes): prototypes_weights = prototypes_weights / tf.reduce_sum(prototypes_weights) return prototypes_indices, prototypes, prototypes_labels, prototypes_weights - - def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): + + def find_examples(self, + inputs: Union[tf.Tensor, np.ndarray], + _ = None + ) -> dict: """ Search the samples to return as examples. Called by the explain methods. It may also return the indices corresponding to the samples, @@ -433,6 +534,15 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): Tensor or Array. Input samples to be explained. Assumed to have been already projected. Expected shape among (N, W), (N, T, W), (N, W, H, C). + + Returns + ------- + dict + Dictionary potentially containing the following elements: + - "examples" : the expected examples, + the inputs may be included in the first position. (n, k(+1), ...) + - "distances" : the distances between the inputs and the corresponding examples. + They are associated to the examples. (n, k, ...) 
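The `find_examples` body below converts (batch_index, index_in_batch) pairs into flat dataset positions; a toy check of that arithmetic:

```
import tensorflow as tf

batch_size = 4
# (n, k, 2) pairs of (batch_index, index_inside_batch)
pairs = tf.constant([[[0, 1], [2, 3]]])

# flat position in the unbatched dataset
flat = pairs[:, :, 0] * batch_size + pairs[:, :, 1]
print(flat.numpy())  # [[ 1 11]]
```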
""" # look for closest prototypes to projected inputs @@ -441,8 +551,9 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): # obtain closest prototypes indices with respect to the prototypes indices_wrt_prototypes = knn_output["indices"] - # convert to unique indices - indices_wrt_prototypes = indices_wrt_prototypes[:, :, 0] * self.batch_size + indices_wrt_prototypes[:, :, 1] + # convert to unique indices + indices_wrt_prototypes = indices_wrt_prototypes[:, :, 0] * self.batch_size\ + + indices_wrt_prototypes[:, :, 1] # get prototypes indices with respect to the dataset indices = tf.gather(self.prototypes_indices, indices_wrt_prototypes) @@ -453,4 +564,4 @@ def find_examples(self, inputs: Union[tf.Tensor, np.ndarray], _): knn_output["indices"] = indices - return knn_output \ No newline at end of file + return knn_output diff --git a/xplique/example_based/semifactuals.py b/xplique/example_based/semifactuals.py index 572d508d..616d2dcf 100644 --- a/xplique/example_based/semifactuals.py +++ b/xplique/example_based/semifactuals.py @@ -1,8 +1,6 @@ """ Implementation of semi factuals methods for classification tasks. """ -import warnings - import numpy as np import tensorflow as tf @@ -18,16 +16,17 @@ class KLEORBase(BaseExampleMethod): """ - Base class for KLEOR methods. KLEOR methods search Semi-Factuals examples. In those methods, one should first - retrieve the Nearest Unlike Neighbor (NUN) which is the closest example to the query that has a different prediction - than the query. Then, the method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction - as the query. + Base class for KLEOR methods. KLEOR methods search Semi-Factuals examples. + In those methods, one should first retrieve the Nearest Unlike Neighbor (NUN) + which is the closest example to the query that has a different prediction than the query. + Then, the method search for the K-Nearest Neighbors (KNN) of the NUN + that have the same prediction as the query. - All the searches are done in a projection space where distances are relevant for the model. The projection space is - defined by the `projection` method. + All the searches are done in a projection space where distances are relevant for the model. + The projection space is defined by the `projection` method. - Depending on the KLEOR method some additional condition for the search are added. See the specific KLEOR method for - more details. + Depending on the KLEOR method some additional condition for the search are added. + See the specific KLEOR method for more details. Parameters ---------- @@ -37,7 +36,8 @@ class KLEORBase(BaseExampleMethod): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets are expected to be the one-hot encoding of the model's predictions for the samples in cases_dataset. + Targets are expected to be the one-hot encoding of the model's predictions + for the samples in cases_dataset. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -53,8 +53,7 @@ class KLEORBase(BaseExampleMethod): projection Projection or Callable that project samples from the input space to the search space. The search space should be a space where distances are relevant for the model. 
-        It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in
-        searching the input space, you should use a `BaseSearchMethod` instead.
+        It should not be `None`, otherwise, the model is not involved thus not explained.

        Example of Callable:
        ```
@@ -80,8 +79,10 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
        by default "euclidean".
    """
    _returns_possibilities = [
-        "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns", "nuns_labels"
+        "examples", "weights", "distances", "labels", "include_inputs",
+        "nuns", "nuns_indices", "dist_to_nuns", "nuns_labels"
    ]
+    # pylint: disable=duplicate-code

    def __init__(
        self,
@@ -127,8 +128,9 @@ def returns(self) -> Union[List[str], str]:
    @returns.setter
    def returns(self, returns: Union[List[str], str]):
        """
-        Set the returns parameter. The returns parameter is a string or a list of string with the elements to return
-        in `self.explain()`. The elements that can be returned are defined with _returns_possibilities static attribute
+        Set the returns parameter. The returns parameter is a string
+        or a list of strings with the elements to return in `self.explain()`.
+        Possibly returned elements are defined with the `_returns_possibilities` static attribute.
        """
        default = "examples"
        self._returns = _sanitize_returns(returns, self._returns_possibilities, default)
@@ -143,7 +145,7 @@ def returns(self, returns: Union[List[str], str]):

        if isinstance(self._returns, list) and ("dist_to_nuns" in self._returns):
            self._search_returns.append("dist_to_nuns")
-
+
        try:
            self.search_method.returns = self._search_returns
        except AttributeError:
@@ -169,13 +171,15 @@ def format_search_output(
        -------
        return_dict
            Dictionary with listed elements in `self.returns`.
-            The elements that can be returned are defined with _returns_possibilities static attribute of the class.
+            The elements that can be returned are defined with the `_returns_possibilities`
+            static attribute of the class.
        """
        return_dict = super().format_search_output(search_output, inputs)
        if "nuns" in self.returns:
            return_dict["nuns"] = dataset_gather(self.cases_dataset, search_output["nuns_indices"])
        if "nuns_labels" in self.returns:
-            return_dict["nuns_labels"] = dataset_gather(self.labels_dataset, search_output["nuns_indices"])
+            return_dict["nuns_labels"] = dataset_gather(self.labels_dataset,
+                                                        search_output["nuns_indices"])
        if "nuns_indices" in self.returns:
            return_dict["nuns_indices"] = search_output["nuns_indices"]
        if "dist_to_nuns" in self.returns:
@@ -185,31 +189,38 @@

class KLEORSimMiss(KLEORBase):
    """
-    The KLEORSimMiss method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of
-    the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the
-    method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
-
-    The search is done in a projection space where distances are relevant for the model. The projection space is defined
-    by the `projection` method.
+    The KLEORSimMiss method searches for Semi-Factual examples
+    by searching for the Nearest Unlike Neighbor (NUN) of the query.
+    The NUN is the closest example to the query that has a different prediction than the query.
+    Then, the method searches for the K-Nearest Neighbors (KNN) of the NUN
+    that have the same prediction as the query.
+
+    The search is done in a projection space where distances are relevant for the model.
+    The projection space is defined by the `projection` method.
    """
    @property
    def search_method_class(self):
        """
-        This property defines the search method class to use for the search. In this case, it is the KLEORSimMissSearch.
+        This property defines the search method class to use for the search.
+        In this case, it is the KLEORSimMissSearch.
        """
        return KLEORSimMissSearch

class KLEORGlobalSim(KLEORBase):
    """
-    The KLEORGlobalSim method search for Semi-Factuals examples by searching for the Nearest Unlike Neighbor (NUN) of
-    the query. The NUN is the closest example to the query that has a different prediction than the query. Then, the
-    method search for the K-Nearest Neighbors (KNN) of the NUN that have the same prediction as the query.
-
-    In addition, for a SF candidate to be considered, the SF should be closer to the query than the NUN in the
-    projection space (i.e. the SF should be 'between' the input and its NUN). This condition is added to the search.
-
-    The search is done in a projection space where distances are relevant for the model. The projection space is defined
-    by the `projection` method.
+    The KLEORGlobalSim method searches for Semi-Factual examples
+    by searching for the Nearest Unlike Neighbor (NUN) of the query.
+    The NUN is the closest example to the query that has a different prediction than the query.
+    Then, the method searches for the K-Nearest Neighbors (KNN) of the NUN
+    that have the same prediction as the query.
+
+    In addition, for a SF candidate to be considered,
+    the SF should be closer to the query than the NUN in the projection space
+    (i.e. the SF should be 'between' the input and its NUN).
+    This condition is added to the search.
+
+    The search is done in a projection space where distances are relevant for the model.
+    The projection space is defined by the `projection` method.
    """
    @property
    def search_method_class(self):
diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py
index 1b213288..22d9d42f 100644
--- a/xplique/example_based/similar_examples.py
+++ b/xplique/example_based/similar_examples.py
@@ -14,8 +14,9 @@

class SimilarExamples(BaseExampleMethod):
    """
-    Class for similar example-based method. This class allows to search the k Nearest Neighbor of an input in the
-    projected space (defined by the projection method) using the distance defined by the distance method provided.
+    Class for the similar example-based method. This class allows searching for the k Nearest
+    Neighbors of an input in the projected space (defined by the projection method)
+    using the distance defined by the distance method provided.

    Parameters
    ----------
@@ -31,8 +32,8 @@ class SimilarExamples(BaseExampleMethod):
        Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not
        the case for your dataset, otherwise, examples will not make sense.
    targets_dataset
-        Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's
-        predictions. See `projection` for detail.
+        Targets associated to the cases_dataset for dataset projection,
+        oftentimes the one-hot encoding of a model's predictions. See `projection` for detail.
        `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it.
        Batch size and cardinality of other datasets should match `cases_dataset`.
Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -42,8 +43,7 @@ class SimilarExamples(BaseExampleMethod): projection Projection or Callable that project samples from the input space to the search space. The search space should be a space where distances are relevant for the model. - It should not be `None`, otherwise, the model is not involved thus not explained. If you are interested in - searching the input space, you should use a `BaseSearchMethod` instead. + It should not be `None`, otherwise, the model is not involved thus not explained. Example of Callable: ``` @@ -67,6 +67,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ + # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -77,7 +78,7 @@ def __init__( case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, distance: Union[int, str, Callable] = "euclidean", - ): + ): super().__init__( cases_dataset=cases_dataset, labels_dataset=labels_dataset, @@ -108,8 +109,9 @@ def search_method_class(self) -> Type[BaseSearchMethod]: class Cole(SimilarExamples): """ - Cole is a similar examples method that gives the most similar examples to a query in some specific projection space. - Cole use the model (to be explained) to build a search space so that distances are meaningful for the model. + Cole is a similar examples method that gives the most similar examples + to a query in some specific projection space. + Cole uses the model to build a search space so that distances are meaningful for the model. It uses attribution methods to weight inputs. Those attributions may be computed in the latent space for high-dimensional data like images. @@ -131,8 +133,8 @@ class Cole(SimilarExamples): Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not the case for your dataset, otherwise, examples will not make sense. targets_dataset - Targets associated to the cases_dataset for dataset projection, oftentimes the one-hot encoding of a model's - predictions. See `projection` for detail. + Targets associated to the cases_dataset for dataset projection, + oftentimes the one-hot encoding of a model's predictions. See `projection` for detail. `tf.data.Dataset` are assumed to be batched as tensorflow provide no method to verify it. Batch size and cardinality of other datasets should match `cases_dataset`. 
Be careful, `tf.data.Dataset` are often reshuffled at each iteration, be sure that it is not @@ -188,7 +190,7 @@ def __init__( if isinstance(attribution_method, str) and attribution_method.lower() == "gradient": operator = attribution_kwargs.get("operator", None) - + projection = HadamardProjection( model=model, latent_layer=latent_layer, @@ -204,8 +206,8 @@ def __init__( ) else: raise ValueError( - f"attribution_method should be 'gradient' or a subclass of BlackBoxExplainer," +\ - "not {attribution_method}" + "`attribution_method` should be 'gradient' or a subclass of BlackBoxExplainer, " +\ + f"not {attribution_method}" ) super().__init__( diff --git a/xplique/plots/image.py b/xplique/plots/image.py index c90d956b..d0a556d7 100644 --- a/xplique/plots/image.py +++ b/xplique/plots/image.py @@ -234,7 +234,6 @@ def plot_maco(image, alpha, percentile_image=1.0, percentile_alpha=80): def plot_examples( examples: np.ndarray, - weights: np.ndarray = None, distances: float = None, labels: np.ndarray = None, test_labels: np.ndarray = None, @@ -263,9 +262,6 @@ def plot_examples( Size of each subplots (in inch), considering we keep aspect ratio """ # pylint: disable=too-many-arguments - if weights is not None: - assert examples.shape[:2] == weights.shape[:2],\ - "Number of weights must correspond to the number of examples." if distances is not None: assert examples.shape[0] == distances.shape[0],\ "Number of samples treated should match between examples and distances." @@ -278,7 +274,7 @@ def plot_examples( "Number of labels for each input must correspond to the number of examples -1." # number of rows depends if weights are provided - rows_by_input = 1 + (weights is not None) + rows_by_input = 1 rows = rows_by_input * examples.shape[0] cols = examples.shape[1] # get width and height of our images @@ -309,7 +305,7 @@ def plot_examples( # configure the grid to show all results plt.rcParams["figure.autolayout"] = True - plt.rcParams["figure.figsize"] = [3 * examples.shape[1], 4 * (1 + (weights is not None))] + plt.rcParams["figure.figsize"] = [3 * examples.shape[1], 4] # loop to organize and show all results for i in range(examples.shape[0]): @@ -337,12 +333,4 @@ def plot_examples( else: plt.imshow(img) plt.axis("off") - - # plot weights - if weights is not None: - plt.subplot(rows, cols, (rows_by_input * i + 1) * cols + k + 1) - plot_attribution(weights[i, k], examples[i, k], **attribution_kwargs) - plt.axis("off") - plt.plot([-1, 1.5], [-space_with_line, -space_with_line], - color='black', lw=1, transform=plt.gca().transAxes, clip_on=False) fig.tight_layout() From 53b4ba4bceb051ce1895e59980b719eb73213300 Mon Sep 17 00:00:00 2001 From: POCHE Antonin Date: Fri, 23 Aug 2024 09:58:16 +0200 Subject: [PATCH 103/138] projections: add missing targets warning --- xplique/example_based/projections/attributions.py | 5 ++++- xplique/example_based/projections/base.py | 12 +++++++++++- xplique/example_based/projections/hadamard.py | 8 ++++++-- xplique/example_based/projections/latent_space.py | 8 ++++++-- 4 files changed, 27 insertions(+), 6 deletions(-) diff --git a/xplique/example_based/projections/attributions.py b/xplique/example_based/projections/attributions.py index ef43731e..9f3779be 100644 --- a/xplique/example_based/projections/attributions.py +++ b/xplique/example_based/projections/attributions.py @@ -80,4 +80,7 @@ def __init__( get_weights = self.attribution_method(self.predictor, **attribution_kwargs) # set methods - super().__init__(get_weights, space_projection, mappable=False) + 
super().__init__(get_weights=get_weights, + space_projection=space_projection, + mappable=False, + requires_targets=True) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 501f4d3f..8ed8cfdf 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -3,6 +3,7 @@ """ from abc import ABC +import warnings import tensorflow as tf import numpy as np @@ -63,13 +64,15 @@ def __init__(self, get_weights: Optional[Union[Callable, tf.Tensor, np.ndarray]] = None, space_projection: Optional[Callable] = None, device: Optional[str] = None, - mappable: bool = True,): + mappable: bool = True, + requires_targets: bool = False): assert get_weights is not None or space_projection is not None, ( "At least one of `get_weights` and `space_projection`" + "should not be `None`." ) self.mappable = mappable + self.requires_targets = requires_targets # set get_weights if get_weights is None: @@ -167,6 +170,13 @@ def project_dataset( projected_dataset The projected dataset. """ + if self.requires_targets and targets_dataset is None: + warnings.warn( + "The projection requires `targets` but `targets_dataset` is not provided. "\ + +"`targets` will be computed online, assuming a classification setting. "\ + +"Hence, online `targets` will be the predicted class one-hot-encoding. "\ + +"If this is not the expected behavior, please provide a `targets_dataset`.") + if self.mappable: return self._map_project_dataset(cases_dataset, targets_dataset) return self._loop_project_dataset(cases_dataset, targets_dataset) diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py index d4c61815..385b84c7 100644 --- a/xplique/example_based/projections/hadamard.py +++ b/xplique/example_based/projections/hadamard.py @@ -83,7 +83,10 @@ def __init__( mappable = isinstance(model, tf.keras.Model) # set methods - super().__init__(get_weights, space_projection, mappable=mappable) + super().__init__(get_weights=get_weights, + space_projection=space_projection, + mappable=mappable, + requires_targets=True) @classmethod def from_splitted_model(cls, @@ -124,4 +127,5 @@ def from_splitted_model(cls, super().__init__(get_weights=get_weights, space_projection=features_extractor, - mappable=mappable) + mappable=mappable, + requires_targets=True) diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py index a92961e9..0aadfd68 100644 --- a/xplique/example_based/projections/latent_space.py +++ b/xplique/example_based/projections/latent_space.py @@ -41,7 +41,9 @@ def __init__(self, features_extractor, _ = model_splitting(model, latent_layer=latent_layer, device=device) mappable = isinstance(model, tf.keras.Model) - super().__init__(space_projection=features_extractor, mappable=mappable) + super().__init__(space_projection=features_extractor, + mappable=mappable, + requires_targets=False) @classmethod def from_splitted_model(cls, @@ -66,4 +68,6 @@ def from_splitted_model(cls, assert isinstance(features_extractor, tf.keras.Model),\ f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." 
-        super().__init__(space_projection=features_extractor, mappable=mappable)
+        super().__init__(space_projection=features_extractor,
+                         mappable=mappable,
+                         requires_targets=False)

From 3de21507315efd74a3be0c3e21faf68d427c52e3 Mon Sep 17 00:00:00 2001
From: Mohamed Chafik Bakey
Date: Mon, 9 Sep 2024 10:38:39 +0200
Subject: [PATCH 104/138] docs: update prototypes

---
 .../prototypes/api_prototypes.md              | 23 +++++++++----------
 .../example_based/prototypes/mmd_critic.md    |  1 +
 .../example_based/prototypes/proto_dash.md    |  3 ++-
 .../example_based/prototypes/proto_greedy.md  |  4 ++--
 4 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md
index 2dfba112..6814cac1 100644
--- a/docs/api/example_based/prototypes/api_prototypes.md
+++ b/docs/api/example_based/prototypes/api_prototypes.md
@@ -40,6 +40,17 @@ local_prototypes = explainer(inputs)
 !!!info
     Prototypes, share a common API with other example-based methods. Thus, to understand some parameters, we recommend reading the [dedicated documentation](../../api_example_based/).

+## Specificity of prototypes
+
+The prototypes implement the API as follows.
+The `search` method includes the following additional parameters:
+
+`nb_prototypes` which represents the total number of prototypes desired to represent the entire dataset, whereas $k$ represents the number of prototypes closest to the input, allowing for a local explanation.
+
+`kernel_type`, `kernel_fn`, and `gamma` are used to specify the kernel, as these methods are based on the MMD distance.
+
+The prototype class has a `get_global_prototypes()` method, which calculates all the prototypes in the base dataset; these are called the global prototypes. The `explain` method then provides a local explanation, i.e., finds the prototypes closest to the input given as a parameter.
+
 ## Prototypes for Data-Centric Interpretability

 In this class, prototypes are selected without relying on the model and provide an overview of
@@ -55,21 +66,9 @@ For both cases, submodularity and monotonicity of $F(\mathcal{P})$ are necessary

 The library implements three methods from **Data summarization with knapsack constraint**: `MMDCritic`, `ProtoGreedy` and `ProtoDash`.

-[Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf) proposed `MMDCritic` method that used a set function based on the Maximum Mean Discrepancy [(MMD)](#what-is-mmd). They solved **data summarization with knapsack constraint** problem to find both prototypes and criticisms. First, the number of prototypes and criticisms to be found, respectively as $m_p$ and $m_c$, are selected. Second, to find prototypes, a greedy algorithm is used to maximize $F(\mathcal{P})$ s.t. $|\mathcal{P}| \le m_p$ where $F(\mathcal{P})$ is defined as:
-
-\begin{equation}
-    F(\mathcal{P})=\frac{2}{|\mathcal{P}|\cdot n}\sum_{i,j=1}^{|\mathcal{P}|,n}\kappa(p_i,x_j)-\frac{1}{|\mathcal{P}|^2}\sum_{i,j=1}^{|\mathcal{P}|}\kappa(p_i,p_j)
-\end{equation}
-
-They used diagonal dominance conditions on the kernel to ensure monotonocity and submodularity of $F(\mathcal{P})$. To find criticisms $\mathcal{C}$, the same greedy algorithm is used to select points that maximize another objective function $J(\mathcal{C})$.
-[Gurumoorthy et al., 2019](https://arxiv.org/pdf/1707.01212) associated non-negative weights to prototypes which are indicative of their importance.
This approach allows for identifying both prototypes and criticisms (the least weighted examples among prototypes) by maximizing the same weighted objective $F(\mathcal{P},w)$ defined as:
-
-\begin{equation}
-    F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\sum_{i,j=1}^{|\mathcal{P}|}w_iw_j\kappa(p_i,p_j),
-\end{equation}
-
-where $w$ are non-negative weights for each prototype. The problem then consist on finding $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} J(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$. They established the weak submodular property of $J(\mathcal{P})$ and present tractable algorithms (`ProtoGreedy` and `ProtoDash`) to optimize it.

 ### Method comparison

diff --git a/docs/api/example_based/prototypes/mmd_critic.md b/docs/api/example_based/prototypes/mmd_critic.md
index e9f436a1..020c4ccc 100644
--- a/docs/api/example_based/prototypes/mmd_critic.md
+++ b/docs/api/example_based/prototypes/mmd_critic.md
@@ -27,6 +27,7 @@ Second, to find criticisms $\mathcal{C}$, the same greedy algorithm is used to s
 !!!warning
     For `MMDCritic`, the kernel must satisfy a condition that ensures the submodularity of the set function. The Gaussian kernel meets this requirement and it is recommended. If you wish to choose a different kernel, it must satisfy the condition described by [Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf).
+
 ## Example

 ```python
diff --git a/docs/api/example_based/prototypes/proto_dash.md b/docs/api/example_based/prototypes/proto_dash.md
index 3684dcf2..ad08564d 100644
--- a/docs/api/example_based/prototypes/proto_dash.md
+++ b/docs/api/example_based/prototypes/proto_dash.md
@@ -28,7 +28,8 @@ F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\su
 \end{equation}
 where $w$ are non-negative weights for each prototype. The problem then consists of finding a subset $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} J(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$.

-[Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) proposed `ProtoDash` algorithm, which is much faster that [`ProtoGreedy`](../proto_greedy/) without compromising on the quality of the solution. In fact, `ProtoGreedy` selects the next element that maximizes the increment of the scoring function, whereas `ProtoDash` selects the next element that maximizes a tight lower bound on the increment of the scoring function.
+!!!info
+    For ProtoDash, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity.

 ## Example

 ```python
diff --git a/docs/api/example_based/prototypes/proto_greedy.md b/docs/api/example_based/prototypes/proto_greedy.md
index 35900522..b2c8d280 100644
--- a/docs/api/example_based/prototypes/proto_greedy.md
+++ b/docs/api/example_based/prototypes/proto_greedy.md
@@ -28,9 +28,9 @@ F(\mathcal{P},w)=\frac{2}{n}\sum_{i,j=1}^{|\mathcal{P}|,n}w_i\kappa(p_i,x_j)-\su
 \end{equation}
 where $w$ are non-negative weights for each prototype. The problem then consists of finding a subset $\mathcal{P}$ with a corresponding $w$ that maximizes $J(\mathcal{P}) \equiv \max_{w:supp(w)\in \mathcal{P},w\ge 0} J(\mathcal{P},w)$ s.t. $|\mathcal{P}| \leq m=m_p+m_c$.
-[Gurumoorthy et al., 2019](https://arxiv.org/abs/1707.01212) demonstrate that this problem is weakly submodular, which immediately leads to a standard greedy algorithm which they call `ProtoGreedy`.
+!!!info
+    For ProtoGreedy, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity.

-`ProtoGreedy` is algorithmically similar to greedy algorithm used by [Kim et al., 2016](https://proceedings.neurips.cc/paper_files/paper/2016/file/5680522b8e2bb01943234bce7bf84534-Paper.pdf) where both the methods greedily select the next element that maximizes the increment of the scoring function.

 ## Example

From 19d8340e4960b681f442e35cb5714f529eb73b35 Mon Sep 17 00:00:00 2001
From: Mohamed Chafik Bakey
Date: Thu, 12 Sep 2024 14:53:40 +0200
Subject: [PATCH 105/138] docs: update prototypes

---
 .../prototypes/api_prototypes.md | 61 ++++++++------------
 1 file changed, 20 insertions(+), 41 deletions(-)

diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md
index 6814cac1..fa362210 100644
--- a/docs/api/example_based/prototypes/api_prototypes.md
+++ b/docs/api/example_based/prototypes/api_prototypes.md
@@ -1,11 +1,5 @@
 # Prototypes
-Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class. Three classes of prototype-based methods are found in the literature ([Poché et al., 2023](https://hal.science/hal-04117520/document)):
-
-- [Prototypes for Data-Centric Interpretability](#prototypes-for-data-centric-interpretability)
-- [Prototypes for Post-hoc Interpretability](#prototypes-for-post-hoc-interpretability)
-- Prototype-Based Models Interpretable by Design
-
-For now, the library focuses on the first two classes.
+Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class ([Poché et al., 2023](https://hal.science/hal-04117520/document)). Using the identity projection, one is looking for the **dataset prototypes**. In contrast, using the latent space of a model as a projection, one is looking for **prototypes relevant for the model**.

 ## Common API ##
@@ -34,41 +28,24 @@ local_prototypes = explainer(inputs)

 *: Before using a PyTorch model it is highly recommended to read the [dedicated documentation](../pytorch/)

-!!!info
-    Using the identity projection, one is looking for the **dataset prototypes**. In contrast, using the latent space of a model as a projection, one is looking for **prototypes relevant for the model**.
-
 !!!info
     Prototypes, share a common API with other example-based methods. Thus, to understand some parameters, we recommend reading the [dedicated documentation](../../api_example_based/).

 ## Specificity of prototypes
+
+The search method class related to a `Prototypes` class includes the following additional parameters:

-The prototypes implement the API as follows.
-The `search` method includes the following additional parameters:
+- `nb_prototypes` which represents the total number of prototypes desired to represent the entire dataset. This should not be confused with $k$, which represents the number of prototypes closest to the input and allows for a local explanation.
-`nb_prototypes` whci represents the total number of prototypes desired to represent the entire dataset, whereas $k$ represents the number of prototypes closest to the input, allowing for a local explanation. - -`kernel_type`, `kernel_fn`, and `gamma` are used to specify the kernel, as these methods are based on the MMD distance. +- `kernel_type`, `kernel_fn`, and `gamma` which are related to the kernel used to compute the [MMD distance](#what-is-mmd). The prototype class has a `get_global_prototypes()` method, which calculates all the prototypes in the base dataset; these are called the global prototypes. The `explain` method then provides a local explanation, i.e., finds the prototypes closest to the input given as a parameter. -## Prototypes for Data-Centric Interpretability - -In this class, prototypes are selected without relying on the model and provide an overview of -the dataset. As mentioned in ([Poché et al., 2023](https://hal.science/hal-04117520/document)), we found in this class: **clustering methods** and **data summarization methods**, also known as **set cover methods**. This library focuses on **data summarization methods** which can be treated in two ways [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf): +## Implemented methods -- **Data summarization with knapsack constraint**: +The library implements three methods from **Data summarization with knapsack constraint** [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf): `MMDCritic`, `ProtoGreedy` and `ProtoDash`. **Data summarization with knapsack constraint**: consists in finding a subset of prototypes $\mathcal{P}$ that maximizes the coverage set function $F(\mathcal{P})$ under the constraint that its selection cost $C(\mathcal{P})$ (e.g., the number of selected prototypes $|\mathcal{P}|$) should be less than a given budget. - -- **Data summarization with covering constraint**: -consists in finding a low-cost subset of prototypes $\mathcal{P}$ under the constraint it should cover all the data. - -For both cases, submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to have a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). - -The library implements three methods from **Data summarization with knapsack constraint**: `MMDCritic`, `ProtoGreedy` and `ProtoDash`. - - - - +Submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to have a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). ### Method comparison @@ -78,6 +55,18 @@ The library implements three methods from **Data summarization with knapsack con - `ProtoDash` is much faster than `ProtoGreedy` without compromising on the quality of the solution (the complexity of `ProtoGreedy` is $O(n(n+m^4))$ comparing to $O(n(n+m^2)+m^4)$ for `ProtoDash`). - The approximation guarantee for `ProtoGreedy` is $(1-e^{-\gamma})$, where $\gamma$ is submodularity ratio of $F(\mathcal{P})$, comparing to $(1-e^{-1})$ for `MMDCritic`. 
+### Implementation details
+
+`MMDCritic`, `ProtoDash` and `ProtoGreedy` inherit from the `Prototypes` class, which in turn inherits from the `BaseExampleMethod` class. Each of these classes has a corresponding search method class: `MMDCriticSearch`, `ProtoDashSearch` and `ProtoGreedySearch`.
+
+`ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.
+
+Both the `MMDCriticSearch` and `ProtoDashSearch` classes inherit from `ProtoGreedySearch`.
+
+`MMDCriticSearch` and `ProtoGreedySearch` use the same greedy algorithm to find prototypes. In `ProtoGreedySearch`, the `compute_objective` method calculates optimal weights for each prototype, whereas `MMDCriticSearch` assigns uniform weights to all prototypes.
+
+`ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is [different](#method-comparison) from the one used by `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides both the `compute_objective` method and the `update_selection` method.
+
 ### What is MMD?

 The commonality among these three methods is their utilization of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes. MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows:

@@ -114,14 +103,4 @@ If we consider any exponential kernel (Gaussian kernel, Laplace, ...), we automa

 ### Default kernel

 The default kernel used is the Gaussian kernel. This kernel distance assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data is complex. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned.
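+
+To make the role of `kernel_type`, `kernel_fn` and `gamma` concrete, here is a minimal sketch of a Gaussian kernel of the kind described above. The two-argument batch signature and the `gamma` default are illustrative assumptions, not the exact interface expected by the library:
+
+```python
+import tensorflow as tf
+
+def gaussian_kernel(x, y, gamma=0.1):
+    # k(x, y) = exp(-gamma * ||x - y||^2), computed for every pair of rows
+    # assumes x: (n, d) and y: (m, d) batches of flattened features
+    squared_distances = tf.reduce_sum(tf.square(x[:, None, :] - y[None, :, :]), axis=-1)
+    return tf.exp(-gamma * squared_distances)
+```
+
+With the usual convention $\gamma = 1/(2\sigma^2)$, a smaller `gamma` corresponds to a wider kernel width $\sigma$, hence a smoother notion of similarity.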
From 67890c03c0fcb8d0ab05c26542920e109eb2d0a1 Mon Sep 17 00:00:00 2001 From: POCHE Antonin Date: Wed, 18 Sep 2024 16:43:08 +0200 Subject: [PATCH 106/138] pylint: remove E1101 no-member warning --- .pylintrc | 1 + 1 file changed, 1 insertion(+) diff --git a/.pylintrc b/.pylintrc index 91513741..4d0f7d27 100644 --- a/.pylintrc +++ b/.pylintrc @@ -6,6 +6,7 @@ disable= E1123, # issues between pylint and tensorflow since 2.2.0 E1120, # see pylint#3613 C3001, # lambda function as variable + E1101, # (no-member), flag for every tf.keras [FORMAT] max-line-length=100 From 5922f0b83e69b238d23d8db685a889af6a0176ca Mon Sep 17 00:00:00 2001 From: POCHE Antonin Date: Wed, 18 Sep 2024 16:51:35 +0200 Subject: [PATCH 107/138] counterfactuals: solve label aware explain signature --- tests/example_based/test_contrastive.py | 33 ++++++++++++++---------- xplique/example_based/counterfactuals.py | 22 +++++++++++++--- 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/tests/example_based/test_contrastive.py b/tests/example_based/test_contrastive.py index b91c5814..d136d2c4 100644 --- a/tests/example_based/test_contrastive.py +++ b/tests/example_based/test_contrastive.py @@ -67,6 +67,7 @@ def test_naive_counter_factuals(): expected_indices = tf.constant([[[0, 1], [1, 0]],[[0, 0], [1, 1]],[[1, 1], [0, 0]]], dtype=tf.int32) assert tf.reduce_all(tf.equal(indices, expected_indices)) + def test_label_aware_cf(): """ Test suite for the LabelAwareCounterFactuals class @@ -80,21 +81,22 @@ def test_label_aware_cf(): cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) inputs = tf.constant([[1.5, 2.5], [2.5, 3.5], [4.5, 5.5]], dtype=tf.float32) - cf_targets = tf.constant([[1, 0], [0, 1], [0, 1]], dtype=tf.float32) + # cf_targets = tf.constant([[0, 1], [1, 0], [1, 0]], dtype=tf.float32) + cf_expected_classes = tf.constant([[1, 0], [0, 1], [0, 1]], dtype=tf.float32) projection = Projection(space_projection=lambda inputs: inputs) # build the LabelAwareCounterFactuals object counter_factuals = LabelAwareCounterFactuals( - cases_dataset, - cases_targets_dataset, + cases_dataset=cases_dataset, + targets_dataset=cases_targets_dataset, k=1, projection=projection, case_returns=["examples", "indices", "distances", "include_inputs"], batch_size=2 ) - mask = counter_factuals.filter_fn(inputs, cases, cf_targets, cases_targets) + mask = counter_factuals.filter_fn(inputs, cases, cf_expected_classes, cases_targets) assert mask.shape == (inputs.shape[0], cases.shape[0]) expected_mask = tf.constant([ @@ -103,7 +105,7 @@ def test_label_aware_cf(): [True, False, False, True, False]], dtype=tf.bool) assert tf.reduce_all(tf.equal(mask, expected_mask)) - return_dict = counter_factuals(inputs, cf_targets) + return_dict = counter_factuals(inputs, targets=None, cf_expected_classes=cf_expected_classes) assert set(return_dict.keys()) == set(["examples", "indices", "distances"]) examples = return_dict["examples"] @@ -134,8 +136,8 @@ def test_label_aware_cf(): cases_targets_dataset = tf.data.Dataset.from_tensor_slices(cases_targets).batch(2) counter_factuals = LabelAwareCounterFactuals( - cases_dataset, - cases_targets_dataset, + cases_dataset=cases_dataset, + targets_dataset=cases_targets_dataset, k=1, projection=projection, case_returns=["examples", "indices", "distances", "include_inputs"], @@ -143,9 +145,9 @@ def test_label_aware_cf(): ) inputs = tf.constant([[1.5], [2.5], [4.5], [6.5], [8.5]], dtype=tf.float32) - cf_targets = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]], 
dtype=tf.float32) + cf_expected_classes = tf.constant([[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]], dtype=tf.float32) - mask = counter_factuals.filter_fn(inputs, cases, cf_targets, cases_targets) + mask = counter_factuals.filter_fn(inputs, cases, cf_expected_classes, cases_targets) assert mask.shape == (inputs.shape[0], cases.shape[0]) expected_mask = tf.constant([ @@ -156,7 +158,7 @@ def test_label_aware_cf(): [True, False, False, False, False, False, True, False, True, False]], dtype=tf.bool) assert tf.reduce_all(tf.equal(mask, expected_mask)) - return_dict = counter_factuals(inputs, cf_targets) + return_dict = counter_factuals(inputs, cf_expected_classes=cf_expected_classes) assert set(return_dict.keys()) == set(["examples", "indices", "distances"]) examples = return_dict["examples"] @@ -181,6 +183,7 @@ def test_label_aware_cf(): expected_indices = tf.constant([[[0, 1]],[[0, 0]],[[1, 0]],[[2, 1]],[[4, 0]]], dtype=tf.int32) assert tf.reduce_all(tf.equal(indices, expected_indices)) + def test_kleor(): """ Test suite for the Kleor class @@ -199,8 +202,8 @@ def test_kleor(): # start when strategy is sim_miss kleor_sim_miss = KLEORSimMiss( - cases_dataset, - cases_targets_dataset, + cases_dataset=cases_dataset, + targets_dataset=cases_targets_dataset, k=1, projection=projection, case_returns=["examples", "indices", "distances", "include_inputs", "nuns"], @@ -308,4 +311,8 @@ def test_contrastive_with_projection(): batch_size=7 ) - contrastive_method(features, labels) \ No newline at end of file + if isinstance(contrastive_method, LabelAwareCounterFactuals): + cf_expected_classes = tf.one_hot(tf.argmax(labels, axis=-1) + 1 % nb_labels, nb_labels) + contrastive_method(features, targets=labels, cf_expected_classes=cf_expected_classes) + else: + contrastive_method(features, targets=labels) \ No newline at end of file diff --git a/xplique/example_based/counterfactuals.py b/xplique/example_based/counterfactuals.py index 086f8f52..871a3d89 100644 --- a/xplique/example_based/counterfactuals.py +++ b/xplique/example_based/counterfactuals.py @@ -216,6 +216,7 @@ def __init__( + "be aware that when using the explain method,"\ + "the target provided is the class within one should search for the counterfactual."\ + "\nThus, it is possible that the projection of the query is going wrong.") + self.warned = False # set distance function and order for the search method self.distance = distance @@ -264,7 +265,8 @@ def filter_fn(self, _, __, cf_expected_classes, cases_targets) -> tf.Tensor: def explain( self, inputs: Union[tf.Tensor, np.ndarray], - cf_expected_classes: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + cf_expected_classes: Union[tf.Tensor, np.ndarray] = None, ): """ Return the relevant CF examples to explain the inputs. @@ -279,6 +281,9 @@ def explain( Tensor or Array. Input samples to be explained. Expected shape among (N, W), (N, T, W), (N, W, H, C). More information in the documentation. + targets + Tensor or Array. One-hot encoded labels or regression target (e.g {+1, -1}), + one for each sample. If not provided, the model's predictions are used. cf_expected_classes Tensor or Array. One-hot encoding of the target class for the counterfactuals. 
@@ -291,13 +296,24 @@ def explain( """ # pylint: disable=arguments-renamed # pylint: disable=fixme - # TODO: remove pylint disable the issue is fixed + if not self.warned: + # TODO + self.warned = True # project inputs into the search space - projected_inputs = self.projection(inputs) + projected_inputs = self.projection(inputs, targets) # look for relevant elements in the search space search_output = self.search_method(projected_inputs, cf_expected_classes) # manage returned elements return self.format_search_output(search_output, inputs) + + def __call__( + self, + inputs: Union[tf.Tensor, np.ndarray], + targets: Optional[Union[tf.Tensor, np.ndarray]] = None, + cf_expected_classes: Union[tf.Tensor, np.ndarray] = None, + ): + """explain() alias""" + return self.explain(inputs, targets, cf_expected_classes) From 1edde24713ff2d90e2133f773e5a628890fd74bb Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 18 Sep 2024 16:55:27 +0200 Subject: [PATCH 108/138] datasets operations: move them to example based --- .../test_tf_dataset_operation.py | 7 ++- xplique/commons/__init__.py | 2 - xplique/commons/data_conversion.py | 14 ++++- .../tf_dataset_operations.py | 56 +++++++++++++++++-- xplique/example_based/search_methods/base.py | 2 +- xplique/example_based/search_methods/kleor.py | 2 +- xplique/example_based/search_methods/knn.py | 2 +- .../search_methods/proto_greedy_search.py | 2 +- 8 files changed, 70 insertions(+), 17 deletions(-) rename tests/{commons => example_based}/test_tf_dataset_operation.py (95%) rename xplique/{commons => example_based/datasets_operations}/tf_dataset_operations.py (83%) diff --git a/tests/commons/test_tf_dataset_operation.py b/tests/example_based/test_tf_dataset_operation.py similarity index 95% rename from tests/commons/test_tf_dataset_operation.py rename to tests/example_based/test_tf_dataset_operation.py index 1f9a5f42..e7190b9d 100644 --- a/tests/commons/test_tf_dataset_operation.py +++ b/tests/example_based/test_tf_dataset_operation.py @@ -12,8 +12,8 @@ import tensorflow as tf -from xplique.commons.tf_dataset_operations import * -from xplique.commons.tf_dataset_operations import _almost_equal +from xplique.example_based.datasets_operations.tf_dataset_operations import * +from xplique.example_based.datasets_operations.tf_dataset_operations import _almost_equal def test_are_dataset_first_elems_equal(): @@ -78,13 +78,14 @@ def test_batch_size_matches(): tf_dataset = tf.data.Dataset.from_tensor_slices( tf.reshape(tf.range(90), (10, 3, 3)) ) + tf_dataset_b1 = tf_dataset.batch(1) tf_dataset_b2 = tf_dataset.batch(2) tf_dataset_b5 = tf_dataset.batch(5) tf_dataset_b25 = tf_dataset_b5.batch(2) tf_dataset_b52 = tf_dataset_b2.batch(5) tf_dataset_b32 = tf_dataset.batch(32) - assert batch_size_matches(tf_dataset, 3) + assert batch_size_matches(tf_dataset_b1, 1) assert batch_size_matches(tf_dataset_b2, 2) assert batch_size_matches(tf_dataset_b5, 5) assert batch_size_matches(tf_dataset_b25, 2) diff --git a/xplique/commons/__init__.py b/xplique/commons/__init__.py index 6153c01a..7439c846 100644 --- a/xplique/commons/__init__.py +++ b/xplique/commons/__init__.py @@ -11,5 +11,3 @@ get_inference_function, get_gradient_functions) from .exceptions import no_gradients_available, raise_invalid_operator from .forgrad import forgrad -from .tf_dataset_operations import are_dataset_first_elems_equal, dataset_gather, sanitize_dataset,\ - is_not_shuffled, batch_size_matches diff --git a/xplique/commons/data_conversion.py b/xplique/commons/data_conversion.py index 9bcf3309..536638f1 100644 --- 
a/xplique/commons/data_conversion.py +++ b/xplique/commons/data_conversion.py @@ -76,14 +76,24 @@ def sanitize_inputs_targets(explanation_method: Callable): explanation_method Function to wrap, should return an tf.tensor. """ - def sanitize(self, inputs: Union[tf.Tensor, np.array], + def sanitize(self, + inputs: Union[tf.Tensor, np.array], targets: Optional[Union[tf.Tensor, np.array]] = None, + *args, + **kwargs ): # ensure we have tf.tensor inputs = tf.cast(inputs, tf.float32) if targets is not None: targets = tf.cast(targets, tf.float32) + + if args: + args = [tf.cast(arg, tf.float32) for arg in args] + + if kwargs: + kwargs = {key: tf.cast(value, tf.float32) for key, value in kwargs.items()} + # then enter the explanation function - return explanation_method(self, inputs, targets) + return explanation_method(self, inputs, targets, *args, **kwargs) return sanitize diff --git a/xplique/commons/tf_dataset_operations.py b/xplique/example_based/datasets_operations/tf_dataset_operations.py similarity index 83% rename from xplique/commons/tf_dataset_operations.py rename to xplique/example_based/datasets_operations/tf_dataset_operations.py index 20933bfa..f4300248 100644 --- a/xplique/commons/tf_dataset_operations.py +++ b/xplique/example_based/datasets_operations/tf_dataset_operations.py @@ -6,7 +6,7 @@ import numpy as np import tensorflow as tf -from ..types import Optional, Union +from ...types import Optional, Union def _almost_equal(arr1, arr2, epsilon=1e-6): @@ -14,9 +14,9 @@ def _almost_equal(arr1, arr2, epsilon=1e-6): return np.shape(arr1) == np.shape(arr2) and np.sum(np.abs(arr1 - arr2)) < epsilon -def are_dataset_first_elems_equal( - dataset1: Optional[tf.data.Dataset], dataset2: Optional[tf.data.Dataset] -) -> bool: +def are_dataset_first_elems_equal(dataset1: Optional[tf.data.Dataset] = None, + dataset2: Optional[tf.data.Dataset] = None, + ) -> bool: """ Test if the first batch of elements of two datasets are the same. It is used to verify equality between datasets in a lazy way. @@ -51,6 +51,38 @@ def are_dataset_first_elems_equal( return _almost_equal(next1, next2) +def is_batched(dataset: tf.data.Dataset) -> bool: + """ + Check if a TensorFlow dataset is batched. + + Parameters + ---------- + dataset : tf.data.Dataset + The dataset to check. + + Returns + ------- + bool + True if the dataset is batched, False otherwise. + """ + # Extract the element_spec + spec = dataset.element_spec + + # Handle datasets with tuple or dict structures + if isinstance(spec, (tuple, dict)): + # Check if any part of the element_spec is batched + if isinstance(spec, tuple): + return all(s.shape[0] is None for s in spec) + if isinstance(spec, dict): + return all(s.shape[0] is None for s in spec.values()) + else: + # Check if the first dimension is None (indicating batching) + return spec.shape[0] is None + + # If we reach here, it's not batched + return False + + def is_not_shuffled(dataset: Optional[tf.data.Dataset]) -> bool: """ Test if the provided dataset reshuffle at each iteration. @@ -93,6 +125,9 @@ def batch_size_matches(dataset: Optional[tf.data.Dataset], batch_size: int) -> b if dataset is None: # ignored return True + + if not is_batched(dataset): + return False first_item = next(iter(dataset)) if isinstance(first_item, tuple): @@ -111,7 +146,7 @@ def sanitize_dataset( Function to ensure input dataset match expected format. It also transforms tensors in `tf.data.Dataset` and also verify the properties. 
This function verify that datasets do not reshuffle at each iteration and - that their batch isze and cardinality match the expected ones. + that their batch and cardinality match the expected ones. Note that, that Tensorflow do not provide easy way to make those tests, hence, for cost constraints, our tests are not perfect. @@ -141,8 +176,13 @@ def sanitize_dataset( assert batch_size_matches( dataset, batch_size ), "The batch size should match between datasets." - else: + elif isinstance(dataset, (tf.Tensor, np.ndarray)): dataset = tf.data.Dataset.from_tensor_slices(dataset).batch(batch_size) + else: + raise ValueError( + "The input dataset should be a `tf.data.Dataset`, a `tf.Tensor` or a `np.array`. " + + f"Received {type(dataset)}." + ) if cardinality is not None and cardinality > 0: dataset_cardinality = dataset.cardinality().numpy() @@ -153,6 +193,10 @@ def sanitize_dataset( + "You may have provided non-batched datasets "\ + "or datasets with different lengths." ) + else: + # negative cardinality means unknown cardinality, + # it will be the case for datasets created from generator, thus torch converted ones + pass return dataset diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index db9fe5af..b69c33ff 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -8,7 +8,7 @@ import numpy as np from ...types import Union, Optional, List -from ...commons import sanitize_dataset +from ..datasets_operations.tf_dataset_operations import sanitize_dataset class ORDER(Enum): """ diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index 08726515..48d0cadb 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -6,7 +6,7 @@ import numpy as np import tensorflow as tf -from ...commons import dataset_gather +from ..datasets_operations.tf_dataset_operations import dataset_gather from ...types import Callable, List, Union, Optional, Tuple from .base import ORDER diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index f1141623..ff64f2c3 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -6,7 +6,7 @@ import numpy as np import tensorflow as tf -from ...commons import dataset_gather, sanitize_dataset +from ..datasets_operations.tf_dataset_operations import dataset_gather, sanitize_dataset from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod, ORDER diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index f439039e..2c4dfcc2 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -5,7 +5,7 @@ import numpy as np import tensorflow as tf -from ...commons import sanitize_dataset +from ..datasets_operations.tf_dataset_operations import sanitize_dataset from ...types import Callable, List, Union, Optional, Tuple from .base import BaseSearchMethod From 38fcd817724e716041e7fe3d6b8d525700188e96 Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 18 Sep 2024 16:56:42 +0200 Subject: [PATCH 109/138] example based: factorize inputs harmonization --- xplique/example_based/base_example_method.py | 111 +-------- .../datasets_operations/harmonize.py | 235 ++++++++++++++++++ 2 files changed, 
239 insertions(+), 107 deletions(-) create mode 100644 xplique/example_based/datasets_operations/harmonize.py diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py index 1272ec65..c299712c 100644 --- a/xplique/example_based/base_example_method.py +++ b/xplique/example_based/base_example_method.py @@ -12,7 +12,8 @@ from ..types import Callable, Dict, List, Optional, Type, Union from ..commons import sanitize_inputs_targets -from ..commons import sanitize_dataset, dataset_gather +from .datasets_operations.harmonize import harmonize_datasets +from .datasets_operations.tf_dataset_operations import dataset_gather from .search_methods import BaseSearchMethod from .projections import Projection @@ -91,9 +92,8 @@ def __init__( ), "`BaseExampleMethod` without Projection method should be a `BaseSearchMethod`." # set attributes - self.batch_size = self._initialize_cases_dataset( - cases_dataset, labels_dataset, targets_dataset, batch_size - ) + self.cases_dataset, self.labels_dataset, self.targets_dataset, self.batch_size =\ + harmonize_datasets(cases_dataset, labels_dataset, targets_dataset, batch_size) self._search_returns = ["indices", "distances"] @@ -175,109 +175,6 @@ def returns(self, returns: Union[List[str], str]): default = "examples" self._returns = _sanitize_returns(returns, self._returns_possibilities, default) - def _initialize_cases_dataset( - self, - cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], - targets_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]], - batch_size: Optional[int], - ) -> int: - """ - Factorization of `__init__()` method for dataset related attributes. - - Parameters - ---------- - cases_dataset - The dataset used to train the model, examples are extracted from this dataset. - labels_dataset - Labels associated to the examples in the cases_dataset. - Indices should match with cases_dataset. - targets_dataset - Targets associated to the cases_dataset for dataset projection. - See `projection` for details. - batch_size - Number of sample treated simultaneously when using the datasets. - Ignored if `tf.data.Dataset` are provided (those are supposed to be batched). - - Returns - ------- - batch_size - Number of sample treated simultaneously when using the datasets. - Extracted from the datasets in case they are `tf.data.Dataset`. - Otherwise, the input value. - """ - # at least one dataset provided - if isinstance(cases_dataset, tf.data.Dataset): - # set batch size (ignore provided argument) and cardinality - if isinstance(cases_dataset.element_spec, tuple): - batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy() - else: - batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() - - cardinality = cases_dataset.cardinality().numpy() - else: - # if cases_dataset is not a `tf.data.Dataset`, then neither should the other. - assert not isinstance(labels_dataset, tf.data.Dataset), ( - "if the cases_dataset is not a `tf.data.Dataset`, " - + "then neither should the labels_dataset." - ) - assert not isinstance(targets_dataset, tf.data.Dataset), ( - "if the cases_dataset is not a `tf.data.Dataset`, " - + "then neither should the targets_dataset." 
- ) - # set batch size and cardinality - batch_size = min(batch_size, len(cases_dataset)) - cardinality = math.ceil(len(cases_dataset) / batch_size) - - # verify cardinality and create datasets from the tensors - self.cases_dataset = sanitize_dataset( - cases_dataset, batch_size, cardinality - ) - self.labels_dataset = sanitize_dataset( - labels_dataset, batch_size, cardinality - ) - self.targets_dataset = sanitize_dataset( - targets_dataset, batch_size, cardinality - ) - - # if the provided `cases_dataset` has several columns - if isinstance(self.cases_dataset.element_spec, tuple): - # switch case on the number of columns of `cases_dataset` - if len(self.cases_dataset.element_spec) == 2: - assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels. " - + "Hence, `labels_dataset` should be empty." - ) - self.labels_dataset = self.cases_dataset.map(lambda x, y: y) - self.cases_dataset = self.cases_dataset.map(lambda x, y: x) - - elif len(self.cases_dataset.element_spec) == 3: - assert self.labels_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels. " - + "Hence, `labels_dataset` should be empty." - ) - assert self.targets_dataset is None, ( - "The second column of `cases_dataset` is assumed to be the labels. " - + "Hence, `labels_dataset` should be empty." - ) - self.targets_dataset = self.cases_dataset.map(lambda x, y, t: t) - self.labels_dataset = self.cases_dataset.map(lambda x, y, t: y) - self.cases_dataset = self.cases_dataset.map(lambda x, y, t: x) - else: - raise AttributeError( - "`cases_dataset` cannot possess more than 3 columns, "\ - + f"{len(self.cases_dataset.element_spec)} were detected." - ) - - # prefetch datasets - self.cases_dataset = self.cases_dataset.prefetch(tf.data.AUTOTUNE) - if self.labels_dataset is not None: - self.labels_dataset = self.labels_dataset.prefetch(tf.data.AUTOTUNE) - if self.targets_dataset is not None: - self.targets_dataset = self.targets_dataset.prefetch(tf.data.AUTOTUNE) - - return batch_size - @sanitize_inputs_targets def explain( self, diff --git a/xplique/example_based/datasets_operations/harmonize.py b/xplique/example_based/datasets_operations/harmonize.py new file mode 100644 index 00000000..25beaee2 --- /dev/null +++ b/xplique/example_based/datasets_operations/harmonize.py @@ -0,0 +1,235 @@ +""" +Allow Example-based methods to work with different types of datasets and tensors. +""" + + +import math +from typing import Optional, Tuple, TypeVar + +import numpy as np +import tensorflow as tf + +from .tf_dataset_operations import sanitize_dataset, is_batched + +DatasetTensor = TypeVar("DatasetTensor", + tf.Tensor, np.ndarray, "torch.Tensor", + tf.data.Dataset, "torch.utils.data.DataLoader") + + +def split_tf_dataset(cases_dataset: tf.data.Dataset, + labels_dataset: Optional[tf.data.Dataset] = None, + targets_dataset: Optional[tf.data.Dataset] = None + ) -> Tuple[tf.data.Dataset, tf.data.Dataset, tf.data.Dataset]: + """ + Splits a TensorFlow dataset into cases, labels, and targets datasets. + The dataset is splitted only if it has multiple columns. + If the dataset has 2 columns, the second column is assumed to be the labels. + If the dataset has several columns but labels and targets are provided, + there is a conflict and an error is raised. + + Parameters + ---------- + cases_dataset + The dataset to split. + labels_dataset + Labels associated with the cases in the `cases_dataset`. + If this function is called, it should be `None`. 
+ targets_dataset + Targets associated with the cases in the `cases_dataset`. + If this function is called and `cases_dataset` has 3 columns, it should be `None`. + + Returns + ------- + cases_dataset + The dataset used to train the model. + labels_dataset + Labels associated with the `cases_dataset`. + targets_dataset + Targets associated with the `cases_dataset`. + """ + + assert isinstance(cases_dataset, tf.data.Dataset), ( + f"The dataset should be a `tf.data.Dataset`, got {type(cases_dataset)}." + ) + + if isinstance(cases_dataset.element_spec, tuple): + if len(cases_dataset.element_spec) == 2: + assert labels_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels. "\ + + "Hence, `labels_dataset` should be empty." + ) + labels_dataset = cases_dataset.map(lambda x, y: y) + cases_dataset = cases_dataset.map(lambda x, y: x) + elif len(cases_dataset.element_spec) == 3: + assert labels_dataset is None and targets_dataset is None, ( + "The second and third columns of `cases_dataset` are assumed to be the labels "\ + "and targets. Hence, `labels_dataset` and `targets_dataset` should be empty." + ) + targets_dataset = cases_dataset.map(lambda x, y, t: t) + labels_dataset = cases_dataset.map(lambda x, y, t: y) + cases_dataset = cases_dataset.map(lambda x, y, t: x) + else: + raise AttributeError( + "`cases_dataset` cannot have more than 3 columns, " + + f"{len(cases_dataset.element_spec)} were detected." + ) + + return cases_dataset, labels_dataset, targets_dataset + + +def harmonize_datasets( + cases_dataset: DatasetTensor, + labels_dataset: Optional[DatasetTensor] = None, + targets_dataset: Optional[DatasetTensor] = None, + batch_size: Optional[int] = None, + ) -> Tuple[DatasetTensor, DatasetTensor, DatasetTensor, int]: + """ + Harmonizes the provided datasets, ensuring they are either `tf.data.Dataset` or + `torch.utils.data.DataLoader`, and transforms them if necessary. + If the datasets have multiple columns, the function will split them into cases, + labels, and targets datasets based on the number of columns. + + This function supports both TensorFlow and PyTorch datasets. + + Parameters + ---------- + cases_dataset : DatasetTensor + The dataset used to train the model, examples are extracted from this dataset. + If the dataset has multiple columns, + the function will split it into cases, labels, and targets. + All datasets should be of the same type. + labels_dataset : Optional[DatasetTensor] + Labels associated with the examples in the `cases_dataset`. + All datasets should be of the same type. + targets_dataset : Optional[DatasetTensor] + Targets associated with the `cases_dataset` for dataset projection. + All datasets should be of the same type. + batch_size : Optional[int] + Number of samples treated simultaneously when using the datasets. + It should match the batch size of the datasets if they are batched. + + Returns + ------- + cases_dataset : DatasetTensor + The harmonized dataset used to train the model. + labels_dataset : DatasetTensor + Harmonized labels associated with the `cases_dataset`. + targets_dataset : DatasetTensor + Harmonized targets associated with the `cases_dataset`. + batch_size : int + Number of samples treated simultaneously when using the datasets. + """ + # Ensure the datasets are of the same type + if labels_dataset is not None: + if isinstance(cases_dataset, tf.data.Dataset): + assert isinstance(labels_dataset, tf.data.Dataset), ( + "The labels_dataset should be a `tf.data.Dataset` if the cases_dataset is." 
+ ) + assert not isinstance(labels_dataset.element_spec, tuple), ( + "The labels_dataset should only have one column." + ) + else: + assert isinstance(cases_dataset, type(labels_dataset)), ( + "The cases_dataset and labels_dataset should be of the same type."\ + + f"Got {type(cases_dataset)} and {type(labels_dataset)}." + ) + if targets_dataset is not None: + if isinstance(cases_dataset, tf.data.Dataset): + assert isinstance(targets_dataset, tf.data.Dataset), ( + "The targets_dataset should be a `tf.data.Dataset` if the cases_dataset is." + ) + assert not isinstance(targets_dataset.element_spec, tuple), ( + "The targets_dataset should only have one column." + ) + else: + assert isinstance(cases_dataset, type(targets_dataset)), ( + "The cases_dataset and targets_dataset should be of the same type."\ + + f"Got {type(cases_dataset)} and {type(targets_dataset)}." + ) + + # Determine batch size and cardinality based on the dataset type + # for torch elements, convert them to numpy arrays or tf datasets + if isinstance(cases_dataset, tf.data.Dataset): + # compute batch size and cardinality + if is_batched(cases_dataset): + if isinstance(cases_dataset.element_spec, tuple): + batch_size = tf.shape(next(iter(cases_dataset))[0])[0].numpy() + else: + batch_size = tf.shape(next(iter(cases_dataset)))[0].numpy() + else: + assert batch_size is not None, ( + "The dataset is not batched, hence the batch size should be provided." + ) + cases_dataset = cases_dataset.batch(batch_size) + cardinality = cases_dataset.cardinality().numpy() + + # handle multi-column datasets + if isinstance(cases_dataset.element_spec, tuple): + # split dataset if `cases_dataset` has multiple columns + cases_dataset, labels_dataset, targets_dataset =\ + split_tf_dataset(cases_dataset, labels_dataset, targets_dataset) + elif isinstance(cases_dataset, np.ndarray) or isinstance(cases_dataset, tf.Tensor): + # compute batch size and cardinality + if batch_size is None: + # no batching, one batch encompass all the dataset + batch_size = cases_dataset.shape[0] + else: + batch_size = min(batch_size, cases_dataset.shape[0]) + cardinality = math.ceil(cases_dataset.shape[0] / batch_size) + + # tensors will be converted to tf.data.Dataset via the snitize function + else: + error_message = "Unknown cases dataset type, should be in: [tf.data.Dataset, tf.Tensor, "\ + + "np.ndarray, torch.Tensor, torch.utils.data.DataLoader]. "\ + + f"But got {type(cases_dataset)} instead." 
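+        # note: torch is an optional dependency of the library, so it is only
+        # imported lazily (below) when a torch-like input is actually received;
+        # TensorFlow-only installations can thus still use the example-based methods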
+ # try to import torch and torch.utils.data.DataLoader to treat possible input types + try: + import torch + from torch.utils.data import DataLoader + from .convert_torch_to_tf import split_and_convert_column_dataloader + except ImportError as exc: + raise AttributeError(error_message) from exc + + if isinstance(cases_dataset, torch.Tensor): + # compute batch size and cardinality + if batch_size is None: + # no batching, one batch encompass all the dataset + batch_size = cases_dataset.shape[0] + else: + batch_size = min(batch_size, cases_dataset.shape[0]) + cardinality = math.ceil(cases_dataset.shape[0] / batch_size) + + # convert torch tensor to numpy array + cases_dataset = cases_dataset.cpu().numpy() + if labels_dataset is not None: + labels_dataset = labels_dataset.cpu().numpy() + if targets_dataset is not None: + targets_dataset = targets_dataset.cpu().numpy() + + # tensors will be converted to tf.data.Dataset via the snitize function + elif isinstance(cases_dataset, torch.utils.data.DataLoader): + if batch_size is not None: + assert cases_dataset.batch_size == batch_size, ( + "The DataLoader batch size should match the provided batch size. "\ + + f"Got {cases_dataset.batch_size} from DataLoader and {batch_size} specified." + ) + batch_size = cases_dataset.batch_size + cardinality = len(cases_dataset) + cases_dataset, labels_dataset, targets_dataset =\ + split_and_convert_column_dataloader(cases_dataset, labels_dataset, targets_dataset) + else: + raise AttributeError(error_message) + + # Sanitize datasets to ensure they are in the correct format + cases_dataset = sanitize_dataset(cases_dataset, batch_size, cardinality) + labels_dataset = sanitize_dataset(labels_dataset, batch_size, cardinality) + targets_dataset = sanitize_dataset(targets_dataset, batch_size, cardinality) + + # Prefetch datasets + cases_dataset = cases_dataset.prefetch(tf.data.AUTOTUNE) + if labels_dataset is not None: + labels_dataset = labels_dataset.prefetch(tf.data.AUTOTUNE) + if targets_dataset is not None: + targets_dataset = targets_dataset.prefetch(tf.data.AUTOTUNE) + + return cases_dataset, labels_dataset, targets_dataset, batch_size From 027419129fa535a73f24cf1fc2adf2e6ed5577f7 Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 18 Sep 2024 16:57:40 +0200 Subject: [PATCH 110/138] example based: add support for torch dataloader --- .../test_datasets_harmonization.py | 240 +++++++++++ tests/example_based/test_torch.py | 388 ++++++++++++++++++ .../convert_torch_to_tf.py | 165 ++++++++ 3 files changed, 793 insertions(+) create mode 100644 tests/example_based/test_datasets_harmonization.py create mode 100644 tests/example_based/test_torch.py create mode 100644 xplique/example_based/datasets_operations/convert_torch_to_tf.py diff --git a/tests/example_based/test_datasets_harmonization.py b/tests/example_based/test_datasets_harmonization.py new file mode 100644 index 00000000..e9908c90 --- /dev/null +++ b/tests/example_based/test_datasets_harmonization.py @@ -0,0 +1,240 @@ +import pytest +import unittest +import tensorflow as tf +import numpy as np + + +from xplique.example_based.datasets_operations.tf_dataset_operations import are_dataset_first_elems_equal +from xplique.example_based.datasets_operations.harmonize import split_tf_dataset, harmonize_datasets + + +def generate_tf_dataset(n_samples=100, n_features=10, n_labels=1, n_targets=None, batch_size=None): + """ + Utility function to generate TensorFlow datasets for testing. 
+ """ + cases = np.random.random((n_samples, n_features, n_features)).astype(np.float32) + labels = np.random.randint(0, 2, size=(n_samples, n_labels)).astype(np.int64) + + if n_targets is not None: + targets = np.random.random((n_samples, n_targets)).astype(np.float32) + dataset = tf.data.Dataset.from_tensor_slices((cases, labels, targets)) + else: + dataset = tf.data.Dataset.from_tensor_slices((cases, labels)) + + if batch_size is not None: + dataset = dataset.batch(batch_size) + + return dataset + + +def test_split_tf_dataset_two_columns(): + dataset = generate_tf_dataset(n_samples=100, n_features=5, n_labels=2, batch_size=8) + + cases, labels, targets = split_tf_dataset(dataset) + + assert labels is not None, "Labels dataset should not be None for a 2-column dataset." + assert targets is None, "Targets dataset should be None for a 2-column dataset." + + for case_h, label_h, (case, label) in zip(cases, labels, dataset): + assert len(case.shape) == 3 + assert len(label.shape) == 2 + assert np.allclose(case_h, case), "Cases should match the original dataset." + assert np.allclose(label_h, label), "Labels should match the original dataset." + + +def test_split_tf_dataset_three_columns(): + dataset = generate_tf_dataset(n_samples=100, n_features=5, n_labels=2, n_targets=2, batch_size=8) + + cases, labels, targets = split_tf_dataset(dataset) + + assert labels is not None, "Labels dataset should not be None for a 3-column dataset." + assert targets is not None, "Targets dataset should not be None for a 3-column dataset." + + for case_h, label_h, target_h, (case, label, target) in zip(cases, labels, targets, dataset): + assert len(case.shape) == 3 + assert len(label.shape) == 2 + assert len(target.shape) == 2 + assert np.allclose(case_h, case), "Cases should match the original dataset." + assert np.allclose(label_h, label), "Labels should match the original dataset." + assert np.allclose(target_h, target), "Targets should match the original dataset." + + +def test_harmonize_datasets_with_tf_dataset(): + dataset = generate_tf_dataset(n_samples=100, n_features=5, n_labels=3) + batch_size = 10 + + cases, labels, targets, batch_size_out = harmonize_datasets(dataset, batch_size=batch_size) + + assert cases is not None, "Cases dataset should not be None." + assert labels is not None, "Labels dataset should not be None." + assert targets is None, "Targets dataset should be None for a 2-column input dataset." + assert batch_size_out == batch_size, "Output batch size should match the input batch size." + + +def test_harmonize_datasets_with_tf_dataset_three_columns(): + batch_size = 10 + dataset = generate_tf_dataset(n_samples=100, n_features=10, n_labels=1, n_targets=1, batch_size=batch_size) + + cases, labels, targets, batch_size_out = harmonize_datasets(dataset, batch_size=batch_size) + + assert cases is not None, "Cases dataset should not be None." + assert labels is not None, "Labels dataset should not be None." + assert targets is not None, "Targets dataset should not be None for a 3-column input dataset." + assert batch_size_out == batch_size, "Output batch size should match the input batch size." + + +def test_harmonize_datasets_with_numpy(): + cases = np.random.random((100, 10)).astype(np.float32) + labels = np.random.randint(0, 2, size=(100, 1)).astype(np.int64) + batch_size = 10 + + cases_out, labels_out, targets_out, batch_size_out = harmonize_datasets(cases, labels, batch_size=batch_size) + + assert targets_out is None, "Targets should be None when not provided." 
+ assert batch_size_out == batch_size, "Output batch size should match the input batch size." + + for case, label in zip(cases_out, labels_out): + assert case.shape == (batch_size, cases.shape[1]), "Each case should have the same shape as the input cases." + assert label.shape == (batch_size, labels.shape[1]), "Each label should have the same shape as the input labels." + break + + +def test_inputs_combinations(): + """ + Test management of dataset init inputs + """ + + tf_tensor = tf.reshape(tf.range(90, dtype=tf.float32), (10, 3, 3)) + np_array = np.array(tf_tensor) + tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) + + tf_dataset_b3 = tf_dataset.batch(3) + tf_dataset_b5 = tf_dataset.batch(5) + + tf_one_shuffle = tf_dataset.shuffle(32, 0, reshuffle_each_iteration=False).batch(4) + + # Method initialization that should work + cases_dataset, labels_dataset, targets_dataset, batch_size = harmonize_datasets(tf_dataset_b3, None, tf_dataset_b3) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b3) + assert are_dataset_first_elems_equal(labels_dataset, None) + assert are_dataset_first_elems_equal(targets_dataset, tf_dataset_b3) + assert batch_size == 3 + + cases_dataset, labels_dataset, targets_dataset, batch_size = harmonize_datasets(tf_tensor, tf_tensor, None, batch_size=5) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(targets_dataset, None) + assert batch_size == 5 + + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), None, tf_dataset_b5) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(targets_dataset, tf_dataset_b5) + assert batch_size == 5 + + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5, tf_dataset_b5))) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(targets_dataset, tf_dataset_b5) + assert batch_size == 5 + + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(tf.data.Dataset.zip((tf_one_shuffle, tf_one_shuffle))) + assert are_dataset_first_elems_equal(cases_dataset, tf_one_shuffle) + assert are_dataset_first_elems_equal(labels_dataset, tf_one_shuffle) + assert are_dataset_first_elems_equal(targets_dataset, None) + assert batch_size == 4 + + cases_dataset, labels_dataset, targets_dataset, batch_size = harmonize_datasets(tf_one_shuffle) + assert are_dataset_first_elems_equal(cases_dataset, tf_one_shuffle) + assert are_dataset_first_elems_equal(labels_dataset, None) + assert are_dataset_first_elems_equal(targets_dataset, None) + assert batch_size == 4 + + + +def test_error_raising(): + """ + Test management of dataset init inputs + """ + + tf_tensor = tf.reshape(tf.range(90, dtype=tf.float32), (10, 3, 3)) + np_array = np.array(tf_tensor) + tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) + too_short_np_array = np_array[:3] + too_long_tf_dataset = tf_dataset.concatenate(tf_dataset) + + tf_dataset_b3 = tf_dataset.batch(3) + tf_dataset_b5 = tf_dataset.batch(5) + too_long_tf_dataset_b5 = too_long_tf_dataset.batch(5) + too_long_tf_dataset_b10 = too_long_tf_dataset.batch(10) 
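+    # below, a dataset that reshuffles at each iteration: it must be rejected
+    # because the element order has to stay fixed for cases, labels, and targets
+    # to keep matching indices across iterations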
+ + tf_shuffled = tf_dataset.shuffle(32, 0).batch(4) + + # Method initialization that should not work + test_raise_assertion_error = unittest.TestCase().assertRaises + + # not input + test_raise_assertion_error(TypeError, harmonize_datasets) + + # shuffled + test_raise_assertion_error(AssertionError, harmonize_datasets, tf_shuffled,) + + # mismatching types + test_raise_assertion_error(AssertionError, harmonize_datasets, tf_dataset, tf_tensor,) + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), + np_array, + ) + test_raise_assertion_error( + AssertionError, harmonize_datasets, tf_dataset_b3, too_short_np_array + ) + test_raise_assertion_error( + AssertionError, harmonize_datasets, tf_dataset, None, too_long_tf_dataset + ) + + # not batched and no batch size provided + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + tf.data.Dataset.from_tensor_slices((tf_tensor, tf_tensor)), + tf_dataset, + ) + + # not matching batch sizes + test_raise_assertion_error( + AssertionError, harmonize_datasets, tf_dataset_b3, tf_dataset_b5, + ) + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + too_long_tf_dataset_b10, + tf_dataset_b5, + ) + + # mismatching cardinality + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + tf_dataset_b5, + too_long_tf_dataset_b5, + ) + + # multiple datasets for labels or targets + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), + tf_dataset_b5, + ) + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5, tf_dataset_b5)), + None, + tf_dataset_b5, + ) diff --git a/tests/example_based/test_torch.py b/tests/example_based/test_torch.py new file mode 100644 index 00000000..61368559 --- /dev/null +++ b/tests/example_based/test_torch.py @@ -0,0 +1,388 @@ +""" +Test example-based methods with PyTorch models and datasets. 
+""" + +import unittest + +import numpy as np +import tensorflow as tf +import torch +from torch import nn +from torch.utils.data import TensorDataset, DataLoader, ConcatDataset + +from xplique.example_based import SimilarExamples +from xplique.example_based.projections import Projection, LatentSpaceProjection +from xplique.example_based.projections.commons import model_splitting + +from xplique.example_based.datasets_operations.tf_dataset_operations import are_dataset_first_elems_equal +from xplique.example_based.datasets_operations.harmonize import harmonize_datasets + +from tests.utils import almost_equal + + +def get_setup(input_shape, nb_samples=10, nb_labels=10): + """ + Generate data and model for SimilarExamples + """ + # Data generation + x_train = torch.stack( + [i * torch.ones(input_shape, dtype=torch.float32) for i in range(nb_samples)] + ) + + x_test = x_train[1:-1] # Exclude the first and last elements + y_train = torch.arange(len(x_train), dtype=torch.float32) % nb_labels + + return x_train, x_test, y_train + + +def create_cnn_model(input_shape, output_shape): + in_channels, height, width = input_shape + + kernel_size = 3 + padding = 1 + stride = 1 + + # Calculate the flattened size after the convolutional layers and pooling + def conv_output_size(in_size): + return (in_size - kernel_size + 2 * padding) // stride + 1 + + height_after_conv1 = conv_output_size(height) // 2 # After first conv and pooling + height_after_conv2 = conv_output_size(height_after_conv1) // 2 # After second conv and pooling + + width_after_conv1 = conv_output_size(width) // 2 # After first conv and pooling + width_after_conv2 = conv_output_size(width_after_conv1) // 2 # After second conv and pooling + + flat_size = 8 * height_after_conv2 * width_after_conv2 # 8 is the number of filters in the last conv layer + + model = nn.Sequential( + # Convolutional layer 1 + nn.Conv2d(in_channels=in_channels, out_channels=4, kernel_size=kernel_size, padding=padding), # 4 filters + nn.ReLU(), + nn.MaxPool2d(kernel_size=2, stride=2), # Pooling layer (2x2) + + # Convolutional layer 2 + nn.Conv2d(in_channels=4, out_channels=8, kernel_size=kernel_size, padding=padding), # 8 filters + nn.ReLU(), + nn.MaxPool2d(kernel_size=2, stride=2), # Pooling layer (2x2) + + # Flatten layer + nn.Flatten(), + + # Fully connected layer 1 + nn.Linear(flat_size, 16), + nn.ReLU(), + + # Output layer + nn.Linear(16, output_shape) + ) + + # Initialize all weights to ones + for layer in model: + if isinstance(layer, (nn.Conv2d, nn.Linear)): + nn.init.constant_(layer.weight, 1.0) # Set all weights to ones + if layer.bias is not None: + nn.init.constant_(layer.bias, 0.0) # Optionally set all biases to zero + + return model + + +def test_harmonize_datasets_with_torch(): + import torch + + cases = torch.rand(100, 10) + labels = torch.randint(0, 2, (100, 1)) + batch_size = 10 + + cases_out, labels_out, targets_out, batch_size_out = harmonize_datasets(cases, labels, batch_size=batch_size) + + assert targets_out is None, "Targets should be None when not provided." + assert batch_size_out == batch_size, "Output batch size should match the input batch size." + + for case, label in zip(cases_out, labels_out): + assert case.shape == (batch_size, cases.shape[1]), "Each case should have the same shape as the input cases." + assert label.shape == (batch_size, labels.shape[1]), "Each label should have the same shape as the input labels." 
+ break + + +def test_inputs_combinations(): + """ + Test management of dataset init inputs + """ + + tf_tensor = tf.reshape(tf.range(90, dtype=tf.float32), (10, 3, 3)) + np_array = np.array(tf_tensor) + tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) + + tf_dataset_b3 = tf_dataset.batch(3) + tf_dataset_b5 = tf_dataset.batch(5) + + torch_tensor = torch.tensor(np_array) + torch_dataset = TensorDataset(torch_tensor) + zipped2 = TensorDataset(torch_tensor, torch_tensor) + zipped3 = TensorDataset(torch_tensor, torch_tensor, torch_tensor) + torch_dataloader_b3 = DataLoader(torch_dataset, batch_size=3, shuffle=False) + torch_dataloader_b5 = DataLoader(torch_dataset, batch_size=5, shuffle=False) + torch_zipped2_dataloader_b5 = DataLoader(zipped2, batch_size=5, shuffle=False) + torch_zipped3_dataloader_b3 = DataLoader(zipped3, batch_size=3, shuffle=False) + + # Method initialization that should work + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(torch_dataloader_b3, None, torch_dataloader_b3) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b3) + assert are_dataset_first_elems_equal(labels_dataset, None) + assert are_dataset_first_elems_equal(targets_dataset, tf_dataset_b3) + assert batch_size == 3 + + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(torch_tensor, torch_tensor, None, batch_size=5) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(targets_dataset, None) + assert batch_size == 5 + + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(torch_zipped2_dataloader_b5, None, torch_dataloader_b5) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(labels_dataset, tf_dataset_b5) + assert are_dataset_first_elems_equal(targets_dataset, tf_dataset_b5) + assert batch_size == 5 + + cases_dataset, labels_dataset, targets_dataset, batch_size =\ + harmonize_datasets(torch_zipped3_dataloader_b3, batch_size=3) + assert are_dataset_first_elems_equal(cases_dataset, tf_dataset_b3) + assert are_dataset_first_elems_equal(labels_dataset, tf_dataset_b3) + assert are_dataset_first_elems_equal(targets_dataset, tf_dataset_b3) + assert batch_size == 3 + + + +def test_error_raising(): + """ + Test management of dataset init inputs + """ + + tf_tensor = tf.reshape(tf.range(90, dtype=tf.float32), (10, 3, 3)) + tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) + + torch_tensor = torch.reshape(torch.arange(90, dtype=torch.float32), (10, 3, 3)) + np_array = np.array(torch_tensor) + + torch_dataset = TensorDataset(torch_tensor) + torch_dataloader = DataLoader(torch_dataset, batch_size=None, shuffle=False) + torch_shuffled = DataLoader(torch_dataset, batch_size=4, shuffle=True) + torch_dataloader_b3 = DataLoader(torch_dataset, batch_size=3, shuffle=False) + torch_dataloader_b5 = DataLoader(torch_dataset, batch_size=5, shuffle=False) + + zipped2 = TensorDataset(torch_tensor, torch_tensor) + zipped3 = TensorDataset(torch_tensor, torch_tensor, torch_tensor) + torch_zipped2_dataloader_b5 = DataLoader(zipped2, batch_size=5, shuffle=False) + torch_zipped3_dataloader_b3 = DataLoader(zipped3, batch_size=3, shuffle=False) + + too_long_torch_tensor = torch.cat([torch_tensor, torch_tensor], dim=0) + too_long_torch_dataset = TensorDataset(too_long_torch_tensor) + too_long_torch_dataloader_b10 = 
DataLoader(too_long_torch_dataset, batch_size=10, shuffle=False) + + + # Method initialization that should not work + test_raise_assertion_error = unittest.TestCase().assertRaises + + # not input + test_raise_assertion_error(TypeError, harmonize_datasets) + + # shuffled + test_raise_assertion_error(AssertionError, harmonize_datasets, torch_shuffled,) + + # mismatching types + test_raise_assertion_error(AssertionError, harmonize_datasets, torch_dataloader_b3, torch_tensor,) + test_raise_assertion_error(AssertionError, harmonize_datasets, torch_tensor, tf_tensor,) + test_raise_assertion_error(AssertionError, harmonize_datasets, np_array, torch_tensor,) + test_raise_assertion_error(AssertionError, harmonize_datasets, np_array, torch_dataloader_b3,) + test_raise_assertion_error(AssertionError, harmonize_datasets, tf_dataset, torch_dataloader_b3,) + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_zipped2_dataloader_b5, tf_tensor, + ) + + # labels or targets zipped + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_dataloader_b5, torch_zipped2_dataloader_b5, + ) + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_dataloader_b3, None, torch_zipped3_dataloader_b3, + ) + + # not batched and no batch size provided + test_raise_assertion_error(AssertionError, harmonize_datasets, torch_dataloader,) + + # not matching batch sizes + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_dataloader_b3, torch_dataloader_b5, + ) + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_zipped2_dataloader_b5, None, torch_dataloader_b3, + ) + test_raise_assertion_error( + AssertionError, + harmonize_datasets, + too_long_torch_dataloader_b10, + too_long_torch_dataloader_b10, + torch_dataloader_b5, + ) + + # multiple datasets for labels or targets + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_zipped2_dataloader_b5, torch_dataloader_b5, + ) + test_raise_assertion_error( + AssertionError, harmonize_datasets, torch_zipped3_dataloader_b3, None, torch_dataloader_b3, + ) + + +def test_torch_model_splitting(): + + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + n_sample = 10 + torch_input_shape = (3, 32, 32) + input_shape = (32, 32, 3) + nb_labels = 10 + + model = create_cnn_model(input_shape=torch_input_shape, output_shape=nb_labels) + + # generate data + np_data = np.random.rand(n_sample, *input_shape).astype(np.float32) + + # inference with the initial model + model.eval() + model.to(device) + torch_data = torch.tensor(np_data, device=device) + with torch.no_grad(): + torch_channel_first_data = torch_data.permute(0, 3, 1, 2) + np_predictions_1 = model(torch_channel_first_data).cpu().numpy() + + assert np_predictions_1.shape == (n_sample, nb_labels) + + # test splitting support different types + _, _ = model_splitting(model, "flatten1") + _, _ = model_splitting(model, -2) + features_extractor, predictor = model_splitting(model, "last_conv") + + assert isinstance(features_extractor, tf.keras.Model) + assert isinstance(predictor, tf.keras.Model) + + + # inference with the splitted model + tf_data = tf.convert_to_tensor(np_data) + features = features_extractor(tf_data) + tf_predictions = predictor(features) + np_predictions_2 = tf_predictions.numpy() + + assert tf_predictions.shape == (n_sample, nb_labels) + assert np.allclose(np_predictions_1, np_predictions_2, atol=1e-5) + + +def test_similar_examples_basic(): + """ + Test the SimilarExamples with 
an identity projection. + """ + input_shape = (4, 4, 1) + k = 3 + batch_size = 4 + + x_train, x_test, y_train = get_setup(input_shape) + + torch_dataset = TensorDataset(x_train, y_train) + torch_dataloader = DataLoader(torch_dataset, batch_size=batch_size, shuffle=False) + + identity_projection = Projection( + space_projection=lambda inputs, targets=None: inputs + ) + + # Method initialization + method = SimilarExamples( + cases_dataset=torch_dataloader, + projection=identity_projection, + k=k, + batch_size=batch_size, + distance="euclidean", + case_returns=["examples", "labels"], + ) + + # Generate explanation + outputs = method.explain(x_test) + examples = outputs["examples"] + labels = outputs["labels"] + + # Verifications + # Shape should be (n, k, h, w, c) + assert examples.shape == (len(x_test), k) + input_shape + + for i in range(len(x_test)): + # test examples: + assert almost_equal(np.array(examples[i, 0]), np.array(x_train[i + 1])) + assert almost_equal(np.array(examples[i, 1]), np.array(x_train[i + 2]))\ + or almost_equal(np.array(examples[i, 1]), np.array(x_train[i])) + assert almost_equal(np.array(examples[i, 2]), np.array(x_train[i]))\ + or almost_equal(np.array(examples[i, 2]), np.array(x_train[i + 2])) + + # test labels: + assert almost_equal(np.array(labels[i, 0]), np.array(y_train[i + 1])) + assert almost_equal(np.array(labels[i, 1]), np.array(y_train[i + 2]))\ + or almost_equal(np.array(labels[i, 1]), np.array(y_train[i])) + assert almost_equal(np.array(labels[i, 2]), np.array(y_train[i]))\ + or almost_equal(np.array(labels[i, 2]), np.array(y_train[i + 2])) + + +def test_similar_examples_with_splitting(): + """ + Test the SimilarExamples with an identity projection. + """ + # Setup + device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") + + nb_samples = 10 + torch_input_shape = (3, 32, 32) + input_shape = (32, 32, 3) + nb_labels = 10 + k = 3 + batch_size = 4 + + x_train, x_test, y_train = get_setup(input_shape, nb_samples, nb_labels) + torch_dataset = TensorDataset(x_train, y_train) + torch_dataloader = DataLoader(torch_dataset, batch_size=batch_size, shuffle=False) + + model = create_cnn_model(input_shape=torch_input_shape, output_shape=nb_labels) + projection = LatentSpaceProjection(model, "last_conv") + + # Method initialization + method = SimilarExamples( + cases_dataset=torch_dataloader, + projection=projection, + k=k, + batch_size=batch_size, + distance="euclidean", + case_returns=["examples", "labels"], + ) + + # Generate explanation + outputs = method.explain(x_test) + examples = outputs["examples"] + labels = outputs["labels"] + + # Verifications + # Shape should be (n, k, h, w, c) + assert examples.shape == (len(x_test), k) + input_shape + + for i in range(len(x_test)): + # test examples: + assert almost_equal(np.array(examples[i, 0]), np.array(x_train[i + 1])) + assert almost_equal(np.array(examples[i, 1]), np.array(x_train[i + 2]))\ + or almost_equal(np.array(examples[i, 1]), np.array(x_train[i])) + assert almost_equal(np.array(examples[i, 2]), np.array(x_train[i]))\ + or almost_equal(np.array(examples[i, 2]), np.array(x_train[i + 2])) + + # test labels: + assert almost_equal(np.array(labels[i, 0]), np.array(y_train[i + 1])) + assert almost_equal(np.array(labels[i, 1]), np.array(y_train[i + 2]))\ + or almost_equal(np.array(labels[i, 1]), np.array(y_train[i])) + assert almost_equal(np.array(labels[i, 2]), np.array(y_train[i]))\ + or almost_equal(np.array(labels[i, 2]), np.array(y_train[i + 2])) diff --git 
a/xplique/example_based/datasets_operations/convert_torch_to_tf.py b/xplique/example_based/datasets_operations/convert_torch_to_tf.py new file mode 100644 index 00000000..881f8b4c --- /dev/null +++ b/xplique/example_based/datasets_operations/convert_torch_to_tf.py @@ -0,0 +1,165 @@ +""" +Set of functions to convert `torch.utils.data.DataLoader` and `torch.Tensor` to `tf.data.Dataset` +""" +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf +import torch +from torch.utils.data import DataLoader + + +def convert_column_dataloader_to_tf_dataset(dataloader: torch.utils.data.DataLoader, + elements_shape: Tuple[int], + column_index: Optional[int] = None, + ) -> tf.data.Dataset: + """ + Converts a PyTorch torch.utils.data.DataLoader to a TensorFlow Dataset. + """ + + # make generator from dataloader + if column_index is None: + def generator(): + for elements in dataloader: + yield tf.cast(elements.numpy(), tf.float32) + else: + def generator(): + for elements in dataloader: + tf_elements = tf.cast(elements[column_index].numpy(), tf.float32) + yield tf.cast(elements[column_index].numpy(), tf.float32) + + # create tf dataset from generator + dataset = tf.data.Dataset.from_generator( + lambda: generator(), + output_signature=tf.TensorSpec(shape=elements_shape, dtype=tf.float32), + ) + + return dataset + + +def split_and_convert_column_dataloader( + cases_dataset: torch.utils.data.DataLoader, + labels_dataset: Optional[torch.utils.data.DataLoader] = None, + targets_dataset: Optional[torch.utils.data.DataLoader] = None, + ) -> Tuple[tf.data.Dataset, tf.data.Dataset, tf.data.Dataset]: + """ + Splits a PyTorch DataLoader into cases, labels, and targets datasets. + The DataLoader is splitted only if it has multiple columns. + If the DataLoader has 2 columns, the second column is assumed to be the labels. + If the DataLoader has several columns but labels and targets are provided, + there is a conflict and an error is raised. + The splitted parts are then converted to TensorFlow datasets. + + Parameters + ---------- + cases_dataset + The dataset to split. + labels_dataset + Labels associated with the cases in the `cases_dataset`. + If this function is called, it should be `None`. + targets_dataset + Targets associated with the cases in the `cases_dataset`. + If this function is called and `cases_dataset` has 3 columns, it should be `None`. + + Returns + ------- + cases_dataset + The dataset used to train the model. + labels_dataset + Labels associated with the `cases_dataset`. + targets_dataset + Targets associated with the `cases_dataset`. + """ + first_cases = next(iter(cases_dataset)) + + if not (isinstance(first_cases, tuple) or isinstance(first_cases, list)): + # the cases dataset only has one column + + # manage cases dataset + cases_shape = (None,) + first_cases.shape[1:] + new_cases_dataset = convert_column_dataloader_to_tf_dataset(cases_dataset, cases_shape) + + else: + # manage cases dataset + cases_shape = (None,) + first_cases[0].shape[1:] + new_cases_dataset = convert_column_dataloader_to_tf_dataset( + cases_dataset, cases_shape, column_index=0) + + if len(first_cases) >= 2: + # the cases dataset has two columns + assert labels_dataset is None, ( + "The second column of `cases_dataset` is assumed to be the labels. "\ + + "Hence, `labels_dataset` should be empty." 
+ ) + + # manage labels dataset (extract them from the second column of `cases_dataset`) + labels_shape = (None,) + first_cases[1].shape[1:] + labels_dataset = convert_column_dataloader_to_tf_dataset( + cases_dataset, labels_shape, column_index=1) + + if len(first_cases) == 3: + # the cases dataset has three columns + assert targets_dataset is None, ( + "The second and third columns of `cases_dataset` are assumed to be the labels "\ + "and targets. Hence, `labels_dataset` and `targets_dataset` should be empty." + ) + # manage targets dataset (extract them from the third column of `cases_dataset`) + targets_shape = (None,) + first_cases[2].shape[1:] + targets_dataset = convert_column_dataloader_to_tf_dataset( + cases_dataset, targets_shape, column_index=2) + + elif len(first_cases) > 3: + raise AttributeError( + "`cases_dataset` cannot have more than 3 columns, " + + f"{len(first_cases)} were detected." + ) + + # manage labels datasets + if labels_dataset is not None: + if isinstance(labels_dataset, tf.data.Dataset): + pass + elif isinstance(labels_dataset, torch.utils.data.DataLoader): + first_labels = next(iter(labels_dataset)) + if isinstance(first_labels, tuple) or isinstance(first_labels, list): + assert len(first_labels) == 1, ( + "The `labels_dataset` should only have one column. " + + f"{len(first_labels)} were detected." + ) + labels_shape = (None,) + first_labels[0].shape[1:] + labels_dataset = convert_column_dataloader_to_tf_dataset(labels_dataset, labels_shape, column_index=0) + else: + labels_shape = (None,) + first_labels.shape[1:] + labels_dataset = convert_column_dataloader_to_tf_dataset(labels_dataset, labels_shape) + else: + raise AttributeError( + "The `labels_dataset` should be a PyTorch DataLoader or a TensorFlow Dataset. " + + f"{type(labels_dataset)} was detected." + ) + else: + labels_dataset = None + + # manage targets datasets + if targets_dataset is not None: + if isinstance(targets_dataset, tf.data.Dataset): + pass + elif isinstance(targets_dataset, torch.utils.data.DataLoader): + first_targets = next(iter(targets_dataset)) + if isinstance(first_targets, tuple) or isinstance(first_targets, list): + assert len(first_targets) == 1, ( + "The `targets_dataset` should only have one column. " + + f"{len(first_targets)} were detected." + ) + targets_shape = (None,) + first_targets[0].shape[1:] + targets_dataset = convert_column_dataloader_to_tf_dataset(targets_dataset, targets_shape, column_index=0) + else: + targets_shape = (None,) + first_targets.shape[1:] + targets_dataset = convert_column_dataloader_to_tf_dataset(targets_dataset, targets_shape) + else: + raise AttributeError( + "The `labels_dataset` should be a PyTorch DataLoader or a TensorFlow Dataset. " + + f"{type(labels_dataset)} was detected." 
+ ) + else: + targets_dataset = None + + return new_cases_dataset, labels_dataset, targets_dataset From 468cbfdfc4eaa4e129d29f196f9072671ee86a34 Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 18 Sep 2024 16:58:31 +0200 Subject: [PATCH 111/138] example based: add support for torch dataloader fixup --- tests/example_based/test_similar_examples.py | 102 +------------------ 1 file changed, 1 insertion(+), 101 deletions(-) diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py index 4580ed6d..6be4577d 100644 --- a/tests/example_based/test_similar_examples.py +++ b/tests/example_based/test_similar_examples.py @@ -7,13 +7,10 @@ sys.path.append(os.getcwd()) from math import prod, sqrt -import unittest import numpy as np import tensorflow as tf -from xplique.commons import are_dataset_first_elems_equal - from xplique.example_based import SimilarExamples from xplique.example_based.projections import Projection @@ -34,100 +31,6 @@ def get_setup(input_shape, nb_samples=10, nb_labels=10): return x_train, x_test, y_train -def test_similar_examples_input_datasets_management(): - """ - Test management of dataset init inputs - """ - proj = Projection(space_projection=lambda inputs, targets=None: inputs) - - tf_tensor = tf.reshape(tf.range(90, dtype=tf.float32), (10, 3, 3)) - np_array = np.array(tf_tensor) - tf_dataset = tf.data.Dataset.from_tensor_slices(tf_tensor) - too_short_np_array = np_array[:3] - too_long_tf_dataset = tf_dataset.concatenate(tf_dataset) - - tf_dataset_b3 = tf_dataset.batch(3) - tf_dataset_b5 = tf_dataset.batch(5) - too_long_tf_dataset_b5 = too_long_tf_dataset.batch(5) - too_long_tf_dataset_b10 = too_long_tf_dataset.batch(10) - - tf_shuffled = tf_dataset.shuffle(32, 0).batch(4) - tf_one_shuffle = tf_dataset.shuffle(32, 0, reshuffle_each_iteration=False).batch(4) - - # Method initialization that should work - method = SimilarExamples(tf_dataset_b3, None, np_array, projection=proj) - assert are_dataset_first_elems_equal(method.cases_dataset, tf_dataset_b3) - assert are_dataset_first_elems_equal(method.labels_dataset, None) - assert are_dataset_first_elems_equal(method.targets_dataset, tf_dataset_b3) - - method = SimilarExamples(np_array, tf_tensor, None, batch_size=5, projection=proj) - assert are_dataset_first_elems_equal(method.cases_dataset, tf_dataset_b5) - assert are_dataset_first_elems_equal(method.labels_dataset, tf_dataset_b5) - assert are_dataset_first_elems_equal(method.targets_dataset, None) - - method = SimilarExamples( - tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), - None, - np_array, - projection=proj, - ) - assert are_dataset_first_elems_equal(method.cases_dataset, tf_dataset_b5) - assert are_dataset_first_elems_equal(method.labels_dataset, tf_dataset_b5) - assert are_dataset_first_elems_equal(method.targets_dataset, tf_dataset_b5) - - method = SimilarExamples( - tf.data.Dataset.zip((tf_one_shuffle, tf_one_shuffle)), projection=proj - ) - assert are_dataset_first_elems_equal(method.cases_dataset, tf_one_shuffle) - assert are_dataset_first_elems_equal(method.labels_dataset, tf_one_shuffle) - assert are_dataset_first_elems_equal(method.targets_dataset, None) - - method = SimilarExamples(tf_one_shuffle, projection=proj) - assert are_dataset_first_elems_equal(method.cases_dataset, tf_one_shuffle) - assert are_dataset_first_elems_equal(method.labels_dataset, None) - assert are_dataset_first_elems_equal(method.targets_dataset, None) - - # Method initialization that should not work - test_raise_assertion_error = 
unittest.TestCase().assertRaises - test_raise_assertion_error(TypeError, SimilarExamples) - test_raise_assertion_error(AssertionError, SimilarExamples, tf_tensor) - test_raise_assertion_error( - AssertionError, SimilarExamples, tf_shuffled, projection=proj - ) - test_raise_assertion_error( - AssertionError, SimilarExamples, tf_dataset, tf_tensor, projection=proj - ) - test_raise_assertion_error( - AssertionError, SimilarExamples, tf_dataset_b3, tf_dataset_b5, projection=proj - ) - test_raise_assertion_error( - AssertionError, - SimilarExamples, - tf.data.Dataset.zip((tf_dataset_b5, tf_dataset_b5)), - np_array, - projection=proj, - ) - test_raise_assertion_error( - AssertionError, SimilarExamples, tf_dataset_b3, too_short_np_array - ) - test_raise_assertion_error( - AssertionError, SimilarExamples, tf_dataset, None, too_long_tf_dataset - ) - test_raise_assertion_error( - AssertionError, - SimilarExamples, - tf_dataset_b5, - too_long_tf_dataset_b5, - projection=proj, - ) - test_raise_assertion_error( - AssertionError, - SimilarExamples, - too_long_tf_dataset_b10, - tf_dataset_b5, - projection=proj, - ) - def test_similar_examples_basic(): """ @@ -262,7 +165,7 @@ def test_similar_examples_weighting(): method = SimilarExamples( cases_dataset=x_train, - labels_dataset=y_train, + labels_dataset=np.array(y_train), projection=weighting_function, k=k, batch_size=5, @@ -278,9 +181,6 @@ def test_similar_examples_weighting(): assert examples.shape == (nb_samples_test, k) + input_shape for i in range(nb_samples_test): - print(i) - print(examples[i, 0]) - print(x_train[i + 1]) # test examples: assert almost_equal(examples[i, 0], x_train[i + 1]) assert almost_equal(examples[i, 1], x_train[i + 2]) or almost_equal( From be5bb74c298d336774e5dd6deb7ea1df2a4be1dc Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 18 Sep 2024 16:59:26 +0200 Subject: [PATCH 112/138] projections: add warning --- xplique/example_based/projections/base.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index 8ed8cfdf..dbceab15 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -171,11 +171,11 @@ def project_dataset( The projected dataset. """ if self.requires_targets and targets_dataset is None: - warnings.warn( - "The projection requires `targets` but `targets_dataset` is not provided. "\ - +"`targets` will be computed online, assuming a classification setting. "\ - +"Hence, online `targets` will be the predicted class one-hot-encoding. "\ - +"If this is not the expected behavior, please provide a `targets_dataset`.") + warnings.warn( + "The projection requires `targets` but `targets_dataset` is not provided. "\ + +"`targets` will be computed online, assuming a classification setting. "\ + +"Hence, online `targets` will be the predicted class one-hot-encoding. 
"\ + +"If this is not the expected behavior, please provide a `targets_dataset`.") if self.mappable: return self._map_project_dataset(cases_dataset, targets_dataset) From 587647c470bf4ee64b6c88c312e9da83771470d2 Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 18 Sep 2024 16:59:48 +0200 Subject: [PATCH 113/138] datasets operations: move them to example based fixup --- xplique/example_based/prototypes.py | 2 +- xplique/example_based/semifactuals.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index b43cf467..7b4fe48f 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -9,7 +9,7 @@ from ..types import Callable, Dict, List, Optional, Type, Union -from ..commons.tf_dataset_operations import dataset_gather +from .datasets_operations.tf_dataset_operations import dataset_gather from .search_methods import ProtoGreedySearch, MMDCriticSearch, ProtoDashSearch from .projections import Projection diff --git a/xplique/example_based/semifactuals.py b/xplique/example_based/semifactuals.py index 616d2dcf..79ff929e 100644 --- a/xplique/example_based/semifactuals.py +++ b/xplique/example_based/semifactuals.py @@ -5,7 +5,8 @@ import tensorflow as tf from ..types import Callable, List, Optional, Union, Dict -from ..commons import dataset_gather + +from .datasets_operations.tf_dataset_operations import dataset_gather from .base_example_method import BaseExampleMethod from .search_methods import ORDER, KLEORSimMissSearch, KLEORGlobalSimSearch From 0f9212717549ed1e2114b9c5d6c7fe273a6f36c1 Mon Sep 17 00:00:00 2001 From: Mohamed Chafik Bakey Date: Thu, 19 Sep 2024 13:33:25 +0200 Subject: [PATCH 114/138] docs: update prototypes --- .../prototypes/api_prototypes.md | 45 +++++++------------ .../example_based/prototypes/proto_dash.md | 4 +- .../example_based/prototypes/proto_greedy.md | 2 +- 3 files changed, 19 insertions(+), 32 deletions(-) diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md index fa362210..91e9d2d4 100644 --- a/docs/api/example_based/prototypes/api_prototypes.md +++ b/docs/api/example_based/prototypes/api_prototypes.md @@ -1,14 +1,15 @@ # Prototypes -Prototype-based explanation is a family of natural example-based XAI methods. Prototypes consist of a set of samples that are representative of either the dataset or a class ([Poché et al., 2023](https://hal.science/hal-04117520/document)). Using the identity projection, one is looking for the **dataset prototypes**. In contrast, using the latent space of a model as a projection, one is looking for **prototypes relevant for the model**. +A prototype in AI explainability is a representative example from the data that shows how the model makes decisions ([Poché et al., 2023](https://hal.science/hal-04117520/document)). It helps explain a prediction by pointing to a similar example the model learned from, making the decision more understandable. Imagine you're training a model to recognize dogs. After the model learns, you can ask it to show a "prototype" for the dog category, which would be an actual image from the training set that best represents what a typical dog looks like. + +!!!info + Using the identity projection, one is looking for the **dataset prototypes**. In contrast, using the latent space of a model as a projection, one is looking for **prototypes relevant for the model**. 
## Common API ##

```python
explainer = Method(cases_dataset, labels_dataset, nb_local_prototypes, projection,
                   case_returns, batch_size, distance, nb_global_prototypes)
# compute global explanation
global_prototypes = explainer.get_global_prototypes()
# compute local explanation
local_prototypes = explainer(inputs)
```

??? abstract "Table of methods available"

-    The following Data-Centric prototypes methods are implemented:
+    The following prototype methods are implemented:

    | Method Name and Documentation link | **Tutorial** | Available with TF | Available with PyTorch* |
    |:-------------------------------------- | :----------------------: | :---------------: | :---------------------: |
    | [ProtoGreedy](../proto_greedy/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
    | [ProtoDash](../proto_dash/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |
    | [MMDCritic](../mmd_critic/) | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1-bUvXxzWrBqLLfS_4TvErcEfyzymTVGz) | ✔ | ✔ |

-    *: Before using a PyTorch model it is highly recommended to read the [dedicated documentation](../pytorch/)
-
!!!info
    Prototypes share a common API with other example-based methods. Thus, to understand some parameters, we recommend reading the [dedicated documentation](../../api_example_based/).

## Specificity of prototypes

-The search method class related to a `Prorotypes` class includes the following additional parameters:
+The search method class related to a `Prototypes` class includes the following additional parameters:

-- `nb_prototypes` which represents the total number of prototypes desired to represent the entire dataset. This should not be confused with $k$, which represents the number of prototypes closest to the input and allows for a local explanation.
+- `nb_global_prototypes` which represents the total number of prototypes desired to represent the entire dataset.
+- `nb_local_prototypes` which represents the number of prototypes closest to the input and allows for a local explanation. This attribute is equivalent to $k$ in the other example-based methods.

-- `kernel_type`, `kernel_fn`, and `gamma` which are related to the kernel used to compute the [MMD distance](#what-is-mmd).
+- `kernel_type`, `kernel_fn`, and `gamma` which are related to the [kernel](#how-to-choose-the-kernel) used to compute the [MMD distance](#what-is-mmd).

The prototype class has a `get_global_prototypes()` method, which calculates all the prototypes in the base dataset; these are called the global prototypes. The `explain` method then provides a local explanation, i.e., finds the prototypes closest to the input given as a parameter.

## Implemented methods

-The library implements three methods from **Data summarization with knapsack constraint** [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf): `MMDCritic`, `ProtoGreedy` and `ProtoDash`. **Data summarization with knapsack constraint**: consists in finding a subset of prototypes $\mathcal{P}$ that maximizes the coverage set function $F(\mathcal{P})$ under the constraint that its selection cost $C(\mathcal{P})$ (e.g., the number of selected prototypes $|\mathcal{P}|$) should be less than a given budget.
-Submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). In addition, $F(\mathcal{P})$ should encourage coverage and penalize redundancy in order to have a good summary [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf).
+The library implements three methods, `MMDCritic`, `ProtoGreedy` and `ProtoDash`, from **Data summarization with knapsack constraint** [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf). This class of prototype methods involves finding a subset of prototypes $\mathcal{P}$ that maximizes the coverage set function $F(\mathcal{P})$ under the constraint that its selection cost $C(\mathcal{P})$ (e.g., the number of selected prototypes $|\mathcal{P}| = nb\_global\_prototypes$) should be less than a given budget.
+Submodularity and monotonicity of $F(\mathcal{P})$ are necessary to guarantee that a greedy algorithm has a constant factor guarantee of optimality [(Lin et al., 2011)](https://aclanthology.org/P11-1052.pdf).

### Method comparison

- Compared to `MMDCritic`, both `ProtoGreedy` and `ProtoDash` additionally determine the weights for each of the selected prototypes.
- `ProtoGreedy` and `ProtoDash` work for any symmetric positive definite kernel, which is not the case for `MMDCritic`.
- `MMDCritic` and `ProtoGreedy` select the next element that maximizes the increment of the scoring function, while `ProtoDash` maximizes a tight lower bound on the increment of the scoring function (it maximizes the gradient of $F(\mathcal{P},w)$).
-- `ProtoDash` is much faster than `ProtoGreedy` without compromising on the quality of the solution (the complexity of `ProtoGreedy` is $O(n(n+m^4))$ comparing to $O(n(n+m^2)+m^4)$ for `ProtoDash`).
+- `ProtoDash` is much faster than `ProtoGreedy` without compromising on the quality of the solution. The complexity of `ProtoGreedy` is $O(n(n+m^4))$ compared to $O(n(n+m^2)+m^4)$ for `ProtoDash`.
- The approximation guarantee for `ProtoGreedy` is $(1-e^{-\gamma})$, where $\gamma$ is the submodularity ratio of $F(\mathcal{P})$, compared to $(1-e^{-1})$ for `MMDCritic`.

-### Implementation details
-
-`MMDCritic`, `ProtoDash` and `ProtoGreedy` inherit from `Prototypes` class which in turn inherit from `BaseExampleMethod` class. Each of these classes has a corresponding search method class: `MMDCriticSearch`, `ProtoDashSearch` and `ProtoGreedySearch`.
-
-`ProtoGreedySearch` inherits from the `BaseSearchMethod` class. It finds prototypes and assigns a non-negative weight to each one.
-
-Both `MMDCriticSearch` and `ProtoDashSearch` classes inherit from `ProtoGreedySearch`.
-
-`MMDCriticSearch` and `ProtoGreedySearch` use the same greedy algorithm to find prototypes. In `ProtoGreedySearch`, the `compute_objective` method calculates optimal weights for each prototype, whereas `MMDCriticSearch` assigns uniform weights to all prototypes.
-
-`ProtoDashSearch`, like `ProtoGreedySearch`, assigns a non-negative weight to each prototype. However, the algorithm used by `ProtoDashSearch` is [different](#method-comparison) from the one used by `ProtoGreedySearch`. Therefore, `ProtoDashSearch` overrides both the `compute_objective` method and the `update_selection` method.
-
### What is MMD?

The commonality among these three methods is their utilization of the Maximum Mean Discrepancy (MMD) statistic as a measure of similarity between points and potential prototypes.
MMD is a statistic for comparing two distributions (similar to KL-divergence). However, it is a non-parametric statistic, i.e., it does not assume a specific parametric form for the probability distributions being compared. It is defined as follows: @@ -100,7 +87,7 @@ If we consider any exponential kernel (Gaussian kernel, Laplace, ...), we automa !!!warning For `MMDCritic`, the kernel must satisfy a condition ensuring the submodularity of the set function (the Gaussian kernel respects this constraint). In contrast, for `ProtoDash` and `ProtoGreedy`, any kernel can be used, as these methods rely on weak submodularity instead of full submodularity. -### Default kernel -The default kernel used is Gaussian kernel. This kernel distance assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data has complexity. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned. +!!!info + The default kernel used is Gaussian kernel. This kernel distance assigns higher similarity to points that are close in feature space and gradually decreases similarity as points move further apart. It is a good choice when your data has complexity. However, it can be sensitive to the choice of hyperparameters, such as the width $\sigma$ of the Gaussian kernel, which may need to be carefully fine-tuned. diff --git a/docs/api/example_based/prototypes/proto_dash.md b/docs/api/example_based/prototypes/proto_dash.md index ad08564d..5ddc130b 100644 --- a/docs/api/example_based/prototypes/proto_dash.md +++ b/docs/api/example_based/prototypes/proto_dash.md @@ -12,13 +12,13 @@ !!! quote Our work notably generalizes the recent work - by Kim et al. (2016) where in addition to selecting prototypes, we + by [Kim et al. (2016)](../mmd_critic/)) where in addition to selecting prototypes, we also associate non-negative weights which are indicative of their importance. This extension provides a single coherent framework under which both prototypes and criticisms (i.e. outliers) can be found. Furthermore, our framework works for any symmetric positive definite kernel thus addressing one of the key open - questions laid out in Kim et al. (2016). + questions laid out in [Kim et al. (2016)](../mmd_critic/)). -- [Efficient Data Representation by Selecting Prototypes with Importance Weights (2019).](https://arxiv.org/abs/1707.01212) diff --git a/docs/api/example_based/prototypes/proto_greedy.md b/docs/api/example_based/prototypes/proto_greedy.md index b2c8d280..108d70e2 100644 --- a/docs/api/example_based/prototypes/proto_greedy.md +++ b/docs/api/example_based/prototypes/proto_greedy.md @@ -12,7 +12,7 @@ !!! quote Our work notably generalizes the recent work - by Kim et al. (2016) where in addition to selecting prototypes, we + by [Kim et al. (2016)](../mmd_critic/)) where in addition to selecting prototypes, we also associate non-negative weights which are indicative of their importance. This extension provides a single coherent framework under which both prototypes and criticisms (i.e. 
outliers) can be

From 0e0ab2e6a6195740d3d3e9af9ee6483907eed8fa Mon Sep 17 00:00:00 2001
From: POCHE
Date: Tue, 24 Sep 2024 11:26:14 +0200
Subject: [PATCH 115/138] projections: correct from splitted model

---
 xplique/example_based/projections/hadamard.py | 27 ++++++++++++-------
 .../example_based/projections/latent_space.py | 14 ++++++----
 2 files changed, 26 insertions(+), 15 deletions(-)

diff --git a/xplique/example_based/projections/hadamard.py b/xplique/example_based/projections/hadamard.py
index 385b84c7..6255bcb2 100644
--- a/xplique/example_based/projections/hadamard.py
+++ b/xplique/example_based/projections/hadamard.py
@@ -46,12 +46,12 @@ class HadamardProjection(Projection):
        Otherwise, `-1` could be used for the last layer before softmax.
    operator
        Operator to use to compute the explanation, if None use standard predictions.
+        The default operator is the classification operator with online targets computations.
+        For more information, refer to the Attribution documentation.
    device
        Device to use for the projection, if None, use the default device.
        Only used for PyTorch models. Ignored for TensorFlow models.
    """
-    # pylint: disable=fixme
-    # TODO: make a larger description of the operator arg.
    def __init__(
        self,
        model: Callable,
@@ -77,7 +77,6 @@ def __init__(
        # the weights are given by the gradient of the operator based on the predictor
        gradients, _ = get_gradient_functions(self.predictor, operator)
-        # TODO check usage of gpu
        get_weights = lambda inputs, targets: gradients(self.predictor, inputs, targets)

        mappable = isinstance(model, tf.keras.Model)
@@ -93,7 +92,7 @@ def from_splitted_model(cls,
                            features_extractor: tf.keras.Model,
                            predictor: tf.keras.Model,
                            operator: Optional[OperatorSignature] = None,
-                            mappable=True): # TODO: test
+                            mappable=True):
        """
        Create a HadamardProjection from a splitted model.
        The projection will project the inputs into the latent space,
@@ -112,7 +111,6 @@
            It is not the case for wrapped PyTorch models.
            If you encounter errors in the `project_dataset` method, you can set it to `False`.
        """
-        # pylint: disable=fixme
        assert isinstance(features_extractor, tf.keras.Model),\
            f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\
            f" instead. If you have a PyTorch model, you can use the `TorchWrapper`."
        assert isinstance(predictor, tf.keras.Model),\
            f"predictor should be a tf.keras.Model, got {type(predictor)}"\
            f" instead. If you have a PyTorch model, you can use the `TorchWrapper`."

+        if operator is None:
+            warnings.warn("No operator provided, using standard classification operator. 
"\ + + "For non-classification tasks, please specify an operator.") + operator = target_free_classification_operator + # the weights are given by the gradient of the operator based on the predictor gradients, _ = get_gradient_functions(predictor, operator) - # TODO check usage of gpu get_weights = lambda inputs, targets: gradients(predictor, inputs, targets) - super().__init__(get_weights=get_weights, - space_projection=features_extractor, - mappable=mappable, - requires_targets=True) + new_instance = cls.__new__(cls) + super(HadamardProjection, cls).__init__( + new_instance, + get_weights=get_weights, + space_projection=features_extractor, + mappable=mappable, + requires_targets=True + ) + return new_instance diff --git a/xplique/example_based/projections/latent_space.py b/xplique/example_based/projections/latent_space.py index 0aadfd68..ee94778f 100644 --- a/xplique/example_based/projections/latent_space.py +++ b/xplique/example_based/projections/latent_space.py @@ -63,11 +63,15 @@ def from_splitted_model(cls, It is not the case for wrapped PyTorch models. If you encounter errors in the `project_dataset` method, you can set it to `False`. """ - # pylint: disable=fixme - # TODO: test assert isinstance(features_extractor, tf.keras.Model),\ f"features_extractor should be a tf.keras.Model, got {type(features_extractor)}"\ f" instead. If you have a PyTorch model, you can use the `TorchWrapper`." - super().__init__(space_projection=features_extractor, - mappable=mappable, - requires_targets=False) + + new_instance = cls.__new__(cls) + super(LatentSpaceProjection, cls).__init__( + new_instance, + space_projection=features_extractor, + mappable=mappable, + requires_targets=False + ) + return new_instance From 9662bcab2b542ad2190725e8355f142c8f50e3be Mon Sep 17 00:00:00 2001 From: POCHE Date: Tue, 24 Sep 2024 11:26:42 +0200 Subject: [PATCH 116/138] projections: correct target free operator --- xplique/example_based/projections/commons.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/xplique/example_based/projections/commons.py b/xplique/example_based/projections/commons.py index eafafe84..22b80326 100644 --- a/xplique/example_based/projections/commons.py +++ b/xplique/example_based/projections/commons.py @@ -126,7 +126,7 @@ def _torch_model_splitting( model: 'torch.nn.Module', latent_layer: Union[str, int], device: Union["torch.device", str] = None, - ) -> Tuple['torch.nn.Module', 'torch.nn.Module']: # pylint: disable=import-outside-toplevel + ) -> Tuple['torch.nn.Module', 'torch.nn.Module']: """ Split the model into two parts, before and after the `latent_layer`. The parts will respectively be called `features_extractor` and `predictor`. @@ -214,6 +214,7 @@ def second_model_forward(x): return wrapped_first_model, wrapped_second_model +@tf.function def target_free_classification_operator(model: Callable, inputs: tf.Tensor, targets: Optional[tf.Tensor] = None) -> tf.Tensor: @@ -239,15 +240,18 @@ def target_free_classification_operator(model: Callable, scores Predictions scores computed, only for the label class. 
""" - # pylint: disable=fixme - # TODO: test, and use in attribution projection predictions = model(inputs) - targets = tf.cond( - pred=tf.constant(targets is None, dtype=tf.bool), - true_fn=lambda: tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1]), - false_fn=lambda: targets, - ) + # the condition is always the same, hence this should not affect the graph + if targets is None: + targets = tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1]) + + # this implementation did not pass the tests, the cond shapes were different if targets is None + # targets = tf.cond( + # pred=tf.constant(targets is None, dtype=tf.bool), + # true_fn=lambda: tf.one_hot(tf.argmax(predictions, axis=-1), predictions.shape[-1]), + # false_fn=lambda: targets, + # ) scores = tf.reduce_sum(predictions * targets, axis=-1) return scores From 4ddb8599d72b89884b993224949eec5d52ec3b43 Mon Sep 17 00:00:00 2001 From: POCHE Date: Tue, 24 Sep 2024 11:27:27 +0200 Subject: [PATCH 117/138] projections: small fix to base --- xplique/example_based/projections/base.py | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/xplique/example_based/projections/base.py b/xplique/example_based/projections/base.py index dbceab15..edf23d53 100644 --- a/xplique/example_based/projections/base.py +++ b/xplique/example_based/projections/base.py @@ -66,9 +66,10 @@ def __init__(self, device: Optional[str] = None, mappable: bool = True, requires_targets: bool = False): - assert get_weights is not None or space_projection is not None, ( - "At least one of `get_weights` and `space_projection`" - + "should not be `None`." + if get_weights is not None or space_projection is not None: + warnings.warn( + "At least one of `get_weights` and `space_projection`" + + "should not be `None`. Otherwise the projection is an identity function." ) self.mappable = mappable @@ -234,23 +235,16 @@ def _loop_project_dataset( projected_dataset The projected dataset. 
""" - # pylint: disable=fixme - # TODO see if a warning is needed - projected_cases_dataset = [] - batch_size = None + batch_size = next(iter(cases_dataset)).shape[0] # iteratively project the dataset if targets_dataset is None: for inputs in cases_dataset: - if batch_size is None: - batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this projected_cases_dataset.append(self.project(inputs, None)) else: # in case targets are provided, we zip the datasets and project them together for inputs, targets in tf.data.Dataset.zip((cases_dataset, targets_dataset)): - if batch_size is None: - batch_size = inputs.shape[0] # TODO check if there is a smarter way to do this projected_cases_dataset.append(self.project(inputs, targets)) projected_cases_dataset = tf.concat(projected_cases_dataset, axis=0) From 8d9975d2850c7785510ffa35f5bf9c4f3e08a9d4 Mon Sep 17 00:00:00 2001 From: POCHE Date: Tue, 24 Sep 2024 11:28:09 +0200 Subject: [PATCH 118/138] commons: sanitize inputs targets disable pylint warning --- xplique/commons/data_conversion.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/xplique/commons/data_conversion.py b/xplique/commons/data_conversion.py index 536638f1..d5126db6 100644 --- a/xplique/commons/data_conversion.py +++ b/xplique/commons/data_conversion.py @@ -82,6 +82,7 @@ def sanitize(self, *args, **kwargs ): + # pylint: disable=keyword-arg-before-vararg # ensure we have tf.tensor inputs = tf.cast(inputs, tf.float32) if targets is not None: @@ -89,7 +90,7 @@ def sanitize(self, if args: args = [tf.cast(arg, tf.float32) for arg in args] - + if kwargs: kwargs = {key: tf.cast(value, tf.float32) for key, value in kwargs.items()} From 4f94e6b4b60b1448fdf8a036e497dc13a8caa503 Mon Sep 17 00:00:00 2001 From: POCHE Date: Tue, 24 Sep 2024 11:29:25 +0200 Subject: [PATCH 119/138] tests projections: add tests for from splitted model and target free operator --- tests/example_based/test_projections.py | 85 ++++++++++++++++++------- 1 file changed, 61 insertions(+), 24 deletions(-) diff --git a/tests/example_based/test_projections.py b/tests/example_based/test_projections.py index ec303d05..912cf12e 100644 --- a/tests/example_based/test_projections.py +++ b/tests/example_based/test_projections.py @@ -11,9 +11,10 @@ Input, ) +from xplique.commons.operators import predictions_operator from xplique.attributions import Saliency from xplique.example_based.projections import Projection, AttributionProjection, LatentSpaceProjection, HadamardProjection -from xplique.example_based.projections.commons import model_splitting +from xplique.example_based.projections.commons import model_splitting, target_free_classification_operator from ..utils import almost_equal @@ -47,29 +48,6 @@ def _generate_model(input_shape=(32, 32, 3), output_shape=2): return model -# def test_model_splitting_latent_layer(): -# """We should target the right layer using either int, string or default procedure""" -# tf.keras.backend.clear_session() - -# model = _generate_model() - -# first_conv_layer = model.get_layer("conv2d_1") -# last_conv_layer = model.get_layer("conv2d_2") -# flatten_layer = model.get_layer("flatten") - -# # last_conv should be recognized -# _, _, latent_layer = model_splitting(model, latent_layer="last_conv", return_layer=True) -# assert latent_layer == last_conv_layer - -# # target the first conv layer -# _, _, latent_layer = model_splitting(model, latent_layer=0, return_layer=True) -# assert latent_layer == first_conv_layer - -# # target a random flatten layer -# _, _, 
latent_layer = model_splitting(model, latent_layer="flatten", return_layer=True)
-#     assert latent_layer == flatten_layer
-
-
 def test_simple_projection_mapping():
    """
    Test if a simple projection can be mapped.
    """
@@ -114,6 +92,8 @@ def test_model_splitting():
    model.get_layer("dense2").set_weights([np.ones((10, 1)), np.zeros(1)])

    # Split the model
+    _, _ = model_splitting(model, latent_layer=-1)
+    _, _ = model_splitting(model, latent_layer="dense2")
    features_extractor, predictor = model_splitting(model, latent_layer="dense1")

    assert almost_equal(predictor(features_extractor(x_train)).numpy(), model(x_train))
@@ -187,3 +167,60 @@ def test_attribution_projection_mapping():

    # Apply the projection by mapping the dataset
    projected_train_dataset = projection.project_dataset(train_dataset, targets_dataset)
+
+
+def test_from_splitted_model():
+    """
+    Test the other way of constructing the projection.
+    """
+    latent_width = 8
+    nb_samples = 15
+    input_features = 10
+    output_features = 3
+    x_train = np.reshape(np.arange(0, nb_samples * input_features), (nb_samples, input_features))
+    tf_x_train = tf.convert_to_tensor(x_train, dtype=tf.float32)
+
+    train_dataset = tf.data.Dataset.from_tensor_slices(x_train).batch(3)
+
+    model1 = tf.keras.Sequential()
+    model1.add(Input(shape=(input_features,)))
+    model1.add(Dense(latent_width, name="dense1"))
+    model1.compile(loss="mean_absolute_error", optimizer="sgd")
+
+    model2 = tf.keras.Sequential()
+    model2.add(Input(shape=(latent_width,)))
+    model2.add(Dense(output_features, name="dense2"))
+    model2.compile(loss="categorical_crossentropy", optimizer="sgd")
+
+    assert model1(x_train).shape == (nb_samples, latent_width)
+    assert model2(model1(x_train)).shape == (nb_samples, output_features)
+
+    # test LatentSpaceProjection from splitted model
+    projection = LatentSpaceProjection.from_splitted_model(features_extractor=model1, mappable=True)
+    projected_train_dataset = projection.project_dataset(train_dataset)
+
+    # test HadamardProjection from splitted model
+    projection = HadamardProjection.from_splitted_model(features_extractor=model1, predictor=model2, mappable=True)
+    projected_train_dataset = projection.project_dataset(train_dataset)
+
+
+def test_target_free_classification_operator():
+    """
+    Test if the target free classification operator works as expected.
+    """
+    nb_classes = 5
+    x_train = np.reshape(np.arange(0, 100), (10, 10))
+
+    model = tf.keras.Sequential()
+    model.add(Input(shape=(10,)))
+    model.add(Dense(10, name="dense1"))
+    model.add(Dense(nb_classes, name="dense2"))
+    model.compile(loss="categorical_crossentropy", optimizer="sgd")
+
+    preds = model(x_train)
+    targets = tf.one_hot(tf.argmax(preds, axis=1), nb_classes)
+
+    scores1 = target_free_classification_operator(model, x_train)
+    scores2 = predictions_operator(model, x_train, targets)
+
+    assert almost_equal(scores1, scores2)

From f515f14d7926b67f65a4a371fc40b91dc35017a5 Mon Sep 17 00:00:00 2001
From: POCHE
Date: Tue, 24 Sep 2024 11:29:52 +0200
Subject: [PATCH 120/138] plots: small fix

---
 xplique/plots/image.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/xplique/plots/image.py b/xplique/plots/image.py
index d0a556d7..d822d528 100644
--- a/xplique/plots/image.py
+++ b/xplique/plots/image.py
@@ -239,7 +239,6 @@ def plot_examples(
    test_labels: np.ndarray = None,
    predicted_labels: np.ndarray = None,
    img_size: float = 2.,
-    **attribution_kwargs,
 ):
    """
    This function is for image data, it shows the returns of the explain function.
@@ -289,8 +288,6 @@ def plot_examples(
    left = margin/figwidth
    bottom = margin/figheight

-    space_with_line = spacing / (3 * img_size)
-
    fig = plt.figure()
    fig.set_size_inches(figwidth, figheight)

From f8bc136140d330c513f22c95e5799860db6a52d5 Mon Sep 17 00:00:00 2001
From: POCHE
Date: Tue, 24 Sep 2024 11:30:31 +0200
Subject: [PATCH 121/138] base example methods: projection can now be none

---
 xplique/example_based/base_example_method.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/xplique/example_based/base_example_method.py b/xplique/example_based/base_example_method.py
index c299712c..e935acb7 100644
--- a/xplique/example_based/base_example_method.py
+++ b/xplique/example_based/base_example_method.py
@@ -3,8 +3,7 @@
 """
 from abc import ABC, abstractmethod
-
-import math
+import warnings

 import tensorflow as tf
 import numpy as np
@@ -87,9 +86,11 @@ def __init__(
        case_returns: Union[List[str], str] = "examples",
        batch_size: Optional[int] = 32,
    ):
-        assert (
-            projection is not None
-        ), "`BaseExampleMethod` without Projection method should be a `BaseSearchMethod`."
+        if projection is None:
+            warnings.warn(
+                "Example-based methods without projection will not explain the model. "\
+                + "To explain the model, consider using projections like the LatentSpaceProjection."
+            )

        # set attributes
        self.cases_dataset, self.labels_dataset, self.targets_dataset, self.batch_size =\
@@ -98,11 +99,12 @@ def __init__(
        self._search_returns = ["indices", "distances"]

        # check projection
-        assert hasattr(projection, "__call__"), "projection should be a callable."
        if isinstance(projection, Projection):
            self.projection = projection
        elif hasattr(projection, "__call__"):
            self.projection = Projection(get_weights=None, space_projection=projection)
+        elif projection is None:
+            self.projection = Projection(get_weights=None, space_projection=None)
        else:
            raise AttributeError(
                f"projection should be a `Projection` or a `Callable`, not a {type(projection)}"
            )

From f8bb9f8ec9cca6d882811b60cfeab624994dc2ae Mon Sep 17 00:00:00 2001
From: POCHE
Date: Tue, 24 Sep 2024 11:31:17 +0200
Subject: [PATCH 122/138] example based: small pylint fixes

---
 xplique/example_based/counterfactuals.py  | 13 +++++++++----
 xplique/example_based/semifactuals.py     |  3 ++-
 xplique/example_based/similar_examples.py |  1 -
 3 files changed, 11 insertions(+), 6 deletions(-)

diff --git a/xplique/example_based/counterfactuals.py b/xplique/example_based/counterfactuals.py
index 871a3d89..0c6f1f04 100644
--- a/xplique/example_based/counterfactuals.py
+++ b/xplique/example_based/counterfactuals.py
@@ -70,6 +70,7 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar
        by default "euclidean".
    """
    # pylint: disable=duplicate-code
+
    def __init__(
        self,
        cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray],
@@ -187,6 +188,8 @@ def custom_projection(inputs: tf.Tensor, np.ndarray):
        {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable,
        by default "euclidean".
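
    Example
    -------
    A hedged usage sketch (the explainer and variable names are illustrative;
    `cf_targets` is assumed to be the one-hot encoding of the class in which
    to search for counterfactuals):

    >>> explainer = LabelAwareCounterFactuals(cases_dataset, targets_dataset=targets_dataset)
    >>> cf_targets = tf.one_hot(desired_classes, nb_classes)
    >>> outputs = explainer.explain(inputs, cf_targets)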
""" + # pylint: disable=duplicate-code + def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -213,7 +216,7 @@ def __init__( # as the target used for the target dataset warnings.warn( "If your projection method requires the target, "\ - + "be aware that when using the explain method,"\ + + "be aware that when using the explain method, "\ + "the target provided is the class within one should search for the counterfactual."\ + "\nThus, it is possible that the projection of the query is going wrong.") self.warned = False @@ -294,10 +297,12 @@ def explain( The elements that can be returned are defined with the `_returns_possibilities` static attribute of the class. """ - # pylint: disable=arguments-renamed - # pylint: disable=fixme if not self.warned: - # TODO + warnings.warn( + "If your projection method requires the target, "\ + + "be aware that when using the explain method, the target provided "\ + + "is the class within one should search for the counterfactual."\ + + "\nThus, it is possible that the projection of the query is going wrong.") self.warned = True # project inputs into the search space diff --git a/xplique/example_based/semifactuals.py b/xplique/example_based/semifactuals.py index 79ff929e..327eb1c4 100644 --- a/xplique/example_based/semifactuals.py +++ b/xplique/example_based/semifactuals.py @@ -79,11 +79,12 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ + # pylint: disable=duplicate-code + _returns_possibilities = [ "examples", "weights", "distances", "labels", "include_inputs", "nuns", "nuns_indices", "dist_to_nuns", "nuns_labels" ] - # pylint: disable=duplicate-code def __init__( self, diff --git a/xplique/example_based/similar_examples.py b/xplique/example_based/similar_examples.py index 22d9d42f..fcacd6ac 100644 --- a/xplique/example_based/similar_examples.py +++ b/xplique/example_based/similar_examples.py @@ -67,7 +67,6 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ - # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], From cb8eee9bdb718386c28d916bd1c813a40615c8a4 Mon Sep 17 00:00:00 2001 From: POCHE Date: Tue, 24 Sep 2024 11:32:12 +0200 Subject: [PATCH 123/138] search methods: small pylint fixes --- xplique/example_based/search_methods/base.py | 1 - xplique/example_based/search_methods/kleor.py | 4 ++-- xplique/example_based/search_methods/knn.py | 13 +++++++++---- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/xplique/example_based/search_methods/base.py b/xplique/example_based/search_methods/base.py index b69c33ff..8c1ada13 100644 --- a/xplique/example_based/search_methods/base.py +++ b/xplique/example_based/search_methods/base.py @@ -84,7 +84,6 @@ class BaseSearchMethod(ABC): Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. 
""" - # pylint: disable=duplicate-code _returns_possibilities = ["examples", "indices", "distances", "include_inputs"] def __init__( diff --git a/xplique/example_based/search_methods/kleor.py b/xplique/example_based/search_methods/kleor.py index 48d0cadb..38269572 100644 --- a/xplique/example_based/search_methods/kleor.py +++ b/xplique/example_based/search_methods/kleor.py @@ -49,7 +49,6 @@ class BaseKLEORSearch(FilterKNN, ABC): {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ - # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -164,7 +163,8 @@ def _get_nuns(self, def kneighbors(self, inputs: Union[tf.Tensor, np.ndarray], - targets: Union[tf.Tensor, np.ndarray]) -> Tuple[tf.Tensor, tf.Tensor]: + targets: Union[tf.Tensor, np.ndarray] + ) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the k SF to each tensor of `inputs` in `self.cases_dataset`. Here `self.cases_dataset` is a `tf.data.Dataset`, hence, computations are done by batches. diff --git a/xplique/example_based/search_methods/knn.py b/xplique/example_based/search_methods/knn.py index ff64f2c3..1d42b569 100644 --- a/xplique/example_based/search_methods/knn.py +++ b/xplique/example_based/search_methods/knn.py @@ -2,6 +2,7 @@ KNN online search method in example-based module """ from abc import abstractmethod +import inspect import numpy as np import tensorflow as tf @@ -174,7 +175,6 @@ class KNN(BaseKNN): {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, by default "euclidean". """ - # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -346,7 +346,6 @@ class FilterKNN(BaseKNN): where n is the number of inputs and m the number of cases. This boolean mask is used to choose between which inputs and cases to compute the distances. 
""" - # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], @@ -358,7 +357,6 @@ def __init__( order: ORDER = ORDER.ASCENDING, filter_fn: Optional[Callable] = None, ): - # pylint: disable=fixme super().__init__( cases_dataset=cases_dataset, k=k, @@ -374,9 +372,16 @@ def __init__( self.distance_fn = lambda x1, x2, m:\ tf.where(m, get_distance_function(distance)(x1, x2), self.fill_value) - # TODO: Assertion on the function signature if filter_fn is None: filter_fn = lambda x, z, y, t: tf.ones((tf.shape(x)[0], tf.shape(z)[0]), dtype=tf.bool) + elif hasattr(filter_fn, "__call__"): + filter_fn_signature = inspect.signature(filter_fn) + assert len(filter_fn_signature.parameters) == 4,\ + f"filter_fn should take 4 parameters, not {len(filter_fn_signature.parameters)}" + else: + raise TypeError( + f"filter_fn should be Callable, not {type(filter_fn)}" + ) self.filter_fn = filter_fn # set targets_dataset From f4367131ea8e13b9691eb2bee1b737ef9a278822 Mon Sep 17 00:00:00 2001 From: POCHE Date: Tue, 24 Sep 2024 11:32:58 +0200 Subject: [PATCH 124/138] prototypes: optimize and harmonize --- xplique/example_based/prototypes.py | 159 +++- .../search_methods/mmd_critic_search.py | 87 +- .../search_methods/proto_dash_search.py | 162 ++-- .../search_methods/proto_greedy_search.py | 842 +++++++++--------- 4 files changed, 634 insertions(+), 616 deletions(-) diff --git a/xplique/example_based/prototypes.py b/xplique/example_based/prototypes.py index 7b4fe48f..8b753774 100644 --- a/xplique/example_based/prototypes.py +++ b/xplique/example_based/prototypes.py @@ -12,6 +12,7 @@ from .datasets_operations.tf_dataset_operations import dataset_gather from .search_methods import ProtoGreedySearch, MMDCriticSearch, ProtoDashSearch +from .search_methods import KNN, ORDER from .projections import Projection from .base_example_method import BaseExampleMethod @@ -63,26 +64,25 @@ def custom_projection(inputs: tf.Tensor, np.ndarray, targets: tf.Tensor, np.ndar case_returns String or list of string with the elements to return in `self.explain()`. See `self.set_returns()` for detail. + In the case of prototypes, the indices returned by local search are + the indices of the prototypes in the list of prototypes. + To obtain the indices of the prototypes in the dataset, use `self.prototypes_indices`. batch_size Number of sample treated simultaneously for projection and search. Ignored if `tf.data.Dataset` are provided (these are supposed to be batched). distance Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, - by default "euclidean". + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable. + By default a distance function based on the kernel_fn is used. nb_prototypes : int For general explanations, the number of prototypes to select. - If `class_wise` is True, it will correspond to the number of prototypes per class. - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. + If `class_wise` is True, it will correspond to the number of prototypes per class. kernel_fn : Callable, optional Kernel function, by default the rbf kernel. This function must only use TensorFlow operations. gamma : float, optional Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. 
""" - # pylint: disable=too-many-arguments # pylint: disable=duplicate-code def __init__( @@ -94,9 +94,8 @@ def __init__( projection: Union[Projection, Callable] = None, case_returns: Union[List[str], str] = "examples", batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = None, + distance: Optional[Union[int, str, Callable]] = None, nb_prototypes: int = 1, - kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None ): @@ -112,24 +111,28 @@ def __init__( ) # set prototypes parameters - self.distance = distance self.nb_prototypes = nb_prototypes - self.kernel_type = kernel_type - self.kernel_fn = kernel_fn - self.gamma = gamma - # initiate search_method - self.search_method = self.search_method_class( + # initiate search_method and search global prototypes + self.global_prototypes_search_method = self.search_method_class( cases_dataset=self.projected_cases_dataset, - labels_dataset=self.labels_dataset, - k=self.k, - search_returns=self._search_returns, batch_size=self.batch_size, - distance=self.distance, nb_prototypes=self.nb_prototypes, - kernel_type=self.kernel_type, - kernel_fn=self.kernel_fn, - gamma=self.gamma + kernel_fn=kernel_fn, + gamma=gamma + ) + + # get global prototypes through the indices found by the search method + self.get_global_prototypes() + + # set knn for local explanations + self.search_method = KNN( + cases_dataset=self.global_prototypes_search_method.prototypes, + search_returns=self._search_returns, + k=self.k, + batch_size=self.batch_size, + distance=self.global_prototypes_search_method._get_distance_fn(distance), + order=ORDER.ASCENDING, ) @property @@ -152,33 +155,107 @@ def get_global_prototypes(self) -> Dict[str, tf.Tensor]: - 'prototype_weights': The weights of the prototypes. - 'prototype_indices': The indices of the prototypes. """ - # (nb_prototypes,) - indices = self.search_method.prototypes_indices - batch_indices = indices // self.batch_size - elem_indices = indices % self.batch_size - - # (nb_prototypes, 2) - batch_elem_indices = tf.stack([batch_indices, elem_indices], axis=1) + # pylint: disable=access-member-before-definition + if not hasattr(self, "prototypes") or self.prototypes is None: + assert self.global_prototypes_search_method is not None, ( + "global_prototypes_search_method is not initialized" + ) + assert self.global_prototypes_search_method.prototypes_indices is not None, ( + "prototypes_indices are not initialized" + ) - # (1, nb_prototypes, 2) - batch_elem_indices = tf.expand_dims(batch_elem_indices, axis=0) + # (nb_prototypes, 2) + self.prototypes_indices = self.global_prototypes_search_method.prototypes_indices + indices = self.prototypes_indices[tf.newaxis, ...] - # (nb_prototypes, ...) - prototypes = dataset_gather(self.cases_dataset, batch_elem_indices)[0] + # (nb_prototypes, ...) 
+ self.prototypes = dataset_gather(self.cases_dataset, indices)[0] - # (nb_prototypes,) - labels = dataset_gather(self.labels_dataset, batch_elem_indices)[0] + # (nb_prototypes,) + if self.labels_dataset is not None: + self.prototypes_labels = dataset_gather(self.labels_dataset, indices)[0] + else: + self.prototypes_labels = None - # (nb_prototypes,) - weights = self.search_method.prototypes_weights + # (nb_prototypes,) + self.prototypes_weights = self.global_prototypes_search_method.prototypes_weights return { - "prototypes": prototypes, - "prototypes_labels": labels, - "prototypes_weights": weights, - "prototypes_indices": indices, + "prototypes": self.prototypes, + "prototypes_labels": self.prototypes_labels, + "prototypes_weights": self.prototypes_weights, + "prototypes_indices": self.prototypes_indices, } + def format_search_output( + self, + search_output: Dict[str, tf.Tensor], + inputs: Union[tf.Tensor, np.ndarray], + ): + """ + Format the output of the `search_method` to match the expected returns in `self.returns`. + + Parameters + ---------- + search_output + Dictionary with the required outputs from the `search_method`. + inputs + Tensor or Array. Input samples to be explained. + Expected shape among (N, W), (N, T, W), (N, W, H, C). + # targets + # Targets associated to the cases_dataset for dataset projection. + # See `projection` for details. + + Returns + ------- + return_dict + Dictionary with listed elements in `self.returns`. + The elements that can be returned are defined with the `_returns_possibilities` + static attribute of the class. + """ + # initialize return dictionary + return_dict = {} + + # indices in the list of prototypes + # (n, k) + flatten_indices = search_output["indices"][:, :, 0] * self.batch_size\ + + search_output["indices"][:, :, 1] + flatten_indices = tf.reshape(flatten_indices, [-1]) + + # add examples and weights + if "examples" in self.returns: # or "weights" in self.returns: + # (n * k, ...) + examples = tf.gather(params=self.prototypes, indices=flatten_indices) + # (n, k, ...) + examples = tf.reshape(examples, (inputs.shape[0], self.k) + examples.shape[1:]) + if "include_inputs" in self.returns: + # include inputs + inputs = tf.expand_dims(inputs, axis=1) + examples = tf.concat([inputs, examples], axis=1) + if "examples" in self.returns: + return_dict["examples"] = examples + + # add indices, distances, and labels + if "indices" in self.returns: + # convert indices in the list of prototypes to indices in the dataset + # (n * k, 2) + indices = tf.gather(params=self.prototypes_indices, indices=flatten_indices) + # (n, k, 2) + return_dict["indices"] = tf.reshape(indices, (inputs.shape[0], self.k, 2)) + if "distances" in self.returns: + return_dict["distances"] = search_output["distances"] + if "labels" in self.returns: + assert ( + self.prototypes_labels is not None + ), "The method cannot return labels without a label dataset." 
+
+            # (n * k)
+            labels = tf.gather(params=self.prototypes_labels, indices=flatten_indices)
+            # (n, k)
+            return_dict["labels"] = tf.reshape(labels, (inputs.shape[0], self.k))
+
+        return return_dict
+

 class ProtoGreedy(Prototypes):
    # pylint: disable=missing-class-docstring
diff --git a/xplique/example_based/search_methods/mmd_critic_search.py b/xplique/example_based/search_methods/mmd_critic_search.py
index 324a6318..1e5d4b5a 100644
--- a/xplique/example_based/search_methods/mmd_critic_search.py
+++ b/xplique/example_based/search_methods/mmd_critic_search.py
@@ -23,25 +23,11 @@ class MMDCriticSearch(ProtoGreedySearch):
    cases_dataset
        The dataset used to train the model, examples are extracted from the dataset.
        For natural example-based methods it is the train dataset.
-    labels_dataset
-        Labels associated to the examples in the dataset. Indices should match with cases_dataset.
-    k
-        The number of examples to retrieve.
-    search_returns
-        String or list of string with the elements to return in `self.find_examples()`.
-        See `self.set_returns()` for detail.
    batch_size
        Number of samples treated simultaneously.
        It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`.
-    distance
-        Distance function for examples search. It can be an integer, a string in
-        {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable,
-        by default "euclidean".
    nb_prototypes : int
-        Number of prototypes to find.
-    kernel_type : str, optional
-        The kernel type. It can be 'local' or 'global', by default 'local'.
-        When it is local, the distances are calculated only within the classes.
+        Number of prototypes to find.
    kernel_fn : Callable, optional
        Kernel function, by default the rbf kernel.
        This function must only use TensorFlow operations.
    gamma : float, optional
        Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features.
    """

-    def compute_objectives(self,
-                           selection_indices: tf.Tensor,
-                           selection_cases: tf.Tensor,
-                           selection_weights: tf.Tensor,
-                           selection_selection_kernel: tf.Tensor,
-                           candidates_indices: tf.Tensor,
-                           candidates_selection_kernel: tf.Tensor
-                           ) -> Tuple[tf.Tensor, tf.Tensor]:
+    def _compute_batch_objectives(self,
+                                  candidates_kernel_diag: tf.Tensor,
+                                  candidates_kernel_col_means: tf.Tensor,
+                                  selection_kernel_col_means: tf.Tensor,
+                                  candidates_selection_kernel: tf.Tensor,
+                                  selection_selection_kernel: tf.Tensor
+                                  ) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Compute the objective function and corresponding weights
        for a given set of selected prototypes and a candidate.

        Here, we have a special case of protogreedy where we give equal weights to all prototypes,
        the objective here is simplified to speed up processing.

        Find argmax_{c} F(S ∪ c) - F(S)
        ≡
        Find argmax_{c} F(S ∪ c)
        ≡
        Find argmax_{c} (sum1 - sum2)
        where:
            sum1 = (2 / n) * ∑[i=1 to n] κ(x_i, c)
-            sum2 = 1/(|S|+1) [2 * ∑[j=1 to |S|] * κ(x_j, c) + κ(c, c)]
+            sum2 = 1/(|S|+1) [κ(c, c) + 2 * ∑[j=1 to |S|] κ(x_j, c)]

        Parameters
        ----------
-        selection_indices : Tensor
-            Indices corresponding to the selected prototypes.
-        selection_cases : Tensor
-            Cases corresponding to the selected prototypes.
-        selection_weights : Tensor
-            Weights corresponding to the selected prototypes.
-        selection_selection_kernel : Tensor
-            Kernel matrix computed from the selected prototypes.
-        candidates_indices : Tensor
-            Indices corresponding to the candidate prototypes.
+        candidates_kernel_diag : Tensor
+            Diagonal values of the kernel matrix between the candidates and themselves. Shape (bc,).
+        candidates_kernel_col_means : Tensor
+            Column means of the kernel matrix, subset for the candidates. Shape (bc,).
+ selection_kernel_col_means : Tensor + Column means of the kernel matrix, subset for the selected prototypes. Shape (|S|,). candidates_selection_kernel : Tensor - Kernel matrix between the candidates and the selected prototypes. + Kernel matrix between the candidates and the selected prototypes. Shape (bc, |S|). + selection_selection_kernel : Tensor + Kernel matrix between the selected prototypes. Shape (|S|, |S|). Returns ------- objectives - Tensor that contains the computed objective values for each candidate. + Tensor that contains the computed objective values for each candidate. Shape (bc,). objectives_weights Tensor that contains the computed objective weights for each candidate. + Shape (bc, |S|+1). """ - nb_candidates = candidates_indices.shape[0] - nb_selection = selection_indices.shape[0] + nb_candidates = tf.shape(candidates_kernel_diag)[0] + + # (bc,) - 2 * ∑[i=1 to n] κ(x_i, c) + sum1 = 2 * candidates_kernel_col_means - sum1 = 2 * tf.gather(self.col_means, candidates_indices) + if candidates_selection_kernel is None: + extended_nb_selected = 1 - if nb_selection == 0: - sum2 = tf.abs(tf.gather(self.diag, candidates_indices)) + # (bc,) - κ(c, c) + sum2 = candidates_kernel_diag else: - temp = tf.transpose(candidates_selection_kernel, perm=[1, 0]) - sum2 = tf.reduce_sum(temp, axis=0) * 2 + tf.gather(self.diag, candidates_indices) - sum2 /= (nb_selection + 1) + extended_nb_selected = tf.shape(selection_kernel_col_means)[0] + 1 + # (bc,) - κ(c, c) + 2 * ∑[j=1 to |S|] κ(x_j, c) + # the second term is 0 when the selection is empty + sum2 = candidates_kernel_diag + 2 * tf.reduce_sum(candidates_selection_kernel, axis=1) + + # (bc,) - 1/(|S|+1) [κ(c, c) + 2 * ∑[j=1 to |S|] κ(x_j, c)] + sum2 /= tf.cast(extended_nb_selected, tf.float32) + + # (bc,) objectives = sum1 - sum2 - objectives_weights = tf.ones(shape=(nb_candidates, nb_selection+1), dtype=tf.float32) - objectives_weights /= tf.cast(nb_selection+1, dtype=tf.float32) + + # (bc, |S|+1) - 1/(|S|+1) + objectives_weights = tf.fill(dims=(nb_candidates, extended_nb_selected), + value=1.0 / tf.cast(extended_nb_selected, tf.float32)) return objectives, objectives_weights diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index 3e31bfcf..c57c6a62 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -6,7 +6,7 @@ from scipy.optimize import minimize import tensorflow as tf -from ...types import Callable, List, Union, Optional, Tuple +from ...types import Union, Optional, Tuple from .proto_greedy_search import ProtoGreedySearch @@ -94,25 +94,11 @@ class ProtoDashSearch(ProtoGreedySearch): cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - k - The number of examples to retrieve. - search_returns - String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. batch_size Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. - distance - Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, - by default "euclidean". 
nb_prototypes : int - Number of prototypes to find. - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. + Number of prototypes to find. kernel_fn : Callable, optional Kernel function, by default the rbf kernel. This function must only use TensorFlow operations. @@ -123,18 +109,12 @@ class ProtoDashSearch(ProtoGreedySearch): Exact method is based on a scipy optimization, while the other is based on a tensorflow inverse operation. """ - # pylint: disable=duplicate-code def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, - kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None, exact_selection_weights_update: bool = False, @@ -144,25 +124,18 @@ def __init__( super().__init__( cases_dataset=cases_dataset, - labels_dataset=labels_dataset, - k=k, - search_returns=search_returns, batch_size=batch_size, - distance=distance, nb_prototypes=nb_prototypes, - kernel_type=kernel_type, kernel_fn=kernel_fn, gamma=gamma ) - def update_selection_weights(self, - selection_indices: tf.Tensor, - selection_weights: tf.Tensor, - selection_selection_kernel: tf.Tensor, - best_indice: tf.Tensor, - best_weights: tf.Tensor, - best_objective: tf.Tensor - ) -> tf.Tensor: + def _update_selection_weights(self, + selection_kernel_col_means: tf.Tensor, + selection_selection_kernel: tf.Tensor, + best_diag: tf.Tensor, + best_objective: tf.Tensor + ) -> tf.Tensor: """ Update the selection weights based on the given parameters. Pursuant to Lemma IV.4: @@ -173,56 +146,61 @@ def update_selection_weights(self, Parameters ---------- - selected_indices : Tensor - Indices corresponding to the selected prototypes. - selected_weights : Tensor - Weights corresponding to the selected prototypes. + selection_kernel_col_means : Tensor + Column means of the kernel matrix computed from the selected prototypes. Shape (|S|,). selection_selection_kernel : Tensor - Kernel matrix computed from the selected prototypes. - best_indice : int - The index of the selected prototype with the highest objective function value. - best_weights : Tensor - The weights corresponding to the optimal solution - of the objective function for each candidate. - best_objective : float - The computed objective function value. + Kernel matrix computed from the selected prototypes. Shape (|S|, |S|). + best_diag : tf.Tensor + The diagonal element of the kernel matrix corresponding to the lastly added prototype. + Shape (1,). + best_objective : tf.Tensor + The computed objective function value of the lastly added prototype. Shape (1,). + Used to initialize the weights for the exact weights update. - Returns - ------- - selection_weights : Tensor - Updated weights corresponding to the selected prototypes. 
""" # pylint: disable=invalid-name + nb_selected = selection_kernel_col_means.shape[0] if best_objective <= 0: - selection_weights = tf.concat([selection_weights, [0]], axis=0) + self.prototypes_weights[nb_selected - 1].assign(0) else: - u = tf.expand_dims(tf.gather(self.col_means, selection_indices), axis=1) + # (|S|,) + u = selection_kernel_col_means + + # (|S|, |S|) K = selection_selection_kernel if self.exact_selection_weights_update: - best_objective_diag = best_objective / tf.gather(self.diag, best_indice) - initial_weights = tf.concat([selection_weights, [best_objective_diag]], axis=0) - opt = Optimizer(initial_weights) - selection_weights, _ = opt.optimize(u, K) - selection_weights = tf.squeeze(selection_weights, axis=0) + # initialize the weights + best_objective_diag = best_objective / best_diag + self.prototypes_weights[nb_selected - 1].assign(best_objective_diag) + + # optimize the weights + opt = Optimizer(self.prototypes_weights[:nb_selected]) + optimized_weights, _ = opt.optimize(u[:, tf.newaxis], K) + + # update the weights + self.prototypes_weights[:nb_selected].assign(tf.squeeze(optimized_weights, axis=0)) else: # We added epsilon to the diagonal of K to ensure that K is invertible + # (|S|, |S|) K_inv = tf.linalg.inv(K + ProtoDashSearch.EPSILON * tf.eye(K.shape[-1])) - selection_weights = tf.linalg.matmul(K_inv, u) - selection_weights = tf.maximum(selection_weights, 0) - selection_weights = tf.squeeze(selection_weights, axis=1) - - return selection_weights - - def compute_objectives(self, - selection_indices: tf.Tensor, - selection_cases: tf.Tensor, - selection_weights: tf.Tensor, - selection_selection_kernel: tf.Tensor, - candidates_indices: tf.Tensor, - candidates_selection_kernel: tf.Tensor - ) -> Tuple[tf.Tensor, tf.Tensor]: + + # use w* = K^-1 * u as the optimal weights + # (|S|,) + selection_weights = tf.linalg.matvec(K_inv, u) + selection_weights = tf.abs(selection_weights) + + # update the weights + self.prototypes_weights[:nb_selected].assign(selection_weights) + + def _compute_batch_objectives(self, + candidates_kernel_diag: tf.Tensor, + candidates_kernel_col_means: tf.Tensor, + selection_kernel_col_means: tf.Tensor, + candidates_selection_kernel: tf.Tensor, + selection_selection_kernel: tf.Tensor + ) -> Tuple[tf.Tensor, tf.Tensor]: """ Compute the objective function and corresponding weights for a given set of selected prototypes and a candidate. @@ -233,38 +211,34 @@ def compute_objectives(self, Parameters ---------- - selection_indices : Tensor - Indices corresponding to the selected prototypes. - selection_cases : Tensor - Cases corresponding to the selected prototypes. - selection_weights : Tensor - Weights corresponding to the selected prototypes. - selection_selection_kernel : Tensor - Kernel matrix computed from the selected prototypes. - candidates_indices : Tensor - Indices corresponding to the candidate prototypes. + candidates_kernel_diag : Tensor + Diagonal values of the kernel matrix between the candidates and themselves. Shape (bc,). + candidates_kernel_col_means : Tensor + Column means of the kernel matrix, subset for the candidates. Shape (bc,). + selection_kernel_col_means : Tensor + Column means of the kernel matrix, subset for the selected prototypes. Shape (|S|,). candidates_selection_kernel : Tensor - Kernel matrix between the candidates and the selected prototypes. + Kernel matrix between the candidates and the selected prototypes. Shape (bc, |S|). + selection_selection_kernel : Tensor + Kernel matrix between the selected prototypes. 
Shape (|S|, |S|). Returns ------- objectives - Tensor that contains the computed objective values for each candidate. + Tensor that contains the computed objective values for each candidate. Shape (bc,). objectives_weights - Tensor that contains the computed objective weights for each candidate. + No weights are returned in this case. It is set to None. + The weights are computed and updated in the `_update_selection_weights` method. """ # pylint: disable=invalid-name - u = tf.gather(self.col_means, candidates_indices) - - if selection_indices.shape[0] == 0: + if candidates_selection_kernel is None: + # (bc,) # S = ∅ and ζ^(∅) = 0, g = ∇l(ζ^(∅)) = μ_p - objectives = u + objectives = candidates_kernel_col_means else: - u = tf.expand_dims(u, axis=1) - K = candidates_selection_kernel - - objectives = u - tf.matmul(K, tf.expand_dims(selection_weights, axis=1)) - objectives = tf.squeeze(objectives, axis=1) + # (bc,) - g = μ_p - K * ζ^(S) + objectives = candidates_kernel_col_means - tf.linalg.matvec(candidates_selection_kernel, + selection_kernel_col_means) return objectives, None diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 2c4dfcc2..4b9e720c 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -5,59 +5,14 @@ import numpy as np import tensorflow as tf +from ...types import Callable, Union, Optional, Tuple + from ..datasets_operations.tf_dataset_operations import sanitize_dataset -from ...types import Callable, List, Union, Optional, Tuple -from .base import BaseSearchMethod from .common import get_distance_function -from .knn import KNN -# from ..projections import Projection - - -@tf.function -def rbf_kernel(X: tf.Tensor, - Y: Optional[tf.Tensor] = None, - gamma: Optional[float] = None - ) -> tf.Tensor: - """ - Compute the rbf kernel matrix between two sets of samples. - - Parameters - ---------- - X - The first set of samples. - Y - The second set of samples, by default None. - If None, it is set to X. - gamma - The spread of the rbf kernel, by default None. - If None, it is set to 1.0 / n_features. - - Returns - ------- - Tensor - The rbf kernel matrix. - """ - # pylint: disable=invalid-name - # pylint: disable=invalid-unary-operand-type - # (for `X - Y`, pylint sees that Y might be `None`, but it is not the case) - if Y is None: - Y = X - if gamma is None: - gamma = 1.0 / tf.cast(tf.shape(X)[1], dtype=X.dtype) - X = tf.expand_dims(X, axis=1) - Y = tf.expand_dims(Y, axis=0) - - pairwise_diff = X - Y - pairwise_sq_dist = tf.reduce_sum(tf.square(pairwise_diff), axis=-1) - kernel_matrix = tf.exp(-gamma * pairwise_sq_dist) - - return kernel_matrix - - -class ProtoGreedySearch(BaseSearchMethod): +class ProtoGreedySearch(): """ ProtoGreedy method for searching prototypes. @@ -71,32 +26,18 @@ class ProtoGreedySearch(BaseSearchMethod): cases_dataset The dataset used to train the model, examples are extracted from the dataset. For natural example-based methods it is the train dataset. - labels_dataset - Labels associated to the examples in the dataset. Indices should match with cases_dataset. - k - The number of examples to retrieve. - search_returns - String or list of string with the elements to return in `self.find_examples()`. - See `self.set_returns()` for detail. batch_size Number of sample treated simultaneously. It should match the batch size of the `search_set` in the case of a `tf.data.Dataset`. 
- distance - Distance function for examples search. It can be an integer, a string in - {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable, - by default "euclidean". nb_prototypes : int - Number of prototypes to find. - kernel_type : str, optional - The kernel type. It can be 'local' or 'global', by default 'local'. - When it is local, the distances are calculated only within the classes. + Number of prototypes to find. kernel_fn : Callable, optional Kernel function, by default the rbf kernel. - This function must only use TensorFlow operations. + The overall method will be much faster if the provided function is a `tf.function`. gamma : float, optional Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. """ - # pylint: disable=duplicate-code + # pylint: disable=too-many-instance-attributes # Avoid zero division during procedure. (the value is not important, as if the denominator is # zero, then the nominator will also be zero). @@ -105,463 +46,494 @@ class ProtoGreedySearch(BaseSearchMethod): def __init__( self, cases_dataset: Union[tf.data.Dataset, tf.Tensor, np.ndarray], - labels_dataset: Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]] = None, - k: int = 1, - search_returns: Optional[Union[List[str], str]] = None, batch_size: Optional[int] = 32, - distance: Union[int, str, Callable] = None, nb_prototypes: int = 1, - kernel_type: str = 'local', kernel_fn: callable = None, gamma: float = None ): - super().__init__( - cases_dataset, k, search_returns, batch_size - ) - - # pylint: disable=fixme - # TODO: see if leave the choice between local and global kernels to the user - # by forcing a global kernel, we can simplify the code - self.labels_dataset = sanitize_dataset(labels_dataset, self.batch_size) - - if kernel_type not in ['local', 'global']: - raise AttributeError( - "The kernel_type parameter is expected to be in"\ - + " ['local', 'global'] "\ - +f"but {kernel_type} was received."\ - ) + # set batch size + if hasattr(cases_dataset, "_batch_size"): + self.batch_size = tf.cast(cases_dataset._batch_size, tf.int32) + else: + self.batch_size = batch_size - self.kernel_type = kernel_type + self.cases_dataset = sanitize_dataset(cases_dataset, self.batch_size) - # set default kernel function (rbf_kernel) or raise error if kernel_fn is not callable + # set kernel function if kernel_fn is None: - # define rbf kernel function - kernel_fn = lambda x, y: rbf_kernel(x,y,gamma) - elif not hasattr(kernel_fn, "__call__"): + # define kernel fn to default rbf kernel + self.__set_default_kernel_fn(self.cases_dataset, gamma) + elif isinstance(kernel_fn, tf.types.experimental.PolymorphicFunction): + # the kernel_fn was decorated with a tf.function + self.kernel_fn = kernel_fn + elif hasattr(kernel_fn, "__call__"): + # the kernel_fn is a callable the output is converted to a tensor for consistency + self.kernel_fn = lambda x1, x2: tf.convert_to_tensor(kernel_fn(x1, x2)) + else: raise AttributeError( - "The kernel_fn parameter is expected to be a Callable"\ + "The kernel_fn parameter is expected to be None or a Callable"\ +f"but {kernel_fn} was received."\ ) - # define custom kernel function depending on the kernel type - def custom_kernel_fn(x1, x2, y1=None, y2=None): - if self.kernel_type == 'global': - kernel_matrix = kernel_fn(x1,x2) - if isinstance(kernel_matrix, np.ndarray): - kernel_matrix = tf.convert_to_tensor(kernel_matrix) - else: - # In the case of a local kernel, calculations are limited to within the class. 
- # Across different classes, the kernel values are set to 0. - kernel_matrix = np.zeros((x1.shape[0], x2.shape[0]), dtype=np.float32) - y_intersect = np.intersect1d(y1, y2) - for i in range(y_intersect.shape[0]): - y1_indices = tf.where(tf.equal(y1, y_intersect[i]))[:, 0] - y2_indices = tf.where(tf.equal(y2, y_intersect[i]))[:, 0] - sub_matrix = kernel_fn(tf.gather(x1, y1_indices), tf.gather(x2, y2_indices)) - - y1_indices_flatten = tf.reshape(y1_indices, (-1, 1)) - y2_indices_flatten = tf.reshape(y2_indices, (1, -1)) - kernel_matrix[y1_indices_flatten, y2_indices_flatten] = sub_matrix - kernel_matrix = tf.convert_to_tensor(kernel_matrix) - return kernel_matrix + # compute the sum of the columns and the diagonal values of the kernel matrix of the dataset + self.__set_kernel_matrix_column_means_and_diagonal() + + # compute the prototypes in the latent space + self.find_global_prototypes(nb_prototypes) - self.kernel_fn = custom_kernel_fn + def _get_distance_fn(self, distance: Optional[Union[int, str, Callable]]) -> Callable: + """ + Get the distance function for examples search. + Function called through the Prototypes class. + The distance function is used to search for the closest examples to the prototypes. + + Parameters + ---------- + distance + Distance function for examples search. It can be an integer, a string in + {"manhattan", "euclidean", "cosine", "chebyshev", "inf"}, or a Callable. - # set distance function + Returns + ------- + Callable + Distance function for examples search. + """ if distance is None: def kernel_induced_distance(x1, x2): - x1 = tf.expand_dims(x1, axis=0) - x2 = tf.expand_dims(x2, axis=0) - distance = tf.sqrt(kernel_fn(x1,x1) - 2 * kernel_fn(x1,x2) + kernel_fn(x2,x2)) + def dist(x): + x = tf.expand_dims(x, axis=0) + return tf.sqrt( + self.kernel_fn(x1, x1) - 2 * self.kernel_fn(x1, x) + self.kernel_fn(x, x) + ) + distance = tf.map_fn(dist, x2) return tf.squeeze(distance) - self.distance_fn = kernel_induced_distance - else: - self.distance_fn = get_distance_function(distance) + return kernel_induced_distance + + return get_distance_function(distance) + + def __set_default_kernel_fn(self, + cases_dataset: tf.data.Dataset, + gamma: float = None, + ) -> None: + """ + Set the default kernel function. + + Parameters + ---------- + cases_dataset : tf.data.Dataset + The dataset used to train the model, examples are extracted from the dataset. + The shape are extracted from the dataset, it is necessary for optimal performance, + and to set the default gamma value. + gamma : float, optional + Parameter that determines the spread of the rbf kernel, defaults to 1.0 / n_features. + """ + cases_shape = cases_dataset.element_spec.shape + self.nb_features = cases_shape[-1] + + # elements should be batched tabular data + assert len(cases_shape) == 2,\ + "Prototypes' searches expects 2D data, (nb_samples, nb_features), but got "+\ + f"{cases_shape}. Please verify your projection "+\ + "if you provided a custom one. If you use a splitted model, "+\ + "make sure the output of the first part of the model is flattened." + + if gamma is None: + if cases_dataset is None: + raise ValueError( + "For the default kernel_fn, the default gamma value requires samples shape." 
+ ) + gamma = 1.0 / self.nb_features + + gamma = tf.constant(gamma, dtype=tf.float32) + + # created inside a function for gamma to be a constant and prevent graph retracing + @tf.function(input_signature=[ + tf.TensorSpec(shape=cases_shape, dtype=tf.float32, name="tensor_1"), + tf.TensorSpec(shape=cases_shape, dtype=tf.float32, name="tensor_2") + ]) + def rbf_kernel(tensor_1: tf.Tensor, tensor_2: tf.Tensor,) -> tf.Tensor: + """ + Compute the rbf kernel matrix between two sets of samples. + + Parameters + ---------- + tensor_1 + The first set of samples of shape (n, d). + tensor_2 + The second set of samples of shape (m, d). + + Returns + ------- + Tensor + The rbf kernel matrix of shape (n, m). + """ + + # (n, m, d) + pairwise_diff = tensor_1[:, tf.newaxis, :] - tensor_2[tf.newaxis, :, :] + + # (n, m) + pairwise_sq_dist = tf.reduce_sum(tf.square(pairwise_diff), axis=-1) + kernel_matrix = tf.exp(-gamma * pairwise_sq_dist) + + return kernel_matrix + + self.kernel_fn = rbf_kernel + def __set_kernel_matrix_column_means_and_diagonal(self) -> None: + """ + Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset. + Results are stored in the object. + + Parameters + ---------- + cases_dataset : tf.data.Dataset + The kernel matrix is computed between the cases of this dataset. + kernel_fn : Callable + Kernel function to compute the kernel matrix between two sets of samples. + """ # Compute the sum of the columns and the diagonal values of the kernel matrix of the dataset # We take advantage of the symmetry of this matrix to traverse only its lower triangle col_sums = [] diag = [] - row_sums = [] - - for batch_col_index, (batch_col_cases, batch_col_labels) in enumerate( - zip(self.cases_dataset, self.labels_dataset) - ): - # elements should be tabular data - assert len(batch_col_cases.shape) == 2,\ - "Prototypes' searches expects 2D data, (nb_samples, nb_features), but got "+\ - f"{batch_col_cases.shape}. Please verify your projection "+\ - "if you provided a custom one. If you use a splitted model, "+\ - "make sure the output of the first part of the model is flattened." 
-
-            batch_col_sums = tf.zeros((batch_col_cases.shape[0]))
-
-            for batch_row_index, (batch_row_cases, batch_row_labels) in enumerate(
-                zip(self.cases_dataset, self.labels_dataset)
-            ):
-                if batch_row_index < batch_col_index:
+        row_sums = [0]  # the first batch has no row sums (they are not computed); 0 is a placeholder
+        nb_samples = 0
+
+        for batch_col_index, batch_col_cases in enumerate(self.cases_dataset):
+
+            batch_col_sums = tf.zeros((batch_col_cases.shape[0]), dtype=tf.float32)
+
+            for batch_row_index, batch_row_cases in enumerate(self.cases_dataset):
+                # ignore batches that are above the diagonal
+                if batch_col_index > batch_row_index:
                     continue
-                batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases,
-                                              batch_row_labels, batch_col_labels)
+                # Compute the kernel matrix between the two batches
+                # (n_b_row, n_b_col)
+                batch_kernel = self.kernel_fn(batch_row_cases, batch_col_cases)
+                # increment the column sums
+                # (n_b_col,)
                 batch_col_sums = batch_col_sums + tf.reduce_sum(batch_kernel, axis=0)
+                # current pair of batches is on the diagonal
                 if batch_col_index == batch_row_index:
-                    if batch_col_index != 0:
-                        batch_col_sums = batch_col_sums + row_sums[batch_row_index]
-
+                    # store the diagonal values
                     diag.append(tf.linalg.diag_part(batch_kernel))
+                    # complete the column sums with the row sums when the batch is on the diagonal
+                    # (n_b_col,)
+                    batch_col_sums = batch_col_sums + row_sums[batch_row_index]
+                    continue
+
+                # increment the row sums
+                # (n_b_row,)
+                current_batch_row_sums = tf.reduce_sum(batch_kernel, axis=1)
                 if batch_col_index == 0:
-                    if batch_row_index == 0:
-                        row_sums.append(None)
-                    else:
-                        row_sums.append(tf.reduce_sum(batch_kernel, axis=1))
+                    row_sums.append(current_batch_row_sums)
                 else:
-                    row_sums[batch_row_index] += tf.reduce_sum(batch_kernel, axis=1)
+                    row_sums[batch_row_index] += current_batch_row_sums
 
             col_sums.append(batch_col_sums)
+            nb_samples += batch_col_cases.shape[0]
 
-        self.col_sums = tf.concat(col_sums, axis=0)
-        self.n = self.col_sums.shape[0]
-        self.col_means = self.col_sums / self.n
-        self.diag = tf.concat(diag, axis=0)
-        self.nb_features = batch_col_cases.shape[1]
+        # pad the last batch to have the same size as the others
+        col_sums[-1] = tf.pad(col_sums[-1], [[0, self.batch_size - col_sums[-1].shape[0]]])
 
-        # compute the prototypes in the latent space
-        self.prototypes_indices, self.prototypes, self.prototypes_labels, self.prototypes_weights =\
-            self.find_prototypes(nb_prototypes)
-
-        self.knn = KNN(
-            cases_dataset=self.prototypes,
-            k=k,
-            search_returns=search_returns,
-            batch_size=batch_size,
-            distance=self.distance_fn
-        )
+        # (nb, b)
+        self.kernel_col_means = tf.stack(col_sums, axis=0) / tf.cast(nb_samples, dtype=tf.float32)
+
+        # pad the last batch to have the same size as the others
+        diag[-1] = tf.pad(diag[-1], [[0, self.batch_size - diag[-1].shape[0]]])
 
-    def compute_objectives(self,
-                           selection_indices: tf.Tensor,
-                           selection_cases: tf.Tensor,
-                           selection_weights: tf.Tensor,
-                           selection_selection_kernel: tf.Tensor,
-                           candidates_indices: tf.Tensor,
-                           candidates_selection_kernel: tf.Tensor
-                           ) -> Tuple[tf.Tensor, tf.Tensor]:
+        # (nb, b)
+        self.kernel_diag = tf.stack(diag, axis=0)
+
+    def _compute_batch_objectives(self,
+                                  candidates_kernel_diag: tf.Tensor,
+                                  candidates_kernel_col_means: tf.Tensor,
+                                  selection_kernel_col_means: tf.Tensor,
+                                  candidates_selection_kernel: tf.Tensor,
+                                  selection_selection_kernel: tf.Tensor
+                                  ) -> Tuple[tf.Tensor, tf.Tensor]:
         """
-        Compute the objective and its weights for each candidate.
+        Compute the objective function and corresponding weights
+        for a given set of selected prototypes and a batch of candidates.
+
+        Here, we have a special case of ProtoGreedy where we give equal weights to all prototypes;
+        the objective is therefore simplified to speed up processing.
+        Find argmax_{c} F(S ∪ c) - F(S)
+        ≡
+        Find argmax_{c} F(S ∪ c)
+        ≡
+        Find argmax_{c} max_{w} (w^T mu_p) - (w^T K w) / 2
+
+        w*, the optimal objective weights, is obtained by zeroing the gradient of the
+        objective with respect to w (mu_p - K w = 0), which gives: w* = K^-1 mu_p
+
+        where:
+        - mu_p is the column means of the kernel matrix
+        - K is the kernel matrix
+
         Parameters
         ----------
-        selection_indices : Tensor
-            Indices corresponding to the selected prototypes.
-        selection_cases : Tensor
-            Cases corresponding to the selected prototypes.
-        selection_weights : Tensor
-            Weights corresponding to the selected prototypes.
-        selection_selection_kernel : Tensor
-            Kernel matrix computed from the selected prototypes.
-        candidates_indices : Tensor
-            Indices corresponding to the candidate prototypes.
+        candidates_kernel_diag : Tensor
+            Diagonal values of the kernel matrix between the candidates and themselves. Shape (bc,).
+        candidates_kernel_col_means : Tensor
+            Column means of the kernel matrix, subset for the candidates. Shape (bc,).
+        selection_kernel_col_means : Tensor
+            Column means of the kernel matrix, subset for the selected prototypes. Shape (|S|,).
         candidates_selection_kernel : Tensor
-            Kernel matrix between the candidates and the selected prototypes.
-
+            Kernel matrix between the candidates and the selected prototypes. Shape (bc, |S|).
+        selection_selection_kernel : Tensor
+            Kernel matrix between the selected prototypes. Shape (|S|, |S|).
+
         Returns
         -------
         objectives
-            Tensor that contains the computed objective values for each candidate.
+            Tensor that contains the computed objective values for each candidate. Shape (bc,).
         objectives_weights
             Tensor that contains the computed objective weights for each candidate.
+            Shape (bc, |S|+1).
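+
+        Note: as a quick, illustrative sanity check of the formulas above (not used by
+        the implementation): with an empty selection S = {}, K reduces to [[k(c, c)]]
+        for a candidate c, hence w* = mu_p(c) / k(c, c) and the objective evaluates to
+        mu_p(c)^2 / (2 k(c, c)).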
""" # pylint: disable=invalid-name - - nb_candidates = candidates_indices.shape[0] - nb_selection = selection_cases.shape[0] - - repeated_selection_indices = tf.tile(tf.expand_dims(selection_indices, 0), - [nb_candidates, 1]) - repeated_selection_candidates_indices = tf.concat([repeated_selection_indices, - tf.expand_dims(candidates_indices, 1)], - axis=1) - u = tf.expand_dims(tf.gather(self.col_means, repeated_selection_candidates_indices), axis=2) - - candidates_diag = tf.gather(self.diag, candidates_indices) - candidates_diag = tf.expand_dims(tf.expand_dims(candidates_diag, axis=-1), axis=-1) - - if nb_selection == 0: - K = candidates_diag + # construct the kernel matrix for (S ∪ c) for each candidate (S is the selection) + # (bc, |S| + 1, |S| + 1) + if candidates_selection_kernel is None: + # no selected prototypes yet, S = {} + # (bc, 1, 1) + K = candidates_kernel_diag[:, tf.newaxis, tf.newaxis] else: - repeated_selection_selection_kernel = tf.tile( + # repeat the selection-selection kernel for each candidate + # (bc, |S|, |S|) + selection_selection_kernel = tf.tile( tf.expand_dims(selection_selection_kernel, 0), - [nb_candidates, 1, 1] + [candidates_selection_kernel.shape[0], 1, 1] ) - repeated_selection_selection_kernel = tf.pad( - repeated_selection_selection_kernel, - [[0, 0], [0, 1], [0, 1]] + + # add candidates-selection kernel row to the selection-selection kernel matrix + # (bc, |S| + 1, |S|) + extended_selection_selection_kernel = tf.concat( + [ + selection_selection_kernel, + candidates_selection_kernel[:, tf.newaxis, :] + ], + axis=1 ) - candidates_diag = tf.pad( - candidates_diag, - [[0, 0], [nb_selection, 0], [nb_selection, 0]] + # create the extended column for the candidates with the diagonal values + # (bc, |S| + 1) + extended_candidates_selection_kernel = tf.concat( + [ + candidates_selection_kernel, + candidates_kernel_diag[:, tf.newaxis] + ], + axis=1 ) - candidates_selection_kernel = tf.expand_dims(candidates_selection_kernel, axis=-1) - candidates_selection_kernel = tf.pad( - candidates_selection_kernel, - [[0, 0], [0, 1], [nb_selection, 0]] + # add the extended column for the candidates to the extended selection-selection kernel + # (bc, |S| + 1, |S| + 1) + K = tf.concat( + [ + extended_selection_selection_kernel, + extended_candidates_selection_kernel[:, :, tf.newaxis], + ], + axis=2 ) - K = repeated_selection_selection_kernel + candidates_diag\ - + candidates_selection_kernel + tf.transpose(candidates_selection_kernel, [0, 2, 1]) + # (bc, |S|) - extended selected kernel col means + selection_kernel_col_means = tf.tile( + selection_kernel_col_means[tf.newaxis, :], + multiples=[candidates_kernel_col_means.shape[0], 1] + ) + + # (bc, |S| + 1) - mu_p + candidates_selection_kernel_col_means = tf.concat( + [ + selection_kernel_col_means, + candidates_kernel_col_means[:, tf.newaxis]], + axis=1 + ) - # Compute the objective weights for each candidate in the batch + # compute the optimal objective weights for each candidate in the batch + # (bc, |S| + 1, |S| + 1) - K^-1 K_inv = tf.linalg.inv(K + ProtoGreedySearch.EPSILON * tf.eye(K.shape[-1])) - objectives_weights = tf.matmul(K_inv, u) - objectives_weights = tf.maximum(objectives_weights, 0) - # Compute the objective for each candidate in the batch - k_objectives = tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), K) - u_objectives = tf.matmul(tf.transpose(objectives_weights, [0, 2, 1]), u) - objectives = u_objectives - 0.5 * tf.matmul(k_objectives, objectives_weights) - objectives = tf.squeeze(objectives, axis=[1, 
2]) + # (bc, |S| + 1) - w* = K^-1 mu_p + objectives_weights = tf.abs(tf.einsum("bsp,bp->bs", + K_inv, candidates_selection_kernel_col_means)) - return objectives, objectives_weights + # (bc,) - (w*^T mu_p) + weights_mu_p = tf.einsum("bp,bp->b", + objectives_weights, candidates_selection_kernel_col_means) - def update_selection_weights(self, - selection_indices: tf.Tensor, - selection_weights: tf.Tensor, - selection_selection_kernel: tf.Tensor, - best_indice: tf.Tensor, - best_weights: tf.Tensor, - best_objective: tf.Tensor - ) -> tf.Tensor: - """ - Update the selection weights based on the optimization results. + # (bc,) - (w*^T K w*) + weights_K_weights = tf.einsum("bs,bsp,bp->b", + objectives_weights, K, objectives_weights) - Parameters - ---------- - selected_indices : Tensor - Indices corresponding to the selected prototypes. - selected_weights : Tensor - Weights corresponding to the selected prototypes. - selection_selection_kernel : Tensor - Kernel matrix computed from the selected prototypes. - best_indice : int - The index of the selected prototype with the highest objective function value. - best_weights : Tensor - The weights corresponding to the optimal solution - of the objective function for each candidate. - best_objective : float - The computed objective function value. + # (bc,) - (w*^T mu_p) - (w*^T K w*) / 2 + objectives = weights_mu_p - 0.5 * weights_K_weights - Returns - ------- - selection_weights : Tensor - Updated weights corresponding to the selected prototypes. - """ - - selection_weights = best_weights - - return selection_weights + return objectives, objectives_weights - def find_prototypes(self, nb_prototypes): + def find_global_prototypes(self, nb_prototypes: int): """ - Search for prototypes and their corresponding weights. + Search for global prototypes and their corresponding weights. + Iteratively select the best prototype candidate and add it to the selection. + The selected candidate is the one with the highest objective function value. + + The indices, weights, and cases of the selected prototypes are stored in the object. Parameters ---------- nb_prototypes : int - Number of prototypes to find. - - Returns - ------- - prototypes_indices : Tensor - The indices of the selected prototypes. - prototypes : Tensor - The cases of the selected prototypes. - prototypes_labels : Tensor - The labels of the selected prototypes. - prototypes_weights : - The normalized weights of the selected prototypes. + Number of global prototypes to find. """ - - # Tensors to store selected indices and their corresponding cases, labels and weights. - selection_indices = tf.constant([], dtype=tf.int32) - selection_cases = tf.zeros((0, self.nb_features), dtype=tf.float32) - selection_labels = tf.constant([], dtype=tf.int32) - selection_weights = tf.constant([], dtype=tf.float32) - # Tensor to store the all_candidates-selection kernel of the previous iteration. - all_candidates_selection_kernel = tf.zeros((self.n, 0), dtype=tf.float32) - # Tensor to store the selection-selection kernel. 
-        selection_selection_kernel = None
-
-        k = 0
-        while k < nb_prototypes:
-
-            nb_selection = selection_cases.shape[0]
-
-            # Tensor to store the all_candidates-last_selected kernel
-            if nb_selection !=0:
-                all_candidates_last_selected_kernel = tf.zeros((self.n), dtype=tf.float32)
-
-            best_objective = None
-            best_indice = None
-            best_case = None
-            best_label = None
-            best_weights = None
-
-            for batch_index, (cases, labels) in enumerate(
-                zip(self.cases_dataset, self.labels_dataset)
-            ):
-                batch_inside_indices = tf.range(cases.shape[0], dtype=tf.int32)
-                batch_indices = batch_index * self.batch_size + batch_inside_indices
-
-                # Filter the batch to keep only candidate indices.
-                if nb_selection == 0:
-                    candidates_indices = batch_indices
-                else:
-                    candidates_indices = tf.convert_to_tensor(
-                        np.setdiff1d(batch_indices, selection_indices)
+        # pylint: disable=too-many-statements
+        assert 0 < nb_prototypes, "`nb_prototypes` should be at least 1."
+
+        # initialize variables with placeholders
+        # final prototypes variables
+        # (np, 2) - final prototypes indices
+        self.prototypes_indices = tf.Variable(tf.fill((nb_prototypes, 2), -1))
+        # (np,) - final prototypes weights
+        self.prototypes_weights = tf.Variable(tf.zeros((nb_prototypes,), dtype=tf.float32))
+        # (np, d) - final prototypes cases
+        self.prototypes = tf.Variable(tf.zeros((nb_prototypes, self.nb_features), dtype=tf.float32))
+
+        # kernel matrix variables
+        # (np, np) - kernel matrix between selected prototypes
+        selection_selection_kernel = tf.Variable(tf.zeros((nb_prototypes, nb_prototypes),
+                                                          dtype=tf.float32))
+        # (nb, b, np) - kernel matrix between samples and selected prototypes
+        samples_selection_kernel = tf.Variable(tf.zeros((*self.kernel_diag.shape, nb_prototypes)))
+
+        # (nb, b) - mask encoding the selected prototypes
+        mask_of_selected = tf.Variable(tf.fill(self.kernel_diag.shape, False))
+
+        # (np,) - selected column means
+        selection_kernel_col_means = tf.Variable(tf.zeros((nb_prototypes,), dtype=tf.float32))
+
+        # iterate until we find all the prototypes
+        for nb_selected in range(nb_prototypes):
+            # initialize
+            best_objective = tf.constant(-np.inf, dtype=tf.float32)
+
+            # iterate over the batches
+            for batch_index, cases in enumerate(self.cases_dataset):
+                # (b,)
+                candidates_batch_mask = tf.math.logical_not(mask_of_selected[batch_index])
+
+                # last batch, pad with False
+                if cases.shape[0] < self.batch_size:
+                    candidates_batch_mask = tf.math.logical_and(
+                        candidates_batch_mask, tf.range(self.batch_size) < cases.shape[0]
                     )
-
-                nb_candidates = candidates_indices.shape[0]
-
-                if nb_candidates == 0:
+                # no candidates in the batch, skipping
+                if not tf.reduce_any(candidates_batch_mask):
                     continue
-                candidates_inside_indices = candidates_indices % self.batch_size
-                candidates_cases = tf.gather(cases, candidates_inside_indices)
-                candidates_labels = tf.gather(labels, candidates_inside_indices)
-
-                # Compute the candidates-selection kernel for the batch
-                if nb_selection == 0:
-                    candidates_selection_kernel = None
-                else:
-                    candidates_last_selected_kernel = self.kernel_fn(
-                        candidates_cases, selection_cases[-1:, :],
-                        candidates_labels, selection_labels[-1:]
-                    )
-                    candidates_selection_kernel = tf.concat(
-                        [tf.gather(all_candidates_selection_kernel, candidates_indices, axis=0),
-                         candidates_last_selected_kernel],
-                        axis=1
+                # compute the kernel matrix between the last selected prototypes and the candidates
+                if nb_selected > 0:
+                    # (b,)
+                    batch_samples_last_selection_kernel = self.kernel_fn(
+                        cases, last_selected
+                    )[:, 0]
+                    
samples_selection_kernel[batch_index, :cases.shape[0], nb_selected - 1].assign( + batch_samples_last_selection_kernel ) - all_candidates_last_selected_kernel = tf.tensor_scatter_nd_update( - all_candidates_last_selected_kernel, - tf.expand_dims(candidates_indices, axis=1), - tf.squeeze(candidates_last_selected_kernel, axis=1) + + # (b, |S|) + batch_candidates_selection_kernel =\ + samples_selection_kernel[batch_index, :cases.shape[0], :nb_selected] + # (bc, |S|) + batch_candidates_selection_kernel = tf.boolean_mask( + tensor=batch_candidates_selection_kernel, + mask=candidates_batch_mask[:cases.shape[0]], + axis=0, ) - # Compute the objectives for the batch - objectives, objectives_weights = self.compute_objectives( - selection_indices, selection_cases, selection_weights, - selection_selection_kernel, candidates_indices, candidates_selection_kernel + else: + batch_candidates_selection_kernel = None + + # extract kernel values for the batch + # (bc,) + batch_candidates_kernel_diag = self.kernel_diag[batch_index][candidates_batch_mask] + # (bc,) + batch_candidates_kernel_col_means =\ + self.kernel_col_means[batch_index][candidates_batch_mask] + + # compute the objectives for the batch + # (bc,), (bc, |S| + 1) + objectives, objectives_weights = self._compute_batch_objectives( + batch_candidates_kernel_diag, + batch_candidates_kernel_col_means, + selection_kernel_col_means[:nb_selected], + batch_candidates_selection_kernel, + selection_selection_kernel[:nb_selected, :nb_selected], ) - # Select the best objective in the batch + # select the best candidate in the batch objectives_argmax = tf.argmax(objectives) + batch_best_objective = tf.gather(objectives, objectives_argmax) - if (best_objective is None)\ - or (tf.gather(objectives, objectives_argmax) > best_objective): - best_objective = tf.gather(objectives, objectives_argmax) - best_indice = tf.squeeze(tf.gather(candidates_indices, objectives_argmax)) - best_case = tf.gather(candidates_cases, objectives_argmax) - best_label = tf.gather(candidates_labels, objectives_argmax) + if batch_best_objective > best_objective: + best_objective = batch_best_objective + best_batch_index = batch_index + best_index = tf.range(self.batch_size)[candidates_batch_mask][objectives_argmax] + best_case = cases[best_index] if objectives_weights is not None: - best_weights = tf.squeeze(tf.gather(objectives_weights, objectives_argmax)) - - # Update the all_candidates-selection kernel - if nb_selection != 0: - all_candidates_selection_kernel = tf.concat( - [all_candidates_selection_kernel, - tf.expand_dims(all_candidates_last_selected_kernel, axis=1)], - axis=1) - - # Update the selection-selection kernel - if nb_selection == 0: - selection_selection_kernel = tf.gather(self.diag, [[best_indice]]) - else: - selection_selection_kernel = tf.pad(selection_selection_kernel, [[0, 1], [0, 1]]) - - best_candidate_selection_kernel = tf.gather(all_candidates_selection_kernel, - [best_indice], axis=0) - best_candidate_selection_kernel = tf.pad(best_candidate_selection_kernel, - [[nb_selection, 0], [0, 1]]) - - best_candidate_diag = tf.expand_dims(tf.gather(self.diag, [best_indice]), axis=-1) - best_candidate_diag = tf.pad(best_candidate_diag, - [[nb_selection, 0], [nb_selection, 0]]) - - selection_selection_kernel = selection_selection_kernel + best_candidate_diag\ - + best_candidate_selection_kernel\ - + tf.transpose(best_candidate_selection_kernel) - - # Update selection indices, cases and labels - selection_indices = tf.concat([selection_indices, [best_indice]], axis=0) - 
selection_cases = tf.concat([selection_cases, [best_case]], axis=0) - selection_labels = tf.concat([selection_labels, [best_label]], axis=0) - - # Update selection weights - selection_weights = self.update_selection_weights( - selection_indices, selection_weights, selection_selection_kernel, - best_indice, best_weights, best_objective) - - k += 1 - - prototypes_indices = selection_indices - prototypes = selection_cases - prototypes_labels = selection_labels - prototypes_weights = selection_weights - - # Normalize the weights - prototypes_weights = prototypes_weights / tf.reduce_sum(prototypes_weights) - - return prototypes_indices, prototypes, prototypes_labels, prototypes_weights - - def find_examples(self, - inputs: Union[tf.Tensor, np.ndarray], - _ = None - ) -> dict: - """ - Search the samples to return as examples. Called by the explain methods. - It may also return the indices corresponding to the samples, - based on `return_indices` value. - - Parameters - ---------- - inputs - Tensor or Array. Input samples to be explained. - Assumed to have been already projected. - Expected shape among (N, W), (N, T, W), (N, W, H, C). - - Returns - ------- - dict - Dictionary potentially containing the following elements: - - "examples" : the expected examples, - the inputs may be included in the first position. (n, k(+1), ...) - - "distances" : the distances between the inputs and the corresponding examples. - They are associated to the examples. (n, k, ...) - """ - - # look for closest prototypes to projected inputs - knn_output = self.knn(inputs, _) + best_weights = objectives_weights[objectives_argmax] + + # update the selected prototypes + # pylint: disable=possibly-used-before-assignment + last_selected = best_case[tf.newaxis, :] + mask_of_selected[best_batch_index, best_index].assign(True) + self.prototypes_indices[nb_selected].assign([best_batch_index, best_index]) + self.prototypes[nb_selected].assign(best_case) + + # update selected-selected kernel matrix (S = S ∪ c) + selection_selection_kernel[nb_selected, nb_selected].assign( + self.kernel_diag[best_batch_index, best_index] + ) + if nb_selected > 0: + # (|S|,) + new_selected = samples_selection_kernel[best_batch_index, best_index, :nb_selected] - # obtain closest prototypes indices with respect to the prototypes - indices_wrt_prototypes = knn_output["indices"] + # add the new row and column to the selected-selected kernel matrix + selection_selection_kernel[nb_selected, :nb_selected].assign( + new_selected + ) + selection_selection_kernel[:nb_selected, nb_selected].assign( + new_selected + ) - # convert to unique indices - indices_wrt_prototypes = indices_wrt_prototypes[:, :, 0] * self.batch_size\ - + indices_wrt_prototypes[:, :, 1] + # update the selected column means + selection_kernel_col_means[nb_selected].assign( + self.kernel_col_means[best_batch_index, best_index] + ) - # get prototypes indices with respect to the dataset - indices = tf.gather(self.prototypes_indices, indices_wrt_prototypes) + # update the selected weights + if not hasattr(self, "_update_selection_weights"): + # pylint: disable=used-before-assignment + self.prototypes_weights[:nb_selected + 1].assign(best_weights) + else: + self._update_selection_weights( + selection_kernel_col_means[:nb_selected + 1], + selection_selection_kernel[:nb_selected + 1, :nb_selected + 1], + self.kernel_diag[best_batch_index, best_index], + best_objective, + ) - # convert back to batch-element indices - batch_indices, elem_indices = indices // self.batch_size, indices % 
self.batch_size
-        indices = tf.stack([batch_indices, elem_indices], axis=-1)
+        # normalize the weights
+        self.prototypes_weights.assign(
+            self.prototypes_weights / tf.reduce_sum(self.prototypes_weights)
+        )
-        knn_output["indices"] = indices
+        # convert variables to tensors
+        self.prototypes_indices = tf.convert_to_tensor(self.prototypes_indices)
+        self.prototypes = tf.convert_to_tensor(self.prototypes)
+        self.prototypes_weights = tf.convert_to_tensor(self.prototypes_weights)
-        return knn_output
+        assert tf.reduce_sum(tf.cast(mask_of_selected, tf.int32)) == nb_prototypes,\
+            "The number of prototypes found is not equal to the number of prototypes expected."

From 145f0d5e25c998bbff8afc119aae249ff1181099 Mon Sep 17 00:00:00 2001
From: POCHE
Date: Tue, 24 Sep 2024 11:33:31 +0200
Subject: [PATCH 125/138] tests prototypes: adapt to prototypes changes and
 complete

---
 tests/example_based/test_prototypes.py | 432 ++++++++++++-------------
 tests/utils.py                         |   6 +-
 2 files changed, 203 insertions(+), 235 deletions(-)

diff --git a/tests/example_based/test_prototypes.py b/tests/example_based/test_prototypes.py
index fe1ae962..7c890d63 100644
--- a/tests/example_based/test_prototypes.py
+++ b/tests/example_based/test_prototypes.py
@@ -19,262 +19,230 @@ def test_prototypes_global_explanations_basic():
     Test prototypes shapes and uniqueness.
     """
     # Setup
-    k = 3
+    k = 2
     nb_prototypes = 5
-    nb_classes = 3
+    nb_classes = 2
     gamma = 0.026
-
-    x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20)
-
-    identity_projection = Projection(
-        space_projection=lambda inputs, targets=None: inputs
-    )
-
-    for kernel_type in ["local", "global"]:
-        for method_class in [ProtoGreedy, ProtoDash, MMDCritic]:
-            # compute general prototypes
-            method = method_class(
-                cases_dataset=x_train,
-                labels_dataset=y_train,
-                k=k,
-                projection=identity_projection,
-                batch_size=8,
-                distance="euclidean",
-                nb_prototypes=nb_prototypes,
-                kernel_type=kernel_type,
-                gamma=gamma,
-            )
-            # extract prototypes
-            prototypes_dict = method.get_global_prototypes()
-            prototypes = prototypes_dict["prototypes"]
-            prototypes_indices = prototypes_dict["prototypes_indices"]
-            prototypes_labels = prototypes_dict["prototypes_labels"]
-            prototypes_weights = prototypes_dict["prototypes_weights"]
-
-            # check shapes
-            assert prototypes.shape == (nb_prototypes,) + x_train.shape[1:]
-            assert prototypes_indices.shape == (nb_prototypes,)
-            assert prototypes_labels.shape == (nb_prototypes,)
-            assert prototypes_weights.shape == (nb_prototypes,)
-
-            # check uniqueness
-            assert len(prototypes_indices) == len(tf.unique(prototypes_indices)[0])
-
-            # for each prototype
-            for i in range(nb_prototypes):
+    batch_size = 8  # TODO: test with a batch_size smaller than nb_prototypes
+
+    x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20, n_dims=3)
+    x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=6, n_dims=3)
+
+    for method_class in [ProtoGreedy, ProtoDash, MMDCritic]:
+        # compute general prototypes
+        method = method_class(
+            cases_dataset=x_train,
+            labels_dataset=y_train,
+            k=k,
+            batch_size=batch_size,
+            case_returns=["examples", "distances", "labels", "indices"],
+            distance="euclidean",
+            nb_prototypes=nb_prototypes,
+            gamma=gamma,
+        )
+
+        # ======================
+        # Test global prototypes
+
+        # extract prototypes
+        prototypes = method.prototypes
+        prototypes_indices = method.prototypes_indices
+        prototypes_labels = method.prototypes_labels
+        prototypes_weights = method.prototypes_weights
+
+        # 
check shapes + assert prototypes.shape == (nb_prototypes,) + x_train.shape[1:] + assert prototypes_indices.shape == (nb_prototypes, 2) + assert prototypes_labels.shape == (nb_prototypes,) + assert prototypes_weights.shape == (nb_prototypes,) + + # check uniqueness + flatten_indices = prototypes_indices[:, 0] * batch_size + prototypes_indices[:, 1] + assert len(tf.unique(flatten_indices)[0]) == nb_prototypes + + # for each prototype + for i in range(nb_prototypes): + # check prototypes are in the dataset and correspond to the index + assert tf.reduce_all(tf.equal(prototypes[i], x_train[flatten_indices[i]])) + + # same for labels + assert tf.reduce_all(tf.equal(prototypes_labels[i], y_train[flatten_indices[i]])) + + # check indices are in the dataset + assert flatten_indices[i] >= 0 and flatten_indices[i] < x_train.shape[0] + + # ===================== + # Test local prototypes + + # compute local explanations + outputs = method.explain(x_test) + examples = outputs["examples"] + distances = outputs["distances"] + labels = outputs["labels"] + indices = outputs["indices"] + + # check shapes + assert examples.shape == (x_test.shape[0], k) + x_train.shape[1:] + assert distances.shape == (x_test.shape[0], k) + assert labels.shape == (x_test.shape[0], k) + assert indices.shape == (x_test.shape[0], k, 2) + + assert tf.reduce_all(indices[:, :, 0] >= 0) + assert tf.reduce_all(indices[:, :, 0] < (1 + x_train.shape[0] // batch_size)) + assert tf.reduce_all(indices[:, :, 1] >= 0) + assert tf.reduce_all(indices[:, :, 1] < batch_size) + flatten_indices = indices[:, :, 0] * batch_size + indices[:, :, 1] + + # for each sample + for i in range(x_test.shape[0]): + # check first closest prototype label is the same as the sample label + assert tf.reduce_all(tf.equal(labels[i], y_test[i])) + + for j in range(k): # check prototypes are in the dataset and correspond to the index - assert tf.reduce_all(tf.equal(prototypes[i], x_train[prototypes_indices[i]])) + assert tf.reduce_all(tf.equal(examples[i, j], x_train[flatten_indices[i, j]])) # same for labels - assert tf.reduce_all(tf.equal(prototypes_labels[i], y_train[prototypes_indices[i]])) - - # check indices are in the dataset - assert prototypes_indices[i] >= 0 and prototypes_indices[i] < x_train.shape[0] - - -def test_prototypes_local_explanations_basic(): - """ - Test prototypes local explanations. 
-    """
-    # Setup
-    k = 3
-    nb_prototypes = 5
-    nb_classes = 3
-    batch_size = 8
-    gamma = 0.026
-
-    x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20)
-    x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10)
-
-    identity_projection = Projection(
-        space_projection=lambda inputs, targets=None: inputs
-    )
-
-    for kernel_type in ["local", "global"]:
-        for method_class in [ProtoGreedy, ProtoDash, MMDCritic]:
-            # compute general prototypes
-            method = method_class(
-                cases_dataset=x_train,
-                labels_dataset=y_train,
-                k=k,
-                projection=identity_projection,
-                case_returns=["examples", "distances", "labels", "indices"],
-                batch_size=batch_size,
-                distance="euclidean",
-                nb_prototypes=nb_prototypes,
-                kernel_type=kernel_type,
-                gamma=gamma,
-            )
-            # extract prototypes
-            prototypes_dict = method.get_global_prototypes()
-            prototypes = prototypes_dict["prototypes"]
-            prototypes_indices = prototypes_dict["prototypes_indices"]
-            prototypes_labels = prototypes_dict["prototypes_labels"]
-            prototypes_weights = prototypes_dict["prototypes_weights"]
-
-            # compute local explanations
-            outputs = method.explain(x_test)
-            examples = outputs["examples"]
-            distances = outputs["distances"]
-            labels = outputs["labels"]
-            indices = outputs["indices"]
-
-            # check shapes
-            assert examples.shape == (x_test.shape[0], k) + x_train.shape[1:]
-            assert distances.shape == (x_test.shape[0], k)
-            assert labels.shape == (x_test.shape[0], k)
-            assert indices.shape == (x_test.shape[0], k, 2)
-
-            # for each sample
-            for i in range(x_test.shape[0]):
-                # check first closest prototype label is the same as the sample label
-                assert tf.reduce_all(tf.equal(labels[i, 0], y_test[i]))
-
-                for j in range(k):
-                    # check indices in prototypes' indices
-                    index = indices[i, j, 0] * batch_size + indices[i, j, 1]
-                    assert index in prototypes_indices
-
-                    # check examples are in prototypes
-                    assert tf.reduce_all(tf.equal(prototypes[prototypes_indices == index], examples[i, j]))
-
-                    # check indices are in the dataset
-                    assert tf.reduce_all(tf.equal(x_train[index], examples[i, j]))
-
-                    # check distances
-                    assert almost_equal(distances[i, j], tf.norm(x_test[i] - x_train[index]), epsilon=1e-5)
-
-                    # check labels
-                    assert tf.reduce_all(tf.equal(labels[i, j], y_train[index]))


def test_prototypes_global_sanity_check():
    """
    Test prototypes global explanations sanity checks.
-    Check: For n separated gaussians, for n requested prototypes, there should be 1 prototype per gaussian.
+    Check: For n separated gaussians,
+    for n requested prototypes,
+    there should be 1 prototype per gaussian.
    """
-
+    # TODO: the first two prototypes seem to always come from the same class; this should be investigated
    # Setup
-    k = 3
+    k = 2
    nb_prototypes = 3
    gamma = 0.026

-    x_train, y_train = get_gaussian_data(nb_classes=nb_prototypes, nb_samples_class=20)
+    x_train, y_train = get_gaussian_data(nb_classes=nb_prototypes, nb_samples_class=5, n_dims=3)

-    identity_projection = Projection(
-        space_projection=lambda inputs, targets=None: inputs
-    )
+    print("DEBUG: test_prototypes_global_sanity_check: x_train", x_train)
+
+    for method_class in [MMDCritic, ProtoDash, ProtoGreedy]:
+        print("DEBUG: test_prototypes_global_sanity_check: method_class", method_class)
+        # compute general prototypes
+        method = method_class(
+            cases_dataset=x_train,
+            labels_dataset=y_train,
+            k=k,
+            batch_size=8,
+            nb_prototypes=nb_prototypes,
+            gamma=gamma,
+        )
+        # extract prototypes
+        prototypes_labels = method.get_global_prototypes()["prototypes_labels"]
+        print("DEBUG: test_prototypes_global_sanity_check: y_train", y_train)
+        print("DEBUG: test_prototypes_global_sanity_check: prototypes_labels", prototypes_labels)
+
+        # check 1
+        assert len(tf.unique(prototypes_labels)[0]) == nb_prototypes

-    for kernel_type in ["local", "global"]:
-        for method_class in [ProtoGreedy, ProtoDash, MMDCritic]:
-            # compute general prototypes
-            method = method_class(
-                cases_dataset=x_train,
-                labels_dataset=y_train,
-                k=k,
-                projection=identity_projection,
-                batch_size=8,
-                distance="euclidean",
-                nb_prototypes=nb_prototypes,
-                kernel_type=kernel_type,
-                gamma=gamma,
-            )
-            # extract prototypes
-            prototypes_dict = method.get_global_prototypes()
-            prototypes = prototypes_dict["prototypes"]
-            prototypes_indices = prototypes_dict["prototypes_indices"]
-            prototypes_labels = prototypes_dict["prototypes_labels"]
-            prototypes_weights = prototypes_dict["prototypes_weights"]
-
-            # check 1
-            assert len(tf.unique(prototypes_labels)[0]) == nb_prototypes
-
-
-def test_prototypes_local_explanations_with_projection():
+
+def test_prototypes_with_projection():
    """
-    Test prototypes local explanations with a projection.
+    Test prototypes shapes and uniqueness.
    """
    # Setup
-    k = 3
-    nb_prototypes = 5
-    nb_classes = 3
-    batch_size = 8
+    k = 2
+    nb_prototypes = 10
+    nb_classes = 2
    gamma = 0.026
+    batch_size = 8  # TODO: test with a batch_size smaller than nb_prototypes

-    x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20)
-    x_train_bis, _ = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20)
-    x_train = tf.concat([x_train, x_train_bis], axis=1)  # make a dataset with two dimensions
+    x_train, y_train = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=20, n_dims=3)

-    x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=10)
+    x_test, y_test = get_gaussian_data(nb_classes=nb_classes, nb_samples_class=6, n_dims=3)

-    projection = Projection(
-        space_projection=lambda inputs, targets=None: tf.reduce_mean(inputs, axis=1, keepdims=True)
+    # [10, 10, 10] -> [15, 15]
+    # [20, 20, 20] -> [30, 30]
+    # [30, 30, 30] -> [45, 45]
+    weights = tf.constant([[1.0, 0.0],
+                           [0.5, 0.5],
+                           [0.0, 1.0],],
+                          dtype=tf.float32)
+
+    weighted_projection = Projection(
+        space_projection=lambda inputs, targets=None: inputs @ weights
    )

-    for kernel_type in ["local", "global"]:
-        for method_class in [ProtoGreedy, ProtoDash, MMDCritic]:
-            # compute general prototypes
-            method = method_class(
-                cases_dataset=x_train,
-                labels_dataset=y_train,
-                k=k,
-                projection=projection,
-                case_returns=["examples", "distances", "labels", "indices"],
-                batch_size=batch_size,
-                distance="euclidean",
-                nb_prototypes=nb_prototypes,
-                kernel_type=kernel_type,
-                gamma=gamma,
-            )
-            # extract prototypes
-            prototypes_dict = method.get_global_prototypes()
-            prototypes = prototypes_dict["prototypes"]
-            prototypes_indices = prototypes_dict["prototypes_indices"]
-            prototypes_labels = prototypes_dict["prototypes_labels"]
-            prototypes_weights = prototypes_dict["prototypes_weights"]
-
-            # check shapes
-            assert prototypes.shape == (nb_prototypes,) + x_train.shape[1:]
-            assert prototypes_indices.shape == (nb_prototypes,)
-            assert prototypes_labels.shape == (nb_prototypes,)
-            assert prototypes_weights.shape == (nb_prototypes,)
-
-            # compute local explanations
-            outputs = method.explain(x_test)
-            examples = outputs["examples"]
-            distances = outputs["distances"]
-            labels = outputs["labels"]
-            indices = outputs["indices"]
-
-            # check shapes
-            assert examples.shape == (x_test.shape[0], k) + x_train.shape[1:]
-            assert distances.shape == (x_test.shape[0], k)
-            assert labels.shape == (x_test.shape[0], k)
-            assert indices.shape == (x_test.shape[0], k, 2)
-
-            # for each sample
-            for i in range(x_test.shape[0]):
-                # check first closest prototype label is the same as the sample label
-                assert tf.reduce_all(tf.equal(labels[i, 0], y_test[i]))
-
-                for j in range(k):
-                    # check indices in prototypes' indices
-                    index = indices[i, j, 0] * batch_size + indices[i, j, 1]
-                    assert index in prototypes_indices
-
-                    # check examples are in prototypes
-                    assert tf.reduce_all(tf.equal(prototypes[prototypes_indices == index], examples[i, j]))
-
-                    # check indices are in the dataset
-                    assert tf.reduce_all(tf.equal(x_train[index], examples[i, j]))
-
-                    # check labels
-                    assert tf.reduce_all(tf.equal(labels[i, j], y_train[index]))
-
-                    # check distances
-                    assert almost_equal(
-                        distances[i, j],
-                        tf.norm(tf.reduce_mean(x_train[index]) - tf.reduce_mean(x_test[i])),
-                        epsilon=1e-5
-                    )
+    for method_class in [ProtoGreedy, ProtoDash, MMDCritic]:
+        # compute general prototypes
+        method = method_class(
+            cases_dataset=x_train,
+            labels_dataset=y_train,
+            k=k,
+            projection=weighted_projection,
+            
batch_size=batch_size, + case_returns=["examples", "distances", "labels", "indices"], + nb_prototypes=nb_prototypes, + gamma=gamma, + ) + + # ====================== + # Test global prototypes + + # extract prototypes + prototypes = method.prototypes + prototypes_indices = method.prototypes_indices + prototypes_labels = method.prototypes_labels + prototypes_weights = method.prototypes_weights + + # check shapes + assert prototypes.shape == (nb_prototypes,) + x_train.shape[1:] + assert prototypes_indices.shape == (nb_prototypes, 2) + assert prototypes_labels.shape == (nb_prototypes,) + assert prototypes_weights.shape == (nb_prototypes,) + + # check uniqueness + flatten_indices = prototypes_indices[:, 0] * batch_size + prototypes_indices[:, 1] + assert len(tf.unique(flatten_indices)[0]) == nb_prototypes + + # for each prototype + for i in range(nb_prototypes): + # check prototypes are in the dataset and correspond to the index + assert tf.reduce_all(tf.equal(prototypes[i], x_train[flatten_indices[i]])) + + # same for labels + assert tf.reduce_all(tf.equal(prototypes_labels[i], y_train[flatten_indices[i]])) + + # check indices are in the dataset + assert flatten_indices[i] >= 0 and flatten_indices[i] < x_train.shape[0] + + # ===================== + # Test local prototypes + + # compute local explanations + outputs = method.explain(x_test) + examples = outputs["examples"] + distances = outputs["distances"] + labels = outputs["labels"] + indices = outputs["indices"] + + # check shapes + assert examples.shape == (x_test.shape[0], k) + x_train.shape[1:] + assert distances.shape == (x_test.shape[0], k) + assert labels.shape == (x_test.shape[0], k) + assert indices.shape == (x_test.shape[0], k, 2) + + assert tf.reduce_all(indices[:, :, 0] >= 0) + assert tf.reduce_all(indices[:, :, 0] < (1 + x_train.shape[0] // batch_size)) + assert tf.reduce_all(indices[:, :, 1] >= 0) + assert tf.reduce_all(indices[:, :, 1] < batch_size) + flatten_indices = indices[:, :, 0] * batch_size + indices[:, :, 1] + + # for each sample + for i in range(x_test.shape[0]): + # check first closest prototype label is the same as the sample label + assert tf.reduce_all(tf.equal(labels[i], y_test[i])) + + for j in range(k): + # check prototypes are in the dataset and correspond to the index + assert tf.reduce_all(tf.equal(examples[i, j], x_train[flatten_indices[i, j]])) + + # same for labels + assert tf.reduce_all(tf.equal(labels[i, j], y_train[flatten_indices[i, j]])) diff --git a/tests/utils.py b/tests/utils.py index 000b7f01..e7ae014e 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -255,20 +255,20 @@ def download_file(identifier: str, if chunk: file.write(chunk) -def get_gaussian_data(nb_classes=3, nb_samples_class=20): +def get_gaussian_data(nb_classes=3, nb_samples_class=20, n_dims=1): tf.random.set_seed(42) sigma = 1 mu = [10 * (id + 1) for id in range(nb_classes)] X = tf.concat([ - tf.random.normal(shape=(nb_samples_class,1), mean=mu[i], stddev=sigma, dtype=tf.float32) + tf.random.normal(shape=(nb_samples_class, n_dims), mean=mu[i], stddev=sigma, dtype=tf.float32) for i in range(nb_classes) ], axis=0) y = tf.concat([ tf.ones(shape=(nb_samples_class), dtype=tf.int32) * i - for i in range(3) + for i in range(nb_classes) ], axis=0) return(X, y) From c8a0abe3628bfe89aaac141d36dfe4db0f23e7ed Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 25 Sep 2024 11:51:50 +0200 Subject: [PATCH 126/138] prototypes: small fix --- xplique/example_based/search_methods/proto_dash_search.py | 2 +- 
xplique/example_based/search_methods/proto_greedy_search.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/xplique/example_based/search_methods/proto_dash_search.py b/xplique/example_based/search_methods/proto_dash_search.py index c57c6a62..6df09b51 100644 --- a/xplique/example_based/search_methods/proto_dash_search.py +++ b/xplique/example_based/search_methods/proto_dash_search.py @@ -189,7 +189,7 @@ def _update_selection_weights(self, # use w* = K^-1 * u as the optimal weights # (|S|,) selection_weights = tf.linalg.matvec(K_inv, u) - selection_weights = tf.abs(selection_weights) + selection_weights = tf.maximum(selection_weights, 0) # update the weights self.prototypes_weights[:nb_selected].assign(selection_weights) diff --git a/xplique/example_based/search_methods/proto_greedy_search.py b/xplique/example_based/search_methods/proto_greedy_search.py index 4b9e720c..01ff76cb 100644 --- a/xplique/example_based/search_methods/proto_greedy_search.py +++ b/xplique/example_based/search_methods/proto_greedy_search.py @@ -359,8 +359,8 @@ def _compute_batch_objectives(self, K_inv = tf.linalg.inv(K + ProtoGreedySearch.EPSILON * tf.eye(K.shape[-1])) # (bc, |S| + 1) - w* = K^-1 mu_p - objectives_weights = tf.abs(tf.einsum("bsp,bp->bs", - K_inv, candidates_selection_kernel_col_means)) + objectives_weights = tf.einsum("bsp,bp->bs", K_inv, candidates_selection_kernel_col_means) + objectives_weights = tf.maximum(objectives_weights, 0) # (bc,) - (w*^T mu_p) weights_mu_p = tf.einsum("bp,bp->b", From 97fd2cf79efb961e3dd7c50a0eeed23e4fa3cb9d Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 25 Sep 2024 16:05:47 +0200 Subject: [PATCH 127/138] ci: restrict numpy version and specify torch test --- requirements.txt | 4 ++-- setup.cfg | 18 +++++++++--------- setup.py | 2 +- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/requirements.txt b/requirements.txt index b7985ca3..865df0bf 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ -tensorflow >= 2.1.0, < 2.16 -numpy +numpy<2.0.0 +tensorflow>=2.1.0 scikit-learn scikit-image matplotlib diff --git a/setup.cfg b/setup.cfg index 3a85bd3f..4c6b64dd 100644 --- a/setup.cfg +++ b/setup.cfg @@ -42,27 +42,27 @@ commands = deps = pytest pytest-cov - tf21: tensorflow ~= 2.2.0 - tf25: tensorflow ~= 2.5.0 - tf28: tensorflow ~= 2.8.0 - tf211: tensorflow ~= 2.11.0 + tf21: tensorflow ~= 2.2.0,<2.16 + tf25: tensorflow ~= 2.5.0,<2.16 + tf28: tensorflow ~= 2.8.0,<2.16 + tf211: tensorflow ~= 2.11.0,<2.16 -rrequirements.txt commands = - pytest --cov=xplique --ignore=xplique/wrappers/pytorch.py --ignore=tests/wrappers/test_pytorch_wrapper.py --ignore=tests/concepts/test_craft_torch.py {posargs} + pytest --cov=xplique --ignore=xplique/wrappers/pytorch.py --ignore=tests/wrappers/test_pytorch_wrapper.py --ignore=tests/concepts/test_craft_torch.py --ignore=tests/example_based/test_torch.py {posargs} [testenv:py{38,39,310}-tf{25,28,211}-torch{111,113,200}] deps = pytest pytest-cov - tf25: tensorflow ~= 2.5.0 - tf28: tensorflow ~= 2.8.0 - tf211: tensorflow ~= 2.11.0 + tf25: tensorflow ~= 2.5.0,<2.16 + tf28: tensorflow ~= 2.8.0,<2.16 + tf211: tensorflow ~= 2.11.0,<2.16 torch111: torch == 1.11.0 torch113: torch == 1.13.0 torch200: torch -rrequirements.txt commands = - pytest --cov=xplique/wrappers/pytorch tests/wrappers/test_pytorch_wrapper.py tests/concepts/test_craft_torch.py + pytest --cov=xplique/wrappers/pytorch tests/wrappers/test_pytorch_wrapper.py tests/concepts/test_craft_torch.py tests/example_based/test_torch.py [mypy] 
check_untyped_defs = True diff --git a/setup.py b/setup.py index 96f0aebf..08a132ef 100644 --- a/setup.py +++ b/setup.py @@ -12,7 +12,7 @@ author="Thomas FEL", author_email="thomas_fel@brown.edu", license="MIT", - install_requires=['tensorflow>=2.1.0,<2.16', 'numpy', 'scikit-learn', 'scikit-image', + install_requires=['numpy<2.0.0', 'tensorflow>=2.1.0,<2.16.0', 'scikit-learn', 'scikit-image', 'matplotlib', 'scipy', 'opencv-python', 'deprecated'], extras_require={ "tests": ["pytest", "pylint"], From a4414dd727a5ed9796b1b18d93c831b0b5455096 Mon Sep 17 00:00:00 2001 From: POCHE Date: Wed, 25 Sep 2024 16:16:15 +0200 Subject: [PATCH 128/138] test example based: small fix --- tests/example_based/test_image_plot.py | 13 ++----------- tests/example_based/test_similar_examples.py | 6 ++---- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/tests/example_based/test_image_plot.py b/tests/example_based/test_image_plot.py index 9bc994a0..3d2ee11d 100644 --- a/tests/example_based/test_image_plot.py +++ b/tests/example_based/test_image_plot.py @@ -6,24 +6,15 @@ sys.path.append(os.getcwd()) -from math import prod, sqrt - -import numpy as np -import scipy import tensorflow as tf -from xplique.attributions import Occlusion, Saliency +from xplique.attributions import Occlusion -from xplique.example_based import Cole, SimilarExamples -from xplique.example_based.projections import Projection -from xplique.example_based.search_methods import KNN +from xplique.example_based import Cole from xplique.plots.image import plot_examples from tests.utils import ( - generate_data, generate_model, - almost_equal, - generate_timeseries_model, ) diff --git a/tests/example_based/test_similar_examples.py b/tests/example_based/test_similar_examples.py index 6be4577d..5d990fad 100644 --- a/tests/example_based/test_similar_examples.py +++ b/tests/example_based/test_similar_examples.py @@ -6,8 +6,6 @@ sys.path.append(os.getcwd()) -from math import prod, sqrt - import numpy as np import tensorflow as tf @@ -130,8 +128,8 @@ def test_similar_examples_return_multiple_elements(): # test distances assert almost_equal(distances[i, 0], 0) - assert almost_equal(distances[i, 1], sqrt(prod(input_shape))) - assert almost_equal(distances[i, 2], sqrt(prod(input_shape))) + assert almost_equal(distances[i, 1], np.sqrt(np.prod(input_shape))) + assert almost_equal(distances[i, 2], np.sqrt(np.prod(input_shape))) # test labels assert almost_equal(labels[i, 0], y_train[i + 1]) From 4debfd6daaba6d0ac0a7c604d0ab4cf8b5efd070 Mon Sep 17 00:00:00 2001 From: POCHE Date: Mon, 30 Sep 2024 15:36:46 +0200 Subject: [PATCH 129/138] rise: fix pylint --- xplique/attributions/rise.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/xplique/attributions/rise.py b/xplique/attributions/rise.py index 99220ef7..71d88d3f 100644 --- a/xplique/attributions/rise.py +++ b/xplique/attributions/rise.py @@ -172,6 +172,10 @@ def _get_masks(input_shape: Tuple[int], downsampled_shape = grid_size mask_shape = (nb_samples, *downsampled_shape, 1) + else: + raise ValueError("Data type is not supported. " + "Only tabular, time series and image data") + downsampled_masks = tf.random.uniform(mask_shape, 0, 1) binary_masks = downsampled_masks < preservation_probability @@ -232,6 +236,10 @@ def _apply_masks( masks = tf.image.random_crop(upsampled_masks, (binary_masks.shape[0], *single_input.shape[:-1], 1)) + else: + raise ValueError("Data type is not supported. 
" + "Only tabular, time series and image data") + masked_input = masks * tf.expand_dims(single_input, 0) + (1 - masks) * mask_value return masked_input, masks From ef7db467dc802dfbde631fcaba7c7e4a1da12710 Mon Sep 17 00:00:00 2001 From: POCHE Date: Thu, 3 Oct 2024 12:28:43 +0200 Subject: [PATCH 130/138] all: resolve part of example based pr comments --- .gitignore | 1 + .pylintrc | 20 --- docs/api/example_based/api_example_based.md | 30 ++-- .../label_aware_counter_factuals.md | 40 +++-- .../counterfactuals/naive_counter_factuals.md | 37 +++-- docs/api/example_based/projections.md | 2 +- .../prototypes/api_prototypes.md | 21 ++- .../example_based/prototypes/mmd_critic.md | 36 +++- .../example_based/prototypes/proto_dash.md | 36 +++- .../example_based/prototypes/proto_greedy.md | 36 +++- docs/api/example_based/semifactuals/kleor.md | 76 +++------ .../example_based/similar_examples/cole.md | 32 ++-- tests/FreeMono.ttf | Bin 592752 -> 0 bytes .../test_datasets_harmonization.py | 111 ++++++------- tests/example_based/test_projections.py | 4 +- tests/example_based/test_prototypes.py | 17 +- .../test_tf_dataset_operation.py | 129 ++++++++++----- tests/example_based/test_torch.py | 155 ++++++++++++------ .../test_image_example_based_plot.py} | 0 tests/utils.py | 60 ------- xplique/example_based/base_example_method.py | 66 ++++---- xplique/example_based/counterfactuals.py | 117 ++++++------- .../convert_torch_to_tf.py | 24 ++- .../datasets_operations/harmonize.py | 56 +++---- .../tf_dataset_operations.py | 21 ++- .../example_based/projections/attributions.py | 7 +- xplique/example_based/projections/base.py | 12 +- xplique/example_based/projections/commons.py | 41 ++--- xplique/example_based/projections/hadamard.py | 113 ++++++------- .../example_based/projections/latent_space.py | 52 ++---- xplique/example_based/prototypes.py | 79 +++++---- xplique/example_based/search_methods/base.py | 21 ++- .../example_based/search_methods/common.py | 3 +- xplique/example_based/search_methods/kleor.py | 23 ++- xplique/example_based/search_methods/knn.py | 52 ++++-- .../search_methods/mmd_critic_search.py | 9 +- .../search_methods/proto_dash_search.py | 4 +- .../search_methods/proto_greedy_search.py | 13 +- xplique/example_based/semifactuals.py | 58 +++---- xplique/example_based/similar_examples.py | 93 ++++++----- xplique/plots/image.py | 6 +- xplique/types/__init__.py | 2 +- xplique/types/custom_type.py | 8 +- 43 files changed, 918 insertions(+), 805 deletions(-) delete mode 100644 .pylintrc delete mode 100644 tests/FreeMono.ttf rename tests/{example_based/test_image_plot.py => plots/test_image_example_based_plot.py} (100%) diff --git a/.gitignore b/.gitignore index 84161dc4..57036226 100644 --- a/.gitignore +++ b/.gitignore @@ -44,6 +44,7 @@ coverage.xml cover/ *test*.sh tests/concepts/checkpoints/ +tests/FreeMono.ttf # Environments .env diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 4d0f7d27..00000000 --- a/.pylintrc +++ /dev/null @@ -1,20 +0,0 @@ -[MASTER] -disable= - R0903, # allows to expose only one public method - R0914, # allow multiples local variables - E0401, # pending issue with pylint see pylint#2603 - E1123, # issues between pylint and tensorflow since 2.2.0 - E1120, # see pylint#3613 - C3001, # lambda function as variable - E1101, # (no-member), flag for every tf.keras - -[FORMAT] -max-line-length=100 -max-args=12 - -[SIMILARITIES] -min-similarity-lines=6 -ignore-comments=yes -ignore-docstrings=yes -ignore-imports=no -ignore-signatures=yes diff --git 
a/docs/api/example_based/api_example_based.md b/docs/api/example_based/api_example_based.md
index fd45c8f0..3affa7cd 100644
--- a/docs/api/example_based/api_example_based.md
+++ b/docs/api/example_based/api_example_based.md
@@ -27,17 +27,14 @@ At present, we made the following choices:
 projection = ProjectionMethod(model)
 
 explainer = ExampleMethod(
-    cases_dataset,
-    labels_dataset,
-    targets_dataset,
-    k,
-    projection,
-    case_returns,
-    batch_size,
-    **kwargs
+    cases_dataset=cases_dataset,
+    k=k,
+    projection=projection,
+    case_returns=case_returns,
+    distance=distance,
 )
 
-explanations = explainer.explain(inputs, targets)
+outputs_dict = explainer.explain(inputs, targets)
 ```
 
 We tried to keep the API as close as possible to the one of the attribution methods to keep a consistent experience for the users.
@@ -70,13 +67,15 @@ We can broadly categorize example-based methods into four families: similar exam
 
 ### Parameters ###
 
-- **cases_dataset** (`Union[tf.data.Dataset, tf.Tensor, np.ndarray]`): The dataset used to train the model, from which examples are extracted. It should be batched as TensorFlow provides no method to verify this. Ensure the dataset is not reshuffled at each iteration.
-- **labels_dataset** (`Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]]`): Labels associated with the examples in the cases dataset. Indices should match the `cases_dataset`.
-- **targets_dataset** (`Optional[Union[tf.data.Dataset, tf.Tensor, np.ndarray]]`): Targets associated with the `cases_dataset` for dataset projection, often the one-hot encoding of a model's predictions.
+`DatasetOrTensor = Union[tf.Tensor, np.ndarray, "torch.Tensor", tf.data.Dataset, "torch.utils.data.DataLoader"]`
+
+- **cases_dataset** (`DatasetOrTensor`): The dataset used to train the model; examples are extracted from this dataset. All datasets (cases, labels, and targets) should be of the same type. Supported types are: `tf.data.Dataset`, `torch.utils.data.DataLoader`, `tf.Tensor`, `np.ndarray`, `torch.Tensor`. For datasets with multiple columns, the first column is assumed to be the cases, the second the labels, and the third the targets. Warning: datasets tend to reshuffle at each iteration; ensure the datasets are not reshuffled, as we use indices into the dataset.
+- **labels_dataset** (`Optional[DatasetOrTensor]`): Labels associated with the examples in the cases dataset. It should have the same type as `cases_dataset`.
+- **targets_dataset** (`Optional[DatasetOrTensor]`): Targets associated with the `cases_dataset` for dataset projection, often the one-hot encoding of a model's predictions. See `projection` for details. It should have the same type as `cases_dataset`. It is not necessary for all projections; furthermore, projections which require it compute it internally by default.
 - **k** (`int`): The number of examples to retrieve per input.
 - **projection** (`Union[Projection, Callable]`): A projection or callable function that projects samples from the input space to the search space. The search space should be relevant for the model. (see [Projections](#projections))
-- **case_returns** (`Union[List[str], str]`): Elements to return in `self.explain()`. Default is "examples".
-- **batch_size** (`Optional[int]`): Number of samples processed simultaneously for projection and search. Ignored if `cases_dataset` is a `tf.data.Dataset`.
+- **case_returns** (`Union[List[str], str]`): Elements to return in `self.explain()`. Default is `"examples"`. 
`"all"` indicates that every possible output should be returned.
+- **batch_size** (`Optional[int]`): Number of samples processed simultaneously for projection and search. Ignored if a batched `tf.data.Dataset` or a batched `torch.utils.data.DataLoader` is provided as `cases_dataset`.
 
 !!!tips
     If the elements of your dataset are tuples (cases, labels), you can pass this dataset directly to the `cases_dataset`.
@@ -117,6 +116,9 @@ To know more about projections and their importance, you can refer to the [Proje
 
 ## Search Methods ##
 
+!!!info
+    The search methods are hidden from the user and only used internally. However, they help to understand how the API works.
+
 Search methods are used to retrieve examples from the `cases_dataset` that are relevant to the input samples.
 
 !!!warning
diff --git a/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md b/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md
index 2701410c..95989d53 100644
--- a/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md
+++ b/docs/api/example_based/counterfactuals/label_aware_counter_factuals.md
@@ -14,7 +14,7 @@ In contrast to the [Naive Counterfactuals](../../counterfactuals/naive_counter_factuals/) approach, the Label Aware CounterFactuals leverage an *a priori* knowledge of the Counterfactuals' (CFs) targets to guide the search for the CFs (*e.g.* one is looking for a CF of the digit 8 in MNIST dataset within the digit 0 instances).
 
 !!!warning
-    Consequently, for this class, when a user call the `explain` method, the user is not expected to provide the targets corresponding to the input samples but rather a one-hot encoding of the label expected for the CFs.
+    Consequently, for this class, when a user calls the `explain` method, the user is expected to provide both the `targets` corresponding to the input samples and `cf_expected_classes`, a one-hot encoding of the label expected for the CFs. But in most cases, the `targets` can be set to `None` as they are computed internally by projections.
 
 !!!info
     One can use the `Projection` object to compute the distances between the samples (e.g. search for the CF in the latent space of a model).
@@ -23,28 +23,42 @@ In contrast to the [Naive Counterfactuals](../../counterfactuals/naive_counter_f
 
 ```python
 from xplique.example_based import LabelAwareCounterFactuals
+from xplique.example_based.projections import LatentSpaceProjection
 
-# load the training dataset
+# load the training dataset and the model
 cases_dataset = ... # load the training dataset
 targets_dataset = ... # load the one-hot encoding of predicted labels of the training dataset
+model = ...
+
+# load the test samples
+test_samples = ... # load the test samples to search for
+test_cf_expected_classes = ... 
diff --git a/docs/api/example_based/counterfactuals/naive_counter_factuals.md b/docs/api/example_based/counterfactuals/naive_counter_factuals.md
index 1982ea8e..3b5dd600 100644
--- a/docs/api/example_based/counterfactuals/naive_counter_factuals.md
+++ b/docs/api/example_based/counterfactuals/naive_counter_factuals.md
@@ -21,28 +21,41 @@ As it is mentioned in the [API documentation](../../api_example_based/), by sett

```python
from xplique.example_based import NaiveCounterFactuals
+from xplique.example_based.projections import LatentSpaceProjection

-# load the training dataset
+# load the training dataset and the model
cases_dataset = ... # load the training dataset
targets_dataset = ... # load the one-hot encoding of predicted labels of the training dataset
+model = ...
+
+# load the test samples
+test_samples = ... # load the test samples to search for
+test_targets = ... # compute a one-hot encoding of the model's predictions on the samples

# parameters
-k = 5
+k = 5 # number of examples for each input
+case_returns = "all" # elements returned by the explain function
distance = "euclidean"
+latent_layer = "last_conv" # where to split your model for the projection

-# instantiate the NaiveCounterfactuals object
-ncf = NaiveCounterFactuals(cases_dataset=cases_dataset,
-                           targets_dataset=targets_dataset,
-                           k=k,
-                           distance=distance,
-                          )
+# construct a projection with your model
+projection = LatentSpaceProjection(model, latent_layer=latent_layer)

-# load the test samples and targets
-test_samples = ... # load the test samples to search for
-test_targets = ... # load the one-hot encoding of the test samples' predictions
+# instantiate the NaiveCounterFactuals object
+ncf = NaiveCounterFactuals(
+    cases_dataset=cases_dataset,
+    targets_dataset=targets_dataset,
+    k=k,
+    projection=projection,
+    case_returns=case_returns,
+    distance=distance,
+)

# search the CFs for the test samples
-counterfactuals = ncf.explain(test_samples, test_targets)
+output_dict = ncf.explain(
+    inputs=test_samples,
+    targets=test_targets,
+)
```

## Notebooks
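The `test_targets` line above leaves the one-hot encoding to the reader. A minimal sketch of that step (assuming `model` and `test_samples` are the objects loaded in the example):

```python
import tensorflow as tf

# one-hot encode the classes predicted by the model,
# which is the format the `targets` arguments expect
predictions = model(test_samples)                   # (n, nb_classes)
predicted_classes = tf.argmax(predictions, axis=-1)
test_targets = tf.one_hot(predicted_classes, depth=predictions.shape[-1])
```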
diff --git a/docs/api/example_based/projections.md b/docs/api/example_based/projections.md
index c0495b20..ea34720d 100644
--- a/docs/api/example_based/projections.md
+++ b/docs/api/example_based/projections.md
@@ -64,7 +64,7 @@ rdm_targets = tf.random.uniform(shape=[5], minval=0, maxval=1000, dtype=tf.int32
rdm_targets = tf.one_hot(rdm_targets, depth=1000)

# project the samples
-projections = custom_projection(rdm_imgs, rdm_targets)
+projected_samples = custom_projection(rdm_imgs, rdm_targets)
```

{{xplique.example_based.projections.LatentSpaceProjection}}
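For reference, a callable like the `custom_projection` used above only needs to map `(inputs, targets)` to the search space. A minimal sketch (flattening is a placeholder choice; real projections typically rely on the model, e.g. a latent space):

```python
import tensorflow as tf

def custom_projection(inputs: tf.Tensor, targets: tf.Tensor = None) -> tf.Tensor:
    # project each sample to a flat vector; `targets` is accepted but unused here
    return tf.reshape(inputs, [tf.shape(inputs)[0], -1])
```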
diff --git a/docs/api/example_based/prototypes/api_prototypes.md b/docs/api/example_based/prototypes/api_prototypes.md
index 91e9d2d4..61751ffa 100644
--- a/docs/api/example_based/prototypes/api_prototypes.md
+++ b/docs/api/example_based/prototypes/api_prototypes.md
@@ -7,13 +7,24 @@ A prototype in AI explainability is a representative example from the data that

## Common API ##

```python
+# only for model explanations, define a projection based on the model
+projection = ProjectionMethod(model)
+
+# construct the explainer (it computes the global prototypes)
+explainer = PrototypesMethod(
+    cases_dataset=cases_dataset,
+    nb_global_prototypes=nb_global_prototypes,
+    nb_local_prototypes=nb_local_prototypes,
+    projection=projection,
+    case_returns=case_returns,
+    distance=distance,
+)

-explainer = Method(cases_dataset, labels_dataset, nb_local_prototypes, projection,
-                   case_returns, batch_size, distance, nb_global_prototypes)
# compute global explanation
-global_prototypes = explainer.get_global_prototypes()
+global_prototypes_dict = explainer.get_global_prototypes()
+
# compute local explanation
-local_prototypes = explainer(inputs)
+local_prototypes_dict = explainer(inputs)
```

@@ -37,7 +48,7 @@ The search method class related to a `Prototypes` class includes the following a
- `nb_global_prototypes` which represents the total number of prototypes desired to represent the entire dataset.
- `nb_local_prototypes` which represents the number of prototypes closest to the input and allows for a local explanation. This attribute is equivalent to $k$ in the other example-based methods.
-- `kernel_type`, `kernel_fn`, and `gamma` which are related to the [kernel](#how-to-choose-the-kernel) used to compute the [MMD distance](#what-is-mmd).
+- `kernel_fn` and `gamma` which are related to the [kernel](#how-to-choose-the-kernel) used to compute the [MMD distance](#what-is-mmd).

The prototype class has a `get_global_prototypes()` method, which calculates all the prototypes in the base dataset; these are called the global prototypes. The `explain` method then provides a local explanation, i.e., finds the prototypes closest to the input given as a parameter.

diff --git a/docs/api/example_based/prototypes/mmd_critic.md b/docs/api/example_based/prototypes/mmd_critic.md
index 020c4ccc..d80786cb 100644
--- a/docs/api/example_based/prototypes/mmd_critic.md
+++ b/docs/api/example_based/prototypes/mmd_critic.md
@@ -32,18 +32,38 @@ Second, to find criticisms $\mathcal{C}$, the same greedy algorithm is used to s

```python
from xplique.example_based import MMDCritic
+from xplique.example_based.projections import LatentSpaceProjection

-# load data and labels
-# ...
+# load the training dataset and the model
+cases_dataset = ... # load the training dataset
+model = ...
+
+# load the test samples
+test_samples = ... # load the test samples to search for
+
+# parameters
+case_returns = "all" # elements returned by the explain function
+latent_layer = "last_conv" # where to split your model for the projection
+nb_global_prototypes = 5
+nb_local_prototypes = 1
+kernel_fn = None # the default RBF kernel will be used; the distance will be based on it
+
+# construct a projection with your model
+projection = LatentSpaceProjection(model, latent_layer=latent_layer)
+
+mmd = MMDCritic(
+    cases_dataset=cases_dataset,
+    nb_global_prototypes=nb_global_prototypes,
+    nb_local_prototypes=nb_local_prototypes,
+    projection=projection,
+    case_returns=case_returns,
+)

-explainer = MMDCritic(cases_dataset, labels_dataset, targets_dataset, k,
-                      projection, case_returns, batch_size, distance,
-                      nb_prototypes, kernel_type,
-                      kernel_fn, gamma)
# compute global explanation
-global_prototypes = explainer.get_global_prototypes()
+global_prototypes = mmd.get_global_prototypes()
+
# compute local explanation
-local_prototypes = explainer(inputs)
+local_prototypes = mmd.explain(test_samples)
```

## Notebooks
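Since the keys of the dictionary returned by `get_global_prototypes()` are not spelled out in this patch, here is a quick, assumption-free way to inspect what it contains (continuing the MMDCritic example above):

```python
# list what the global-prototypes dictionary holds without assuming key names
global_prototypes = mmd.get_global_prototypes()
for key, value in global_prototypes.items():
    print(key, getattr(value, "shape", value))
```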
diff --git a/docs/api/example_based/prototypes/proto_dash.md b/docs/api/example_based/prototypes/proto_dash.md
index 5ddc130b..334c941a 100644
--- a/docs/api/example_based/prototypes/proto_dash.md
+++ b/docs/api/example_based/prototypes/proto_dash.md
@@ -35,18 +35,38 @@ where $w$ are non-negative weights for each prototype. The problem then consist

```python
from xplique.example_based import ProtoDash
+from xplique.example_based.projections import LatentSpaceProjection

-# load data and labels
-# ...
+# load the training dataset and the model
+cases_dataset = ... # load the training dataset
+model = ...
+
+# load the test samples
+test_samples = ... # load the test samples to search for
+
+# parameters
+case_returns = "all" # elements returned by the explain function
+latent_layer = "last_conv" # where to split your model for the projection
+nb_global_prototypes = 5
+nb_local_prototypes = 1
+kernel_fn = None # the default RBF kernel will be used; the distance will be based on it
+
+# construct a projection with your model
+projection = LatentSpaceProjection(model, latent_layer=latent_layer)
+
+protodash = ProtoDash(
+    cases_dataset=cases_dataset,
+    nb_global_prototypes=nb_global_prototypes,
+    nb_local_prototypes=nb_local_prototypes,
+    projection=projection,
+    case_returns=case_returns,
+)

-explainer = ProtoDash(cases_dataset, labels_dataset, targets_dataset, k,
-                      projection, case_returns, batch_size, distance,
-                      nb_prototypes, kernel_type,
-                      kernel_fn, gamma)
# compute global explanation
-global_prototypes = explainer.get_global_prototypes()
+global_prototypes = protodash.get_global_prototypes()
+
# compute local explanation
-local_prototypes = explainer(inputs)
+local_prototypes = protodash.explain(test_samples)
```

## Notebooks
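In the prototype examples above, `kernel_fn = None` falls back to the default RBF kernel. If a custom kernel is needed, here is a sketch of what an RBF `kernel_fn` could look like (the `(x, y, gamma)` signature is an assumption for illustration; check the class documentation for the exact one expected):

```python
import tensorflow as tf

def rbf_kernel(x: tf.Tensor, y: tf.Tensor, gamma: float = 1.0) -> tf.Tensor:
    # pairwise RBF (Gaussian) kernel between two sets of flattened samples
    x = tf.reshape(x, [tf.shape(x)[0], -1])
    y = tf.reshape(y, [tf.shape(y)[0], -1])
    squared_distances = tf.reduce_sum((x[:, None, :] - y[None, :, :]) ** 2, axis=-1)
    return tf.exp(-gamma * squared_distances)
```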
diff --git a/docs/api/example_based/prototypes/proto_greedy.md b/docs/api/example_based/prototypes/proto_greedy.md
index 108d70e2..e46d9eba 100644
--- a/docs/api/example_based/prototypes/proto_greedy.md
+++ b/docs/api/example_based/prototypes/proto_greedy.md
@@ -36,18 +36,38 @@ where $w$ are non-negative weights for each prototype. The problem then consist

```python
from xplique.example_based import ProtoGreedy
+from xplique.example_based.projections import LatentSpaceProjection

-# load data and labels
-# ...
+# load the training dataset and the model
+cases_dataset = ... # load the training dataset
+model = ...
+
+# load the test samples
+test_samples = ... # load the test samples to search for
+
+# parameters
+case_returns = "all" # elements returned by the explain function
+latent_layer = "last_conv" # where to split your model for the projection
+nb_global_prototypes = 5
+nb_local_prototypes = 1
+kernel_fn = None # the default RBF kernel will be used; the distance will be based on it
+
+# construct a projection with your model
+projection = LatentSpaceProjection(model, latent_layer=latent_layer)
+
+protogreedy = ProtoGreedy(
+    cases_dataset=cases_dataset,
+    nb_global_prototypes=nb_global_prototypes,
+    nb_local_prototypes=nb_local_prototypes,
+    projection=projection,
+    case_returns=case_returns,
+)

-explainer = ProtoGreedy(cases_dataset, labels_dataset, targets_dataset, k,
-                        projection, case_returns, batch_size, distance,
-                        nb_prototypes, kernel_type,
-                        kernel_fn, gamma)
# compute global explanation
-global_prototypes = explainer.get_global_prototypes()
+global_prototypes = protogreedy.get_global_prototypes()
+
# compute local explanation
-local_prototypes = explainer(inputs)
+local_prototypes = protogreedy.explain(test_samples)
```

## Notebooks

diff --git a/docs/api/example_based/semifactuals/kleor.md b/docs/api/example_based/semifactuals/kleor.md
index f8aa571c..9162f65d 100644
--- a/docs/api/example_based/semifactuals/kleor.md
+++ b/docs/api/example_based/semifactuals/kleor.md
@@ -36,70 +36,48 @@ We extended this to the $k$ nearest neighbors of the NUN for both approaches.

## Examples

```python
-from xplique.example_based import KLEORSimMiss
+from xplique.example_based import KLEORGlobalSim # or KLEORSimMiss
+from xplique.example_based.projections import LatentSpaceProjection

-# loading
+# load the training dataset and the model
cases_dataset = ... # load the training dataset
-targets = ... # load the one-hot encoding of predicted labels of the training dataset
+targets_dataset = ... # load the one-hot encoding of predicted labels of the training dataset
+model = ...

-# parameters
-k = 5
-distance = "euclidean"
-case_returns = ["examples", "nuns"]
-
-# instantiate the KLEOR object
-kleor_sim_miss = KLEORSimMiss(cases_dataset=cases_dataset,
-                              targets_dataset=targets,
-                              k=k,
-                              distance=distance,
-                             )
-
-# load the test samples and targets
+# load the test samples
test_samples = ... # load the test samples to search for
-test_targets = ... # load the one-hot encoding of the test samples' predictions
-
-# search the SFs for the test samples
-sim_miss_sf = kleor_sim_miss.explain(test_samples, test_targets)
-
-# get the semi-factuals
-semifactuals = global_sim_sf["examples"]
-
-# get the counterfactuals
-counterfactuals = global_sim_sf["nuns"]
-```
-
-```python
-from xplique.example_based import KLEORGlobalSim
-
-# loading
-cases_dataset = ... # load the training dataset
-targets = ... # load the one-hot encoding of predicted labels of the training dataset
+test_targets = ... # compute a one-hot encoding of the model's predictions on the samples

# parameters
-k = 5
+k = 1 # number of examples for each input
+case_returns = "all" # elements returned by the explain function
distance = "euclidean"
-case_returns = ["examples", "nuns"]
+latent_layer = "last_conv" # where to split your model for the projection

-# instantiate the KLEOR object
-kleor_global_sim = KLEORGlobalSim(cases_dataset=cases_dataset,
-                                  targets_dataset=targets,
-                                  k=k,
-                                  distance=distance,
-                                  case_returns=case_returns,
-                                 )
+# construct a projection with your model
+projection = LatentSpaceProjection(model, latent_layer=latent_layer)

-# load the test samples and targets
-test_samples = ... # load the test samples to search for
-test_targets = ... # load the one-hot encoding of the test samples' predictions
+# instantiate the KLEORGlobalSim object (could be KLEORSimMiss, the code does not change)
+sf_explainer = KLEORGlobalSim(
+    cases_dataset=cases_dataset,
+    targets_dataset=targets_dataset,
+    k=k,
+    projection=projection,
+    case_returns=case_returns,
+    distance=distance,
+)

# search the SFs for the test samples
-global_sim_sf = kleor_global_sim.explain(test_samples, test_targets)
+sf_output_dict = sf_explainer.explain(
+    inputs=test_samples,
+    targets=test_targets,
+)

# get the semi-factuals
-semifactuals = global_sim_sf["examples"]
+semifactuals = sf_output_dict["examples"]

# get the counterfactuals
-counterfactuals = global_sim_sf["nuns"]
+counterfactuals = sf_output_dict["nuns"]
```

## Notebooks

diff --git a/docs/api/example_based/similar_examples/cole.md b/docs/api/example_based/similar_examples/cole.md
index 8f717ae5..63fac794 100644
--- a/docs/api/example_based/similar_examples/cole.md
+++ b/docs/api/example_based/similar_examples/cole.md
@@ -26,36 +26,46 @@ More specifically, the COLE approach is based on the following steps:

- (3) Perform a KNN search in the projection space to find the most similar training samples

!!! info
-    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and three attribution methods (LPR, Integrated Gradient, and DeepLift). We decided to implement a COLE method that generalizes to a more broader range of Neural Networks and attribution methods (see [API Attributions documentation](../../../attributions/api_attributions/) to see the list of methods available).
+    In the original paper, the authors focused on Multi-Layer Perceptrons (MLP) and four attribution methods (Hadamard, LRP, Integrated Gradient, and DeepLift). We decided to implement a COLE method that generalizes to a broader range of Neural Networks and attribution methods (see the [API Attributions documentation](../../../attributions/api_attributions/) for the list of methods available).
+
+!!! tips
+    The original paper showed that the Hadamard product between the latent space and the gradient was the best method. Hence, we optimized the code for this method: setting the `attribution_method` argument to `"gradient"` will run much faster.

## Example

```python
from xplique.example_based import Cole
-from xplique.attributions import Saliency

-model = ... # load the model
+# load the training dataset and the model
cases_dataset = ... # load the training dataset
-target_dataset = ... # load the target dataset (predicted one-hot encoding of model's predictions)
+model = ... # load the model
+
+# load the test samples
+test_samples = ... # load the test samples to search for

# parameters
-k = 5
+k = 3
+case_returns = "all" # elements returned by the explain function
distance = "euclidean"
+attribution_method = "gradient"
+latent_layer = "last_conv" # where to split your model for the projection

# instantiate the Cole object
cole = Cole(
    cases_dataset=cases_dataset,
    model=model,
    k=k,
-    attribution_method=Saliency,
+    attribution_method=attribution_method,
+    latent_layer=latent_layer,
+    case_returns=case_returns,
+    distance=distance,
)

-# load the test samples and targets
-test_samples = ... # load the test samples to search for
-test_targets = ... # load the one-hot encoding of the test samples' predictions
-
# search the most similar samples with the COLE method
-similar_samples = cole.explain(
+similar_samples = cole.explain(
+    inputs=test_samples,
+    targets=None, # not necessary with default operator, they are computed internally
+)
```

## Notebooks
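Besides the optimized `"gradient"` option, the text above notes that COLE generalizes to Xplique attribution methods, and the previous version of this example passed an attribution class directly. A sketch reusing the variables from the example above with `Saliency` instead:

```python
from xplique.attributions import Saliency
from xplique.example_based import Cole

# same construction as above, but with an attribution class as the method
cole_saliency = Cole(
    cases_dataset=cases_dataset,
    model=model,
    k=k,
    latent_layer=latent_layer,
    attribution_method=Saliency,
    case_returns=case_returns,
    distance=distance,
)
```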
diff --git a/tests/FreeMono.ttf b/tests/FreeMono.ttf
deleted file mode 100644
index f88bcef9c138ae61473f852d10803c195601d51e..0000000000000000000000000000000000000000
GIT binary patch
[binary font data omitted]
zV(5Hr-y3=_YFvGm={#-IXV0Fbhq+&}W$DtMqy5 zLZ3}KW(Vk8eJyLL`>4+yjidAZk-Co6I66k@yxlP!dfr~U&eMI<^U;|4OiGK?58!;> zqHbd==v*DIIqGMe8PIu$p&VyJpCcMi?+v}~`$Xuz7a=optn;)_ufLw-DbSoWe=Rk~ z66pCUT~qf!*Euj!*Lb>?(!6wi?bCJj+|{|zYnK2Wqj~7Mx`ytT-nZHwi#gEq(sjIb zg}MVDQlb4?R!7XH)c6`t&q?Raom6Tp<@I~(g-c#G^7@ z`=t5mIZiD#H{EYNUp@B}=y^^lL+fbMTy$>_|F1Ii9rewj`>5B#EA@Qz^`rUdIeBH; zL_Pc&Ro6Zcdab6Ep;qpnHxMbq_p_I)mz&;`dL1X{r04F%nod0v`dZMv)ttOi&qdE$ z_eXQy8Tpe+Z!K?MG{+O*?eFALN-ZBVS2swIk^BDHy(Pve&{?s7tL4uH5aWffeFn|V{8EL72(e@*HixhPvJV~KInDR z*t%|dq<%4_=A-wsmg)fL>q_HiLEE&xFZB7jD~`pb(01)V4!VYStnt;U(0<*A3!r08 zgI-t7S;u^a$1t1kqs`(i_tuEU$T9DuGTHaEPoEg0|7o~U>y57pj@#~Z?;B7G8a387Dl|Idc5Or1qWJ<7dEUy>O}Zk0SLqD2MSR-kzY>N(T7+R7d%8#5m=r zXu_4m{uFD_1br53o||c1#^jws8zQ0yZ z-p{G^D~R!1yuJ6zIOceDuk^0R=yQD<^t|*~pA`o}V@0D7I##cluAdMo_oCF? zw0&RbT+L1A=-g!JGuw-|AX49*(tA(Sb#y;w!iC1z272BaN9(#~8gyMR7tK}AITc=> z2UGt&G{!N|`8sYsbd0X)(O8Q+36_an6K>7nk8)AB~>9;;61TyOqY)OFqu6J_{0qHSf+XTSEn zhKr$dc0dK1BhMTidr5@m_h;xF%}dMsaax4#w~p7>K{xcC(l*WOP;3E>@0HrFa~TuP zOV?R3aV-C!RM$e=HI9mg?z8TvzGkPEdi@F`?ONCV|68TzsH8(R2b#mCO7FhhRH^w)-A8Z#eyP+P_4W8$rFV@sRi+U`*Z!?iulHPd zrQS1o{9C2Q)pPbrtt-v_oL?w47p1XvTpsj#>$+N>TFNHcr;hurV_j3P`6kNH7mNxMd@=7oFDP`z;=IVLqXS@td>HoKnbw4$S|FqQgevR(IuL$uh2>pN_JP4k1 zA-?w!dI>Ku_xfL^Utfkcfd0@;;8_w{$yj~O{EE;&8TS*O!V_r4Luki)%zZfG6w1>g z^(IPv&Yj7aaZ0XPXc!u62$~D^pWnyydQ9cg7Q;B6zu}zMQtD?7-Sc0={TR9%x;F#A zF_t*t{Ct$0N0{ce4m`Uf>(IUl;5 zb-i^qQEu|OT*G|5E>rWFa=gj&Ud{X)CY|>$)Tz za6fcEYjApm-cL881jpd43E}Ur=;tu5Z@6FYwJGNoeqY^UQhVg} z7G86ZwY%uMIWoV0(zr=+LVsnPULQTDnOKY1*WK8ja!=^z zpq1=Jh_W=|5svkm={b<2u_YzXpzwL5QpQ4KABw{w^cr3bjiGhD*4IalHBK2ZKd02( zbRYCwbYHYj>%Vqxo%BD3C-FD}k-E0Ofk*#~F^#(J^-bV9%Zt!{-OHyVrN+{__WvEf zG+%RPU&7y`xhzumUN3qtX?;3$Z{|SHN#C!tq5b|yspmHX`kw5~i>AI8G=}yUO#GY4 zhA;TuCSRLx_&X)vN7?Yisb>zYv|_&oulWP37x0+Wg3zr$GZR8 zZ$o3~YfI_KJ#(ECYqJP}((>R#z`tmmi4 zTK7t=XGM9}t^gMJ8t!s{6p3|x8KI#3geR`~W;ea^$z8xN` zd3yV&;|nA0I(|O%p4&B2>VE36zWy#k6>6Y4sDmT4uH(E?$OmN-SZ|Xp{}%D z_v|%Hn?&yky{`INUJTAz_T#+uo_YlOY`F#Ay}N|^0ni-v9QC};m{QkcrF*M>g3kBy z(z@O+9=&d-AQ)*okW%S*@4U41#%rI}wXHN#P9Epg7tyD@<4u*bH!)@(+7Fr3r+cOJ znWEl+uCICOIK4)CzTUA{UPaqv+OB)Osd7_!PHorg;L$zN*NInZY!x4oK&kY8no94{ z$#k80(AT_rC_>xMhpwrwdoQ00BF7g|suQ4XUJhGQS9;C#IjVb>J&E3XYKI8#-hY>4 zy>}X+v2?!Y3hF8!dLJq;AH7FaRisU?jdCKMp;X>H?^@^@TGt%CSXyek&iDHD{PZ=Y z$4YbLH52|`y51MNLHE=b1_k_mv+VoiG>+!Gk-A=#G z(^8*B8cVN}&eQtj`TFcu+ODznI_S0cu7k$$?uDzVd%5WSuk`&_uZ51&XR5C0t*^&A z-h_<@kPY1@rTed?UPCQ)oR;1iT5cX`*FMd0GQDnkKkGQnL&tcCV-)_E@aM?^`VXUx z_Xdw%S3`5tQm?=EQHJX$O(}I9&0kHAn4If&v`s$Nx<1=g+9Z1IJbDhEqp7P$BW6PYi$v9pY(OErLL8V8k~-EP=s?)hBI*qG+t7q)Vk7h zN`V*WZ0b5jkC#DTqo?5*tc3RIGf2LndZF}wA6UfI1%1C{+ng^dg=K` z#&7C4+^*M_vEgezCH{Zf@5S*-Z~gzTmwKMzuNU?Fc};|m^|hfU?a6=IK6&n@>fxA^ zu9J=njp3V#wY5L|+SDBB3-7JICL`B(YPqR({+)JR=jU<8MBSVbS$A^Zy{e?EKZxdyB zZ#2hA=dN*oer`^l=be{VzQ#PSKDnGcN8?WJ)B5CotxxUO}pLZsB^rk9iU{~GTZ=8f0fu7rNRIB$|1>iQm~ zYi%2G6s7LR9+6V(yf=n6Qd*Hx?@xU`dgqr%UGGJ`SM+_tJ3cIOypmGw8nK*G_e8%p zbKoZE`?2P)rRQ1dO5aPSM;uFeFm$Z8>wA>mV`_Fp6s6{?Zi%>EZ&yI)gE z|Nr{mJ^0^y;QycZfIbgB`k7QeTYC1Vu3xk0*BYK2>iS-=0QW~|-(K*>X#ahf`b_tp z-(IQDT{RNnJ%2S1#^`64aBkl7PoJ?$-;>{s(65iwAvg&aL7&&flQdI*0E?l|!dcMV zs_`0b$7kSkMEEn2P5p@fp(B|$|9?35H`nBQ!~8th#$?RHk_g&1jH!ur!=D2+2Ynw| z0)5}m^WGN^;c4jmx4yUQd+D#yz0`YPr%9#Go9v27#~NRcx5T}Z+P(3U$7-8i2aT)yWnPs?IrC~yAM;}!HNuYct1B7Ncc+NRGojT3-Am#=`{m)frNv!MNIId+J|r3{a|l2V_knzuTO zXTL@HGCv*s@Wfw92%iPNbD@8aK`f-h=KqHIgAM!_6HBYs(kZ8f+ikm(pX*JhF^bkp zOuzX%b2sZu>s;$X>k?~~RcGC8yyo+@yn&4oSK+>7AsHlD9+7%#>Y1rm%zJHK&%F2MeL3&DdE;rJ1@3}v7wo-o z#=@-^E?ii+D1OnLMTv_REy{SH?!g_$oUvWUvd8us+h;6y?76WQ#=aN}ZLHilzA+Tq 
zpns)8{QR74in*IvU{!{9YX!S?t#z-}YkgpiSnF-E|7fprVx4WA6lW)Ask6+viror0 zz0Qd1vRiZArS8|Uz3kT5?4~W7=E~frL`jTK^e5&e7DRUIgv6S}ClX&yd^ho<#D6CK z;1_>|ztUgtzuVvL@Abdy|IojN-TFt8B+W=lNb<8=sY&z0yR}QwKJ3=PNk=5rCk-U6 zWw(UgGLwDmR$Ov?azgU_c|-I5Iq&;4Ss>xvl7*WuoU`zVMO!Y~X3^Y5Y3x?*gEA&#+m2O8uuHX3G#t*-r{=M(}sDGAz^HlJm;Qhg- z;GMzM!P|n3!CQhi2X6{C1aAmlAFK~v8@wiXb#PVis^BHT>flAeGlR>6M+XaohXo6Q zhXxM`?iKufaF5{b!K~nJ!Cixy!JUFT1{VZVg2}pogHue^@5cC2k(bM~6E z)||2C#5Kihj$KnUR5Y~jP}1kiK0ogB;?H*(tQN484WY4}Kps{QPU; zNv_8yQZh#Ws*J(US(v*`FutSY=Qyl0Xzg93i&@m8DCbOYHlv)2qiNrql6Bp8 zsOM9D4f1zh);uh3tOXS)lZcF_MNg)jCD%NW zYv|*8C2B0r%SWulAFz=+@kmqrjP-F%{1rHhI(z1?#I@AP&0miO{-zZ_XXNKz^FK~K zma-jBQQw}j7jIFYM@bI;_oy$RBo{yTesntJ8my(h3nk~TqIaX@I{V2pI!h#pe3GK6 z??pKS+-K43Us3|N2+^El5=kd*N1eS(N|_K{OqmMSk7oap=3^ms_AhDCgy<6~cb*V^ z5+!*g{hl$Lf6_k4rOxXj>7WVGXHs(Ak`AH0g7OGlO8sm~_9W>Y>UU8NfW409{E|4o zq%WwiqvV>Y=r2V`SGMEYMh^?`hZClKOliUunf3{#5A5r-A9Rh*ZJric^ zN!bhTu^D?&vS0Jq;~9%7*^7CsGlOe3?|Y0>FOcvD)wB@xBH{A{T>b`|)>&K4Jb`lW z2{XxSA!8QKpnVx7*J2@iK9l?w&H&!^-(78X*k6d{stvEj&WUQvgc!2$fo`^ zCFieZa?WEp;69$Yo^n5MKhOMul6@I_j`}ENCteVl#p`_RZG1uf97^_SjQyEK{u?V$ zN&Q;N^FZIM8!72mv7BG%M{J-zM+}R%F>5Y$Kc(UZkKLQc=`>D2jPU0eo6Bs8lP&oBa9c^dY%SZ!97&LE zd1NPwp9hM508QaWO_h1v2J^Z63;CL32U*15jmwaoWM}!EWXdkGtL!FOvb*fT-`mKR zJ!LQ1TXJL{*;n?HT-je1%M!_x1LQzCNb==iIYbWSr!fzc!{rG6gx`^JlpHPmdx;z) z$4aprC&$YPQXY){Pe?nzW!b^kSkKCHz6a%ODV3AuG~b{2i+FcSh5RA&!Q2_WB-@3w-zb9`HRSE2K{T zDA&j;xlZckdbw6^lAAfh23ak4$nE@8^yy{|UsG0_7n_&xd+wK+mzsYNPAokjvkwX^ zC@2aXc;3>0B<~#`a2FQsxis8%@lt59AaSB?WSp?hvrkb!t*_(uGAI&3S$49L5!GnDf=rImt-@XJJub@p((b9ZQ!I z&q<$Ae9R63H~o?Aj9OOr*A^Gg2?$}L(jQ3)H|6}?8?huGh_g}1wbP?Yl zu(m%S*&nb|4-7~_(W;YIE%R$7Yfe(q(mAWbM~6=w>4MWHvWuM)o5Uj1)BSISPbVhb zzhfY3VR4b)za)9_veW%V{?ZdCLg=^|x-v`qSNWH$TD&ZIm48)oc)8?&99|U2VI=3L z%LZ~z)&u6u46n53yYWd$bNuhFVjGxr0J$9*$&LINHY+`uFW4p)Pxcobbi|ybfU&e_ z6(@8+@~UM2ssmOfFVjr)bo75UpIE&Nn{yd9(-Y9j=2K39M3WaMFFU{$Uy;L zLD3_IaqZGa7VEtdh~-9$JDf_*(rvl0#e6@+^6ww87UmiOd!n*55XW;p{5K%y^OQ*C zsj-OXL=w-EZFrXKM0ox)il^XNYnJjkm1m~QbKK|SJ>HKJ@P0LoXZ3X6NT;{+p2z%{ z95nMjM14j*&*b=t=kly!o|&|7#{0nLReS~|&g@ZEJ~a$YtSXK74HM_ zBO+T9W1AHsbI5BB&*^RB_-tzP`3`>S{w6+y6!O_46a6B|1H5;4^1fZbdo2C)Qh0B~ z{49{e0{Rwa^R1dnksUHX+oBec9m!|M640OFM-c-2ihi5O&J7~Jqc1ZT#NVZZKS$au zvKzAUK-=z3B6~3J_l(P~64{e@dyb0iH6*fkD}RqP0sSKT5_7*akz8`i?G@R-Ok^?h z7LV|^JJT^PlD7h^c|f+vf%zf_Rfyzwi5xsEa!9Voq3t3CS^OQ+ou&XLq8}q7XOYuc%sq>_XBVOztaCQ&oXt9C zv(DM9b56BLC4H6jRnk{UUnPB&^qt#{L5zx=7mGCHh@4*}Qbk`CeO2^bK;H%QT|nOj z^j%PaS~Q^@jJ+@@a*-e&8OTEkszjs%zmfTk%x`3VBl8=3FobdbL`R&+ zZAButH=!N92nuiIh(`wUP=YGdgSGD<=AE@7cXgqUKl9KnayRvRf+F{Jh%^t1+_zrj zej6ivwq;BUV_F9IYr{!^YvYp%${X(E5F5cvxN;=ND#0Wm))K#RzS z<)}sjTG7Ry%g7M%`b5O^i8NVkA%|w23h{4%8D% z#qjrvku)wwa)uZwDd2c}a+*gD^M=JpqfBFb8ZqZHcR{Tf3u8e}>Ey5jYwlPf2Jg$p zPBGwkXVoXh?^ruCAjU4_w=3)KnvW7xim_XU7+INO?2bK%_xo}&vJ1r6lf3p^FUDRy zV(i@_MoyC$`%vynj=AKwKV$dTvR917ePS%(91o!Fz-%!NV$8u+VjR*hMnSw7hYgEy z1jk3R#!&*=j_MNQXyz{E_!#0GTME`Irmc8LjN^!T{D2rI1jHyQz-qLKvCM};kjse) zs2Ag;T(Cwd?WJX4ZfQ`AlbLri^G{wc#wj-9kb*4agFQH;^ zqiVev7qHF+xhO(8YQ(rO3;8Gk=Xqf*hb7XrV*{^1i4(s zyvtbQGS;|^c$a4(2=e=54I0pb4)kIW>%~~%LjoATq5$M}MK(%M4d$(kiSScqf^jPu zx3UU#Anr=yt|actA&iP~lJ?p{R6y&Dxr(?~5%VfyUX_Pp(0>(kuWCdq zI?;z9n6rvGtH^5=>#m~hYSzA*F?A`(25Z!ngBW$I(S{xjU|5W6n0L*%7}v%i0qJ5~ z$J*Bwp$v?Ne8ORai9$SoiOHnCCGxP3akM3v9 z``g5LphS$81gsb1!4@$dY82y70d!*!X8ot*LENWvP$0%L)nasXiSe9(zUMNK zC&u%+C={cUah;6o91-J%egwt%Yq}U+m14Y9izYE%rhJ7suhd~RTE*z5t-BE|XcyyE zV!Xzf*BJA+3NdWV}SmTnEP>~7@stQb04e)$Ac|md|HfhF+O9>&zSo;xesx?rdo`( zj9*84Fd)VkwEu(lFPp>|X5Mfo`o;L#k1R31$priIO(7Wf4f97>_uD=(zKcUT=>LxO 
z)-&dN#(dA1AF9Oo7i;_&E5=W4VvI3oe1#YrnG>qvdxJ1q#kBbU{9sp$>Bfi|)qrNS zp$9`^`pPjZX0(k2F{jZV6OU0bXAEFm%$c;$%tSu=!T6c9&x(Ox%-Ak5H|M9fXJ?5S z7boTx>0)kKCgxW3Z^c^iwPJ2vD&{s-V$P`(GhtZFxy54oGsN7^CuTA+Qkcir(`G8; z=8=0EW9L_kxgcN6g%x6^6Jv)~F&FiSnGrAMP9W{xf9J}F}EOOE?BiJ2P^bN@~;7c+ke4p=SbK?z7l zE{es>Zx-|5E-?>b{X-dB5ES$97%`7vox(gZk1P@MC~`i!o*zJI6mu!@mu907w_OzC;%~!tw1fB(2icPcCmot?_*yYf$ilFgB{n$EWqD$rNRB71=G!nmVxHV4<|))q4T^bMhM1=_{tubRMIlO2 zDQ0<{n9Em)c?R_}iO<)}=2_%&cB7c*G>BQrTD(7-=i&S&F{_Hjynt92lE+1iuO`mL zwwQcvYF@%xmyU{A6A<&VE-^1>ecqqU6;)zhL476lD~VZKCgxRb{J$(=d>wP^m~%~^ znAZ+ry_nZ^i&@_<=JmB=-q0cDjnr>s>`lbsnQY$Fh!!z#_8}gubxXRKjb&oqS}Nvk z^ZkoX35PyMKk44-mJdP0R;rf2c*w zKgD5G%+?+;AMO|P&y3+YYX;iIeAEVQk2QliZCxPl;{{?q!SR#zV85Qq6Z7d<_|YKd zGb3X1wVL@XIX>qD$2?Qb=R3sgq^+}6%oozZUcJEh7ioJjfN?SZO02&U^RGcMyK+#7 z0TAOQV!YH1a(a14%vYG-O@DWvn6K7hSj^YT#r&He)O!lhA?E8vAl@6bV!l}k;`Nq@ z`S)}&-%7xsm~V4#ee}J<8t;sY`EHe%?=_0qUn=JNKD3GXK`~Z~`Jsq8K&+3*`4j4& zq=7X)Va_LYVh-Bq7V|Ua@!T~(YY_8u+WDH#T+@P1^n-I+%b2x(u+G|QFm`P#x-cN- zx>_`e8H`5;n$aQV7wza3^B=_glJ&l%9Ht!Z1NE<{f0ckt zeIxXZkk7ZA&$s0Et=7ryyI7>4O3d|X$QJW^;(b2=`hQ@JAL5aY92A0BKQR7>UNQg0 zdjIMWldt*A9~t{&f|x&vm}A5k=Xkt9%nfm1+=hUd8=1Rt1xCdT`M~&4i&*@di6z5g z88#A-iCh$;5_M=s2l~KR6Bgqw##@ZH@==OvG@un-7{Ge5>=>~e{`JX;M+Wjxf-2O5 zJe*GSi{-YV2SZ{-u|^bYM6pH`Yebb}1)9-;J`9WHW1Me9tY{z7kOO`@h<|IgqHED4 z*0emarpJgC<3|<>P=*>bB7klTVpOadu}DLXSTlPuBG#;Ww1^cO0An{}-e%M{qrQ0t z@=zicU&C5)wP->+dJzScxNIC8c0gtmFi-QtHLp9;w7gW$m<5 zvF4YEwV*((h4iPB%MMv$Eh-diN5*6{inTNCzoX2o6Kj`tvH1S9wHtA>u!n%-Z07By zI>gGU5o@1bvG(f}E0;O@v&LfDmkeQ4tUSi$5j!sfIVce80OB9WIq?2#<*yLy;0m!0 zX%XvC%0r2FXg!+5Dj?2bJz^a`BGwVz=ojnAEU}I%7wc$Z6ft*cyI9Bi#464d>$onl zj%RF%U{tJSeISn$2QVyFDdS4xktWv3Wn%IEXPsIiR#}x;rxE85d195finW}*I)ix? z3{&Z7V92C*uccW$v*=f#M1KJzX}5bHwLx{!4)V*W+l=ohPcT&zp-#kw>Weq@MM zQz+JDjJb?4mv@Tw$5OFY1VEjyv#cxA#i}KrRXz~!YGTwe_8RiJmU=xgu5T9WhAy!h z7}F4s5(LG%DG$u$dz;qH%)KQ6O&AudF&D(=dzsd)jJvfSa>OlSWSmdKttkphf z$vSroigjliszLp(Y>>}gqhd9&R#UHpf28W}3d&}&?y*q-;@(3Z_okx~@oMbT!)0gCQ{QG2%Uzj$9O@62#$s+%|zPARC1!#|o@Q8+tG#JoXVU)=TugMBmHRAl@tF`wDr!LY}XX=PTs- z3VC)jzMJvg8OTEks!)#>bfO<4V!i4U>u)(I66gZ$ngzc*~SdBLHU)BZl~AJFyzZ3C18lphzO94oLIZRo)e#>M(14(Z56F)C4qW^{=4DeHVntWSyc zDeHX7b>Op?^_d@8C_ou%(1-xKF^EyIK95D3SVN2(V%!kph8Q=*xFN<3F>Z))YZ$kN zacgo=gbLK63GL`bP^`6rcw`_CC8$C@TF{ApjEJ?)2S2hUwCqZdK3h6VA+Kpsj^g?hB06a5$w>nk7p$U*_iP=iJU(2YTiiuH9Y(vX89RG=13 zXh$!CqCd)NeG`uiyrs7DJr(T@?aMttxi3k4`c4H^+ZHwH1vkJ82>4LK-61!~cR zcJv}B);|UD$Uq)SP=$K5pcDNV5$ii2{K!H9%20zw1kjB^jEc2B7HP;q5h_rNCbXj$ z!HFO9{XU*D19>Px73$G~PV{3$tRH;vBMSv6Lk$`cKsN?4D%QVZk%k-;p#rsNLOXg9 z6l*je%>9w^KQ@T<6R+!^cwO`TAL}Px*FW*P{)yN17_aLwUe{y1uE%&?kMX)5<8?j8 z>w1jW^;jQ<#o}uPYdis&$VVxv(STNTVF2sJ+7N>jWP`CA7`uV78yLHRu^Sk>fw3DH zyOFUQ8M~45+c+XtC=Vs5LOoj0iGGZTEk5{>g*>$WUoa}RL0lsq{1q6Z5iMv(uh?cf zR$xGED+>iELk$>fXNv9cYao2BVmll=<6^tDVn<~o|F=aKi9NkT>=<(3>kd062Myrd zVtT}$!J0Gbv0m(1m14)%iM<))HzT*r^Ux{wY}T1wE_Pfl$YBfSY|$t7R`hLMBK9`q zG>3T!O=54G3Fd8E493i*Z7%Wn>~GH<5j&A{NsI$A5}BLGyhQpF`^EOt=V!jZ2sN9Q z6fbsCx7gb;H@Qme6xK=^6nlGePHh!?UIA94P3$z*N=p-aej&3QfEdk5z3z}$>Bv3E`p`*&GlXEutxE9>%gh`mQZ?CfE& zb27!=Ck;7b@7p2ve$3rJUhKu>u!Q3S(#1ZoM(l%Dh<$LU*oTaZeJEoJM#VlX27VCZ zFxEY+7>qrvRqVs_q2-|1M>K-^kriSeMgLI^VEv=|5ET1p8?1SBI&whnOIM3sTp;#w zxndvRC3Xqz%UHLRv88chpUirv^oV`xda+Mq%pX$3F3%Esxeo(kpFv**G0$QwUvt=J zv)(zQVpkI5JlfA668i!X`$FPY_lbQ8acUTMStVAW5#)SX2Z(hUF)tet`*P-79t%G* zK-(W<#J-{)&1e^UWicv1oRuvg#!A*$$r@KOmakFlEAvr~I<%q(!(!LQARW0V6#FXH zyQ)jHs8auSNCBE>&3pqMjUFzzLRzCWXzqce^-^*O|&vx9us;9RVn5g*_Cs0d5&KW&Vz)An@A=se zQ~z_e*neRzUtiddkoO~fVn136<~>#e=C-AvP3*@R{{(ZNh_BRV*izSUG%@ixR=Q5Wn#WO 
zEcPp`_sV*)yPL&+wHOs*zm|Xwu+D3Y`I`?zV)s;`PVCnyUmq9yO~2T^iWt4D_4gF9 z`Rr`JMZC9}(^mxg`l`j|bFKXjvER)G$M4po1;l+X0U78(kJ$a`$VIW(?-TO_Vtv4x zAM}g;VT0HMv=6Kn`=d1Ei2bo30mS&2c^`L){YjbFgXHw-sMwz~e$5K8gIQvKK_8!~ z?5~*1_wDSjN>Gg!bYe*Cuj7!34za&s44+->5$YqX|1I;s9RTO}9qWCUC-!<`@V;;J zzHk4FvAlQN|EdD#I7;7-rDFdS561sgfC|v}6ET146?-gzZm{;)xY*;2<-OSEz1SWv zLnGS6-at&=SM5+5a#4&*5G&LwjCjJe{mLcffx&EUr3CF)o2ih z_Z26dwbO~cLk$?aCu)uT^XqV zj#!zD&m`6^oYO9G;_OO&*DNq@H|AxrRu*kpycfRy%qyD9-L(;{4t(PB#5} zmWZ=gsW^M*i^F@IlT(0l)Pi~YQ0`kH&VH1+`$VCVxnrgNslO>W7f;A;aPv zO3sI}c0mJL#5s)Ehb4$}xE~xJA&5r?hZ5;B^BF8$Nh;s|=jp-;vHRx-ky>Y!bwSgL!w*e+TV%(0*sWICtfu zU!0}_41oOaZWHGo_WT~k+{-%mvX9N#s1oPCT5;}Ytro^VSS`*&)cM}0^KiO2e{MjF zI0426%E9~qu^)-W3XF^MXdwdPJeG+baoRX;8x-g93^3+##y>&*i5m2Y^JE&RKiMlz zdkV;*o%&OLRH9p)r>Q^9+^2`dd8QPD;&kMpOPptO&??Szv0(0V>&1DV+@2?PX939J zg)FQV=f!x`V^o~KmS8}ft|D}b^Ac;n#F&>E^KuP>;=ICIuMCRQoey&2vyJmA?XOi} zT%5nviqjK|fHc!zRjq@gLy*cO==kKiXcVhp2NSwFQQH6F8<89V>n=yTy zPakvMNkF4G?+Q4NcUkYf0(6Mep9#jiPmK4O^8tB%&?C-=6&Mz0piG>PSo@d_4PJ|7onC)iuVvoa01YynZVJeGF6$-=(ohKU@@Js|^gW~STyxe$k_oshxuDDCcfzJ)@0o51~ z_dv!TNc@B7Kd4XKeA*B8V@TYBW^oUX6ZZ((3R}cIx=P$4`ilC+J*Gt5V~JHw+1LB@CEN)q=xThzG`-fg}%e%!rqg328iCxho?%8$X zp3@<2CF}6M=$>bbdw!Ols@=gi&#?XT9s!i+e*BSmOrTZX`}axwtpwp+nr8Gtdj>+``<(Sfrp` z+*`@#R>tw~c`pB+=icTAV{Qw8Shwe(7R+BAj}o+CMBF>FP=ju9?~Fwenh+HCt_)P6 zQ`{yW3ebo_aqmt;1=_{EN05hl^ox71A7uz&RNUqq)S_41`{Gf87L15{e->)cE$#!c zC_)p0;w)#izIjBXixSjDRK?_F2<$c%X`v>j|-QvC&iy|~3DDGb~P=!u$yL>1>BL>BN zDGe297x!gB9_rCA?kj$jA%IbFyK_*BUU6TIM+sUmBJOKhs6n^5e~U#Cnh+GXCj(XJ z6!&!>3ebo_ao&x^oz^)D%{>O1TZS@-*ZrlUUA=wM+sUmBJSH+s6n^5 zeX%G)6N2KtlYuI9iubL*g-&rl@u2{X7!-Ff4HalduehJKiOXjMm!FYxKWEP8 zJa2~b&;p(xYj~EdsRhrTHOyJlC+=GM*Or3y*Xr?zxa(rz2lLi-fwk6+iyMqbCRj7L z0!<+97mWXc7+>qq! z%)b~vnlJ8;0nnE|;inXrEJo5s6A@k*IB1mwyk4noBG`cSI#N zN|fIR;_;ay%FjGM{ zO7$Tg8OTEkDo~3iw4)n?7?r4bvG5}c1t>!m>d}Hu^dcxxX@YoUAP*&AY#L+J7@Nk} zw0?|8)O;WO$O2>M4@uMl$^}IzLk$`cKo5cvwa^B07BXjH0T{QCaSMr`UV;kLq6zKj z1@qEJC29vBx+H2*C00n(j+7ah615Ze>Q1b)(}+av%$S{t&-bsQcCJG+h_iDah_my! 
zMEx!nIVeOKYS182nUuS5zwJWoT~?zFD02Yh2^nJ;|aqh76c)Q)7yvYIVhvL)AX_jqRP z={?gjGnq^>Ei;*f5K<q@tafV&lLJIh5k%!$8{3d4T5DF-#Hz?)rD&m7d}7z9IoF87Uf`?+KRK&*H*oXYt#!e-tcp_}pAtux#uSESs!?Wi!5O zGuk%e_2wB|hj3km>w;j}g1&8eAFd~HeFfKZxPBv8w&F8eW4QWoZN_y-uxxkYO5++8 zEIW1!mYp?R_{=V}?ZSP#@&4}XaQ#NG?7`>vd_b`5#pf=;xG$N-brcuIaS6t83Eta> z*Za;3mi@QmLfe50xZWmM4&vT}`*7jCgZJXXy$7Gh^=(`~7c7SaTwz>2g5^^D_Tia} zTvrK}Bk0eOcMF!I=)=*U36{(7oyX9>%eM=bS*!$RAX>>SWrDKG}(t0 z*{fRo!^29@rwGG}(Tm%9di~>Kr5uz3W{VVPPmQs0*58+HPw`j%e!LYh7g#FWH(>F$ zc{6R^wtMX36Z!4OHXIH}`?hQ!ADanC?~$X4WCVW_KNPL?2>V)njp#MRJ$u#-ZSLxj z<(bU7LU|&EXZ3qyT`~Osu=r1}cDIQS38L`g;!EtLcp6o4T2axa8l`#l^Lb@TqNKzZ zY9XWG#gu9f^R`gjTBx-cg{ZNhCRst~>176Udn#w>%l7vT&|qTBY-s8^+vyYiKGDZQ zUQdX1yF!UX$Q4W^#7g~{Sd6U~ouW4w^dhCUo``qG@n1l{-pNMT*Th4D3DtW>1;toW zY?Yc|fEH3v%w?uHONwcsCK`nl8bpukz&cp@E7e6yC}Psh!|0vAVlk(3gZ%G1Z%-Nf zjM`uE(5vb6tGERG6NN*IUt)h0--6$oWS0y0`gtZg`qQC`>M^`9?=4MCrbFeb(_=5p zPdbNe1;tcW{H1wcacrD!3gIS{#LUx8AwS&|l5tZzdOB57+7{G|@r7!pjo!$VZS=Qb zE2z_~pbSKxnRpK6mZXB+t=K(EC+yLs2@_%eHBrwrncX)l_NNXaQ%NI-i17qV8 zlZ`uG^QcTlu`H&+b_>V`G}AxWp6c%#VU34@vJ{Xkl|j%Fe}caSW}z2&&_3dCw^Z1a z=U|J$XLWimk>&lit+KqsBW7C;dYC`p*)PinZQEsemxl!`uGm4_!Ss)R9Cg}Eh960R zTcyB{BM~P42!BQPqkwd4K*F15BYWGepamvev-lzQy!e1H!;TA83EZV>#~;O0Q4@w2 zs`dn}qTPk%1t00EsMBaHPg7&LWGgUb9g62Hs43$M^8+c1wVTc8bvFA%k+v{$vykbOJDxtV|dS@7(Md_qv$X28%%g%!i#w${e9+F z4}S41zbWmR&!n@sDV??A8PP|U{^Dd*gVB`EWOI#mWUE@Rq)fG}0m~a28y;4>d+{tW z4PF=XD)C`O@TpRiSmAUTyK-n)NqC>K_@f>DW9blhV5YBV1nU^=@2iyKtd)hZ@^J=s zm@BgOlvxV28kZSlk>xZ_iJL>MtWq9epD(W4zOJ}qLJG8X$)Z0H@EMGA#lAV-d|-1x zYM=E7B*|+qTrxPh>+jQDEq-aRYhb&jPnP=(J}EAb%0W3lR%q*R`K8g~$ad?SWcf{E zYtSRh+vG^HXN-7+Nw{S3HzE@+7wo7V)+Tg;SNx+;g%wm#y&!=Ui6QWTngfuj3oHmv zb8-GOXLx}rVo3pFT{)L6r`s##_Dq>QKHFcO zqn670N+p#k|LaW#r^Ri)Npx6UoJXHpe3AV~w4e^#kZ_|=HPFltV&?H?LG6SBkSnPv zJVZ-s3m#fLia)vFr8Qy7a7i&QsC}5{K9B0eJjXH5YaqYWK>e+1A|2fYm7ZKK=aeWvLMz#f(;C6yvl3|(UbNP zb2C1b?5&CJwIh2+`cj^`a$B3X_FE;Zm@*x!^^+IfAl2(mG{nT zzp|BnnDcSrKf%WjL2JAgI*6s47EZGNb!}c76*wYi@{c?IxO_m(Lz!xOvIr$ z%PPa3@RqC0MDH;Px0x6{HQAt{MEHth-bp#tOl2K-I>og*03!k{(cZ({rv-+81g@qu_?Nt38k^iOyx3wb`6Kp3leW&vluhY<38i z2&c8hPlDFohZ>WupfwNYHZ-$riPx)M{$)0%50vR_^C$Js*s#A%Y=hNpb&5B>I_q;< zOb+pBJV3ZWH@yb^e;zuz1Jx?WgerW91=SC}=Ol?^r-_L2-}B)2JRUU&?uMUMWjqEw z5CcY=-9TahM2XFs%8s!-ytw zde^(oy!G(@V|VjE`dBgG=f&gT7vtJ|a#CYu44bnL=K}qHl{q;@89`A_qA0=XC(YMM zbfs|;=H+BWi#Ni&x^g$c*c2$f{>51A{%fo*tHW`Zc>L9`LV^cG6B_I`$6Z=KcB3CZ z!n~w}8-%K#W(6eApVV43(@>@vGSCdQ^WEQy8EWPHUkR60FJ76U)O)F))HzgmEU2y6 z!m8Z?N$K<|hG7L8+6NKZ0tHvK2hc2g=LK^(i5Z1;p_x1Za|@H&w4Kq`Kd4)AWgDCU;m5sg}uX-T~2e0#mN@BPn?|T-nQq9hO{hi8U4|^@l@Z|(e8jl z8{bi^J1S&C-d!zJJv6=u#+Rot8R?0UCNRMYr{&rmyFcBPqFY}=`7xCA*QQQ>_D08TiigU zq4Fng5!?2b+}_HjE2gp|(>L!Od5hECxwUuZ^3!v&{P@;2og<;34Rq~s7D}_D4(XuKM*-;C7W+m0SXCnP%bV+-f; z=r)7!j0r4J8ieNww$WBrg*IBcXq=YL<5k=k2TPG~?DrBI#7H#uLb6#TB5kZ48_kVp zZGIMw#a@~l*>e01`*T|=NvGA)xwSlZXd62gkb=w=kiOP>ZugNJ!)-~AGn+lOZ7nt% z@uGV{lXcK!9QyBReSK)81{!IYo`h*221xOmGe_&BI*BT~ONz9hTCqGG13stC~g{nxSw1ViYo|;KVN&MX0gW3*L zn_F^rEF5X_f&mNX@{r+UA`HkgtcYG!u#*zW`r9;_+9ovwAA73)!+^xz_?H$)15s@A zwZ~$cr(S(Ua#~GR%)Mbr4tRwaY7>LbOhjkUc&mQumm72zj+@+=ax4S`WJPvp5*>4p z8Z6;S(xYb3Gm++=+8{C14m8M;*Y|0o$>wqk@f4@8IP_oDB7(ZYUezYx(eG0|;EL!w zseD{k(-M$&lU+6zEYN{Trhqx1_|&=go;!c#sRvKpaP78j!$aHe9ow^KY;^B#He9)4 z?!<|?<9&Vm)*d^ycHj5r`g_;S^!9&u^Z9Mt28TCal-9yDS}^k_605e+ziQ)vc4Mxy1tqqnj0IqiGe2;5z`g5k+q;!kn zzg?8n*l;tc`l#-M*E8EDC`Ny_z>zzH)|#ko*E_xjfJai_60>M>lzB!TSHgT zRJCSZr5QhD%X$k}UYa!+)}19yCQJReA3ED%^uvY<*b8`(z z4PjE<0T7A6K;g|AT=UQVldjF(v7|LM^g7Y5|0wCMU7}rt>MRHuk~O(*T{p70SWvVR 
z{nwAM55j76x$2Lz%Ux?N9@Ec%ZgN{4mal#lN*wxK5k4yNwGizYAcpUuA1zk9e2H^njR_g);oz8GrF&6fDW{}*(c%$)!K@aR8@ zL;o50nbzwR@YWxR=Yi)B2=@xrIPGHyN<}CE+Ipdy=5}W~4{_)(sr`7PR3hz%DT5`& zu%NbKXSZppZW&L@9=^Ffk#d>P`>LZz^4|fBP94H56oEU{;8c#MNx(R~YQ8{Rx!(`+ zb-2LWN?s)h&SuhdW7_Xtu(bl~Z*Tyv>*)G&Lp2)g*V!H`Tyo~zo|%JwxvkZ{X6t3! zrYkd-?49fE55ZVFGC8t&upo^;a`j(c`^cen_5Wq?N+!c>|Jbfl-^{I-f8bKUkq&Dp zUw?W{cecc7*@g+)hErgYu*ZSPgZUSfXocixRg@Cta#`c|gmI+GHI}ye!TF#HG1Bw0 zoX#_CI%3K-8NX(^VPn#ucvH>*<>txy?*_JUy(Te#gDijZs~K5-AC#O8=UcA&?8Btr zupdi-SAR#(KFc!oe<8kX5DMsD9Q_NTVtywfF-uxnu3RguHmEh~!WK z%kk2nn;}r5m^~fDJCr9ouu;Wi$1El;tB2Pww0}_#w8< zo82<-$hZ49cF;g~jc+V=AD=jYVS7f4T_g1eq--Yi{1oW|6N@j3L2(jezf7p+NfO1d z9I-qpJ25EKRh*tt&;~EjN_3T!!qT?&0dTOENzYTeF)AOa-R4-HbT_wGO|%g)N-+?l z71{zB72|NAgfw$v2?y1UH4PW%YIpyHBUL^ye8ntqa-V28v3A$>ckJuinYY;j$-doP zS3b_x`3EtQ?=G?=oTP14MFAt*O(V05yBUP(R~>Edc6^W1WOJ?{gM6$BDB{#<66ax4OBl zs>I4wH%C3(4vfoJCc7QVH;A~QM2HwA)Yti-RfJK+0o#%q#IT9Yihu_J`>0kTQxkVP zEt!LSyXlnL(D^D$Ydgk1%nieFSsoxF{Wy%oe|d;R5A!?^82SU+9)v-B>^t4Ee5WjT zV-GIROHO#(3LCuYqsf3Os6;R%A5Dfkh{=!-%3mvJQNA->Hf?c*0v_8*0fr@_?Z&}= zjk9VN7YPf(R$jAr*~s{T4&Q~V*3V&3Uy8-t2QIt$;kTc?d@WYl&kRS!(7@Ko;M|pO zOg&2L{_(zdT)*+udj{O_Vabu~0G1o;-H&~m!#=IxR1i%fbn6l!4}qXb1f{J)z;`tq z%c7|kWn3^si5>No6xRaR`#~0>i-sy<>tLQ>qtYwDD)g6jHN2^CWt0A#$9B(G=5qs*VU?3>kbaU&Cs^yDblTMz9nGq{0bERwpgq?BFhjz1I&%r zcoHef^4sb4GC2gK?|g?Qr=B1J!F*hUNqHXg5yB}eTwB&LsAc*Wz_kE!t^%W=JZpGV zgQgoG=L#@Nmn_(@Z1VnCto}t{uvk7e5sN`zw~0fdnWyCEAkpf7RZkdDsLDztET zrUmm_4bz?sfGnv`t=udFKBKriDkOu3mC;U}WbheReVh;ZuxUAG^^xglBSyey-qk8M2D(ff!j-!Zdu4ZDJOc!__EqR(aF zs5XB}kt4<`>i{(o6HyWs;v>~iG!1HEN~JNEz*(C3!|Lhm14*cEg3BUh5@&<2OB81%Kdd!aML>Gx6&l#r zTF*Whkly{K5bc)<>=)8eA~;{?&&my27C@+)ET}R!UyBgdVwFtLr2y$G#O=Hd%x{^= zbGmQ;Df5D>jmh%Sqp}P}M>O*R0OJ}r*Kuh;Ug8e)u}gRmmKJ22lGdi&n7|^Gx$9X8 zSGadkj2HMS07q$AP>tA_#un=Yf?-lYyly-dLbPU?c0+sfX61x7!ejs~w5wOX#nu-s9NB5cFu+XeEejX}{Dz#11ufG5ayr2~e3>#vG)CyR>y8&joRCLz&BUCB`E# z)KDAIAW&Q^60nx4fiIH5Op9co5t87>Umr#f9ELD(0k;W3dQSke0w_mMgHeo|;I$y3 zGYrtD7NMhx2-bmful+S+(^!>0d_H#e+rIj@K%b9dwBT2#S+72H_WOEogC^@%_APz0 z!8Qed$Ut`4VN=jI#5$&cT5m@P9+?vjQCtPkaZnXQAQ%Y+#9JsGn%PKxfMi)MqwB>E0AXf#OTZRw zm&jQ3QUn9w)^>1VGu*=D2bPjh+ynH6nuSiFdYwc%rj%wZPJn^wxEQkmRb{|FX_N-fi-db-_M=7zce|P zkmcd&J;QfhyYb|nf%p93yA3k9bmFp}82CWj+vhBbzGhuLTtbvvBw69kAT1n zRlr#VLJl}J@CY6aKo}}1-P-b(%k!3wZav}#v_N4v?VyDqpi#wHQGKA}VYFGgF~&j< zq9uYb(rN~KB8$kdVbw>Tq87M>1U1r+Z~==m(4OzXJK&#MOv`ID!R2TcfLxj_upGwn z?Y=DsxAdLbws#baBftzh*NpGrP1hYbjo22+&%|Rh>o0vzXP;LJO_ZnikN9QzgdFZ3 zo$k%`cthc}mAS*SnyJ@2GY8%}i_Pua;*p+HHsSuT5aX>5a8nyz~F6f7(1|Fp%%BZsZ+B2 z2U$M#*ARkV57?Z4g(wD+hwu|!d#&8!#KvZb^?VVro)w(GcTU%(fF5u^3XwDE{DKek zjE-ix9tG2*8sKiNfCo(8LgwG3g^aj>);sQrj8ZFW#|=@gT}6p}bLe{%2<8gy3Qix+ z@-;E1G1ld#cJ}Kx?A~*&F4_*Roj$aYOF4-5Pwu(y4SU%RV*2$@&K$aQt@fP81fIn4 z9GMVRP^*R$D1i#$BvnTXP%DflR?_@ZB5flV$s$8jSNs(n#Niz?Fvr)iKj4ts@{Hk& zTe&bwd`GnNUr%U?>8rn^-wxonXE2{h;rse;$6J$?N`+hZN|1M9o;kfSL@qL=4FNPz z{SlM+?D^mGcr%3|2Eyerk>Te=90YV$ROEVQ?7m-~p%@7ziMJEas!o?xby3`z``Kr{ zbjdIOh2AqMtwlANu;P6;%=1^27Q{5Z?f1vE_q;_VUR1muHD-E6ZSh!@xJUJvUJ+_8 zcZ-KkH!l|1q)T8qxL7S#_2fJys1xpl*T{?7r z{OmBaG05$f@yYXV&}R#W`qgh>&(X}i`aAYu{X7$Huyktk<4*}(<5%0z0ikGKmG^x;#YU~}XI#}n|kW}Uvjc@0($Wm@0!^I_iY z?7JVy$6^>rxc+_W`1hHU`ux+M@}ASYpTM7&(056=UZ^tSS=^T5@dKpVa-5B#KdL6q zlT2VlTz|52Bfo04(^fY_6()NhQ6rpxc{tPZzyr*+DzhC6^Q&OSxU0}o(y&=K=U1E^5x)``p`VU2MVymkLRYCZBa}^DCgZF~*bS9eZwg4_5BR!X zqsN%Id5lR*Nx_(*w*W9au}qu<=cX@+dDl}Ha~3i=J?571Xtuu{JTVy?zUzfC%GPI$}%Y2VN#{M1Sz6~)u=$E-va~ml3hoX0QIJvOaFVMQxqPl&|AIW9c$02{w>IX0!eu#V%L4{zGPr9e%~cPCaHs-AyyEmwo&jMq=baxuF9-{m)}08jS+XNS~%ObAf}Lq9as=?quRlCv0@-m=v_dM 
zp*LeYdu46O8f=ggsD`A{!c`DfuoRBWen+Ia;p+Xj-M0VSES}$jCEK*|B$n*ee+WqX zPu=zOohN@m3$}U7>C>CGo`yK&W3*zuLf{d@x}SrBs3aN@gm^8?S|sd0KDgErL2m+( zIds?t>kdH>2m-@8wDC|AJ*ZQ&_lU+!ZAF$cBm3s4Ud!0inrf&^orZ- zN7?nxE8lti@3tLj4I{oYI&|-42XDS@a%Sfa@WxZ<9PN`%;XHB;Xu`1-C}@diHD%C4 zV}@wTh=7`E5}j47TQxQL2Vk>2so$@1r_@RNTT?udP|yzi2= z>(}|joY`E;jzRBLxkA593XBbIaG6?gE(7L$y;#P)#}Tn|lTeM(bQ5hcdO{9gXNkLR zLM1JF4PJ=ol6#l7xe#cuK0Xf?qANX)&5fauS&F zgqF72N(1L1i7>svp^MW7CMG;fvHiW~an(C(aQW@OV zoua4RotOkN#JdqhrLxVWA@i9Y(l1?IrDsb64kjbM>rsTR_^2fsmZVW}re34c{T z*WCP+`(r3K{06)SZ2;$W*E$HvK>fM%C7|%pOJ?3I%eR-O3?u9YOxxP^5GT_W@om&4 z(2a&uQ;a%vb&=>MRZ*dSsqq}B#zs^FElgum5St(s?zTi=7c@!5O%6E<03s=g963jY z+>tjE4 zmF2a4BXnZU=C$td-*;efcYwzX3ZE3JUBpAZt(A(}gL!jC$Q4#KVyTI75(EgB*C4HY zh57|BsCPllVk!ZPWP6F;vS67?)>G>@Qg*4*UxH@@uemb@Nusrw){b0Ja^65&AuR=W za;PYgGCiattBGE<+aW$o30MM?rTi-6-Wg=GCN%|Tajd$}W zKwiM8`TUV@k@VGG;XRsURPq%bU~VH{X}Ox9=>iN4@B~k|hKkI$z=CSSya7V8anJ<{ zFcLRBwXiln;ZikBF8awZ`4nv$(h78~+RkHh+Q}DRY^NKF1+Xwmo-LB2H31eDq`({t z7`htOFhvqpZjywGW9Y*%Leax^ArAbo382@1ji#$d|9* zpvhECci4dOQnV|^E7LG3nqx_KaKn1Uc&%lGCQ=+M4`PUDF|P=9B}a+?l!F_ux`5S; z?j{+_H3&bHZ8a!!4T2KCgg`iw&@>3>=1^3dG>B$OTcJQ`jtuq>IupfEkFCAt(5<&# zdiB+tPMp}Z@ze>nZ6~Z^4&40V=uOodoAig>d=GvMey}UFSYz_~kxs`Km!e+7G<-Ob zxfcu&E!K)+JS^OEOcr6)$2D&sR(D?D{$u#L1T_Hqn>PzlN?3q;i{VhH8xN>P);W!) znbnRkZAZboBw9^(uI5|g34fuAzsdWO_qChh zKpU3LvehX#Pzf(62@WKXxU=wx7Yl*oCW&4sPKn~4l}LqyXaNpRH~yHfs+DX}hD8O=&W+6m@XrU@2x8-6xSf4^_p#AA(un)B z$9G|5Z-5k;-Z?wMOwj1Qo9#QmpFrKz#?^|wyawZv!KXD0lj0~WG#shIBiNmuCjsUL&``PyOAL#(tAB#;r>VJ4fmftPQ9i&W>#YIB=wZEGb-l6$Uu=+fL zo1#adQ_h<_61V`L$_CE=#Rp&d4F~c~0P?-S`H@Es7@mN6qv@4r#vlCEo&0U7h%8A( zLU<;j{6HK7!0J_R04V*Z1*6I14MP6guPQd3h$Iih`JzBq=g@S<>rWsTdOW; z?i5?EI}EjO!$~(#cG{K(Y&$53RRh-o4Fp*t246K`C#oVa6$}boMsXmyn&3M|LP=tc zfSP`$`w!WFuYZSO66^P|ZfFVYiFEx*gaBQZX1fO=l!ymD4*dGPu)XsXo6EDrDAr0u zEHoY(Fg7$H_V2=5*O29b30{L~YKK zfsx9)3SEAw(D`uR2tmee*@ri`hIhUz(?&v6^CJ)*Z-Jf46l+N_a%+z)J}Ml*O{4%4 zz52>*GM^WxUOfz>=!3*mnechc#ospOg2zE19GMd1p+XR00qWvfp?QIP50oH-Xs$FR zpG~F#5LL}-&RS_&d}VDelF%7c3taqE&cTKREdN7y@ zUz!n2%;%l9D2+a1b2)9xvm%|u*fa0i8E)PDaJJ29wK+!m9_}ni{;tBiDx`vAJcoqu zu|Kjy;H71O))Hoj;wotxM8jh$z-~}|2<4&0LY7q+sy9j<0In;KH&kwdx|T82R>tlK zcD8z)J2S1*BSC-C7qnZPo4Z=qjl7!6I;{TfJ*99w?6i1y_8=yIagjPh|2!hR45W*{ zxOkAAWZ#2NBL*x1n)ecR00+9XP?NQ)IoiX#RhCdS2bnXdn!_C`6)ILkC3N68SFI$R zS}4TN@C z2nq;AzNM1a0;~`pp`^<^TsGf@C5TXxV$iF)5JIN-pg?dSJCYzQB9I|;+u^O6TmXQv zHVAST^^zal00yNjSNy;RBExv8EK-qO<4nv#W18qNwMYT4!8G18v;UImu91Yzf2VjK7E0!JO-}5JMDj)P8kAMA0%^s35Z+=Hr-2o`Q>gNcGNe^d z_;(j!6$lrpWmDlUWS3ehoP7YiB#@wkd)lDT()@G)-zO26K&Aqc78(^{D4rJi3zYgt zQ6mCEiBu=Nz>1w}Jt+a-yr-)~fg^&NBKyjR$Pu+4jHcRA>ec2Xpo=3A0DQ$%8)_-5 zfpUs*S~u*pnXFJ?XbNTN9{sBEkoI1Umc+*m2{+2Zd^dxIUr( zAsz}Gq5wlo7$N7?2uZKrk~)mHdxvRqdZAA$U6{2Az)=fS^``0=;8X<(LJ{Nx9(5Gc z1J+YbkJ1}y0$iY$PNH>y!bWYBk~8X^mxM?O)1{<+YAQ$HWZsstRS4;lM`H)zuctShOrT@W7D#P40xh8@9uF=NM{F4LY12$57Xr;qF0V z%l5%k`yjHWH{jb? 
zYOc~ZH&^bT<8;UM73k@cM0bP_>y~RZ2f9=GI5bu5Dh)&a*QAI$2MS~rm3X(gT#F@n z!-$6bQ?x3IO8FO{A#nH@x_py$J6@ijY{rpE8E!=z0m@WyO0@!AQHq$(cG885#3ywF zzS)k~!?yK@*9}(SAq|M_xl@wv+$+o9m*u^81cP_ry6!{p`9t%_{l#D8_>=I5u9u`= zj{5w^Nhvz+_l^2}FOv?Ux&L4G`2~^t&^6iD0-S?aQgAMiyP2>ifj_94N0xat85m*9 zvXG=<{kMQ!9ddoE{shwxnx0<#67qKrLgQK|JcSG%+U?}A%O^pC0)@9j(DHa^H;3OT zoRlf+xz`SPJM&X+D-2Xh8wi)wDLg>Kp;*TPo)$DH5J9;VcuP16pn4R4%Hk97&#N2I z;-11>L)R$)Pf+D?kg(7DRIpSY9$iZ$ZlqLnCF@oChLr)InntjhpynYL=8f_4C@1rb zmg9t^G+tOlLmNum#Xm?K0hT*BC{^ho=p})qXbUB=W^oHutU+S}*_6R6U7Lr$B?Ug5 z?;n|+ddE=7wYl$m0qG-|fzka#>nH9pR?=&FCwI4_ntbOikP`hQ&)$>8w$zj zP(HnVZK~TC442lO4TsltCt`iQiR{{4qQ&bEu8F3L2^RGiHndZYiYUB|EwX<=y(G%X z93c<8(!xFe1gqK+hNy$>P#~=fHF(>p9kIKBRckKIbkmYH{Tw#aTs0&b>DlL{cx%43 z^(E~=yu4Kk@CNOH*4!W%7GDx~gV$4x=&0~f9UC6Rs#D&G+6!iHOmt;YD9~sS&Y>Pi zf`{xUKnyiUn7`rRZ}8$xrKR#gaCZ_4j87;r4=J^9#?`!91MDVL6{6@;s^Y}6x;|xe zB(hz-gS5_GxOYjDT0=?5z!M-FpYnGUOMPqTU5A(J`0(~(6&tA}ea*$NH05$!EC*K> zWm{S`1=esxY>;7|Hs4Zu>1aec}_h-th5H+ERL3pq5S@s2S z9P3#CzVjGxDw;+p+0`s%ln8{JS4)rxwIrW5y;ck54Z{E>JgXn93%E~wf!rrZ+^m*) zVpf@=36aA|DSshUiz39Q>g^)o;H7#El*3sjAPyBOy1etQXtD#-1@n{At-_SPS&erg z*I-yl`jiwgv?XI&Q_matux9CNeq~Zvu`jZr<-iB_q5qKO|B&nTJ>jmQXecx>(BCz< zX-%v<9hcUM7W+BbLcs+wJrMwtY7rW^Y#2`^S3B;5ZaRC`Fmjes&b zi9|@P!ci8T@UOE-rBX9%8uCal9%o?ZjzWlWf_*5|pyubd&A0k9(7Ul$sY;~Z!?z^L?r|}gq0ob1va_du z1jMAqNGe7^RAfykD}?L!G_3#Fn+M*`iFmGM)ZI zy00VFW%K6;(r@yo6LL85vDMoXYyOxRM*P_%?8Xk@KQ!?aywP|~UKp*QG-Lq$OZ1>(P_l}M29TP9?xna+q@iluo#yY#u zfNr24v-sl?y@&^llXVXxmwTY}5?8=FsdklxlLALF?HtLpBQ$_#TEXOpnMF#m)s zKasEhflnXS;*;!V_K5f#vTDx>2q>%BGzv#WS|Y_T4^rb4&J%0NTqJQ~Lu#~^oe5=) zB5OeMRO|tkBfV`d^9U$ARj#41MV^`HrRWr(p;8>AN_G`AU!pw=l1w2w+Fw(S=$Xmx z!qmtukEZ|au57Rnk9Js$Zi`*KJ32PnkzDr%1E{fZb2jOT=Hek9V8?`~*c;dfFqR@} zXps{ib$6*Z=~AsjdnS}>nxL!-6qJcti8TEsDtub=XFNSBm!xy%;Fx>VO_ zam|KuW{cfoSd&gw`tp)TiluYn(@vMuZt-XObA?2@uWKUhwb)ag@zzd;Dn*voP&i4n z2OI|RfuC!{2O9Vo-FkeW8T$x}vK$|{T-HsSex_dQL~@6d{}nGe4)3Vd!T<8_25vvZ zvF|qF4xx(Q)8brX^n`-ql00N5N!UD<;GjTl3YPIfej*jeqKHO>bx0o#GJ z)M_g)!_$h;D-^sU@*)p4Ps(Y5QyR*LYN04)4b&RsydHo)mEj`vitulG&bb{4%@0cf z!oMr|D|YJm_onSv40{j{BxdvV?{<_AGIPHEmi)*j?AdM5wOzoVGKhwGR72z_oq`4c zY6sRy=q5rJKsH%}u!p7;u`q^(8mhp@L0HhI!NZlThUv=`_cJMDpH?$`Z)A|gMtg(& zth{s*_hV4$q-vOlM}{eP5-~BLDe~)uK~icKP8?v2O4O_5h^C5+kUO5N*PPCVfoqi? zI3VopcQewsSnSX>Hy-Z*jHI~94XDI^|NWJw1`NxUT;SXYG`rR|oKU)H55 zyDY3LyM$+acX1cH5hN89_G_3eaheFR5>E}K0ja<>VrhFW;QIT}927}|@j9or|q!`Ffb+t#wo!3epS7b*? 
z904ElS{>-y7ucJC%l-hodxno0{UN@g7m9;c=;T!Z(Y7k3jMs{zI7vZuB9=w5(wb@H z$Z^+Dqgvbmo7fNtRHZ~NIAvvKM^gMj{mW^epCPhbbXXj2jF|M+XT>SZy##)|4f8~@ z5r{@ZK}EnX$3=-cxoWBx11E5f5_KUTDDt$G(%C$y-y|T~3{T;ZL${W7%_Wr_lhcrL z{e8Mp`Ofh@myC|>-FxQHn%ZvGBd-nu43JQ6&$^fX939r&M z+@K$X`r{s|){QmuUzC(gOe<@}wQ(t6ApXz~`B~AlK3(@i&cEfs4U*JEbL?;HPuM+9 z8$0X(&oB96jz?}Ec1eJ{C25Z|ES?dMuO!OT^aMUYN`%G=X!;m^yODgk*cYwO@CqsFklNYyc(r9U=}=}Vw0 zBWhC1ywECr+{4+Zhq5VTUUO9@j*7~Wq&ZJ6tw3gy<~-An_a=Fvdu2_zR?*%rl&LVa z>LE=8310={L0KSDa^i1|uLzziDQxmj6E?x!=#V|>b59%|X zx#+Sid01XYqhbHrJp1*(S;-Gr=S3AB_QMrRaLdZ|sek!()gkN;1OoB>QA9P)-NHU( zsZ@8->P%_tuoX%i7|d#wsG26*SkswmGy8URz}A_q)Mm7gZ7x^$&CrbR+W`EbRid|b zRf{S6|Es!`;`8vb@_Wj)t=dv;C|9@g>f~E@U?n$}tJ7QQSEk2lFHO&2FCD;2P9th` zet2~KKE8)$fL5t}0Hu|EUS-`d*Uf6s&Gue`x0kfDs(W}83ySUZf25)P*SFa%e@>NS zJDQiic#(}a`Db;#%U2M>>b*$yS9h@Q!j6{_hXl`PidHesH=T;m5s=toP2yCEtag?JKtw%LorfX05yPvBJfW0ItD%w%K|&<^ z^jg~ky2nJXD9v9P>UX2{4dr<@eu{*H$wRR)g&n^B;xO-*0__8M@p03}ZwJzT;egW;b zi^o>Af7O!`PQSXU-4Mn1-@5q6#`j|j!38aD1!mqa3^S`<^`ykBp3IA`Jj!$9rS0Wnv=4GX`>L@VJ&h$vH(FCt z`WKV|&Dn+M#(vI8`*CUm`MpRnjh7q6Z%}iO(qF3)i9R2pNXLjr>42@HmH;!1;AR9Z zjeY!=|IULbTt#K5NGX&eA}j!CxbYz3Fb8o6)%J(!ui+^sy4*vL*+pf6^_*a`IR)2FF+_TJ*NWH1m-}Dc~S1Gz3g1v?p_khO$hm^E&y_ zEtp>HH_9cj8f<(YhK2jEgMi5_wbTAQxAo2dH($fn#T8Q?PmIZbmT4gL{q!?d-{IiMy+-Ee(@{x{fn11+Oco(kJl87!q&N} zDXXOu`;|0rDxlg{u9=wR}dpYJ$Lxtdk-UiEydns zmpj%M9w0d9(GAxyj}$=7p+`;*oqQCVBG}S3+S~q}fb{$=+b~yvcu}|Hi^Su8e;i}) zrBS1u*ZR=sHo@l>xrR&acD{B$5$?w)n%XrUDi9B)%*3VpH6AJuuUy&wWtwy0Xrmo_ z@_OMo`w9CtG8qNboF|)+&}4F!T&&`J*E<{@mxBxgpB;bU|Fl2wfSnQ=*&rGLRlMT? z@@?`~I6xoZ_c)-J<`=(@yy#0|N97Pxa8jru*iEavL4p5T#6P0!t062jk0AicOyTti zpr^G$geQ*|$WkIPifb3WpBCI6TWh3^5Pc|#iXR$eJK``uZF6`cvOq`bK>n@p@d9C4 z;o1d0fmB!S;<7PqufBcC28XP5-84#}%ue&?>(@SX^1w7pj?C@7iCtCyA0r$0-c;G? zbXd&xOZHuR;mPS;cy4m++jYZ*CvRt0jXZklAT!n<#^YhEUmmfFJuB9Ah_92@uI0=Zhz$9WlDZVoS9)LW`=IjcrK2!>9oe*&JWTT`< zEt2O?5f9D-eY7+iHJpSX^+P$8>ZTktx)>BGDBW8AUEKDqsR$ z8Zp45n%!JX!)zH!*$Ce z5lHL$wApU*raOv5T>TxQ;LtF}uA1SXQw3BDr67(DUJRb#BgFlpvLu(RJjR`954Ov4 z@e3Vq1MKTvQ^S%;4wPwY2_${c9#KS@WLRsS3{eXtAf`#qtMx zX47CmmA20Dp-df#XPCT?_wU#>F`6sBd-tU?y`e-Vv0kI8S&E|-ZwD{BI>0~`Gdis; zx1amx)$P|3KmYBoSKU8DJZZnlTcvr1!V5=`ab0k z%E@$vyM4N;!l)Tbs^R7#o){P6X`V~5V~F8~M#U{vVm(^MG?Z>X_Z(Wt-CikYiqOL? z8;-SR#@)Wj^xUjiubXDekz$+2d1=SSa(?Lc-Dfs;bX79xaihigov!xeMz1tdtgvaL z6|rdh1MDV*&}MVpx18C0ERk?pz0)L~A~U-u2R1lI+sjE;{STMA!X7ErIq=W%Tz~vg z@crG;#V8MX9LJMr87dTtLpXA^6Lv~9gFlq&M0u8oCaFqP*in#>9Eyr(HH)r*Ayy~F zV|6GxCx*#`V~|d_BZ!q^IBgtawE;WSl6r(Nk_;%R2Xa@*@uRGG_>KjzSTzGz8MaOj z`O$iZm2R&(0y7ao0glu~pkw zN!mZ~MR6X!+VCd|jI@6m-*X7O{g2xBOY}Xz*1vzbUE8-S+pnYV z|KGn|bwB_9U(xsT`yo4K7ykpkv#X%R4N`v^nez6JXG3*SN5XVw*DTu|?pNB_)0(S&Lcm*@#oV#d*8GMO!QIFOms<}#)xKe_9wJ1B!H9KXD`leuj! z!<|dHR9IV{^Cog$_dDWsJDPZ=M_ZE=t;x<+?dN{Dy8U`u6XE4m_w(-;==)cG4__;R z){3`-XU+-{tW^xYTOT}~g970QR(^aXfz(ElJB54s+zn{b80r0aa|owmCj_UYlf zkL({pSSw z9_)4e({ua8tyud!ysS&PeXCOXC{hjX~R^$}S!%@okjQ?NB=M%Rs zHs|wwN(y|vk+WpU? 
z{X+fbRrg;_?Td!S{g_+Z;uYe4=+J#QHx7A(aiU;%2^qp|M7W_&LW_7nAo(Q+D0!0n za$`Ws1Sn#G!X-E!^iIJguA;*mZh`hrP=$<`Z7=eHmWdh^glinm+k zH5*a8{bs+->;v$elb&)nc4x0oqVCoPR zxJl)B;EvKsz*s|n)3k6pCU zgeMAtix==SdR-YURZTM>7f^1Hm2@f z$_T+u;+rV*391NT%Vh#KTHRnLLlg;g-A7v3P2Ze+7sOJTK+re z0_BWpi-0#@e3rYSo*-tqix@I$H9%Tn!m6EomO3#@RVT+&`Js0xy?`3X+7zf!ice1= zSqFi(z~WZPE9hSoY)-6oMJ*&^xdvVZiQT<44Fj!_EA<*k%XN|ZC;NY;RYYn`MY+xN zmSZT1bV{#~f^p%gnq6_S?s1?M|265m<5FF2oinfO#=UyyxP7JmbJKJ9uV+L898 z?<3SA;(Ye>kLmu6OYL+&-$yRmM=S3?OZ#YX_5I(V{o*>Y>i)yj{^LKa`2Hb$|D-1; zeuMo1Ib>$~1?+cT8%cZ*?mzOq=In|ujz!@;x(O$+{4IEm!w2GLRu)sswRaE35YA8T$H(!fw+P<5g|npo~WZHQH-XM zs)PIze%}kKz8a!ogMdFYEp*@8^3}puM{C`_JKfE-Z>@=l5d`uUWi; z{RT3uAl#yJP|6L4VBw2LF<{AyEBxG;#gTqeM1*2d!~+T%(tO~Y(Q1YZm<%_EGVrGJN^@Mcs7@q<(qtt6^knd$ zQ|)MJUPSAQ&2#a$T5+%Ue;}63Ciji3oCy>{K*cYVCQFA>;{W31wU+4dYoLo;mgy06 z!RZk(9jj_qHL8{UU#wYmJ)G@L)yO!{jzdO`fG%5+N%H|gD^?Ek3Dl^a?+cW?>fz}~ z9tv#jB?gXK7^E%>0XhSWC(@vR3bdPQl84_Vq2MZgVD{V%Qpn>~3B?4o@{Ia-9rDI$ z?TC`)^CD2%!YEuo-L{|Vb=x}OQwBFqkS+urk}BX3_azH~vi*f}qnaBXI9j2AIm#-d z@*!|8!*_tl&T52m+$SS6_%tOv3v|vL@=$Rm6po9WPo=vNq0^-iJj#60oRRse7k`6k zf$5?Wa2DKak#KKNyB7uIwBmKTI~qw`{9xne%5Z*EhHF&!(%z+;D!hQ*{0>>ZsZlSk zXXLLYW3i8+&T(zor+_zp+-H!AL?Hro_4O%~a)j9A$%EF^|s(Yu1@AYIExS~fC zPyZTe6lkXybesXh^^$)T1bhNcH?&VsY&z^Y(pR+ht-Kv}py*>yctpcTwfpco&h684 zA0+#fhp*!Ed~X?AxvzUsct76R*7Uv2zo+HaA6gs&4&MBG`u+I+B=jcOmBQaFegt-; zem~j;&E6F5{Di(=YsdF&`vID`e5W&Fx+zM*PZUL* z5mEIa9y%1FGFeNw5ZRuHQbz@y6hC8-v>W}*C<{@915v4s^Q0*1K8uvwFddTQ(AXm# zCO|RiKq{oAyyS-Ypa<#bDELG}TGQtG$vsXW-QM>yANboSB63dKa8$hKRz&8!farVs z_qL<9v%_}iM@T^Vh7|aRakC3AH~uRxHnLosFH7hAcxjBJnP7}5a1`1y{1D>c0vHb+ z;>JhC&rES>qk@)J4f3RpAaoU+pMt1S{DqoAmBEq84yTT4#1WvxC6>m;8G>F`s(AP{R}Jbe*xeB<^i&$SKfaUZ>K%P z?+2T;gT^TaGmYH_Pd=b`4)aoV&rS}GcOutIJ8*|Q`6#a0P2iBw4L(srx=RZTN=j%F zxCdE)h(*;6T1qN5*1#tC5%nH^k|L$*g~}9-f@CGKn{bAelHgg^Jd#!<)EG5$dBI38 zs=BErWMLqa8u*3k1a1M6Za_3`8dT979m)O3lqH`Dw2II*sVEHFNfD5rx%U2h&$5w# z^aXzUZ~fsw_r9@Ya%^8WP6K`Z5b8WVcH1rQdoL=9d3ZWz{p`~Rn6au-CWF=6bH0971&M92zxt-g?)^fUV;L@JZg~BP( zocI0xKgqIZrlt2jeV)tH%*dZLs{Y&W|6AVQ`vO*9?hV6>wnIf66f0WUj_thW_I8ns z4tel$4=V4aVE|S}Wgc*XV}?r8G_VZm0qHGQDV$cpEDSU;Q4WtD4`s8_TFtsLZbBDE z_jYye7+O4a&Eg;$e_Psu!@w%MZ*+NiY-Go>zUA)V#0}e4Z;HoVHt)7#VRrE?r>=f< z@sXbNbk8%-yy~5I+_7u*4(wCL(t9N*bH_8x$-rid%@y85mHT#b-yJod(>H;hQIn0v zzrg1l5!?N#rswz`}e$d*JVHd{D#k0dO<(M`|r4TAKpK0{{I#JLiK(Ah2KZo zQ1SWd{mem~yU261@%>W&`gaXX)#}3#>v@_g9TLbPrmT`zp1Dc3!c*COM*#md1=jJ zwD^&j@vM=3Xn1X!f1-`IrHM@u&%mb0%;}3DRGgfeE||(J`lg?SRB;vvn{sLhR@Q;c zYbCp|hquu(5Ci3{JGqP5Rt4UJ@}4X}1-8DS*~OizA{mIau#fff*pT!(vtpNuNUkKg z^g&Tky8T2QCo+nzwQS(j1o$gOQ)${W@!wP`SJ>uc>@h5LT-U%1ok-O<%3Rh;T?vc{fb~vN zvF{hR69)@l`wc&O84=uZ(_SOzk2zeo`-R5+@v>nxZkE{bsf%AT|HZl{T1IUSCw zh*^NcVRFNcBP**k%gLV1=2yi!=@QmSG?iD!M>7y=r`9Ol6=q8yl8EGzWId|*H@=^K^8?x&@qFX^#n1i${e=zhx47{B2L;!^@%$T23b+<#@d!fr20^a4NM?S zc-cg+UIJ(uRdmT6N&wX>nLtS#isS^chRo^C!a&SQPIs22)mgQF1^B|-rkKa-SJL9A zrib9ClGx3OTfXc;$XQMU=vj6bc*nE6Sa#y&C5(7X~_yT&lv?m&j%lDk+?Z=KAfgIeMucU+wwP)of-Y)FF> z=Ao+V>GjKz9V_4q&t3pFcv)~E=QR4pXcm{VCp^fi z-YPNZN%MIg?@x)%J}18VLj5^;ahu+cJfeJ#djFpgf3Ce6pI?9fSs1yW7(d6Ex#OE} zz;mHm!1uWa&wr?V_&?#9yw?02_5QEp{crrm`Agn^EuUZfUzfaJ@(<-@UR@)BKt(Ne8`;0T$~Obm31z^=IWqkr(APWbx!nS(WF&9J zdc{V~FMXRS>{a^~Q8`5CY?l6d!c_Qr6u=xXdc3zG|D^-S2$rkAk@d}Mc5ouiXzw?9 zTL>D&btrmfK*EBvzp8cg1`yqIa>gd*^yL;0rbN^j0pLW(V@dPk$MOL318XgzY4XhB{uPL9 zdYFxGXjNgy8)%HMDO5{Jqx?vQUf5gwf>-h6^vB*+JhWQ)Eq%W?y?6RO-pHIL`sc)g@{-4I?Q97d>GpZ}FZ~Kr_%WJRFAV`i;k(?hY z4`Uc@)oJYuJSQ~7_0Yn7W7D1yEn=}~evATdjP%B#* zbopk>W61ojMdPXAdOTI%1lS2k3Sn^O7Ff^DOS`b?m)Z1RG&Vr_U0bgo51Q+rlVm-b zxmHn@aCm;*T>HB}8p};aW$E9Wl$`zeHuM>*Wd8bkbL>;BNy~a8%d04Ox!zGp5Jure 
zsO3g;LM4fHmpm{C1JPxIWe35O$Z44ij|4B4x*As2B#a7Qo=eZzhD$HG-}R-pm`mTr zrPr-$=_yy8zy|Qu?FkLXr8eXW)AW>R1wwqTc{7a49BoCTo6M0ORlo5xKJKRnAE1oQm#QD|>G`>AFkhK=#OpIB`^;0W=N*G^7{+ zTtNW$9ZY#^%oSEF5ppksNT(Htz}roap`g0a6l=GYW5Y}4MltZ11ttMc#*C@S!)>wT z?y(b(-g|iaS&QyR#$@-+g{$*P=lgFNK0K9pV>}&y)5%>|KeRj+irg8A^z0a1SeXeP zzWM#}p@DWkV-Sw%H;mW8UcOavm`L?Q=v=fx@WqG@F|}Ki#xrE#WN0vzGmXYkM51i? zraA~{pUAHaA_Xyulv}D;2d6N;Qp8&xfpiCWmMab?_){_BzCj!UZ^TY!w35|ZaR-n- zrE#2Gv`AZ#G|JNOv8PM8p0N63q);{U2RsgRnD~}@=lA5H0vLduy&->Z&}UfAZa;kQ zqbDX0W!l;Ty^}{qZh3#B=Q}k&lLiNv9oVFP|DBQ0*z!YH?>hOWq49QqTY9+n{WsGw zVGq~K#=B5y&2f%eX%$%J9~FZ7QJ!~^kz|(z!3h+K#{DZuSdj3UKuGj%qL)Wu9X(Vg z6Lm?#UHfr!LR}ImVbEiOmjF{!>IN6JSDV1Jp!s4Hr@`)uce0fSd=q7VH|J=R3Nokk?vx6|K`IG)=eJ+q)M1{PNG zwfF-E^u>3bn>i0LHU0a&{!nlIs-&%i|7?sPw(9|c2R*-XPara(mmz|1u$!PytC+$H z2+XQle-H13yON^idNPHA+=yKE(L1>hOwYK1qrM@ zq!ht|iE-xvz*9L06@u{`;LgBFSu`+gL6lkb_`7P~02S(I7237-_2tvS@$2tjo%+Um zZab)N|D!ViFQK-AY|eGX=+tEI$Fe(raut`h$FC!q-rp4 zFoI8}1)GH~DtePsiaoHeZaAA1~@9mcBu<(GK&Sr-9D9*mu+u8Fsu z!RmruJtNt=4D)iL9SLR=4y;2On`x$S#SqTsVoLO$vH-au!2V6w^$Ml88W8m!emLt%s zD^2(+@qQJR!=qeJb-`}%jy1PYfkYzBovrm-hv$mtOdni1aqZ6KGltx`OXp6$W+68) zaohf53z$*AVL3Fnc<}gENX_W(;#6T}c_Jj&@9yhXrgnGO+*?PsT|EymowJe9=)$%$ z3nCDtYxavDHKwrFhP7Wo?h~^oOdP1egXjT?UpqNRAvi!(`NO9fzCLYTm_U7ffc162UpAoW=T~(pt8*V6$JnjLIbeNo zdssMjoHOC+B0nO%@Hk-dN+HswpgwRq(P)4#&h)TbA`OyEqLYMBpB`y6=|MpoQ3p8_ zg>W*AUM2K<_2Z}@m5ocOVwiGk|V)A7|bs*y(^#r;wr}G`Qz~- z#%N4CiuvO(r$1J30S?(63>@EjO@|lY0p)A`CVZ_0aw)LnBx97p5~a+(t&bF!X>~zqV{!2$X(C0H!QzzRyJHn1chGJ!P(S_PCj2=&4IO&;N**6uPjsh~M zYc|;69mr$NXjM}qh2E799tZanQW>gP!pJyoN)-4UWAAjRpa_~h95Q2eO$ zh)^K|^fisghD>#gSWtvwxoH9~H5lp%qC>O^^cIPSZwH$b^FI?ve+cLC7-?&R@Z-yQ zgmEv}+r6nF2D#+|d%31$O5wo6i=P5s6~3DF5;!~)p*BL`)yy|?&BhLW*L-oz-mWiq z_2+H2CMjgyH#+#}RC;uf?64s=J88MQmaY<{N#0N16s#(%NFPadJEmi_x3L|g_TD8AI4twR3P|k_jRpQC83L^vOw_XKMw|)45Q4M=n>!!%xO2OZePkW z;2JCk0Vr3OqTtIhc|tZT1QDd1&WivN#Cv%YCIzaH1XD|8P_F(#q~F+B3C3Vp`id<< z8i@6~ui1=kl$*8%%@VT7mSfe=%LRC;`t%qzRo5@UA2w@H4^$CDeayTBWt^xR5Kjy) ztg{@^@QY?Pux=<)k2~Ou{NGuu){IilI1n5mB$hD zGc!EnU2AoZjPmwbP$bHJn2X>#dlu5faUDBVzNP~vk=wR!fctS?_t$ATKrk#Q_5b1z z+dFnkQfu|QUa!#-y`#H(=A$2-g1%3iwZ$HBelD_4l3xeH)TFz`?``d!ko4AMZQSm{ zzSNJ@J`H>6ZVBp-%uGuXYzFk>vTlxxe{USbnJ#FbHQ64`k~_{N>iUYj#LL0QhS7<| zYo|NS=3kNM0@0X)J(zmjr4FRW6orj+fO_i6-BA!mDb-Mh0DFn9$IHhx)*xD(5QxkgX^a30)(Y5>`g6E!s`a)L|?Z4CXS5x8<}j0 zFq8U^e+flzQ_ovR*IF$LJ#YQ}c#(siH(y}y#Lao6pMM1Nm?pP;3KJ(0WT{6qSjr&? 
[GIT binary patch literal data (base85-encoded) omitted]
zZ-+1W{>|@~>sUWyW%G5j8=rHB@u%1PoLA#>C{G){S3c)8xc;HF>yq0u$1yHQ(rOIX zncXvuM_1nqEQTGK-a+H_77obK4f$Y~WB}U1NX^FGlKOxd2-aBehc$x26cz*n9YR1P zI0j!BSKlhIWfNdzNCa)xOWa5Z)WCG&)4*vGWkEK`FjP?84YLkVS&5Zxlmt19He@fO zYvRB>=0PnEy9m0r(wU1|B$WNlCkI+0L0IzcOH6MzUs@9itihwJ=Qw7}>20!>lILt) zH@mDkg0^s=s}HY35q)Aq&U*d*7p>2^ee>tY`8}Glg={j?u zkC_A3{djRdy>dUi{vI<7nL7xX`$v5pYsu?hH=aj)sYD*w5JD5ukq%ru4TnS^91@x+ zlS{r)GH|BNmn0X^PZlDfNe3G`psWJw0*g<*4>d1HaG8ShqZtlc0eJvoZUcy6AV0pg zFl!7VVv%dFSi+$~5bRR#(km3~930Fg``lYk;fQwWB(UQ~YkB&II&*^^u~8n_R!%qu zR6N#zV{01EV=itpJ|wfP=OV^D4zFV_KKRmKaW0lOz8`B`n}rs|yMZ^7zg_#hcj+~K zfj9*Ge(m}v1jDD-ZsYa$t3SZ^Us`RhKD4$L#_BnoP?zPtfbzrR1>^OrcjEU&`8?L1N@*cd7 zOdqc6yx4a!0|<5khjGV0e@ynhK1b~EtqaD(7JD5@Qj=1unuMjZ;!FdX4$Te?06U|L z=K=~5EZ%5~CYlK&q#T%Pu%hVL`!qCYa`K2Ef3!r(0VKq;be9+yMAN|TKoC7EINKJe z1};M~zu^?f3$>jNu&A$E_QakIW>d?wc+RpVo@*Tgjwx+ba1KgXqfzX4%8p(Gom11- zS8)9T>N9I=`T)O`x#Jw|f-ErK{yxbaxBhM#*ZFy^HMOqG`~MNwFBy-oG6Ju!iyfzK zl<(OsYjaLMAJfm; ztJd%ehQ*6YO37HYA$GMEXw=!a(M@v;1g)5)F_3e8kGZyJpID=P>ft?4{?t!9Q6}S5 zuWQz$@hz9dG`;M&`NcCA903G1@z1(X^mpur9j|-WF)zhlE-P_|KL^=Cjh9u{MRrwW zdK#wmBrrj^&8!0RKQJPsiYp8!s_vB5EODZbJ%CX{&j{zZP67!50aJ(?Qy>TzFkLYC z6m$J}IT}@WwO0-{7u8ddXf)h>tiP|r8pby}!)=Mq!oE@@;ER+y`lHq`U>{-4kOzj& z9HosNrhUY&!QcwDaIoxfUSKNKF&r*nOvoWhO)}aovko#UUGAgoOoTc}>P`^23W6d< zpkbOPpDVmpLL->|l1ZIT~j z(vjwVhKNVzbxTGaWI$Zx2r#~bnKa{R`l`-uIOlNBsbWrZD5^K{-VgMVSFBP zJSFlf*KKZGzhm{Y#_z3Pmt6W5>>q7Q;oWQdM{?|;CDD8zxca)|gS^h)%KKxo77yU_ zFRtEdydIzb5&b!<7p+{qK4)>&y57p+Yn!)q-HXpTWWHbS^H$>sKPN{$p!30h4_!Dd z-@DiNvV4xd{s|fHW#b9FAHQFF|7UoPe~Wj6tN3Tf=S)jpncQz#;5PrtXh7Kyu1ss3 z%~l45U`CTz4&m8032%c{XBkWp$cG?&DdBFQIR~!P0o+_3nizo3#@^k4aKeFH32RE< zaEOR5G6UidmR;#2{u*&kybw=FKLY@C3xuu*qS&*AUTO~@C<5^eOknqn%+zgTaOb>1 z7~M1VB!aptc85`%hDTRV1KnJY0fnNwEZA( zv+XRdJp%n{q(3aP| z!b4OIt`mGKvtXUUpIf~m>uAUs{2N#=rE}EAi-?{ecLU&sbVHDcZXEWIL{1RkL8qx@ zPsD|lU`J7Om!cKR2rmz}Aaevh!nuX&8xChL(&pHIuk?GvaBI}pU$Z&iAB+EG^ZSd7 zJ2t2Ll`pC9A31b0f%~SH_s@_t-nY(Ye;xNt-SmJkYY@MHvlzuNqJn^2?ZaJX`_i~; zz!=%fhTQemQA}k7AOIN|k-n89naHkgcDU(;T#)Vtr9v?4q9`Y-g$Qx8jJr?*7htdl zAsicxJQlTWfccJq-DzDtSp^*&f2USTzp;DDPtVOay~}d^Z`iiwcr1RirHj;jZE|-2 z?a<-ECBpt)vr)_ccT^@O(9{>xAnQwv!MZ)o*&%0M1vbxP#-Af*mqW%M7!Mz{H&gur zniP}=GW<}~+#S&#+PhKWd(kn6Fb@VbncnY#qNb;s4X-%eMv4g_5D_Ce%2tiRu$(}> zR)Die;sM|)D+KdT0@g3Fj&axI1f!m6XnMR{9qJnxn@go;N}2Gk z56l0& zM*|p_Gcc0~4upTbtpcFLbenwtxA{G4NqpQ`b35d7MXrBdzW-a0+lxl5`4{>+)}@8R z@Sv(d`T(=CQVNv-Mg)7|Ya}Tp4GM>?0uz+eK^aYw=)aoscU{-BHS`7%7 zxorTNsS_t@eH3!MY%j&%F6+WpBV(5(v372 z5MW}T_{BbfD~yJWm{&h4QWEA62gx`7~DV+jRXCmfelGufC3R!ot}g$8%rnoT#CC2-+5o z4=V>Gw{xwdf*@iGHrzTjkl@n)%sopeu6d$Z z!4NrNX&zS9*B@QlbJi-AI5sx*>W#G$NBj4@7Nu}&fz*kcs%ym&R459)aPKw;rRL-z z87IrJyEd1aplJ%mi7Kx~2T3sII~X18*jQk)R!vgHKs#Vaz+k5#8?__&H&b7$AL$}e z5GUtYxbAMpfQQg_FHwj>{5FmlnN8H8UR@mc%Ayg|x8*ots?X^m@Mmk-tkUi1y z48|mW4$hE1$LBa!$KKUDn~y^lmRJ8;y&JU>5n%iEu_@=L281CK(+E0whahytSpupE z5}E=P2Jn;BG1jW{BoF`xA`V70sd*?m(K(yWxFVCe*+_eEF&fTy;1+|>X0&VMJj1!| zm8wVmtvII_*LtdPBQ|h+q+A^vJ2#Wb=F)#& z+|fTa&_A)GV`Mm+N_C(~7|+*7u_&KljD&ik6$8a}9Z%bM9Z?@#x9|s8=i~Cag&A-< zPRMHuD}di$C$Fu%I$rBs(j&;*{XKlkUgN{4qoG#-h27}FlSTnfGD|C$+X^YzZnQ|* z>{Yl!K#U~q))=2N`~XbRNRj;K+>5trj6r9pmV2h&jZ}L(Fb1jacGQy>_|s-Ax92*o zjCzVs3(Sa0PPFbP^eEXwb3es`hR96agLu5uyhYbr`%%HS^3TP>2G~Kh?^~Pe53d_N zl3&9f1S&4|-`1-Q3(cQehadbLR-QTMRpeM_k#Q~>2dv0OTUBGuHPL@7^I(;iGJr%8 zU}(7)smg}0P2iz*1Gl|m^Tm?7O@w5hgJuC}0Q{7;1Cy;RH1-TkG!~g57wt`!yZTCz zP`EGMHIno&E!|rT_l2WVJqL7dx@U25K*i%B6k;##F6u1x&b#An(b>kSC$V3?wfZ8~ zbP6Lt)hya206J|g>`Gg2)`kWJ(JI?-$w)R*B^fD#=I4deCIyHHtu!PGY0eW(!6Ev) zuozjaj)%uGD-mD0Duh@|ew|5LF-M4y{MPxstP7nyuy}Yhoj49yv>6Mz%U#`LdtR-6 
zr?M1_kLR;vCAD=Zv(%Q@VT<%GcAP5o_5ch6IdA2hoer>eY5c;4dDIXK>qbIa$H5nK4U}$`MCNl9*I-irKSYC z_IpN;`Xq9+e9vvxdz$i|8RL-gTk;<912$py>-YkNt^=3U|29kT`>vPVA$a&(nQ~pJ z#1;eeumJ?aFBrdpkx5e-uf*;1q0XI7KoY2Lz^CgWoZHJmTa3Ojla8&-Rg!y#QK=DI z8S(}`GWEajJ$C%QuCDZ%x8K}5JzH_x3!(kf3##Mj%{LtzoQV$g?%ca~=fGqxQ!Pf^ z$5WA$oD>s%pf5qbz6bf5LQOMKa++}hT^fbmz;|V(qVKk%o176R%A4;fue7&*A>`la zfz0b@&teUV=rF(%L9s@39ft@CgnAZ4M1XrC6Z~YGC*F-hO1v-_IH;9yWDnuUb_xw! z9d&+@C!M}zNBfAJ-YN!GFTswpsu{A_hj0|@oF}HhX$f-~zl>r=aLuwA)tTJDK<@63 z+|xgPDywpRm7p_PKxs`n8YzvcONs8DZujW!`O~`=ZZt>6=JQ=MLq|@ZJ~F)6yDiJ*~oVw>dW8E;j!nJJOjgB?krvckf)>&H1|-IrD#0UqDQ<%D6VWKd}Og zGlA^!m?Z&V{&ETn^@yINP^C5!lCur7#(Bc5p_aBK3akhpp(sSjbo`Xf6G^59$2d`_ zbScs7sdph;4%jn7El)a)XuN|76FAN-=Rvx<(R5C_xM^i>;P%?sM79{TYm-|2+Z(Pw z`TBiZF3ud8Zp%jwUOuRD1B1Dq!tkgkJUua%JHL0&1>1?Uw_dmHOeFVEX?ms@Dh5m0 z$wP-Gy}h0ie9Y~GrP!dxsS;Cq>K&>g2tclUk{|lGoGHTanWU zmhMXX+C+`4PgL$o3-uioW}cNnJ!gXM0s|0DXJez+$F0D4z8gisK{k zLa*BrF7TYx+p(A6b=SQkQ0#^ic>qk<+K!@sYEk{iQwlB` zbSvR?s1l2EpRWMH*`P{DQkEo?8TMQV@?!?v?HvSh8&X!k>~M8tTJl1rKBW&YC=}_d zye2l>Q|h1Y`oz6QlHK_;@4E5u)`=TCbG@(LyZ?gYx-)04JGLvGoht5LTH3AVXDVZu zgZTK=;@;g7V5NV<8lo=%aq|w;!*FK+M#8%VLKKKuK1-C@{sYfcZH5RJB4&9V$QBqO ztf=QJr2m0~V)vDCeF2m=vlwHZ{*8k%lc&*+H&N&vpr$bD3B*#^)M=c{pA1vC&bW=+qRU) zWjewE%*Q7WByw$FsM&zL1{XvAA#Vp8W7kPW9{=(^KQ~a&CV4c*)HYBuy^%N>{V)CwR?85Z)9jl zoj)=)dHB%O^kJK0&$0C0(}DMx9g#Vw?cCz-3+MOb7hmO%Elmtn_cvb}p33)4>?n`U ze`o6Ok;$pU78Y^dxL~H#7ZJr9VpT8OT@$`+sqIV!F{~9>wlcw>Zv@`HS5GNW2jJSe zN`}pKmH9vNb%1j?0 zZeO~-b1VFja`o`^x!s3fH8S#=*~x4!-Z}Rz$?w-Bz)YjhPoj%{m>zD4jEV}PJ^u+hoS8!h3Dt$~n=x@)>L zax~O=Y!>!p=eM_a6ij;|F{gUEqTA2UJFeez zW4g2Sp*Qwzafh#)+2M{3WCt_3TXX55b87H4XJV&sdQ+j}-N%;9c;=+L)4A`Lqq#Tj ztp$`jFfkO~J`^Z;9Obv}%Z!cZ`tXl-nAqOXqr{!P)oA1?)#zeFIffn9r=Ov-O@}4v zK%6D7SQ?QD3rc!H&v`+2$3sIYYirY23b7tDiUbwSGE@k8!^jM#uz))abL$xeFZvC{qH;W?7jW=y{AWSdFw6lye;Uu@7VEs zdyd_6?C9)_>O6emj-S0_&mZqNaBxR%DCF+TwjDWl>(S=V+;jZc%!u{gZEUSD4a4yV;dCxga6j zE(~wQ^oLX8h13gRd}{>TK;A;(phg$bkm^ote(G)p*hW(cTT6f8mHPhJ$>{#r=wfPr zY3cUcm-b$~aeHikw0AU-IC^Y$>cp{`Ek|v(!->Vw|K8Pm@jVxB->YV~pHAeuLe2Qh zu@h6XM>$;Vty$wXGY9=ufWByiu+lYmmE|ZLK^`=ZK#ic%II4#PvpE@2p5ggO z4%$XM#ULVf3G!4mN2FoeXiRDi*HUn(0hb?b~uBpMLDItw**j++qgpXRlkjI6Zw)#&Q8`_fM$v8elaw z>pxgQgn>x&R*G)R_(S6Y=SJgi-P~L5Gr-Q%^2Pwx)r%=PxOP}VPrGfNV63x17eO0< zFn`^8`Cj2rg1$prLhz%YA1SIaXh`)>_no-z-eN9)%TL{MWcs?-^o$Pe*g5X(G3P2{ zi`xfARp!{4n~qnv#>NH~_AV_KU3;I}@p*!{$+y!OFhj*I#$?^@lvc9q0G%K0mt@IXE@D=Sq_a9hPztCr9x~2{t+-fh+GV{;=;}^^h=(GxJgV5gt zMt^l>Sj+h+|TVv;sAAIfn;83Cj#c_W)x!b9?4dzDT7MPn(<8FzySpEc@%9cL? zT55Pp>;420@{(X6rNn#(C>be8@GDyL!Zrjks&5-nqo`mvSql`@A+i)UQr!Ue6c0qH zUq{ajXm6TsQj!t&H_+bEHHAf)Slwk7(8nQ#EL;gT*=LMK23`PrSWy%7P%Ct`Dh!2cZnV5o zlJYxYUPM=|Nxkf7!%V{BP=zZmaRpK6sCTWkSJ=yvvJtP;(9RNvT5I6*7G2^3cGVs z`T=H~_Lr@+QdRcXSdlewvmY-vN9aB1V$~CiwSk>xtU$69gDmkvB+ss>01wa|I%kkp zOf!x;{MdAB>{kTs*6drb+}AEeCjg2@nsV8pm}1y}jlMFrAAEVYWm#j?RcXdu);d05 zvNU_-FZw12w7uD=z_ny>fyJoHUo~nyfmFTuP*dXYDqdDe35Q2szs~=!g`vQ=PjG9$MNn$=)v@ zfg{08+W*eKvU&_|t2t4bOjicc_B3F3#xse&Trye9!fq>L4PuND%#io1rI{XE^~LN( z9OhI{h-MMi+Y~=qu1ry1LU_-3S=85a6W2*)swWlT03I=kKWYmBb6jIE)9%o46IZ$- z8X(U?_6GnXG}7ak&k@VxcF+;Vrf7-{D47#utyr`yYu!PbPe5)sIhuV*5Ll3^I)!x! 
z5~F3sU~!I2@`T6RzU>9x3M)g*Cjh4Z%-cX7NwNZrY1Jeg5}$Cdug3F1_x6R2yLQdb z_NT|n8>cg_YxXVO)EzY(T$tbYmM-^>sT-$jW23c42D6(JL;acI4cA`1xNt0=-&Eeb zsn0nzy65n|9g&ow&%I%OVMBZ$Ci=F~9Xm!Rwj;g-9eU*aCh=QM2pz1z_JOyhm>T5$ zqFXG@PlW5d@+707)kWq9BGA;$lRkx_Rxn2DA#@?-7r3d0Po=XsaJIr!1lb3>$L)#n z99nxJp?=P5~N&^H}`$=Fhxovuoo#aYZ~V*7gNJr*ulcIA^@|JZ#VrGVchDS2j}FvCC2KUe^#koauV9ly z(tYZg3eR70&FUqT{%5Ybs8@*}vT9OsBFY-KUwdK04nuxCL=A}nki~vnarh%CLZRs3 z7-Z4VbOgfWN|B=#A}fT7F$Mgb?&PpEHN7}+m16}*E9#^u_H?wYI@YVGBwiQpu5+7)zu^w$1E+PbPfw$HuBG51S5bscuYKC!*CYqHk<(&YNFa zOb(AE?se{QB}Rsmv-5AZdT!WZ`F^LfN4t2}>=3++=;k41u8zT0dsTb`9@XtqGeL^b zrz%2KQjiU(KvL|XUxZDPhP&Xyf_IB9R&6Cm{*(nwnKK$$iy{zS#jkKcC=>;jpb2t^ z2u4|>zuH-z70VxKtO-#PuU`9^lUs4x?%y!K%eR!QUB21lvU;`@29L(HO9u`tZJQ4G zEAhDva~p&vQrKRb-0b!^^NzyAp8Q_up&D>F7wn0CSY6A!8_LQa$Z=Nu%>_&WE^`&TnA5Vo16}su#eA1K-8Wr6G8X&@E*yDNke9D(?0Lpc~FAxVvpT z0;DR^Fk$drmZk0jSzc}ORK3T#?C>YagFz!7knfw?9GhyL zoVw?2;%~c3p>@?Q=@SoMzA(N!CW2XSx>Kuj#zRh*301SllOP5UFYSNpXW#k+DAE_O z86zy*9g4@k2Af%fJ>rD9$ZMzVqB<=EKa#7CBv&#;&EViQQG}$eIWYu9_DieMO^f-f z8t_#MK404_0y_>J{@7*n;#A8$v$%O?Zaq9xnK{-vO&AiJmHq{t z14+^OODhA)R?ifHpHN~pnggua{pg*E-LHm0k*-KSxdNO}9{U>b6zDqKNs4s4^k^k36TDD@;dBBf-Ml@t@r&e|jMuvc!Us zLM~{`8w>-+aK1m7_{q6s`~tG>cM7-EoF;Ngi9#NT zJS&nwfB-n`v^hZE+@U<$l3$4{kD!l7_>rhDQDst9VwNK|D!#u>(Y6465JO$#6;aiU zWRckHblr7D3W*S~ssyH~h{-Gn5-Zb+**KkFndL`~T@br%MBgk?9qxK@q&~#Qy5CYW znDkCdD4rW%*s=rjiUMEw*ap-wnZ|E{S+|gk8oQ8?!ApXEwjsEXsA%2TvIAk;Kt!%> zNYY{%uhY?) z9Io|gJ#uJ&>1b!do3=)Zg=p~~d{=z9cYFDgrQO@cwwT=p|Gw$z+Eig&_l1SoVqtFU z^X|B?kItG7ECp?1H2JR^-y&<@d90A{Ot_qd^#!siF6aUix;O4e4-`5ZRJRHaRvVN= zpC%jws(q;~swQSz7ZCf=3`Al@BX=8mSpQMoxrPK#3lS=?|JFF^RTp6q6i8tvDJg}U z9=$2{g#VV5cl*sTV6F%M^q}x>JA8P%__x+QFE~G-KXS>`=1o)U7Zz}G6(O$)_$LRY zZ%d5Q*Oy8wbBZYLE)r0s#x!9Ip=K9J7Y;U5HOC`SKa{V}oL!kvq;7)SA*`{ANY!sI zwk?GH#l~R_Y+?pNkfiMRn%>v%FBFGrQ*#(2golK%2cb@r+2Pr`Yc~(vLAGZcZZDig zsQtRhx?&Aw^Gp^n!(H_Gi}c5%*3V0x($vh>{dfie3+nhZNLE$-pGSja#Wexl94J1- zhw5p-;zSrb*Rj?hFBOBGA+Ycve2f*+6~s=x!(i6;bocb??7^|leWj=`r=NFjH4RK` zC|X8_Y@MD=ELto@^gi+70bz#KXZ5Fj9?OP_0c+50vS>|mtZsCg@4YvanfRP%BARje zdW;=GN2({`1Ywx{}CTDHaBl(q`YB{W<`TG3X`Zhk!>D~Rs<~-*E&u@k)2$m2;e`UAQ&t^B5 zvt6kv&uXAd8NeSYu8b)!REtd)B_7pWNE&S%!x~8Q*fJYN>W3}$og?+#*2YX1W?FAK zWA}!lnS7c2@_LN`6REL?_)XJ1S);WHSUxYgY-97=Xj9Z9jHn#3EN7~_57P@f5le$X z1xqT}!An7tuaZvN3!^>-WI{ZGB+gFg8vIih^a4-}q7qCLj)C*RO6AtHDvk<%-Mu1N zUN>kS+dd)t^>Mv+V_>k~rwMi^%C98e zTnHdusIdn_1zmGh^BlF$U+nJv`0eA}H<=Q-rLpmyyGAXJoTru@s14s1?I^@7(Qv0_ zp}#z~YiYc^KV|LEda~j2z|_=0xya`aVIK41H;DbGq$7$?OSB|NWHW@5NFgSnD+>Hu zl2QZN1kmO57;zQE4)I8Ue*sPalHAF9ptsQ79=Ypa7?B4{8Ku;bgu5UC3a4|NNTN`+ z+;v&ino7{_Gf7*|nKmywW@959Ru!)!hTJ za-VJEzP%eaP529MUOzjYC@*>pLNk#~O|2`SK6;-uAB^@dRnv2p07%vvX}5HZED)ay zfh$r$HaFKVLtELli3+wzXTDK}hnJ@=$8BCyu!!cYYABCU0$rO&xoj=!V5p{)nJX$S zl8j*-ElTmzh9>|%fSe@_Cv*9PVuP@PEb#V5XNh!Jp5R~|csm*VrUG7qTBxyp!Aw=u z0{oy<18^)kqK;CM&uX49^w>f>N}gO<96qvs_R=Hk-#U1>JU?F=F3%r6eE6PnY&cpj z#|{s^6`YZ4H;MPZZT*qUKzrCTUoI^yl=j5-+_@)Ku0-Rd*1x=M)3qBnGPm|1>dpLK zJOM5(*4KCAR@saurdvW_Bq6OB9;WVqPO zk{f3zamlox6w4x=n$v1V^Y!Fv`j;NFs}`A|j3w$QVBqOD3?UyO%gqI{1sqeL+yU02 zbj_xl+Yz{_3Bnj&(!=&|24kGZ!nqIDeg)oEYezoP2oit@j@C`!`>A?Uu1i9pgKe#wV6WbJpuN z?D6}5;P>zS`9%M~#Kf+Nr*)_H9eAt5etdDuHMO#Ux?EbC%x~(Q+1qu%VYfS=*Sj%~ zo8=lf6KSf&Liu~#ufz^np*EzUz~Jc`EF* zc^&rFb@!jR{(<|iz2O0}TfinqKEO%Ij69H?YW4|XGZLx`lc0cg2bhYJrI8te=A_`u zHzi(w2{nt5?n#@{fYdgK!g9o5L(d<;dl5iYNE)G35n>FYOD`=NrR6%Rh>wceBYuDD zLq6Xl4!eFVZFeAT^Mud$?6G5{x9712W~{*s>1{Ld`wg@VnDpi#mO&|(I7$`&;)V>^ z{%E954JbN+JtFRb3_QwKbQPCVy;OK@0ymtRpu0>(ahD*_Qr;<54byRjX)pnZYh15F z#n-cFON>kaih!g*O@m8?#i~xgHI=+!Lnc9D8?c~uPQ7Gp3|46U`Y0(v*Mkp^G)9T+ 
zmdfIF(m*uwj9MfhNx1@0-SAM^kAU_`DwCpWZ>16wKEz}qiu`qmqe#x~FWkKE_&SH( zVH5I^^>YVHgRS5Ad{^wedh4!n$gEvxwvSG3v$_VuQzBz`#5OD(nh*GVCw=b1`i1I1 zz87D-dwTDf&F4Gnb7#k=3(ly+s-xarCCqai^Bjh@xk2f63MdJ6&PmoO1t|#$P?Q5Q z8fE(*Jbe!Iv1V+0e85}+}VT6h! zx95U3HJga4%En_h1^Gl1!M>raE3mh;bjNMGeZJ%Oz3=F;2eyx9%AfHS%cFhcVlFaW z3>%9hA3S>PgAV&+r*8Y3&)m6t|MdSb$EN~U&8{DVOf5p#d!ZkPkR8@^lQ>a@32Twd zN>IaFvZlMrZmKSUYAY)PCVEI%7g;j{W-8G~2N-3{e zfLcBGfiS6bpeRDnipc6+mWm{3INU&2{SnAOd)xc}|f~g|)U1yac8QA$NMgZ^CR*y#PPaHMM*=prtTKXUiSl7z1Segl>06 z3Cx75ag?eZOrmp@)*g_Kl0y@*_A*{BU^;?<(K0pFPSQQn`GkK`PQq#S)BRYmf_(oTzo^IuD>LH3B8dezEXtd zC4rIpi!2KbBj(8HB>6C+Aj^ucE8@h=P=VHLnjY3WN{uWXnm~$eXX~+IyT&O9tkm&_ zTE62uOZ6;?|1e=&A=R!Cn~9Q z)nGOBxomx{ua#Dxgl_)SO|6H-#DUWX#qrkj@v-3qb)v9GMBMtRPpx?xvkC9g%{PB^ zVdmgL;#Vs&Q-ZgA+r?JKn(egWj(1rwL&(W&#yMp^jv7TxHy65d`-xD3QWA;{g7bZi z83J9i))Fxl2|hvc5Ewp<5CjFH^@pa&TIGo*QB($zPw1$q{-Mfzz-hBNA5g3mR+0aZ z_4)4c`LfU}lh6}h(jyw=t3npakcAlb#*nm5`l8Ycrk*Krl>pqaK2Q5Xk3?Q>tlJoe zSolJDD)EI4g1sQ>HHw5$DMtYC#KdY$;vpCulqW`~4vrOy`@q(0CaGaeEg8Ypyu4w& z=rG%W8PMn@GaL399V3mANjxz-vIctMC98(GOH#w9$Lj`1mbf#Gao}^y)?SsbQRRb& zH8Yc!X~zJ#Ascj!Vz&1}XA_9S3|<%6r)0|CrZW!uO37?r`m)Q@ec5Ei1KVTK@9)(Z zy}4*%c(~ACadegHBfd$`PmNug#Xu%HKXT8|%P$YzGcrH(Ceqr&qcy*6(#T>LeB!As zuf_SjV6G7k>FOc8XV&07)00oEd-v;C@OzeAJ&+@sp}+HY4U-vhDull}e}}5EX6hv^ zYCV!L_Vk*}76ht7VUAL@f2j46OBbJlrHc>3s6pI5Zu0rA^ZP5q*R4N3GVJqR@AH`^ zXRbRbck^i%lRMA-Ui^q2pZks6bM7_BowM5>OMtBeFj>$27?x$y$BJqXnBQ8*4;p^}LCANm2C(8?Bjm;OJq6qYO2)sB#t! zkvjSHawk79lDQ#Lj$Er)iL0`W<25~!s~Qm;U`Rn}xRH|4=%=)J%0-hJ9mALbWEB)J zEsYo_Pa#8vx>&0Y6t(lisYL)H(S9b;E*()nRe@J>FdaE-kqqHiCei`g3V~6uUpB^ZrFD( z(ZeujsvQY?_@5PY0DwS@bPoAT9icSr-zHb2ce?Q+8rL=Dz(Ct5HYq(L|I>+Fu9<+G z5VeD9e&Wz-hWbf(LcqK5lazcvHxKMjDwZ?{&%gOHcRZ-e&}c4bG}QBRcKwcs&!X#h zPfRSn`~YhvcdjnwD7Sh55p<_ZN5`Y9>vvKe37wypgzR*=2UJa@Dx$t93`bJ?Z8=l) zzE5>(FMdm`&;5li;`d+YL;Dt=@A8j4bcN67i9k`|1)bd83bEAT>})|D!6sLqV?NOz z5&p#tG}9+8whM|RfvCFEOcqHwB?q9KjM1(lgb^0ziZ#Fw$?Le_gst(wSas!AT~x32gxT+{x+-{rr`_b*| z_d~@O($_3v;-cq&C0C%EM$tzXr=N?o(=;YvfTjxNWb1|EN+jqSfQ{W-YEUP|Aq1E7 z^a2$RZtB@<({t_g4U`XphMSFbn3Z*AMm+3zXjjZpfFR8h>#xixuj9@Fzq7gBAUf_|P5ZFcu-U@YVLuoS!JZQKtAOS2}?Vd3@9FQhyL=_CG zNsZ4)76hVj2++PJK_m6gA$&Y1L;wvKa3s(scLD$AN2=e;$AR40=INqC+5Zd`FvCWB zZ`3pHb>~BqmF(n}LKxkj4`sKuyfY45LH?*?Jo2&n3AAo3b5Sz+CSoHRxhj890bWuPh6vbD$Q2(leT~_6S@tunmiZ1*+PKD&Wh#vA$kv zhCn@=dVxteik#SyCho0HPoYz#DO1A_L1o;f*~JMA4J%v$MzSEjZAg(2#kIEF+6`Ul zT#GP1CT=2d@f#=QMeOa}e!m01_W6A7)=zfq+fOLtZ*Tg*C-1%c(U0Bz0&VkFh-T~` z{^bJ7NP4q`35!E^OVaz*sw05f$W@dHGC)mL%u~IO%XTzT$qFy6c!kb}DdG}x@RtCv zqIfAR`ld|nt!n5oh+OF+$lmm<2YsFJlS+U>2O{Hzh|DQSaf(GZg%ZmE0O3D2ODXz% zi7I9f7Rj%ol%_s*3NO}mto+a!Qqg?;Z0#(G<<_r;e7<-4{aH-Nn{nWZUOdMGPtNCi z4?Q>Ygb2lF_y?L6Me|`k79l%5n2#*^LsY|r>p?Thj9J0rB1%=DRP?@2f_P=I$-mZa;QZ zyxn54nsh=RB4Eh2B5Jmae=yeEbkb%NUZ1r<%Ju z(9Zy49l=;b7;C@uF14yHX<=%D+4K^&4sk~pY#@hNYl|0JyQicqHvl8#cPRo49oIRe+q7}nVH58G6WS^^Wwz2}Dp^DTl8n^{>l0#tX#&y+6;Qk=hTnetUng=Dn_7o)7v&maSExOMd9dU8dU+hr9#Q83wU!zNC$>FX*fc8+3$3aH-LDk4 zgm*pWA$FE_F%5<6oppcq`gh5o=>=D<-T|_pmdS!ob^NctqL(}CPO^i*6g235*IVy? 
zQhFSj=B_|cv2DZ|JTo{~pQ~MJ8WQBT(aWXOt4Q^*@xkrl(!Z)k(6Cjh1ro_%EKQK} zm^0Lw>YN=+%=BCm^!p(ru{m8n;2-Qrbxfs$19pecQ%;H_smV&AwS#6$oE{h*&b2J89bskk&`M!_YqOMtCW+>iUfOTC!8Maa;U^btv^L`X%b^(ZD!2TKu_kTYxQ z#4dp(tpp?d5aB!`_DxOjvnf2Q!VjL#*8^uME|2-d3(XW`*Qq!pX%yHs%T)k%Yl#U6 z(SnCk&lRaMbW_jbr-!xQv=fqDvqQ}`W*}X5lCDll3bmpB44LJuA=PjXlnOc_u2G+$ z*piIlD93rdYFw6f6*pxqTTZm?qz@Paxpdq$;P*rDvhj(C<$b%xO?{KeEWaW?TA1-ONoRBy=5M=PlMJvhQ31?InwI2^V;ltZ zr$R<6rjM%ehiDEYGPPG+gY@cZ-W=Ru*zRhp4Gjif#vGOvr5nHu1v2!P0h&wr{8aXU zR55wP8h|VfBPGq1Ee!{(Nvg(z90&xF$Ri#E?6RUac{NI}ZipXG4~6j=9v{M-a?u0W zL5%IQ>%&`?av`Wbg3J6PHQKk6Vt^a3bo$N3f$@mN(WNuS{Ha8()`jxop7wd1reNTq zZF1V{nR8?kJzCSiNWitt7?k^Ct?$x)ZTrA|RCh&3GY0pMB7*doqWLD6A81+XCJ}@M zPAZBXQ*58!@gp+4c zjq5(L3E}BQ#nhuPFNW=U<0n6ffZ>0`gGbW~KHrTu`b@?J@srhu7=!oxEApf~0UYjL zrQ-@IS2BsS#b$(Cu0)_imunF;;$!ScX}Jm>y5lT)Uv*R&NZ^LmkQXD(Sm-=Qa`K4e zFbAEQU@lwTz6hPIptYIEq|H+~PZS72#>TQnZ=#Z@*dG+^pw(2$StQ$bnEHwWwOxpZ zsGFpv?Y(MrfO9$lTF(bj~8pB4!}LB(fhZLMk{WI_{*OgwMIC#b8;*!sI%c0is58B-XoguICRpeFHq&!$LpH`x)jS6Hxf%U8R zE2R;kU0h`QOD^DvD;<(KfenHdd`y4|F~~u12GHpSqFWTwB1uy824o^%`Ia9eYoG z^_On;<4ri;5UR#z0>s!!x~8`iz~$I4d-&}T-bPJAV;A1*c@lL9jQ|?y>=S1QcO=wN z_41h=-!%lwDMo<=OurSP43==iIf7-d_vJaQLT70t@wT!66ik8U7FtbIqp<#98N!0X zNoiAsyRE9!;utK|qDB2et)ZmumzBiz7GYM|zK5qdY1uh-5pYXg)G_3X(nAjy!f;rY`w|7EPLQ8$ag$?|%u}nS z5e0j)o@m!|w<-F7=Lz!M)(|A#I3)RHRTHq?@t^3A+Z27^4_LZ~B5@>|`|SCl0_z2r zUH3=oL)_ce48gvne|)Zz@cA5wtZyX^;j$Zb7poCWG3NLfYG1z!oiRZggHO;QOkV&~ zsvH48NLQpzqZ{*zqje=y;D;F!@JT*6>9llUV<-+8Q3Yx#*za_LQdq+q0dW`*GWEzA z;bN>Kq&C*!tq~X)j~lHvH>dePNkC*9-86?NUW2%KfeGLlR8OZPHjb(l5hOd2l}j1| zv}~Zr%G4}iM2*kw(J4jx(8XUV$a{a_>H|<=y5e6$deJzUdMSifc_4WNT(+2?UeQxRIn|6t9WO4oY$Mm!H z`oj7Hi21eSqBGJo`uLrdns5eir=`Xm)FiA2A`Wm)KLyiQlBIq>1`NP&Wz!%(+=PcC z6Z{Zb6XwUVwI6-;M14!Xo;}-`!Fzj@) zIH$H&%PEf!aG}oUc<-%?Z@RBKTAG}ntZtslcjtRkv^4i*ibKip*eKQp)pO+z$YK!u zhCe}$Imw~{(~R~u3_=i-p*Nxh0)h781a31UjCvQ>dVxqcBV3(0f|AtST*i2o>^Uq% z-2zer_b(DiXjGau){H1(yj(L9Qh|^g=vRdNG4i=s5KIUw#>?3;tE+{CUJU ziipoX%(_hByOrP#AW}a_qb8nezXCw9or^?6*)T}R6T*lq{vc=`a6qFf4BA(KO)b|dMZ9KVsw&!8%Mc_u7jzCJEzA0zIMta$b%^Co zomQ!PMx9IP3@8#S>S9PnwQMNV3Y&i|gF(1r;rMo=Ihb}?^KE!D5ggcL)OI_qVVirP z9nOBmdx=eFNF)<4kSu)Dv)!UKx&!Xm^E7+JdD#U0WR%zA#GHf=@Ezp#(EJc1EJVjA zNE-~(HQ-S;v)rvZ3y)_-Qy!(+($EwwsEn-psljeJ|@yrBh)Q537p% zgq9hNMfxC|aLOfHXZgIYM`J6G_R{n?*;xWSoW~v|3TZuKS&oO2RH8*{Xmr?+)d|dK zne))HNIO9^(B~Okw|)aY(Fjp70_szW2okm@Z#61olLLR6E$=SJB=Q*AD6?YWsHIxe z7b|5OtRAu`OGtzrj>krZa zvxk`!J#=EY88~$lT@8T{guPJ#hkBMYBRZ1g2+6R{T(8hk4H<6>GY(@#m>U!Le1(f@ za0P3|lwwRcBZvLqy2}PNhIDRAKZ$T=W42GL z#hejmjj-kb*sV=3$uNa4O~jUvOrTmR*onZTb;DB)5-viFo2HQw5o*)W46q8LuAEfz zj|3UuD<@rBK)I=V8S)?{u41NK_ok{iu!?K<*-HVFro7(2SQ=kMym4r0aLe^-jQ(MS zb>4-*!x~AU&hpZ{1l^N|ei{9K{x)eU`f&KJa>QzbO8cIos~dK@nEOTN&^s zN8#hY{B!mMD48~icp%)g1Q-rUkVc5%a6o;#I|rnUO0YOOfsOoUz=#%6r=(`VFgvYz%@{-|zrXrhaj5V6CVK*3cnjEj9dz0Ig_s?lkAuq3TU1*d;J9lnthF zm29hrT}Ei4Ni8Ca?b9zKY^AEq2V+QxU0So&I5na<;rIEj^7%q{jg*TccZK+g|Ac0^ zX4G1``emQnpwqP8cvZpG&;90^XViHOy)2||=rrPJZ-xvQV6)aJqn2c#iS48?xY-vV z42WtA)-k;>FPa)WZszb~#b9D&q4hHcO}u8Yj!2Jdb^3IFMp+qg(KBAA9FgI?XlK@#1p6K`6Qf0G zBukK4GuFyXbV92OAD|T?trqM6v;4oJDFeYHU3t-_O^DE>LOJgJGoP>AW++=_W+<08 z@84X$dh5gL)-B7fHs`qGkNC;`WsAeLt~kB-e%iqmM1!$*W9MI!1K7U_sbq>O?fOaOlH=y$q#ibhVWp3bHsAZZ23-su$M`G9Vr#RxTVmSRBksjWOjVMt%ly zSz;)gjIs!DFieD*N6csQkRwy-JA9Sc{)o@_Eg)Bq?c6Yh>_i^p{Q1|JXA-08glPGeQQ87iJiw@e zyKMy(X9Y#}Kg@gtvI)awb;Xll1XUC@3foO1F(oMG!_ZZfx_(g7g>Yik-C;6lvFd9! 
z-$go_$`-0z6CrNg=k@*wnG<{c{vUb0`&$3erE@xMLbP`pt(LI2zN+;B%0nAGuJ?m_*nuQAL=rKY&;G$+%uZ2gb<{GRWPlYkW)zu@3kBa`a18beP z*~L4&-k~8cFru|XyYC3t(@NwH_T&W~@d(?O)bsNVj&)Fh7+Fd~@8sngVjU&v!&0-8 zCV;qB21sumjTX|F0Klbb&Y(!cd|GyN8px|bgcIFffSgewG6c3*(I6wA^oY9ll(ztt z!d%cIaa}vni2*JX9vMg_M=jJxC_r!nk*cJI0}jiwR+Hs)=d@*60;UapK-rrrh(>No z8?<5Lvt7qd!PsDHwz0IoB0AsXWdk$L^wxpNZE>A37z@Vy<*LIznwVo8?e9lUS6+U3 z{U_gCDlHD=HuY$c4HEUZO9N%>iksUQT8GLhuVF3+q%v$xu!xc51hQtS0RmE8TWp$W zufs6s3Sx-+sq7+3sH5&m+_-^AX)(^aW42_Gj203h&?f0rr1QFl-=lyPo5;1mgfxmd z996w0TezFFBIP1AQ3IT(stndqty^QXxcuQ*I!{$$FIbJmAr0H*WWOMN16VxU>D8o? zoTNM%QVG@?#!^1)sUl^R9ADm0wWI^ikwrz#jlx5#O%T$aX&sg7>%;zWbtpnjk83fN3g0 zU)F#t998xm)6?O}^)Oo917}2ucOWp-3^Mo~1R_AxNNOcZ?sG5W1ZX$L9vT%B!2D5I zS;j*hrViQ|#1K3*=SoYba+;qT5gwDx=xU=<` zq6)?Ks}B3KDZ9_zU)jL>NErjgz4FR)>?0@dE6PLAFvofvx+Mzawc6$okccP{0w}}K zF)KNPj!=Y-JWll-VvLdl$dbix4nL{ZutJYBj5vd_p>#gTYM|V5(?{qX)FuLq4Ihrh zDN<~9F{D5^EUBz2as6sl93zZb=_${U;;Jdg6ngeSu__pfJxt!c3gsb+u{Qn%G6DM` z#xtZzw z)+fn>dvYsva_GyYk8Rqya}#vWA8?N;bBY+oN_wW^Hc;fy#lb2UV9}LW*o*`!%4@58 zVOoNF99KI|G3T22#LYPC>UadxfMQMr`j24xMidiB1^o%SCTx_r$5vtl5yQAKNlBeY>gC{BxnamQJJf0}tUUXh5SpP*fNE`4cgl`Sv|KtAy^0C~^wac7eqyg@NtFRVS?TVIQ?4Z?g-Dp| zK{n%6pBR+{C@BQ47{M$N=J`e7d^AIhxP+*4Tmf^7294H699h9qql-*Er($Zab*SLy zQjwz|Sdo}S$Rv(+ls7V?aj$rz*Y5{kW#=W6gPR6{A&PKza?m|{S?bQ6fsuuwxai$< zQXD4P+IqDkSWJxv-L=aqVf~g#k8jJBhQ#E_1(CXr$KCcz{ggFGxiK6>L$6iNkujJ& zZXn4_t_U z{Xpssuh%JmHSG5v^LoSIos+xUhjiO1ptVUsFB0B5L9qcsxX9RtUEdVBVY{U?emf=Tb8_LgUIILAE)P zqlY7K5QsuewkB~qQw*O_@SUL%Rw90^Y6ccf!-HS}yy>;#-u9r$Qg>8gkFTN=>Q1@H zltYZ>FRNbfgNixx*GTa>`l-=K%x&&8S#2gqKLY6xgCYB(`NgN$l7qEscf34yq#W@~ z>bpAzY!1J@^=W=<+K|-~j)u8}{)K+dVg1`Zo7baaO-#C4Wo{zjrD-Q|2dA9m*hmpC zG`17S`=+L1YPt)O8lY_yq?%Dg0XwjL0$DL56nE`Kt_uQLBmt#nf7bDJ`O)~MJ|6Pgf5d&Ho5rP@b4AUSo2pDss26)dFrp8nA z2#V58_P_c~#jh&^L*XzSC@4=n_48M6rMwuRi;7lwWb0mt9Ubyup6+a=JeSZxA0?;|=1?HP7$~@(n-VZE(5M<%;UIi!<~* zb>RgH`3Xo^fean^k+Q`TjwUGD3PXoHJiAFGwMkUoSUtW_(#BO)I^E@kmR$Gb>NKNK zV98Z2x#YpFZlgIdm`WL4C|?c+!s3F%m`V*M%tk|}KkN;LgMQNupb73AndtW8nK%4Z zeUH^WK0aP5)cT2y5E&aR;;Gx*t1Au;4qjEJ>||f!s{9}jS^?hchveN@mqlcJG*PpH z1*|y*No#VSXZZ5ochHbP(!$>e{!ZgG~8w3(O;rb*6bLUGGYiMl3hpEb7Q!1b;>PUT(G0CvCF*Qd* zYR0qzq-@Ml#WCFw110rI(qxJ{?u0tNn0Lje|LR6=r_o%@W=AgYZ==Ieudz!AU9Uei znT+SN*8ma&BP-SC$Z52ONl$K6zc4X#u;B1##>)Q7XRw!TLUXC?a>pu}5wF?ZH<~I0 z9X*Z*X_tSbG}U9&S|_dAm`7|OoQ!B6^(1BpvrHr-(2AkuRjYuy2F_xtXU%MhgAzj)L>#Y<;^8P4 z0Op(kA`&~ylSDk(B^1?3pVe}rAmJe&=G-JJTM>!=Q1Y_NGm-vi{~mX4 z^Ug{(chyw6SP>K5ZeP$+ecl@ZeE^N@C`m~*C1!@-QGNO4>Yt3v+;wT-#Kh^PgL|I1 zZ^H&rC{AviL}BQikC$(bQ8v>-(m9&d9PDn%3!ibLs!1sW4oVi{N!k-MIA~8b^M;&2 zU{Fl%3pg-MT@q^S6B-^yy{{E8fo)VflB)wtx068O7{H%EY*L{N+*k>Cu`*}!!tl>8CZrYvK0aQmsd z5jrNf)8MEC1>$+!rKb#|5}B}&hi*VMj72uG@Z=Cdk{Uxm$yV&XzE#>tuSRiXiJV53 z;u__KCrbV~ue~70myZzfMVq%*E9?$aU!OOUb?Tzgq0}J$-5#G!d>O2v`)kjFZ z)JJ-VO%sl&aN~wH9f2?{H8qfYT!;j3r!GpBal^$WbhZbPaD+xMIcU{LRS6&p>gk9I zmqTio21PaquvXj<@o$JQmVARrPk8XrO@qT5-*@!8VAt)6?-TExU4Qr>w2)1|bm#c} z$4~Cx{pFv$0@Ua5I*er-#-hbosK>}ns`p8Xm_2yJC7NDdb1%%L6-`KQf=1?Hq1Olu zD`9@xC5BQ#)av(3i**Emv7y2g>Xbt2lnnieCWVK zS3MX^@ACO(3U@E{&fULZZf+6hPn%1&NqXDGT(X(9Tr!14HcuON^B=OvY8Y8fNwNTm zu(1V$CFB1kBs;vbe^cbrK-j* zmS|uTPQ(roVNNPJ&*~{nCL|OQB=XA!4>1B%`G^q44Ye1_*-lEeN@P@se z-92WvC+c9vS*(l>XlD&Nd+h?_ta7WN%{VJ8y20U~~2jE@R zQDBr*2Kf?Y4sMkxhK0x$8QLU9lx|kWnY_~NG$#oBLXreIQEqfVLM(Bm!`2rgh90r@ zkca{v0J$GkgkW73mD0+!l-Q3_i+FJ5U>7sYAZ2i}CeiJH*p46h#PDve|ESq1sNR2KJ&qssYpLl;?{@*GH%R8X=2=xJQjxL;EhL-e56EE zA1lO?s6qipuY+gV#f&I~2pev0`+K!{*RiqI1z0^9bhSH1Em z8L{<2*sz5sv4?+Gmtn*H=-)r6*tU7Ro_i7@a~-|c3v#vuW`9Snz~-pfvaDTIl&NYuZ z-#{*jMJ34hkbpuznV7S 
z$!VW2cWletm8k`J>hzIZV(Y|zkc;%&>`32W&w2I*KQX;;q2%#}VT05B1R#@Rat)oU z-y<~#Xcr@iop&))dP32GDz>mvTSe68^7X=5PQpV59$7IIVeaRUYd_Kc1I(PDUESD# zcLPYjThLM-$iVp%O32NcxREOl0&R`hz2EtxbcFNU9?z2>f7O|P6zW{h&a2ikHH`YzTTE?>IN9UM=Ug3fl5M1I$mQ;R3Z zB4o{}j z+pf7Zcl5QR%lBPddE;xEJ2u?<&=;S zs6s-Jc;lyh~ zec;n^pD3oWTA90H1-Ysr*{J#sZqcr6G*k)3UZWxsI3Vd%TziW)JEq*ZcFM^a#q^Xx z-cV=%HJ<%K-#|K6xM&Sp-x-Rxwu{deT7B)skfn!lf1l;G(6jV*Ul21Lq!(GYB)u93 zF7B`;E#k8;rqhoXTNMUQxGSbWIXn*u>OR zhC~<26hvm?r)b$YyzpJs3!j_12FZ%NDtxB%45B~&X=>|iNrApZ_h_rJ3CRu~Y zJ2%fuHb4Tl^QzCex z^;5dPPvd%ZT+asYfL9LCfW8Nk(18spnec!X3?hNq!v&KSSB3sBS@2v;oj8#`f&cJB zES;{MZk-m7;nz+0|A(#D@RRncke=6893B z!cgQiqknW|buPufoooE3*ZeBaAzc$^j$qG>D|<*3^V5)Ux-f}0ht?L=RFnURHGkjx z1|Iz6KON;I|4i#X@#b%w!~K0nx>>wOu1TGe6W8jZ-AXNR4A`C8)y2k8<##>xl-T!) zspp@s@j8A5>-divhot4d(@|#~E+hf21HyFpdYi=-O+t0N@QdOX(;COQkF;KR?pe(p zw03{=x61t!4?_7{x^+jjqCxK*>QfZ=e3ta zNiR@gCaM=Do9xV-mxQMOl~?+O2I&8b8hdN5wNCtb>k9Fdc%pTUgcWZ8Z3r=dG~xmk ziUd&SU;_Q2v?=eM{SKs5pj)V|{{np#9TbKC3gg>p{qR7>gSl91kUOOcn zYiE6XSJCPcwDkuYOH-*Pf1XbN;unuUPFw25*1t{t_p?}^woW!+j(KlFheCaVBtfM@ zaGoQg%T|=4qhLL$+RjoE&Ga|Z_;>WzFSmlY<|kj0acn^5FI>}v7#V93<*E%Ww6l8M zhTaIRLiU-WQrcH`xGW_NX;Ss->2H2h{en-tBpztxUcUJylB>2Jv>-yndk5KoCMDw! zTUy;PP=Si-hZcn9$1P8txcX|SeEK2Ye!6z&U99-;JYBo%PUyh*(%cH^Wz6k0m|GLr zBYgKA+@Bd6glt&+Lg~2_o%G4NQRA3M*|tkW`JEUHnydWP{R=-5iHJ-6ET{u|u;dvWipdQP<~I@`Lc zeOih4(n1OzhYR{<$Rr^I9{UCZc!xCID+y{k`%tj4#g`y#-s6SeJU^c+p zfB|U6p0WZCY-m7WZFK5bYY3VFS_7a+I5KEjuny?(iIuRqbeoTfPLa-NH(n|0(t_s1 zYTaMEQ^-*FH<*Xamc6w%3yrv262gsfO<`Ob$%T9CXiGZ_5Huw;R40@oraBGr{{MXq zW#llufxPM)u^&-O)`k&RVS-j%+R8_95WSBMqj&RR)T%#Pd;a+;$eBheKsP2aCOwc^ z6WcI2Z*jJCkPSmSgAd-eP*pTGXkVKK=_gu?79)M(h4k~!f9~1((N7DrFr4|@)~l^w ziyyR(KJyLn;b+$FkyW|sItxZ}``AhVZE^B@(jf4{n$%ihRt%fvY+n4-~NEdNp>2nc4!0a9WvM(q7%|0qlF?JaF!_V zQkO&2VCSYElE44P)39old7upE`B=^r<5c;&goY!F3Nld`|Q5gFLn@)?om)!RkE8d`8mt z#-?u1Q`DjU{wPvzwEwMSd;e}3u{G>(sB94l;#M8O*X$Rp}7NH7u=dT)DC%EUshHJDkOSV_N- ze&MK`ZM`fXX_*94DrjE_znGGL@?Y!=c-XuzL_mnAdAu)6-xt@&KgT={VLdR3C^`>I zgY=v7YuSKi#mvjXja`^uL?;zfO-Vay>o{>u%;%4ih7-Zyz(5cR?%viN+9ZG1-O_8Q zJ^nv&N|Eh{Q%Yj*nU5$9>_8^uGdtIpK^{_krvUw31^ez;bC zV3V%O57J&5U90j#_HGICGqox|Bs^yn`60}Le6B9O7cJilrks8FF1i=aT)Ew}K%is5PCIvvp|Iz((le%(*UN)o7WQLYZ?^<^Z>cYlF)L z31@u>O-KnMRsJGzgUy*v+ud$EzE2#14lG++UlZfn)L=?0ZotjsyES~?J%jJ23}GYW zU%>vMeFWi0P8Iv$2HHv;>Qp2xF5UqqVX2DyzWn;@>GbP=m-({n`%3G+@BdzN$M63y zd+WZhDC7DgA4kWjstuX!epUPl3}T0(WPy)E&WY?-ypz^zI6!cIbpF9TI&qia9#gS0 z>gPb%keB;SMQ#Ryo0d=N-i~Mg~3ZXP(%n^9Z|zY#2sSdhKT*8F~W8;TTuLn znvvBVR&GOt>0LW&Zq5fHN71SFy2PV3GqGJUomGJ8;%O20sFrl=*s9Bp^?wxW|BqOM zZblz~1c6XX@*@*=rT{=}m8V?`&gE)#ky2dA> z!f9a!R* z8iua`qlkekA}Of0BEF^B*<&IYRB^|YZ%UH&6cU!z?>lws@l)z|ioe6&`VjsTMBBj} zZGtW}Vq7-qutfdaY)WlXaL#281;_&X1M8MSC;%mu8l7;1+P*t&B(Q5Q7@ca7DvC;) zG0zOYfLEfR)6k2s$kBj{lrmzhCB#v%?=QP1y=P;3VQ>1nLr3K+bss&s?_K&=UN*dY z*L9ERaer;wt_M+M)_u}hYAHgxPx-anjcyqzA*dcIdZbq?O3$WZ+~if~#LI1+CqJX; zymJou7pl(tSFG(PFbAu8M)h}TVK12FWUs(Z5Qg>=Vy6q~zu|9J@VBcv1~wRsK(fJ5 zx7f%rC?bS4dpLuL(ymy?7~#{P93~dd#+QZdiTO}$uAyH-NJzt^(@t?jG@(;k_pyFy zeO*jg%WhKD&@8*9`(z=%1=jKZ#0O!Az^l~UF@3IA+}?V-{HxYGaQ|D+{}8!|4?vst&{*3yuZ*~T+dS?M zDtEj(b|E>``|Ux5k>Ct%i6se$&RVHyXk^|#x`%cI;c#ujxHYI0NCUFyU3d28IY7m?aG$3%Y7ln19L!$xoGE;s?U?oaku1 z@m%YdtzSNO?$@$`?2`-k4g0A_v7eyA=qRNv215b%lYZ3{!?wrQ(5+)zsrP(J{--y- z&NofBybksgVhpPNWHxoZP2MYi_DKoy(_T}LuXJkp^g4dm-KKGC)TasLE0v=odEt9T_Nx%RR7t-_Z>;J2_zvq*g4^E{govEFf_19jxI9S+U+;sW%@EltI{HYJOe*PazpZN67 zbHex0kBM-rweif~E++DCnqRnm%bmBL`pD>gd-uF=&y8=p0Xw&?LuIzjt%!2?z=LP- z8|N05qX8N|=QxtJW(FOV?IgnHd)nu4U05EM^u}I`O_9~O%Yvwps=(vbB*ZQ+ECGzC zk`xktw# zh5LlHj{h`v=$3PxY;m2Df6vB?ES-i<`!ndYHCTnj7B(R@gjIBs@x>rur$vLTYz_RI 
z6TkQc(EFoeGfZ43_V9bfV}w`Gc)MUbyQF(m&aJEzLxBZG8C+x*fh0@`ei|FUCnpng)(X8`O9-e8>wvce8`Df;vFWZ+3Di+D=rZrRJ3=pi3M*RQY0* z2n8{s5L2{=!#(3Cq!h1^t-gJGdgsm;_Uxgb@JWxR#Yb8%i|5jpUYc(GnPR(3FN#g# zdhtJC1LP#QMJpW|D!i!vVxs_ulU6yLlwYbfW@EF4vi~EA7Zbw?{1fl%4kVL-?m#-t zd-6r;w76dOn4?`E$Ma#cjNc=ezxPVt6wjdRkpY!ao81(@*OcnoJT-RLl3fOMrsrnB zc*ft>=w`KGU@}ZeTg4WPR<#w^OK*w11^@6v?!eE!x8Prd&V~!Jv>Mw0HiI*aRvT6l zuz~hbg(O~_fTY3L@P`bEkBJ*0HE%pEzNOggwBBfF18(?H4KBMgSp-4 z?*cNf8(T2|y#gc#Z)y-a=eVx~W24c9T7Lp9?dzVi-~y@1mXu;irn%(D#w8vH*}4?W zf+8dYjTna7B9WhvfQoP#4aWQ{pO z59LJL7HKsuwoSxVeonT2`)swq&J3cTF{ifC$dM{fBiC`pgpO$8u&~gD348!lJZQ?y zTOkWepg|mY2dEuJMSMs_=}1KpsL*Z2NMU9YdlJKJglUW(#i}AMWG_7Ex=xAC0+drw zPmEXrvplFgfgryI4Z%^rZz_H1wbx#nK6w502N(A4xoOkxJx4#I`{;?2f3ADN* zzV0tjp0t%VynAWiJ6Z+ttp1(NcR^?A&ku92jHnEN;M^Y6gNU)r&g1of_?($u@8Z|j z^XtS756J&$j>vbtegdD4lbGQ;PVfT)@>zQQmp9?{S@reI`D2vdiR;{T{`GTLD}TYC z--g!@(Cd{x`HgfBtIj z%|-YB%DLmpU-0J-;`7S=YtJ20{(|rS5#{r^&sWa9W6kFuRz8pJJNk3?sNVo3d44B- z9w%}|`hdjEkLp1#jfx7tN|Oz$x|T0u*ASbosfoc}hhReBbnL=$ry3NkGoPjGrlz?- z_Pm)?sgmdf{@R=@3t(3Y@T$;$26}*aC=7iHr5Nm9#zLACr=Zw<1MQxmo zUZ+YnX*)duW!qrYI#l8*v>+w;b1i_O$`3jQtTs>1{v>KbM^HoZvU^V*oEnIFCyk=V zn%q{o@4@+#H#p2ao=cE_iG+mrA0IyUzK)zc1`@DEXAqyWk1v#3p*t_ZD#%!ytu%M= z62>TN9-1xFpsGZ$jA1F^K{p;Koua@YQ#u(T4-=Lv?Z&3ZOr!OndXvu z&r(`9C2%7rr)}Z3(^dRr-b;R}(~V#{;#rL}`k#`4))b5`P^t@>!qBI*S?IP2d?bYA z>n7U$k=xb?f{F%)S!0MGhmMiPAi`^p>tgYAX+)u}sCgWfi)ImldCum+d?VJOboJ($ zFKLOhth|{Jzd_-I!{r5KQg!Nd>$~@!I<@!udAxVm;;~~J7mpp451jiuyJPQWd!{|P zH0^WZ^cU_q_L9SX_V}jF$B%E^a-8`o=Vw$|KMvX5yGC}0RarkSxz24-Wmlv0t$BQkb_VmiD zeZe)9*GKi|)oe)G6Qw?dhKj<%%6ptWj#3Cz_Q(FpjoMD5UTe4!y;HOkVicJulS<(W z@faTe@Bw1#A9%^l{rD+W62gL(^ zzGBXLCb2Hu9)jp?H8a&X$kM*4QRhG84$=qyAL`yb&atY@AHL@%m8FtYWlvQlwJ)hX zRY~pp+Erb>R9Ela^g;vD(%oQ#fGmx)4obU>Ah_TTq9`hwn+nR{)}Xk|ID>2BKF;WX z%YcBWfK=}LeV%iZO1cY&-~8V9kCzW!xq0r%x#ynqoM$`FLd8v}EM5(hpc(|naHdgs z8d5B7JJy_6vI;qal64&1jj~siSQO!cl>dw336Wo6xv7k5wNYZ7)|l3Wb)od4190=# zOb7y^{4Ne;SfofxWKl~tV_hm~D}@KOh8lcQ`Vm1MuQe=w$ zZ~on7QJ=pEYhrWo-<4-yZdKM}RhSp9uFKW`lv@|(4QKB!{D9YS0Dg$XYmb2E(f*p3 zyjFjWu^&%jKN7dfwg*q*_9xN)Re9~Vq5Tu!Z6}_(-mRiyZFW2}fL}&%N<<)jYrq*# zBrku8csZn1e(oi}pGx&_%$q~X9k{bT_g-=Jec-8=JBd0V$(I3NhrBhF4F3h-3kJLw z{X}K|D)o=&$@gOTW5k0UR|38v+g{&;_ADzvCfF-~!Sl8qaOSo8%E}J;3&3Un|BB}x zQHe7b_Wz8#UOA$_q`bp;fRoZz8={Ted{l_&1}!p<&IWn^3{s^pH^&GPQ^ww+v<(QU zC1yMYZZgFN>ZuVlvrN%LEc0kE<<|PJd8*<>zfx^|v=!UHu<;k!^iq8k*E`Wifx7T0 zMZA*1xu>u~wZ$WyvbF-j7 zf$Zq9EC(XJq>a~NpE5&rjd`KiM;(qaN1udu)he79f|gr>z9isct4(mZg45!aQp>)pMDwD z5k-4~)6Uz79c+eVtB5C-13d zX|0z+$k<~>3x{y&FUNs+9l2K5G?D^+4hvy%EL*))@^yGs*Ee!(m6XIt*rhA+kv!&c zG@EqI$jFv?Hc(mBTvhdq{3zfanIB{py*|m(y*{lf;OD0DgXb1=_>!pzwCig9Kb1FQ zZnYfE98q5&edgQO0m>$RtOe&%FqydhkCmHn2Tmz~x9OL03@_RrH@x8>T81D6cF4)DW(%Ullo^j8Bnq+X=3zPD&Rm)hS+ zJVHGm7LZ>7VwPTwS4g$oBss**~>s{V?1Av@xC|XiwZJ z+kV;@&k?jIxNHxSqb^Ck@$1B+5q!$emGIZnJb(Q;z)1t4aZ30@#IvAPp56XDhCiR* z9(vNSeZqj>c{_zCB5f}p=%Kbl2{dXJBB_2n5GM>Bg0^myqJg8qv zx#EaNLk74^{sR55L z={Hpt63)!;&RT*K1xt`y1NFxwD=JqRE0B-3Zk#DJfky5ycoxNIHj{pYYyfFbBt)rG zptlMtb6=6m8c!L>);~e70d4;n zNja3&h2|)zuLF&}oZz2(7VvEbT(*CdXz#C&ozwmh!Jo<$wnsgAZtte{pxxX*<=?GT z_fmW3Ujg5d?H{yylffh0Osnw=jAuLazDn_F*1w!9$K@|*{M`NyjPr8$ZnuhK2%~>$ zuUZN2{3hTqH7b)G)