diff --git a/responsibleai_text/responsibleai_text/utils/feature_extractors.py b/responsibleai_text/responsibleai_text/utils/feature_extractors.py
index f66183db26..640415c625 100644
--- a/responsibleai_text/responsibleai_text/utils/feature_extractors.py
+++ b/responsibleai_text/responsibleai_text/utils/feature_extractors.py
@@ -134,7 +134,7 @@ def append_metadata_values(start_meta_index, text_dataset, i,
     for j in range(start_meta_index, text_dataset.shape[1]):
         if has_dropped_features and column_names[j] in dropped_features:
             continue
-        extracted_features.append(text_dataset.iloc[i][j])
+        extracted_features.append(text_dataset.iloc[i, j])
     return extracted_features
 
 
diff --git a/responsibleai_vision/responsibleai_vision/utils/feature_extractors.py b/responsibleai_vision/responsibleai_vision/utils/feature_extractors.py
index da414e4c9c..d5a4aed352 100644
--- a/responsibleai_vision/responsibleai_vision/utils/feature_extractors.py
+++ b/responsibleai_vision/responsibleai_vision/utils/feature_extractors.py
@@ -45,7 +45,7 @@ def extract_features(image_dataset: pd.DataFrame,
             continue
         feature_names.append(column_names[j])
     for i in tqdm(range(image_dataset.shape[0])):
-        image = image_dataset.iloc[i][0]
+        image = image_dataset.iloc[i, 0]
         if isinstance(image, str):
            image = get_image_from_path(image, image_mode)
         mean_pixel_value = image.mean()
@@ -54,6 +54,6 @@ def extract_features(image_dataset: pd.DataFrame,
         for j in range(start_meta_index, image_dataset.shape[1]):
             if has_dropped_features and column_names[j] in dropped_features:
                 continue
-            row_feature_values.append(image_dataset.iloc[i][j])
+            row_feature_values.append(image_dataset.iloc[i, j])
         results.append(row_feature_values)
     return results, feature_names
diff --git a/responsibleai_vision/tests/test_rai_vision_insights.py b/responsibleai_vision/tests/test_rai_vision_insights.py
index ce0d8d5015..31617c85ce 100644
--- a/responsibleai_vision/tests/test_rai_vision_insights.py
+++ b/responsibleai_vision/tests/test_rai_vision_insights.py
@@ -223,7 +223,11 @@ def test_rai_insights_object_detection_fridge_image_transforms(self,
         task_type = ModelTask.OBJECT_DETECTION
         class_names = np.array(['can', 'carton', 'milk_bottle',
                                 'water_bottle'])
-        dropped_features = [i for i in range(0, 10)]
+        dropped_cols_num = [i for i in range(0, 10)]
+        dropped_features = ["{}".format(i) for i in dropped_cols_num]
+        # rename column names to strings since RAI validation fails otherwise
+        data = data.rename(columns={i: j for i, j in zip(
+            dropped_cols_num, dropped_features)}).reset_index(drop=True)
         run_rai_insights(model, data[:3], ImageColumns.LABEL,
                          task_type, class_names,
                          dropped_features=dropped_features)
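
Note on the .iloc change: frame.iloc[i][j] is chained indexing. It first materializes row i as an intermediate Series, then indexes that Series with the integer j; when the column labels are not integers, pandas treats the integer key as a positional fallback, which is deprecated (FutureWarning in pandas 2.x, slated for removal), and the intermediate Series is rebuilt on every iteration. frame.iloc[i, j] is a single, unambiguous positional lookup. A minimal sketch of the difference, using a hypothetical frame that is not from this PR (under pandas 2.x semantics):

    import pandas as pd

    # String column labels, mirroring the renamed test dataset in the diff above.
    df = pd.DataFrame({'image_path': ['a.png', 'b.png'], '0': [1.0, 2.0]})

    chained = df.iloc[0][1]  # row Series first, then deprecated positional fallback
    direct = df.iloc[0, 1]   # one positional lookup: row 0, column 1

    assert chained == direct

The test change follows the same theme: the integer column names 0..9 are renamed to their string forms ("0".."9") before being passed as dropped_features, since the RAI input validation expects string feature names.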