diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py index 2747c83467d..9679a899f6c 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/inference_task.py @@ -19,6 +19,7 @@ import shutil import tempfile import warnings +from contextlib import nullcontext from typing import Optional import numpy as np @@ -47,17 +48,14 @@ check_input_parameters_type, ) - from mmseg.apis import export_model -from segmentation_tasks.apis.segmentation.config_utils import (patch_config, - prepare_for_testing, - set_hyperparams) -from segmentation_tasks.apis.segmentation.configuration import OTESegmentationConfig -from segmentation_tasks.apis.segmentation.ote_utils import InferenceProgressCallback, get_activation_map +from mmseg.core.hooks.auxiliary_hooks import FeatureVectorHook, SaliencyMapHook from mmseg.datasets import build_dataloader, build_dataset from mmseg.models import build_segmentor from mmseg.parallel import MMDataCPU - +from segmentation_tasks.apis.segmentation.config_utils import (patch_config, prepare_for_testing, set_hyperparams) +from segmentation_tasks.apis.segmentation.configuration import OTESegmentationConfig +from segmentation_tasks.apis.segmentation.ote_utils import InferenceProgressCallback, get_activation_map logger = logging.getLogger(__name__) @@ -198,46 +196,37 @@ def hook(module, input, output): pre_hook_handle = self._model.register_forward_pre_hook(pre_hook) hook_handle = self._model.register_forward_hook(hook) - self._infer_segmentor(self._model, self._config, dataset, - save_mask_visualization=not is_evaluation) - + prediction_results = self._infer_segmentor(self._model, self._config, dataset, dump_features=True, + dump_saliency_map=not is_evaluation) + self._add_predictions_to_dataset(prediction_results, dataset) pre_hook_handle.remove() hook_handle.remove() return dataset - def _add_predictions_to_dataset_item(self, prediction, feature_vector, dataset_item, save_mask_visualization): - soft_prediction = np.transpose(prediction, axes=(1, 2, 0)) - hard_prediction = create_hard_prediction_from_soft_prediction( - soft_prediction=soft_prediction, - soft_threshold=self._hyperparams.postprocessing.soft_threshold, - blur_strength=self._hyperparams.postprocessing.blur_strength, - ) - annotations = create_annotation_from_segmentation_map( - hard_prediction=hard_prediction, - soft_prediction=soft_prediction, - label_map=self._label_dictionary, - ) - dataset_item.append_annotations(annotations=annotations) - - if feature_vector is not None: - active_score = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) - dataset_item.append_metadata_item(active_score, model=self._task_environment.model) - - if save_mask_visualization: - for label_index, label in self._label_dictionary.items(): - if label_index == 0: - continue - - if len(soft_prediction.shape) == 3: - current_label_soft_prediction = soft_prediction[:, :, label_index] - else: - current_label_soft_prediction = soft_prediction - - class_act_map = get_activation_map(current_label_soft_prediction) - result_media = ResultMediaEntity(name=f'{label.name}', - type='Soft Prediction', - label=label, + def _add_predictions_to_dataset(self, prediction_results, dataset): + for dataset_item, (prediction, feature_vector, saliency_map) in zip(dataset, prediction_results): + soft_prediction = 
np.transpose(prediction, axes=(1, 2, 0)) + hard_prediction = create_hard_prediction_from_soft_prediction( + soft_prediction=soft_prediction, + soft_threshold=self._hyperparams.postprocessing.soft_threshold, + blur_strength=self._hyperparams.postprocessing.blur_strength, + ) + annotations = create_annotation_from_segmentation_map( + hard_prediction=hard_prediction, + soft_prediction=soft_prediction, + label_map=self._label_dictionary, + ) + dataset_item.append_annotations(annotations=annotations) + + if feature_vector is not None: + active_score = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) + dataset_item.append_metadata_item(active_score, model=self._task_environment.model) + + if saliency_map is not None: + class_act_map = get_activation_map(saliency_map, (dataset_item.width, dataset_item.height)) + result_media = ResultMediaEntity(name="saliency_map", + type="Saliency map", annotation_scene=dataset_item.annotation_scene, roi=dataset_item.roi, numpy=class_act_map) @@ -245,7 +234,7 @@ def _infer_segmentor(self, model: torch.nn.Module, config: Config, dataset: DatasetEntity, - save_mask_visualization: bool = False) -> None: + dump_features: bool = False, dump_saliency_map: bool = False): model.eval() test_config = prepare_for_testing(config, dataset) @@ -259,18 +248,28 @@ dist=False, shuffle=False) if torch.cuda.is_available(): - eval_model = MMDataParallel(model.cuda(test_config.gpu_ids[0]), - device_ids=test_config.gpu_ids) + model = MMDataParallel(model.cuda(test_config.gpu_ids[0]), device_ids=test_config.gpu_ids) else: - eval_model = MMDataCPU(model) - # Use a single gpu for testing. Set in both mm_val_dataloader and eval_model - for data, dataset_item in zip(mm_val_dataloader, dataset): - with torch.no_grad(): - result, repr_vector = eval_model(return_loss=False, output_logits=True, **data) - assert len(result) == 1 + eval_predictions = [] + feature_vectors = [] + saliency_maps = [] - self._add_predictions_to_dataset_item(result[0], repr_vector, dataset_item, save_mask_visualization) + # Use a single gpu for testing. Set in both mm_val_dataloader and model + with FeatureVectorHook(model.module.backbone) if dump_features else nullcontext() as fhook: + with SaliencyMapHook(model.module.backbone) if dump_saliency_map else nullcontext() as shook: + for data in mm_val_dataloader: + with torch.no_grad(): + result = model(return_loss=False, output_logits=True, **data) + eval_predictions.extend(result) + feature_vectors = fhook.records if dump_features else [None] * len(dataset) + saliency_maps = shook.records if dump_saliency_map else [None] * len(dataset) + assert len(eval_predictions) == len(feature_vectors) == len(saliency_maps), \ + 'Number of elements should be the same; however, the numbers of outputs are ' \ + f"{len(eval_predictions)}, {len(feature_vectors)}, and {len(saliency_maps)}" + predictions = zip(eval_predictions, feature_vectors, saliency_maps) + return predictions @check_input_parameters_type() def evaluate(self, output_result_set: ResultSetEntity, evaluation_metric: Optional[str] = None): diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py index 8408a648d95..2e97ce45bcb 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/model_wrappers/blur.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions # and limitations under the License. +from typing import Any, Dict, Optional, Union, Iterable +import warnings + import cv2 import numpy as np -from typing import Any, Dict, Optional from openvino.model_zoo.model_api.models import SegmentationModel from openvino.model_zoo.model_api.models.types import NumericalValue @@ -23,6 +25,16 @@ from ote_sdk.utils.segmentation_utils import create_hard_prediction_from_soft_prediction +@check_input_parameters_type() +def get_actmap( + features: Union[np.ndarray, Iterable, int, float], output_res: Union[tuple, list] +): + am = cv2.resize(features, output_res) + am = cv2.applyColorMap(am, cv2.COLORMAP_JET) + am = cv2.cvtColor(am, cv2.COLOR_BGR2RGB) + return am + + class BlurSegmentation(SegmentationModel): __model__ = 'blur_segmentation' @@ -60,7 +72,6 @@ def _get_outputs(self): def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]): predictions = outputs[self.output_blob_name].squeeze() soft_prediction = np.transpose(predictions, axes=(1, 2, 0)) - feature_vector = outputs.get('repr_vector', None) # Optional output hard_prediction = create_hard_prediction_from_soft_prediction( soft_prediction=soft_prediction, @@ -68,9 +79,17 @@ def postprocess(self, outputs: Dict[str, np.ndarray], metadata: Dict[str, Any]): blur_strength=self.blur_strength ) hard_prediction = cv2.resize(hard_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST) - soft_prediction = cv2.resize(soft_prediction, metadata['original_shape'][1::-1], 0, 0, interpolation=cv2.INTER_NEAREST) - - metadata['soft_predictions'] = soft_prediction - metadata['feature_vector'] = feature_vector + + if 'feature_vector' not in outputs or 'saliency_map' not in outputs: + warnings.warn('Could not find Feature Vector or Saliency Map in OpenVINO output. 
' + 'Please rerun OpenVINO export or retrain the model.') + metadata["saliency_map"] = None + metadata["feature_vector"] = None + else: + metadata["saliency_map"] = get_actmap( + outputs["saliency_map"][0], + (metadata["original_shape"][1], metadata["original_shape"][0]), + ) + metadata["feature_vector"] = outputs["feature_vector"] return hard_prediction diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py b/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py index e1c8a075947..5ff7e6063a5 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/openvino_task.py @@ -105,12 +105,11 @@ def pre_process(self, image: np.ndarray) -> Tuple[Dict[str, np.ndarray], Dict[st @check_input_parameters_type() def post_process(self, prediction: Dict[str, np.ndarray], metadata: Dict[str, Any]) -> AnnotationSceneEntity: hard_prediction = self.model.postprocess(prediction, metadata) - soft_prediction = metadata['soft_predictions'] feature_vector = metadata['feature_vector'] - + saliency_map = metadata['saliency_map'] predicted_scene = self.converter.convert_to_annotation(hard_prediction, metadata) - return predicted_scene, soft_prediction, feature_vector + return predicted_scene, feature_vector, saliency_map @check_input_parameters_type() def forward(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]: @@ -165,38 +164,25 @@ def infer(self, inference_parameters: Optional[InferenceParameters] = None) -> DatasetEntity: if inference_parameters is not None: update_progress_callback = inference_parameters.update_progress - save_mask_visualization = not inference_parameters.is_evaluation + dump_saliency_map = not inference_parameters.is_evaluation else: update_progress_callback = default_progress_callback - save_mask_visualization = True + dump_saliency_map = True dataset_size = len(dataset) for i, dataset_item in enumerate(dataset, 1): - predicted_scene, soft_prediction, feature_vector = self.inferencer.predict(dataset_item.numpy) + predicted_scene, feature_vector, saliency_map = self.inferencer.predict(dataset_item.numpy) dataset_item.append_annotations(predicted_scene.annotations) if feature_vector is not None: feature_vector_media = TensorEntity(name="representation_vector", numpy=feature_vector.reshape(-1)) dataset_item.append_metadata_item(feature_vector_media, model=self.model) - if save_mask_visualization: - for label_index, label in self._label_dictionary.items(): - if label_index == 0: - continue - - if len(soft_prediction.shape) == 3: - current_label_soft_prediction = soft_prediction[:, :, label_index] - else: - current_label_soft_prediction = soft_prediction - - class_act_map = get_activation_map(current_label_soft_prediction) - result_media = ResultMediaEntity(name=f'{label.name}', - type='Soft Prediction', - label=label, - annotation_scene=dataset_item.annotation_scene, - roi=dataset_item.roi, - numpy=class_act_map) - dataset_item.append_metadata_item(result_media, model=self.model) + if dump_saliency_map and saliency_map is not None: + saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map", + annotation_scene=dataset_item.annotation_scene, + numpy=saliency_map, roi=dataset_item.roi) + dataset_item.append_metadata_item(saliency_map_media, model=self.model) update_progress_callback(int(i / dataset_size * 100)) diff --git a/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py 
b/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py index 26d775534f7..9250d7bc819 100644 --- a/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py +++ b/external/mmsegmentation/segmentation_tasks/apis/segmentation/ote_utils.py @@ -13,6 +13,7 @@ # and limitations under the License. +import cv2 import importlib from typing import Iterable, Union import yaml @@ -43,15 +44,11 @@ def get_task_class(path: str): @check_input_parameters_type() -def get_activation_map(features: Union[np.ndarray, Iterable, int, float]): - min_soft_score = np.min(features) - max_soft_score = np.max(features) - factor = 255.0 / (max_soft_score - min_soft_score + 1e-12) - - float_act_map = factor * (features - min_soft_score) - int_act_map = np.uint8(np.floor(float_act_map)) - - return int_act_map +def get_activation_map(features: Union[np.ndarray, Iterable, int, float], output_res: Union[tuple, list]): + am = cv2.resize(features, output_res) + am = cv2.applyColorMap(am, cv2.COLORMAP_JET) + am = cv2.cvtColor(am, cv2.COLOR_BGR2RGB) + return am class TrainingProgressCallback(TimeMonitorCallback): diff --git a/external/mmsegmentation/submodule b/external/mmsegmentation/submodule index 9f50fc158be..337c109596a 160000 --- a/external/mmsegmentation/submodule +++ b/external/mmsegmentation/submodule @@ -1 +1 @@ -Subproject commit 9f50fc158be50594ea4aecf0a07ea652c91ec846 +Subproject commit 337c109596acf7f06a73c199f368a8f0f18d026c diff --git a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py index 06185328556..160ec8b1ace 100644 --- a/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py +++ b/external/model-preparation-algorithm/mpa_tasks/apis/segmentation/task.py @@ -16,6 +16,7 @@ from mpa import MPAConstants from mpa_tasks.apis import BaseTask, TrainType from mpa_tasks.apis.segmentation import SegmentationConfig +from mpa_tasks.utils.data_utils import get_actmap from mpa.utils.config_utils import MPAConfig from mpa.utils.logger import get_logger from ote_sdk.configuration import cfg_helper @@ -71,6 +72,8 @@ def infer(self, inference_parameters: Optional[InferenceParameters] = None ) -> DatasetEntity: logger.info('infer()') + dump_features = True + dump_saliency_map = not inference_parameters.is_evaluation if inference_parameters else True if inference_parameters is not None: update_progress_callback = inference_parameters.update_progress @@ -84,15 +87,13 @@ def infer(self, stage_module = 'SegInferrer' self._data_cfg = self._init_test_data_cfg(dataset) self._label_dictionary = dict(enumerate(self._labels, 1)) - results = self._run_task(stage_module, mode='train', dataset=dataset) + results = self._run_task(stage_module, mode='train', dataset=dataset, dump_features=dump_features, + dump_saliency_map=dump_saliency_map) logger.debug(f'result of run_task {stage_module} module = {results}') predictions = results['outputs'] - # TODO: feature maps should be came from the inference results - featuremaps = [None for _ in range(len(predictions))] - for i in range(len(dataset)): - result, featuremap, dataset_item = predictions[i], featuremaps[i], dataset[i] - self._add_predictions_to_dataset_item(result, featuremap, dataset_item, - save_mask_visualization=not is_evaluation) + prediction_results = zip(predictions['eval_predictions'], predictions['feature_vectors'], + predictions['saliency_maps']) + self._add_predictions_to_dataset(prediction_results, dataset, 
dump_saliency_map=not is_evaluation) return dataset def evaluate(self, @@ -208,45 +209,33 @@ def _init_test_data_cfg(self, dataset: DatasetEntity): ) return data_cfg - def _add_predictions_to_dataset_item(self, prediction, feature_vector, dataset_item, save_mask_visualization): - soft_prediction = np.transpose(prediction, axes=(1, 2, 0)) - hard_prediction = create_hard_prediction_from_soft_prediction( - soft_prediction=soft_prediction, - soft_threshold=self._hyperparams.postprocessing.soft_threshold, - blur_strength=self._hyperparams.postprocessing.blur_strength, - ) - annotations = create_annotation_from_segmentation_map( - hard_prediction=hard_prediction, - soft_prediction=soft_prediction, - label_map=self._label_dictionary, - ) - dataset_item.append_annotations(annotations=annotations) - - if feature_vector is not None: - active_score = TensorEntity(name="representation_vector", numpy=feature_vector) - dataset_item.append_metadata_item(active_score, model=self._task_environment.model) - - if save_mask_visualization: - for label_index, label in self._label_dictionary.items(): - if label_index == 0: - continue - - if len(soft_prediction.shape) == 3: - current_label_soft_prediction = soft_prediction[:, :, label_index] - else: - current_label_soft_prediction = soft_prediction - min_soft_score = np.min(current_label_soft_prediction) - max_soft_score = np.max(current_label_soft_prediction) - factor = 255.0 / (max_soft_score - min_soft_score + 1e-12) - result_media_numpy = (factor * (current_label_soft_prediction - min_soft_score)).astype(np.uint8) - - result_media = ResultMediaEntity(name=f'{label.name}', - type='Soft Prediction', - label=label, - annotation_scene=dataset_item.annotation_scene, - roi=dataset_item.roi, - numpy=result_media_numpy) - dataset_item.append_metadata_item(result_media, model=self._task_environment.model) + def _add_predictions_to_dataset(self, prediction_results, dataset, dump_saliency_map): + """Loop over dataset again to assign predictions. Convert from MMSegmentation format to OTE format.""" + + for dataset_item, (prediction, feature_vector, saliency_map) in zip(dataset, prediction_results): + soft_prediction = np.transpose(prediction[0], axes=(1, 2, 0)) + hard_prediction = create_hard_prediction_from_soft_prediction( + soft_prediction=soft_prediction, + soft_threshold=self._hyperparams.postprocessing.soft_threshold, + blur_strength=self._hyperparams.postprocessing.blur_strength, + ) + annotations = create_annotation_from_segmentation_map( + hard_prediction=hard_prediction, + soft_prediction=soft_prediction, + label_map=self._label_dictionary, + ) + dataset_item.append_annotations(annotations=annotations) + + if feature_vector is not None: + active_score = TensorEntity(name="representation_vector", numpy=feature_vector) + dataset_item.append_metadata_item(active_score, model=self._task_environment.model) + + if dump_saliency_map and saliency_map is not None: + saliency_map = get_actmap(saliency_map, (dataset_item.width, dataset_item.height)) + saliency_map_media = ResultMediaEntity(name="saliency_map", type="Saliency map", + annotation_scene=dataset_item.annotation_scene, + numpy=saliency_map, roi=dataset_item.roi) + dataset_item.append_metadata_item(saliency_map_media, model=self._task_environment.model) @staticmethod def _patch_datasets(config: MPAConfig, domain=Domain.SEGMENTATION): diff --git a/external/model-preparation-algorithm/submodule b/external/model-preparation-algorithm/submodule index 71d64222924..3f98a38df93 160000 --- a/external/model-preparation-algorithm/submodule +++ b/external/model-preparation-algorithm/submodule @@ -1 +1 @@ -Subproject commit 71d6422292412bd5546d0ca722fb199e528732bc +Subproject commit 3f98a38df933583d079fd297bd742ad546a076ec diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multiclass.py b/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multiclass.py index 1bea9ce7db3..8c9527e5e7c 100644 --- a/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multiclass.py +++ b/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multiclass.py @@ -117,19 +117,16 @@ def test_ote_demo_openvino(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(self, template): - pytest.xfail("Known issue CVS-84981") ote_deploy_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_deployment(self, template): - pytest.xfail("Known issue CVS-84981") # require pass for test_ote_deploy_openvino ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_deployment(self, template): - pytest.xfail("Known issue CVS-84981") ote_demo_deployment_testing(template, root, ote_dir, args) @e2e_pytest_component diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multilabel.py b/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multilabel.py index fe40c623366..919c95c3697 100644 --- a/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multilabel.py +++ b/external/model-preparation-algorithm/tests/ote_cli/test_cls_cls_il_multilabel.py @@ -107,31 +107,29 @@ def test_ote_eval_openvino(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo(self, template): - pytest.skip("demo for 
multi-label classification is not supported now.") + pytest.skip("Demo for multi-label classification is not supported now.") ote_demo_testing(template, root, ote_dir, args) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_openvino(self, template): - pytest.skip("demo for multi-label classification is not supported now.") + pytest.skip("Demo for multi-label classification is not supported now.") ote_demo_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(self, template): - pytest.xfail("Known issue CVS-84981") ote_deploy_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_eval_deployment(self, template): - pytest.xfail("Known issue CVS-84981") # require pass for test_ote_deploy_openvino ote_eval_deployment_testing(template, root, ote_dir, args, threshold=0.0) @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_demo_deployment(self, template): - pytest.xfail("Known issue CVS-84981, demo for multi-label classification is not supported now.") + pytest.xfail("Demo for multi-label classification is not supported now.") ote_demo_deployment_testing(template, root, ote_dir, args) @e2e_pytest_component diff --git a/external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py b/external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py index d41d4df0f75..b29e0cc58c8 100644 --- a/external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py +++ b/external/model-preparation-algorithm/tests/ote_cli/test_seg_cls_il.py @@ -106,7 +106,6 @@ def test_ote_demo_openvino(self, template): @e2e_pytest_component @pytest.mark.parametrize("template", templates, ids=templates_ids) def test_ote_deploy_openvino(self, template): - pytest.xfail("Known issue CVS-84981") ote_deploy_openvino_testing(template, root, ote_dir, args) @e2e_pytest_component diff --git a/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt b/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt index b9e54263900..a9c42113de3 100644 --- a/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt +++ b/ote_sdk/ote_sdk/usecases/exportable_code/demo/requirements.txt @@ -1,3 +1,3 @@ openvino==2022.1.0 openmodelzoo-modelapi @ git+https://github.com/openvinotoolkit/open_model_zoo/@releases/2022/SCv1.1#egg=openmodelzoo-modelapi&subdirectory=demos/common/python -ote-sdk @ git+https://github.com/openvinotoolkit/training_extensions/@83a70cdd11d7c1bbc11ceb1b758750fb4bd18aae#egg=ote-sdk&subdirectory=ote_sdk +ote-sdk @ git+https://github.com/openvinotoolkit/training_extensions/@685ef00ac72965956a769c80d1174667831c381e#egg=ote-sdk&subdirectory=ote_sdk
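
Illustrative sketch (not part of the patch): the FeatureVectorHook/SaliencyMapHook context managers used in _infer_segmentor (imported from mmseg.core.hooks.auxiliary_hooks) follow the standard PyTorch recording-hook pattern: register a forward hook on the backbone on entry, append one record per forward pass, and remove the handle on exit, exposing the collected values as a .records list. Below is a minimal self-contained version of that pattern with plain PyTorch; the class name RecordingHook and the global-average-pooling step are assumptions for illustration, not the mmseg implementation.

import torch
import torch.nn as nn

class RecordingHook:
    """Context manager that records one entry per forward pass of `module`."""

    def __init__(self, module: nn.Module):
        self._module = module
        self.records = []

    def __enter__(self):
        # Handle is kept so the hook can be detached on exit.
        self._handle = self._module.register_forward_hook(self._hook)
        return self

    def _hook(self, module, inputs, output):
        # For a feature-vector-style record, global-average-pool the spatial dims.
        self.records.append(output.mean(dim=(2, 3)).detach().cpu().numpy())

    def __exit__(self, exc_type, exc_value, traceback):
        self._handle.remove()

if __name__ == "__main__":
    backbone = nn.Conv2d(3, 8, 3, padding=1)
    with RecordingHook(backbone) as hook:
        for _ in range(2):
            with torch.no_grad():
                backbone(torch.rand(1, 3, 16, 16))
    assert len(hook.records) == 2 and hook.records[0].shape == (1, 8)

Because the handle is removed when the with-block closes, the records list stops growing there, which is why _infer_segmentor can read fhook.records/shook.records right after the dataloader loop and pair them one-to-one with eval_predictions.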
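Similarly, the get_actmap() helper added in blur.py and the rewritten get_activation_map() in ote_utils.py are the same three-step transform (the mpa_tasks.utils.data_utils.get_actmap import presumably mirrors it): resize the raw saliency map to the dataset item's resolution, apply the JET colormap, and convert BGR to RGB before wrapping it in a ResultMediaEntity. A runnable sketch of that transform follows; note that cv2.applyColorMap expects uint8 input, so this assumes the hooks already yield uint8 maps, and the helper name to_saliency_media is hypothetical.

import cv2
import numpy as np

def to_saliency_media(saliency_map: np.ndarray, width: int, height: int) -> np.ndarray:
    resized = cv2.resize(saliency_map, (width, height))      # match the item's resolution
    colored = cv2.applyColorMap(resized, cv2.COLORMAP_JET)   # uint8 grayscale -> BGR heat map
    return cv2.cvtColor(colored, cv2.COLOR_BGR2RGB)          # ResultMediaEntity stores RGB

if __name__ == "__main__":
    raw = (np.random.rand(32, 32) * 255).astype(np.uint8)
    rgb = to_saliency_media(raw, 256, 128)
    assert rgb.shape == (128, 256, 3)  # cv2.resize takes (width, height); arrays are (rows, cols)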