# Copyright (c) 2025 Intel Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#      http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, Optional, Tuple

import openvino as ov
import torch
from ultralytics import YOLO
from ultralytics.data.utils import check_det_dataset
from ultralytics.engine.validator import BaseValidator as Validator
from ultralytics.utils.torch_utils import de_parallel

import nncf
from nncf.torch import disable_patching
from tests.post_training.pipelines.base import OV_BACKENDS
from tests.post_training.pipelines.base import BackendType
from tests.post_training.pipelines.base import PTQTestPipeline


class UltralyticsDetection(PTQTestPipeline):
    """Pipeline for YOLO detection models from the Ultralytics repository."""

    def prepare_model(self) -> None:
        if self.batch_size != 1:
            msg = "Batch size > 1 is not supported"
            raise RuntimeError(msg)

        model_path = f"{self.fp32_model_dir}/{self.model_id}"
        yolo = YOLO(f"{model_path}.pt")
        self.validator, self.data_loader = self._prepare_validation(yolo, "coco128.yaml")
        self.dummy_tensor = torch.ones((1, 3, 640, 640))

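        # For OpenVINO backends (and the FP32 baseline), export the model to ONNX
        # and convert it to OpenVINO IR.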
        if self.backend in OV_BACKENDS + [BackendType.FP32]:
            onnx_model_path = Path(f"{model_path}.onnx")
            ir_model_path = self.fp32_model_dir / "model_fp32.xml"
            yolo.export(format="onnx", dynamic=True, half=False)
            ov.save_model(ov.convert_model(onnx_model_path), ir_model_path)
            self.model = ov.Core().read_model(ir_model_path)

        if self.backend == BackendType.FX_TORCH:
            pt_model = yolo.model
            # Run the model once to initialize all internal variables
            pt_model(self.dummy_tensor)

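            # Capture the model with torch.export; NNCF operator patching is disabled
            # so the exported graph contains only the original model operations.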
            with torch.no_grad():
                with disable_patching():
                    self.model = torch.export.export(pt_model, args=(self.dummy_tensor,), strict=False).module()

    def prepare_preprocessor(self) -> None:
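        # Preprocessing is handled by the Ultralytics validator, so there is nothing to prepare here.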
        pass

    @staticmethod
    def _validate_fx(
        model: torch.fx.GraphModule,
        data_loader: torch.utils.data.DataLoader,
        validator: Validator,
        num_samples: Optional[int] = None,
    ) -> Tuple[Dict, int, int]:
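        # Compile the exported FX graph with the OpenVINO backend of torch.compile.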
        compiled_model = torch.compile(model, backend="openvino")
        for batch_i, batch in enumerate(data_loader):
            if num_samples is not None and batch_i == num_samples:
                break
            batch = validator.preprocess(batch)
            preds = compiled_model(batch["img"])
            preds = validator.postprocess(preds)
            validator.update_metrics(preds, batch)
        stats = validator.get_stats()
        return stats, validator.seen, validator.nt_per_class.sum()

    @staticmethod
    def _validate_ov(
        model: ov.Model,
        data_loader: torch.utils.data.DataLoader,
        validator: Validator,
        num_samples: Optional[int] = None,
    ) -> Tuple[Dict, int, int]:
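        # Make the spatial dimensions dynamic so letterboxed validation images of any size are accepted.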
        model.reshape({0: [1, 3, -1, -1]})
        compiled_model = ov.compile_model(model)
        output_layer = compiled_model.output(0)
        for batch_i, batch in enumerate(data_loader):
            if num_samples is not None and batch_i == num_samples:
                break
            batch = validator.preprocess(batch)
            preds = torch.from_numpy(compiled_model(batch["img"])[output_layer])
            preds = validator.postprocess(preds)
            validator.update_metrics(preds, batch)
        stats = validator.get_stats()
        return stats, validator.seen, validator.nt_per_class.sum()

    def get_transform_calibration_fn(self):
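        # NNCF calls the transform on every dataloader item; return only the preprocessed image tensor.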
        def transform_func(batch):
            return self.validator.preprocess(batch)["img"]

        return transform_func

    def prepare_calibration_dataset(self):
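        # Reuse the validation dataloader as the calibration data source.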
        self.calibration_dataset = nncf.Dataset(self.data_loader, self.get_transform_calibration_fn())

    @staticmethod
    def _prepare_validation(model: YOLO, data: str) -> Tuple[Validator, torch.utils.data.DataLoader]:
        custom = {"rect": False, "batch": 1}  # method defaults
        args = {**model.overrides, **custom, "mode": "val"}  # highest priority args on the right

        validator = model._smart_load("validator")(args=args, _callbacks=model.callbacks)
        stride = 32  # default stride
        validator.stride = stride  # used in get_dataloader() for padding
        validator.data = check_det_dataset(data)
        validator.init_metrics(de_parallel(model))

        data_loader = validator.get_dataloader(validator.data.get(validator.args.split), validator.args.batch)

        return validator, data_loader

    def _validate(self):
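        # The FP32 baseline is validated as the converted OpenVINO IR; quantized models
        # go through their backend-specific validation path.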
        if self.backend == BackendType.FP32:
            stats, _, _ = self._validate_ov(self.model, self.data_loader, self.validator)
        elif self.backend in OV_BACKENDS:
            stats, _, _ = self._validate_ov(self.compressed_model, self.data_loader, self.validator)
        elif self.backend == BackendType.FX_TORCH:
            stats, _, _ = self._validate_fx(self.compressed_model, self.data_loader, self.validator)
        else:
            msg = f"Backend {self.backend} is not supported in UltralyticsDetection"
            raise RuntimeError(msg)

        self.run_info.metric_name = "mAP50(B)"
        self.run_info.metric_value = stats["metrics/mAP50(B)"]