From 9d07137fc413f6590205ed94702a7666da7bb7a0 Mon Sep 17 00:00:00 2001 From: YoungPeng Date: Wed, 25 Feb 2026 16:43:19 +0800 Subject: [PATCH 1/6] Add: yolov5s inference script. --- .../object_detection/yolov5s/igie/README.md | 95 ++++ .../yolov5s/igie/ci/prepare.sh | 24 + .../object_detection/yolov5s/igie/export.py | 58 +++ .../yolov5s/igie/inference.py | 160 +++++++ .../object_detection/yolov5s/igie/quantize.py | 114 +++++ .../yolov5s/igie/requirements.txt | 5 + .../scripts/infer_yolov5s_fp16_accuracy.sh | 49 ++ .../scripts/infer_yolov5s_fp16_performance.sh | 50 ++ .../scripts/infer_yolov5s_int8_accuracy.sh | 57 +++ .../scripts/infer_yolov5s_int8_performance.sh | 58 +++ .../cv/object_detection/yolov5s/igie/utils.py | 435 ++++++++++++++++++ 11 files changed, 1105 insertions(+) create mode 100644 models/cv/object_detection/yolov5s/igie/README.md create mode 100644 models/cv/object_detection/yolov5s/igie/ci/prepare.sh create mode 100644 models/cv/object_detection/yolov5s/igie/export.py create mode 100644 models/cv/object_detection/yolov5s/igie/inference.py create mode 100644 models/cv/object_detection/yolov5s/igie/quantize.py create mode 100644 models/cv/object_detection/yolov5s/igie/requirements.txt create mode 100644 models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh create mode 100644 models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh create mode 100644 models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh create mode 100644 models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh create mode 100644 models/cv/object_detection/yolov5s/igie/utils.py diff --git a/models/cv/object_detection/yolov5s/igie/README.md b/models/cv/object_detection/yolov5s/igie/README.md new file mode 100644 index 00000000..d4db50f3 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/README.md @@ -0,0 +1,95 @@ +# YOLOv5s (IGIE) + +## Model Description + +The YOLOv5 architecture is designed for efficient and accurate object detection tasks in real-time scenarios. It employs a single convolutional neural network to simultaneously predict bounding boxes and class probabilities for multiple objects within an image. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +| :----: | :----: | :----: | +| MR-V100 | 4.4.0 | 26.03 | + +## Model Preparation + +### Prepare Resources + +Pretrained model: + +Dataset: + +- to download the labels dataset. +- to download the validation dataset. +- to download the train dataset. 
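+Assuming the standard COCO 2017 image server and the Ultralytics labels release (the URLs below are assumptions, so verify them against the dataset links listed above), a typical fetch looks like this:
+
+```bash
+# assumed URLs, check them against the dataset links above
+wget https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017labels.zip
+wget http://images.cocodataset.org/zips/val2017.zip
+wget http://images.cocodataset.org/zips/train2017.zip
+```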
+ +```bash +unzip -q -d ./ coco2017labels.zip +unzip -q -d ./coco/images/ train2017.zip +unzip -q -d ./coco/images/ val2017.zip + +coco +├── annotations +│   └── instances_val2017.json +├── images +│   ├── train2017 +│   └── val2017 +├── labels +│   ├── train2017 +│   └── val2017 +├── LICENSE +├── README.txt +├── test-dev2017.txt +├── train2017.cache +├── train2017.txt +├── val2017.cache +└── val2017.txt +``` + +### Install Dependencies + +```bash +pip3 install -r requirements.txt +``` + +### Model Conversion + +```bash +# download the weight from the recommend link +wget https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt + +python3 export.py --weight yolov5s.pt --output yolov5s.onnx +# Make sure numpy < 2.0 +# Use onnxsim optimize onnx model +onnxsim yolov5s.onnx yolov5s_opt.onnx +``` + +## Model Inference + +```bash +export DATASETS_DIR=/Path/to/coco/ +``` + +### FP16 + +```bash +# Accuracy +bash scripts/infer_yolov5s_fp16_accuracy.sh +# Performance +bash scripts/infer_yolov5s_fp16_performance.sh +``` + +### INT8 + +```bash +# Accuracy +bash scripts/infer_yolov5s_int8_accuracy.sh +# Performance +bash scripts/infer_yolov5s_int8_performance.sh +``` + +## Model Results + +| Model | BatchSize | Precision | FPS | MAP@0.5 | MAP@0.5:0.95 | +| :----: | :----: | :----: | :----: | :----: | :----: | +| YOLOv5s | 32 | FP16 | 1433.13 | 0.567 | 0.374 | +| YOLOv5s | 32 | INT8 | 2832.94 | 0.556 | 0.357 | diff --git a/models/cv/object_detection/yolov5s/igie/ci/prepare.sh b/models/cv/object_detection/yolov5s/igie/ci/prepare.sh new file mode 100644 index 00000000..5531dec7 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/ci/prepare.sh @@ -0,0 +1,24 @@ +#!/bin/bash +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -x + +pip3 install -r requirements.txt + +python3 export.py --weight yolov5s.pt --output yolov5s.onnx + +# Use onnxsim optimize onnx model +onnxsim yolov5s.onnx yolov5s_opt.onnx diff --git a/models/cv/object_detection/yolov5s/igie/export.py b/models/cv/object_detection/yolov5s/igie/export.py new file mode 100644 index 00000000..bb0c669a --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/export.py @@ -0,0 +1,58 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
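+# Usage (as shown in the README):
+#   python3 export.py --weight yolov5s.pt --output yolov5s.onnx
+# Loads the YOLOv5s checkpoint through torch.hub ("ultralytics/yolov5") and
+# exports it to ONNX (opset 13) with a dynamic batch dimension.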
+ +import argparse + +import torch + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--weight", + type=str, + required=True, + help="pytorch model weight.") + + parser.add_argument("--output", + type=str, + required=True, + help="export onnx model path.") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + model = torch.hub.load("ultralytics/yolov5", "custom", path=args.weight).cpu() + + input_names = ['input'] + output_names = ['output'] + dynamic_axes = {'input': {0: '-1'}, 'output': {0: '-1'}} + dummy_input = torch.randn(1, 3, 640, 640) + + torch.onnx.export( + model, + dummy_input, + args.output, + input_names = input_names, + dynamic_axes = dynamic_axes, + output_names = output_names, + opset_version=13 + ) + +if __name__ == "__main__": + main() diff --git a/models/cv/object_detection/yolov5s/igie/inference.py b/models/cv/object_detection/yolov5s/igie/inference.py new file mode 100644 index 00000000..42f9407d --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/inference.py @@ -0,0 +1,160 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import argparse +import tvm +import torch +import numpy as np +from tvm import relay +from tqdm import tqdm + +from utils import COCO2017Dataset, COCO2017Evaluator + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--engine", + type=str, + required=True, + help="igie engine path.") + + parser.add_argument("--batchsize", + type=int, + required=True, + help="inference batch size.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="datasets path.") + + parser.add_argument("--input_name", + type=str, + required=True, + help="input name of the model.") + + parser.add_argument("--warmup", + type=int, + default=3, + help="number of warmup before test.") + + parser.add_argument("--num_workers", + type=int, + default=16, + help="number of workers used in pytorch dataloader.") + + parser.add_argument("--acc_target", + type=float, + default=None, + help="Model inference Accuracy target.") + + parser.add_argument("--fps_target", + type=float, + default=None, + help="Model inference FPS target.") + + parser.add_argument("--conf", + type=float, + default=0.001, + help="confidence threshold.") + + parser.add_argument("--iou", + type=float, + default=0.65, + help="iou threshold.") + + parser.add_argument("--perf_only", + type=bool, + default=False, + help="Run performance test only") + + args = parser.parse_args() + + return args + +def get_dataloader(data_path, label_path, batch_size, num_workers): + + dataset = COCO2017Dataset(data_path, label_path, image_size=640) + + dataloader = torch.utils.data.DataLoader(dataset, + batch_size=batch_size, + drop_last=False, + num_workers=num_workers, + collate_fn=dataset.collate_fn) + return dataloader + +def main(): + args = parse_args() + + batch_size = args.batchsize + data_path = 
os.path.join(args.datasets, "images", "val2017") + label_path = os.path.join(args.datasets, "annotations", "instances_val2017.json") + + # create iluvatar target & device + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + device = tvm.device(target.kind.name, 0) + + # load engine + lib = tvm.runtime.load_module(args.engine) + + # create runtime from engine + module = tvm.contrib.graph_executor.GraphModule(lib["default"](device)) + + # just run perf test + if args.perf_only: + ftimer = module.module.time_evaluator("run", device, number=100, repeat=1) + prof_res = np.array(ftimer().results) * 1000 + fps = batch_size * 1000 / np.mean(prof_res) + print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}") + else: + # warm up + for _ in range(args.warmup): + module.run() + + # get dataloader + dataloader = get_dataloader(data_path, label_path, batch_size, args.num_workers) + + # get evaluator + evaluator = COCO2017Evaluator(label_path=label_path, + conf_thres=args.conf, + iou_thres=args.iou, + image_size=640) + + for all_inputs in tqdm(dataloader): + image = all_inputs[0] + pad_batch = len(image) != batch_size + if pad_batch: + origin_size = len(image) + image = np.resize(image, (batch_size, *image.shape[1:])) + + module.set_input(args.input_name, tvm.nd.array(image, device)) + + module.run() + + all_outputs = [] + num_outputs = module.get_num_outputs() + for i in range(num_outputs): + output = module.get_output(i).asnumpy() + if pad_batch: + output = output[:origin_size] + all_outputs.append(output) + + evaluator.evaluate(all_outputs[0], all_inputs) + + evaluator.summary() + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/object_detection/yolov5s/igie/quantize.py b/models/cv/object_detection/yolov5s/igie/quantize.py new file mode 100644 index 00000000..6771ad41 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/quantize.py @@ -0,0 +1,114 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
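+# Usage (as invoked by the INT8 scripts):
+#   python3 quantize.py --model_path yolov5s_opt.onnx --out_path yolov5s_int8.onnx --datasets ${DATASETS_DIR}
+# Performs onnxruntime static post-training quantization, calibrating on a
+# small number of COCO val2017 batches; the detection-head nodes listed in
+# nodes_to_exclude below are left in floating point.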
+ +import os +import onnx +import psutil +import argparse +import numpy as np +from inference import get_dataloader +from onnxruntime.quantization import (CalibrationDataReader, QuantFormat, + quantize_static, QuantType, + CalibrationMethod) + +class CalibrationDataLoader(CalibrationDataReader): + def __init__(self, input_name, dataloader, cnt_limit=100): + self.cnt = 0 + self.input_name = input_name + self.cnt_limit = cnt_limit + self.iter = iter(dataloader) + + # avoid oom + @staticmethod + def _exceed_memory_upper_bound(upper_bound=80): + info = psutil.virtual_memory() + total_percent = info.percent + if total_percent >= upper_bound: + return True + return False + + def get_next(self): + if self._exceed_memory_upper_bound() or self.cnt >= self.cnt_limit: + return None + self.cnt += 1 + print(f"onnx calibration data count: {self.cnt}") + input_info = next(self.iter) + + ort_input = {k: np.array(v) for k, v in zip(self.input_name, input_info)} + return ort_input + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument("--out_path", + type=str, + required=True, + help="igie export engine path.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="calibration datasets path.") + + parser.add_argument("--num_workers", + type=int, + default=16, + help="number of workers used in pytorch dataloader.") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + model = onnx.load(args.model_path) + input_names = [input.name for input in model.graph.input] + + data_path = os.path.join(args.datasets, "images", "val2017") + label_path = os.path.join(args.datasets, "annotations", "instances_val2017.json") + + dataloader = get_dataloader(data_path, label_path, batch_size=1, num_workers=args.num_workers) + calibration = CalibrationDataLoader(input_names, dataloader, cnt_limit=20) + + quantize_static(args.model_path, + args.out_path, + calibration_data_reader=calibration, + quant_format=QuantFormat.QOperator, + per_channel=False, + activation_type=QuantType.QInt8, + weight_type=QuantType.QInt8, + use_external_data_format=False, + nodes_to_exclude= [ + '/model/model/model.24/Add', '/model/model/model.24/Add_1', '/model/model/model.24/Add_2', '/model/model/model.24/Concat_16', + '/model/model/model.24/Concat_25', '/model/model/model.24/Concat_27', '/model/model/model.24/Concat_7', '/model/model/model.24/Mul_10', + '/model/model/model.24/Mul_11', '/model/model/model.24/Mul_12', '/model/model/model.24/Mul_13', '/model/model/model.24/Mul_18', + '/model/model/model.24/Mul_19', '/model/model/model.24/Mul_2', '/model/model/model.24/Mul_20', '/model/model/model.24/Mul_21', + '/model/model/model.24/Mul_3', '/model/model/model.24/Mul_4', '/model/model/model.24/Mul_5' + ], + calibrate_method=CalibrationMethod.Percentile, + extra_options = { + 'ActivationSymmetric': True, + 'WeightSymmetric': True + } + ) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/object_detection/yolov5s/igie/requirements.txt b/models/cv/object_detection/yolov5s/igie/requirements.txt new file mode 100644 index 00000000..d58360ef --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/requirements.txt @@ -0,0 +1,5 @@ +tqdm +onnx +onnxsim +ultralytics==8.3.97 +pycocotools diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh 
b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh new file mode 100644 index 00000000..64a94ad6 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +batchsize=32 +model_path="yolov5s_opt.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +# build engine +python3 ../../igie_common/build_engine.py \ + --model_path ${model_path} \ + --input input:${batchsize},3,640,640 \ + --precision fp16 \ + --engine_path yolov5s_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine yolov5s_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ No newline at end of file diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh new file mode 100644 index 00000000..84cef150 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
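+# Performance-only counterpart of the FP16 accuracy script: it builds the same
+# FP16 engine and passes --perf_only True to inference.py, so only the
+# time_evaluator loop runs (no COCO evaluation).
+# Usage: bash scripts/infer_yolov5s_fp16_performance.sh [--bs <batch_size>]
+# DATASETS_DIR must point at the prepared COCO directory.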
+ +batchsize=32 +model_path="yolov5s_opt.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +# build engine +python3 ../../igie_common/build_engine.py \ + --model_path ${model_path} \ + --input input:${batchsize},3,640,640 \ + --precision fp16 \ + --engine_path yolov5s_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine yolov5s_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ + --perf_only True \ No newline at end of file diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh new file mode 100644 index 00000000..a37bb904 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh @@ -0,0 +1,57 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +batchsize=32 +model_path="yolov5s_opt.onnx" +quantized_model_path="yolov5s_int8.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +if [ ! -e $quantized_model_path ]; then + # quantize model to int8 + python3 quantize.py \ + --model_path ${model_path} \ + --out_path ${quantized_model_path} \ + --datasets ${datasets_path} +fi + +# build engine +python3 ../../igie_common/build_engine.py \ + --model_path ${quantized_model_path} \ + --input input:${batchsize},3,640,640 \ + --precision int8 \ + --engine_path yolov5s_bs_${batchsize}_int8.so + +# inference +python3 inference.py \ + --engine yolov5s_bs_${batchsize}_int8.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ No newline at end of file diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh new file mode 100644 index 00000000..47435056 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +batchsize=32 +model_path="yolov5s_opt.onnx" +quantized_model_path="yolov5s_int8.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +if [ ! -e $quantized_model_path ]; then + # quantize model to int8 + python3 quantize.py \ + --model_path ${model_path} \ + --out_path ${quantized_model_path} \ + --datasets ${datasets_path} +fi + +# build engine +python3 ../../igie_common/build_engine.py \ + --model_path ${quantized_model_path} \ + --input input:${batchsize},3,640,640 \ + --precision int8 \ + --engine_path yolov5s_bs_${batchsize}_int8.so + +# inference +python3 inference.py \ + --engine yolov5s_bs_${batchsize}_int8.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ + --perf_only True diff --git a/models/cv/object_detection/yolov5s/igie/utils.py b/models/cv/object_detection/yolov5s/igie/utils.py new file mode 100644 index 00000000..8ab517b2 --- /dev/null +++ b/models/cv/object_detection/yolov5s/igie/utils.py @@ -0,0 +1,435 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
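+# COCO 2017 helpers used by inference.py (and, via get_dataloader, by quantize.py):
+#   - letterbox() plus box-format conversions (xywh <-> xyxy, normalized forms)
+#   - COCO2017Dataset: val2017 image/label loading for the dataloader
+#   - non_max_suppression() and COCO2017Evaluator: post-processing and
+#     mAP reporting through pycocotools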
+ +import os +import cv2 +import json +import torch +import torchvision +import numpy as np + +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + +coco80_to_coco91 = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, + 89, 90 +] + +coco80_to_coco91_dict = {idx: i for idx, i in enumerate(coco80_to_coco91)} +coco91_to_coco80_dict = {i: idx for idx, i in enumerate(coco80_to_coco91)} + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114)): + # Resize and pad image while meeting stride-multiple constraints + # current shape [height, width] + + shape = im.shape[:2] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + + # Compute padding + ratio = r, r + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] + + dw /= 2 + dh /= 2 + + if shape[::-1] != new_unpad: + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, + top, + bottom, + left, + right, + cv2.BORDER_CONSTANT, + value=color) + return im, ratio, (dw, dh) + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + +def xywh2xyxy(x): + # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x + y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y + y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x + y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y + return y + +def xyxy2xywh(x): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right + y = x.clone() if 
isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center + y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center + y[:, 2] = x[:, 2] - x[:, 0] # width + y[:, 3] = x[:, 3] - x[:, 1] # height + return y + +def box_area(box): + return (box[2] - box[0]) * (box[3] - box[1]) + +def box_iou(box1, box2, eps=1e-7): + # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2) + (a1, a2), (b1, b2) = box1[:, None].chunk(2, 2), box2.chunk(2, 1) + inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2) + + # IoU = inter / (area1 + area2 - inter) + return inter / (box_area(box1.T)[:, None] + box_area(box2.T) - inter + eps) + +def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None): + # Rescale boxes (xyxy) from img1_shape to img0_shape + if ratio_pad is None: # calculate from img0_shape + gain = min(img1_shape[0] / img0_shape[0], + img1_shape[1] / img0_shape[1]) + pad = (img1_shape[1] - img0_shape[1] * gain) / 2, ( + img1_shape[0] - img0_shape[0] * gain) / 2 + else: + gain = ratio_pad[0][0] + pad = ratio_pad[1] + + boxes[:, [0, 2]] -= pad[0] # x padding + boxes[:, [1, 3]] -= pad[1] # y padding + boxes[:, :4] /= gain + clip_boxes(boxes, img0_shape) + + return boxes + + +class COCO2017Dataset(torch.utils.data.Dataset): + def __init__(self, + image_dir_path, + label_json_path, + image_size=640, + pad_color=114, + val_mode=True, + input_layout="NCHW"): + + self.image_dir_path = image_dir_path + self.label_json_path = label_json_path + self.image_size = image_size + self.pad_color = pad_color + self.val_mode = val_mode + self.input_layout = input_layout + + self.coco = COCO(annotation_file=self.label_json_path) + + if self.val_mode: + self.img_ids = list(sorted(self.coco.imgs.keys())) + else: + self.img_ids = sorted(list(self.coco.imgToAnns.keys())) + + def __len__(self): + return len(self.img_ids) + + def __getitem__(self, index): + img_path = self._get_image_path(index) + img, (h0, w0), (h, w) = self._load_image(index) + + img, ratio, pad = letterbox(img, + self.image_size, + color=(self.pad_color, self.pad_color, self.pad_color)) + shapes = (h0, w0), ((h / h0, w / w0), pad) + + # load label + raw_label = self._load_json_label(index) + # normalized xywh to pixel xyxy format + raw_label[:, 1:] = xywhn2xyxy(raw_label[:, 1:], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1]) + + raw_label[:, 1:] = xyxy2xywhn(raw_label[:, 1:], + w=img.shape[1], + h=img.shape[0], + clip=True, + eps=1E-3) + + nl = len(raw_label) + labels_out = np.zeros((nl, 6)) + labels_out[:, 1:] = raw_label + + # HWC to CHW, BGR to RGB + img = img.transpose((2, 0, 1))[::-1] + img = np.ascontiguousarray(img) / 255.0 + if self.input_layout == "NHWC": + img = img.transpose((1, 2, 0)) + + return img, labels_out, img_path, shapes + + def _get_image_path(self, index): + idx = self.img_ids[index] + path = self.coco.loadImgs(idx)[0]["file_name"] + img_path = os.path.join(self.image_dir_path, path) + return img_path + + def _load_image(self, index): + img_path = self._get_image_path(index) + + im = cv2.imread(img_path) + h0, w0 = im.shape[:2] + r = self.image_size / max(h0, w0) + if r != 1: + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR) + return im.astype("float32"), (h0, w0), im.shape[:2] + + def _load_json_label(self, index): + _, (h0, w0), _ = self._load_image(index) + + idx = self.img_ids[index] + ann_ids = self.coco.getAnnIds(imgIds=idx) + targets = self.coco.loadAnns(ids=ann_ids) + + labels = [] + for target in targets: + cat = target["category_id"] + coco80_cat = 
coco91_to_coco80_dict[cat] + cat = np.array([[coco80_cat]]) + + x, y, w, h = target["bbox"] + x1, y1, x2, y2 = x, y, int(x + w), int(y + h) + xyxy = np.array([[x1, y1, x2, y2]]) + xywhn = xyxy2xywhn(xyxy, w0, h0) + labels.append(np.hstack((cat, xywhn))) + + if labels: + labels = np.vstack(labels) + else: + if self.val_mode: + labels = np.zeros((1, 5)) + else: + raise ValueError(f"set val_mode = False to use images with labels") + + return labels + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) + for i, lb in enumerate(label): + lb[:, 0] = i + return np.concatenate([i[None] for i in im], axis=0), np.concatenate(label, 0), path, shapes + + +def non_max_suppression( + prediction, + conf_thres=0.25, + iou_thres=0.45, + classes=None, + agnostic=False, + multi_label=True, + labels=(), + max_det=300, + nm=0, # number of masks +): + """Non-Maximum Suppression (NMS) on inference results to reject overlapping detections + + Returns: + list of detections, on (n,6) tensor per image [xyxy, conf, cls] + """ + + if isinstance(prediction, (list, tuple)): + prediction = prediction[0] + + bs = prediction.shape[0] # batch size + nc = prediction.shape[2] - nm - 5 # number of classes + xc = prediction[..., 4] > conf_thres # candidates + + # Checks + assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0' + assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0' + + # Settings + # min_wh = 2 # (pixels) minimum box width and height + max_wh = 7680 # (pixels) maximum box width and height + max_nms = 30000 # maximum number of boxes into torchvision.ops.nms() + time_limit = 0.5 + 0.05 * bs # seconds to quit after + redundant = True # require redundant detections + multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img) + merge = False # use merge-NMS + + mi = 5 + nc + output = [torch.zeros((0, 6 + nm))] * bs + for xi, x in enumerate(prediction): + + x = x[xc[xi]] + + if labels and len(labels[xi]): + lb = labels[xi] + v = torch.zeros((len(lb), nc + nm + 5), device=x.device) + v[:, :4] = lb[:, 1:5] + v[:, 4] = 1.0 + v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 + x = torch.cat((x, v), 0) + + # If none remain process next image + if not x.shape[0]: + continue + + # Compute conf + x[:, 5:] *= x[:, 4:5] + + # Box/Mask + box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2) + mask = x[:, mi:] + + # Detections matrix nx6 (xyxy, conf, cls) + if multi_label: + i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T + x = torch.cat( + (box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1) + else: # best class only + conf, j = x[:, 5:mi].max(1, keepdim=True) + x = torch.cat((box, conf, j.float(), mask), + 1)[conf.view(-1) > conf_thres] + + # Filter by class + if classes is not None: + x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)] + + n = x.shape[0] # number of boxes + if not n: # no boxes + continue + elif n > max_nms: + x = x[x[:, 4].argsort(descending=True)[:max_nms]] + else: + x = x[x[:, 4].argsort(descending=True)] + # Batched NMS + c = x[:, 5:6] * (0 if agnostic else max_wh) # classes + boxes, scores = x[:, :4] + c, x[:, 4] + i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS + if i.shape[0] > max_det: + i = i[:max_det] + if merge and (1 < n < 3E3): + # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4) + iou = box_iou(boxes[i], boxes) > iou_thres + weights = iou * scores[None] + x[i, :4] = torch.mm(weights, x[:, 
:4]).float() / weights.sum(1, keepdim=True) + if redundant: + i = i[iou.sum(1) > 1] + + output[xi] = x[i] + return output + +def get_coco_accuracy(pred_json, ann_json): + coco = COCO(annotation_file=ann_json) + coco_pred = coco.loadRes(pred_json) + + coco_evaluator = COCOeval(cocoGt=coco, cocoDt=coco_pred, iouType="bbox") + + coco_evaluator.evaluate() + coco_evaluator.accumulate() + coco_evaluator.summarize() + return coco_evaluator.stats + +class COCO2017Evaluator: + def __init__(self, + label_path, + image_size=640, + conf_thres=0.001, + iou_thres=0.65): + + self.conf_thres = conf_thres + self.iou_thres = iou_thres + self.label_path = label_path + self.image_size = image_size + + self.jdict = [] + + # iou vector for mAP@0.5:0.95 + self.iouv = torch.linspace(0.5, 0.95, 10) + self.niou = self.iouv.numel() + + def evaluate(self, pred, all_inputs, nms_count=None): + im = all_inputs[0] + targets = all_inputs[1] + paths = all_inputs[2] + shapes = all_inputs[3] + + _, _, height, width = im.shape + targets[:, 2:] *= np.array((width, height, width, height)) + + pred = torch.from_numpy(pred) + pred = non_max_suppression(pred, self.conf_thres, self.iou_thres) + + for idx, det in enumerate(pred): + img_path = paths[idx] + + predn = det + shape = shapes[idx][0] + scale_boxes(im[idx].shape[1:], predn[:, :4], shape, shapes[idx][1]) # native-space pred + self._save_one_json(predn, self.jdict, img_path, coco80_to_coco91) # append to COCO-JSON dictionary + + + def _save_one_json(self, predn, jdict, path, class_map): + # Save one JSON result in the format + image_id = int(os.path.splitext(os.path.basename(path))[0]) + box = xyxy2xywh(predn[:, :4]) + box[:, :2] -= box[:, 2:] / 2 + for p, b in zip(predn.tolist(), box.tolist()): + jdict.append({ + 'image_id': image_id, + 'category_id': class_map[int(p[5])], + 'bbox': [round(x, 3) for x in b], + 'score': round(p[4], 5) + }) + + + def summary(self): + if len(self.jdict): + pred_json = os.path.join("coco2017_predictions.json") + with open(pred_json, 'w') as f: + json.dump(self.jdict, f) + result = get_coco_accuracy(pred_json, self.label_path) + else: + raise ValueError("can not find generated json dict for pycocotools") + return result -- Gitee From 22f52b0bedd59325df01da70569e8988bbeedcf6 Mon Sep 17 00:00:00 2001 From: YoungPeng Date: Thu, 26 Feb 2026 11:19:02 +0800 Subject: [PATCH 2/6] Add: yolov26n inference script. 
--- .../object_detection/yolov26n/igie/README.md | 89 ++++++++ .../yolov26n/igie/ci/prepare.sh | 32 +++ .../object_detection/yolov26n/igie/export.py | 43 ++++ .../yolov26n/igie/inference.py | 153 +++++++++++++ .../yolov26n/igie/requirements.txt | 5 + .../scripts/infer_yolov26n_fp16_accuracy.sh | 49 +++++ .../infer_yolov26n_fp16_performance.sh | 50 +++++ .../object_detection/yolov26n/igie/utils.py | 208 ++++++++++++++++++ .../yolov26n/igie/validator.py | 89 ++++++++ 9 files changed, 718 insertions(+) create mode 100644 models/cv/object_detection/yolov26n/igie/README.md create mode 100644 models/cv/object_detection/yolov26n/igie/ci/prepare.sh create mode 100644 models/cv/object_detection/yolov26n/igie/export.py create mode 100644 models/cv/object_detection/yolov26n/igie/inference.py create mode 100644 models/cv/object_detection/yolov26n/igie/requirements.txt create mode 100644 models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh create mode 100644 models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh create mode 100644 models/cv/object_detection/yolov26n/igie/utils.py create mode 100644 models/cv/object_detection/yolov26n/igie/validator.py diff --git a/models/cv/object_detection/yolov26n/igie/README.md b/models/cv/object_detection/yolov26n/igie/README.md new file mode 100644 index 00000000..bc2097fe --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/README.md @@ -0,0 +1,89 @@ +# YOLOv26n (IGIE) + +## Model Description + +YOLOv26 is the latest generation of real-time detection models released by Ultralytics in 2026. Its core evolution lies in the native NMS-Free design, enabling direct end-to-end output from model to prediction. By incorporating the MuSGD optimizer and STAL strategy, it significantly streamlines post-processing logic while further enhancing inference speed and small object detection precision. + +## Supported Environments + +| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | +| :----: | :----: | :----: | +| MR-V100 | 4.4.0 | 26.03 | + +## Model Preparation + +### Prepare Resources + +Pretrained model: + +Dataset: + +- to download the labels dataset. +- to download the validation dataset. +- to download the train dataset. 
+ +```bash +unzip -q -d ./ coco2017labels.zip +unzip -q -d ./coco/images/ train2017.zip +unzip -q -d ./coco/images/ val2017.zip + +coco +├── annotations +│   └── instances_val2017.json +├── images +│   ├── train2017 +│   └── val2017 +├── labels +│   ├── train2017 +│   └── val2017 +├── LICENSE +├── README.txt +├── test-dev2017.txt +├── train2017.cache +├── train2017.txt +├── val2017.cache +└── val2017.txt +``` + +### Install Dependencies + +```bash +pip3 install -r requirements.txt +``` + +## Model Conversion + +```bash +# download the weight from the recommend link +wget https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n.pt + +python3 export.py --weight yolo26n.pt --batch 32 + +# Use onnxsim optimize onnx model +onnxsim yolo26n.onnx yolo26n_opt.onnx +``` + +## Model Inference + +```bash +export DATASETS_DIR=/Path/to/coco/ +``` + +### FP16 + +```bash +# Accuracy +bash scripts/infer_yolov26n_fp16_accuracy.sh +# Performance +bash scripts/infer_yolov26n_fp16_performance.sh +``` + +## Model Results + +| Model | BatchSize | Precision | FPS | IOU@0.5 | IOU@0.5:0.95 | +| -------- | --------- | --------- | ------- | ------- | ------------ | +| YOLOv26n | 32 | FP16 | 1344.11 | 0.558 | 0.402 | + +## References + +- [YOLOv26](https://github.com/ultralytics/ultralytics) diff --git a/models/cv/object_detection/yolov26n/igie/ci/prepare.sh b/models/cv/object_detection/yolov26n/igie/ci/prepare.sh new file mode 100644 index 00000000..d1302eda --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/ci/prepare.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -x + +ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') +if [[ ${ID} == "ubuntu" ]]; then + apt install -y libgl1-mesa-glx +elif [[ ${ID} == "centos" ]]; then + yum install -y mesa-libGL +else + echo "Not Support Os" +fi + +pip3 install -r requirements.txt + +python3 export.py --weight yolo26n.pt --batch 32 + +onnxsim yolo26n.onnx yolo26n_opt.onnx diff --git a/models/cv/object_detection/yolov26n/igie/export.py b/models/cv/object_detection/yolov26n/igie/export.py new file mode 100644 index 00000000..780b9b2a --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/export.py @@ -0,0 +1,43 @@ +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
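+# Usage (as shown in the README):
+#   python3 export.py --weight yolo26n.pt --batch 32
+# Exports the checkpoint through the Ultralytics API to yolo26n.onnx
+# (opset 13, 640x640 input, batch size fixed at export time).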
+ +import argparse +from ultralytics import YOLO + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--weight", + type=str, + required=True, + help="pytorch model weight.") + + parser.add_argument("--batch", + type=int, + required=True, + help="batchsize of the model.") + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + model = YOLO(args.weight).cpu() + + model.export(format='onnx', batch=args.batch, imgsz=(640, 640), optimize=True, simplify=True, opset=13) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/object_detection/yolov26n/igie/inference.py b/models/cv/object_detection/yolov26n/igie/inference.py new file mode 100644 index 00000000..5053a00f --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/inference.py @@ -0,0 +1,153 @@ +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import argparse +import os + +import torch +import tvm +from tvm import relay + +import numpy as np +from pathlib import Path +from ultralytics import YOLO +from ultralytics.cfg import get_cfg +from ultralytics.utils import DEFAULT_CFG +from validator import IGIE_Validator + +from utils import COCO2017Dataset + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--engine", + type=str, + required=True, + help="igie engine path.") + + parser.add_argument("--batchsize", + type=int, + required=True, + help="inference batch size.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="datasets path.") + + parser.add_argument("--input_name", + type=str, + required=True, + help="input name of the model.") + + parser.add_argument("--warmup", + type=int, + default=3, + help="number of warmup before test.") + + parser.add_argument("--acc_target", + type=float, + default=None, + help="Model inference Accuracy target.") + + parser.add_argument("--fps_target", + type=float, + default=None, + help="Model inference FPS target.") + + parser.add_argument("--perf_only", + type=bool, + default=False, + help="Run performance test only") + + args = parser.parse_args() + + return args + +def get_dataloader(data_path, label_path, batch_size, num_workers): + + dataset = COCO2017Dataset(data_path, label_path, image_size=640) + + dataloader = torch.utils.data.DataLoader(dataset, + batch_size=batch_size, + drop_last=False, + num_workers=num_workers, + collate_fn=dataset.collate_fn) + return dataloader + +def main(): + args = parse_args() + + batch_size = args.batchsize + + # create iluvatar target & device + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + device = tvm.device(target.kind.name, 0) + + # load engine + lib = tvm.runtime.load_module(args.engine) + + # create runtime from engine + module = tvm.contrib.graph_executor.GraphModule(lib["default"](device)) + + # just run perf test + if args.perf_only: + ftimer = module.module.time_evaluator("run", 
device, number=100, repeat=1) + prof_res = np.array(ftimer().results) * 1000 + fps = batch_size * 1000 / np.mean(prof_res) + print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}") + else: + root_path = args.datasets + val_path = os.path.join(root_path, 'val2017.txt') + + overrides = {} + overrides['mode'] = 'val' + + cfg_args = get_cfg(cfg=DEFAULT_CFG, overrides=overrides) + + cfg_args.batch = args.batchsize + + cfg_args.data = { + 'path': Path(root_path), + 'val': val_path, + 'names': + { + 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', + 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', + 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', + 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', + 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', + 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', + 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', + 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', + 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', + 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', + 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', + 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', + 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', + 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', + 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush' + }, + 'nc': 80} + cfg_args.save_json = True + cfg_args.plots = False + + validator = IGIE_Validator(args=cfg_args, save_dir=Path('.')) + validator.stride = 32 + + validator(module, device) + +if __name__ == "__main__": + main() diff --git a/models/cv/object_detection/yolov26n/igie/requirements.txt b/models/cv/object_detection/yolov26n/igie/requirements.txt new file mode 100644 index 00000000..9eecef9e --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/requirements.txt @@ -0,0 +1,5 @@ +tqdm +onnx==1.16.0 +huggingface_hub +ultralytics==8.4.16 +pycocotools diff --git a/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh new file mode 100644 index 00000000..ba4b2db9 --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
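+# Builds an FP16 IGIE engine from yolo26n_opt.onnx (input tensor "images") and
+# evaluates COCO accuracy through inference.py / IGIE_Validator.
+# Usage: bash scripts/infer_yolov26n_fp16_accuracy.sh [--bs <batch_size>]
+# DATASETS_DIR must point at the prepared COCO directory.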
+ +batchsize=32 +model_path="yolo26n_opt.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +# build engine +python3 ../../igie_common/build_engine.py \ + --model_path ${model_path} \ + --input images:${batchsize},3,640,640 \ + --precision fp16 \ + --engine_path yolo26n_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine yolo26n_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name images \ + --datasets ${datasets_path} \ No newline at end of file diff --git a/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh new file mode 100644 index 00000000..cbae9330 --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +batchsize=32 +model_path="yolo26n_opt.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +# build engine +python3 ../../igie_common/build_engine.py \ + --model_path ${model_path} \ + --input images:${batchsize},3,640,640 \ + --precision fp16 \ + --engine_path yolo26n_bs_${batchsize}_fp16.so + + +# inference +python3 inference.py \ + --engine yolo26n_bs_${batchsize}_fp16.so \ + --batchsize ${batchsize} \ + --input_name images \ + --datasets ${datasets_path} \ + --perf_only True \ No newline at end of file diff --git a/models/cv/object_detection/yolov26n/igie/utils.py b/models/cv/object_detection/yolov26n/igie/utils.py new file mode 100644 index 00000000..1b35ad76 --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/utils.py @@ -0,0 +1,208 @@ +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
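+# Dataset helpers mirroring the yolov5s utils.py: letterbox preprocessing,
+# box-format conversions, and COCO2017Dataset for building the dataloader.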
+ +import os +import cv2 +import torch +import numpy as np + +from pycocotools.coco import COCO + +coco80_to_coco91 = [ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, + 89, 90 +] + +coco91_to_coco80_dict = {i: idx for idx, i in enumerate(coco80_to_coco91)} + +def letterbox(im, new_shape=(640, 640), color=(114, 114, 114)): + # Resize and pad image while meeting stride-multiple constraints + # current shape [height, width] + + shape = im.shape[:2] + if isinstance(new_shape, int): + new_shape = (new_shape, new_shape) + + # Scale ratio (new / old) + r = min(new_shape[0] / shape[0], new_shape[1] / shape[1]) + + # Compute padding + ratio = r, r + new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r)) + dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] + + dw /= 2 + dh /= 2 + + if shape[::-1] != new_unpad: + im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR) + top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1)) + left, right = int(round(dw - 0.1)), int(round(dw + 0.1)) + im = cv2.copyMakeBorder(im, + top, + bottom, + left, + right, + cv2.BORDER_CONSTANT, + value=color) + return im, ratio, (dw, dh) + +def xywhn2xyxy(x, w=640, h=640, padw=0, padh=0): + # Convert nx4 boxes from [x, y, w, h] normalized to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = w * (x[:, 0] - x[:, 2] / 2) + padw # top left x + y[:, 1] = h * (x[:, 1] - x[:, 3] / 2) + padh # top left y + y[:, 2] = w * (x[:, 0] + x[:, 2] / 2) + padw # bottom right x + y[:, 3] = h * (x[:, 1] + x[:, 3] / 2) + padh # bottom right y + return y + +def clip_boxes(boxes, shape): + # Clip boxes (xyxy) to image shape (height, width) + if isinstance(boxes, torch.Tensor): # faster individually + boxes[:, 0].clamp_(0, shape[1]) # x1 + boxes[:, 1].clamp_(0, shape[0]) # y1 + boxes[:, 2].clamp_(0, shape[1]) # x2 + boxes[:, 3].clamp_(0, shape[0]) # y2 + else: # np.array (faster grouped) + boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, shape[1]) # x1, x2 + boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, shape[0]) # y1, y2 + +def xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0): + # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] normalized where xy1=top-left, xy2=bottom-right + if clip: + clip_boxes(x, (h - eps, w - eps)) # warning: inplace clip + y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) + y[:, 0] = ((x[:, 0] + x[:, 2]) / 2) / w # x center + y[:, 1] = ((x[:, 1] + x[:, 3]) / 2) / h # y center + y[:, 2] = (x[:, 2] - x[:, 0]) / w # width + y[:, 3] = (x[:, 3] - x[:, 1]) / h # height + return y + +class COCO2017Dataset(torch.utils.data.Dataset): + def __init__(self, + image_dir_path, + label_json_path, + image_size=640, + pad_color=114, + val_mode=True, + input_layout="NCHW"): + + self.image_dir_path = image_dir_path + self.label_json_path = label_json_path + self.image_size = image_size + self.pad_color = pad_color + self.val_mode = val_mode + self.input_layout = input_layout + + self.coco = COCO(annotation_file=self.label_json_path) + + if self.val_mode: + self.img_ids = list(sorted(self.coco.imgs.keys())) + else: + self.img_ids = sorted(list(self.coco.imgToAnns.keys())) + + def __len__(self): + return len(self.img_ids) + + def __getitem__(self, index): + img_path = 
self._get_image_path(index) + img, (h0, w0), (h, w) = self._load_image(index) + + img, ratio, pad = letterbox(img, + self.image_size, + color=(self.pad_color, self.pad_color, self.pad_color)) + shapes = (h0, w0), ((h / h0, w / w0), pad) + + # load label + raw_label = self._load_json_label(index) + # normalized xywh to pixel xyxy format + raw_label[:, 1:] = xywhn2xyxy(raw_label[:, 1:], + ratio[0] * w, + ratio[1] * h, + padw=pad[0], + padh=pad[1]) + + raw_label[:, 1:] = xyxy2xywhn(raw_label[:, 1:], + w=img.shape[1], + h=img.shape[0], + clip=True, + eps=1E-3) + + nl = len(raw_label) + labels_out = np.zeros((nl, 6)) + labels_out[:, 1:] = raw_label + + # HWC to CHW, BGR to RGB + img = img.transpose((2, 0, 1))[::-1] + img = np.ascontiguousarray(img) / 255.0 + if self.input_layout == "NHWC": + img = img.transpose((1, 2, 0)) + + return img, labels_out, img_path, shapes + + def _get_image_path(self, index): + idx = self.img_ids[index] + path = self.coco.loadImgs(idx)[0]["file_name"] + img_path = os.path.join(self.image_dir_path, path) + return img_path + + def _load_image(self, index): + img_path = self._get_image_path(index) + + im = cv2.imread(img_path) + h0, w0 = im.shape[:2] + r = self.image_size / max(h0, w0) + if r != 1: + im = cv2.resize(im, (int(w0 * r), int(h0 * r)), interpolation=cv2.INTER_LINEAR) + return im.astype("float32"), (h0, w0), im.shape[:2] + + def _load_json_label(self, index): + _, (h0, w0), _ = self._load_image(index) + + idx = self.img_ids[index] + ann_ids = self.coco.getAnnIds(imgIds=idx) + targets = self.coco.loadAnns(ids=ann_ids) + + labels = [] + for target in targets: + cat = target["category_id"] + coco80_cat = coco91_to_coco80_dict[cat] + cat = np.array([[coco80_cat]]) + + x, y, w, h = target["bbox"] + x1, y1, x2, y2 = x, y, int(x + w), int(y + h) + xyxy = np.array([[x1, y1, x2, y2]]) + xywhn = xyxy2xywhn(xyxy, w0, h0) + labels.append(np.hstack((cat, xywhn))) + + if labels: + labels = np.vstack(labels) + else: + if self.val_mode: + labels = np.zeros((1, 5)) + else: + raise ValueError(f"set val_mode = False to use images with labels") + + return labels + + @staticmethod + def collate_fn(batch): + im, label, path, shapes = zip(*batch) + for i, lb in enumerate(label): + lb[:, 0] = i + return np.concatenate([i[None] for i in im], axis=0), np.concatenate(label, 0), path, shapes diff --git a/models/cv/object_detection/yolov26n/igie/validator.py b/models/cv/object_detection/yolov26n/igie/validator.py new file mode 100644 index 00000000..c91fd6f4 --- /dev/null +++ b/models/cv/object_detection/yolov26n/igie/validator.py @@ -0,0 +1,89 @@ +# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
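+# IGIE_Validator adapts the Ultralytics DetectionValidator to a TVM/IGIE
+# graph executor: each preprocessed batch is fed to the compiled engine via
+# set_input/run/get_output instead of a PyTorch model, and the standard
+# COCO-JSON evaluation path is reused for the final metrics.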
+
+import os
+import tvm
+import json
+import torch
+import numpy as np
+
+from tqdm import tqdm
+
+from ultralytics.models.yolo.detect import DetectionValidator
+from ultralytics.utils.metrics import ConfusionMatrix, DetMetrics
+from ultralytics.data.converter import coco80_to_coco91_class
+
+class IGIE_Validator(DetectionValidator):
+    def __call__(self, engine, device):
+        self.data = self.args.data
+        self.dataloader = self.get_dataloader(self.data.get(self.args.split), self.args.batch)
+        self.init_metrics()
+        self.device = torch.device('cpu')
+        self.end2end = False
+        self.metrics = DetMetrics()
+        self.stats = {'tp': [], 'conf': [], 'pred_cls': [], 'target_cls': [], 'target_img': []}
+
+        # warm up
+        for _ in range(3):
+            engine.run()
+
+        for batch in tqdm(self.dataloader):
+            batch = self.preprocess(batch)
+
+            imgs = batch['img']
+            pad_batch = len(imgs) != self.args.batch
+            if pad_batch:
+                origin_size = len(imgs)
+                imgs = np.resize(imgs, (self.args.batch, *imgs.shape[1:]))
+
+            engine.set_input(0, tvm.nd.array(imgs, device))
+
+            engine.run()
+
+            outputs = engine.get_output(0).asnumpy()
+
+            if pad_batch:
+                outputs = outputs[:origin_size]
+
+            outputs = torch.from_numpy(outputs)
+
+            preds = self.postprocess([outputs])
+
+            self.update_metrics(preds, batch)
+
+        stats = self.get_stats()
+
+        if self.args.save_json and self.jdict:
+            with open(str(self.save_dir / 'predictions.json'), 'w') as f:
+                print(f'Saving {f.name} ...')
+                json.dump(self.jdict, f)  # flatten and save
+
+            stats = self.eval_json(stats)
+
+        return stats
+
+    def init_metrics(self):
+        """Initialize evaluation metrics for YOLO."""
+        val = self.data.get(self.args.split, '')  # validation path
+        self.is_coco = isinstance(val, str) and 'coco' in val and val.endswith(f'{os.sep}val2017.txt')  # is COCO
+        self.class_map = coco80_to_coco91_class() if self.is_coco else list(range(1000))
+        self.args.save_json |= self.is_coco and not self.training  # run on final val if training COCO
+        self.names = self.data['names']
+        self.metrics.names = self.names
+        self.nc = len(self.names)
+        self.seen = 0
+        self.jdict = []
+        self.stats = []
+        self.confusion_matrix = ConfusionMatrix(names=self.names)
--
Gitee

From 3b7eabf82f39e2570a6b0081fdb317e083c9fc95 Mon Sep 17 00:00:00 2001
From: YoungPeng
Date: Thu, 26 Feb 2026 14:40:34 +0800
Subject: [PATCH 3/6] Fix: modify mobilevit_s fp16 performance test script
 input size.
--- .../igie/scripts/infer_mobilevit_s_fp16_performance.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_performance.sh b/models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_performance.sh index 6620b051..caa2f277 100644 --- a/models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_performance.sh +++ b/models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_performance.sh @@ -36,7 +36,7 @@ echo "batch size is ${batchsize}" # build engine python3 ${RUN_DIR}build_engine.py \ --model_path ${model_path} \ - --input input:${batchsize},3,288,288 \ + --input input:${batchsize},3,224,224 \ --precision fp16 \ --engine_path mobilevit_s_bs_${batchsize}_fp16.so @@ -47,4 +47,4 @@ python3 inference.py \ --batchsize ${batchsize} \ --input_name input \ --datasets ${datasets_path} \ - --perf_only True \ No newline at end of file + --perf_only True -- Gitee From 0f533cc0df769f04bdebb8a2789491ce924d31d4 Mon Sep 17 00:00:00 2001 From: "dun.zhang" Date: Wed, 11 Mar 2026 06:11:08 +0000 Subject: [PATCH 4/6] add densenet121 int8 --- .../classification/densenet121/igie/README.md | 12 +- .../densenet121/igie/quantize.py | 107 ++++++++++++++++++ .../infer_densenet121_int8_accuracy.sh | 58 ++++++++++ .../infer_densenet121_int8_performance.sh | 59 ++++++++++ 4 files changed, 235 insertions(+), 1 deletion(-) create mode 100644 models/cv/classification/densenet121/igie/quantize.py create mode 100644 models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh create mode 100644 models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh diff --git a/models/cv/classification/densenet121/igie/README.md b/models/cv/classification/densenet121/igie/README.md index dc037765..dca2cd89 100644 --- a/models/cv/classification/densenet121/igie/README.md +++ b/models/cv/classification/densenet121/igie/README.md @@ -47,8 +47,18 @@ bash scripts/infer_densenet121_fp16_accuracy.sh bash scripts/infer_densenet121_fp16_performance.sh ``` +### INT8 + +```bash +# Accuracy +bash scripts/infer_densenet121_int8_accuracy.sh +# Performance +bash scripts/infer_densenet121_int8_performance.sh +``` + ## Model Results | Model | BatchSize | Precision | FPS | Top-1(%) | Top-5(%) | | :----: | :----: | :----: | :----: | :----: | :----: | -| DenseNet121 | 32 | FP16 | 2199.75 | 74.40 | 91.931 | +| DenseNet121 | 32 | FP16 | 1894.092 | 74.40 | 91.931 | +| DenseNet121 | 32 | INT8 | 2093.822 | 70.675 | 89.561 | \ No newline at end of file diff --git a/models/cv/classification/densenet121/igie/quantize.py b/models/cv/classification/densenet121/igie/quantize.py new file mode 100644 index 00000000..d079be70 --- /dev/null +++ b/models/cv/classification/densenet121/igie/quantize.py @@ -0,0 +1,107 @@ +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
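+
+# Post-training static quantization with ONNX Runtime: a calibration reader
+# feeds a bounded number of validation batches (and stops early if host memory
+# gets tight), weights and activations are quantized to symmetric INT8 using
+# the Percentile calibration method, and Concat nodes are excluded from
+# quantization.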
+ +import onnx +import psutil +import argparse +import numpy as np +from inference import get_dataloader +from onnxruntime.quantization import (CalibrationDataReader, QuantFormat, + quantize_static, QuantType, + CalibrationMethod) + +class CalibrationDataLoader(CalibrationDataReader): + def __init__(self, input_name, dataloader, cnt_limit=100): + self.cnt = 0 + self.input_name = input_name + self.cnt_limit = cnt_limit + self.iter = iter(dataloader) + + # avoid oom + @staticmethod + def _exceed_memory_upper_bound(upper_bound=80): + info = psutil.virtual_memory() + total_percent = info.percent + if total_percent >= upper_bound: + return True + return False + + def get_next(self): + if self._exceed_memory_upper_bound() or self.cnt >= self.cnt_limit: + return None + self.cnt += 1 + print(f"onnx calibration data count: {self.cnt}") + input_info = next(self.iter) + + ort_input = {k: np.array(v) for k, v in zip(self.input_name, input_info)} + return ort_input + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument("--out_path", + type=str, + required=True, + help="igie export engine path.") + + parser.add_argument("--datasets", + type=str, + required=True, + help="calibration datasets path.") + + parser.add_argument("--num_workers", + type=int, + default=16, + help="number of workers used in pytorch dataloader.") + + args = parser.parse_args() + + return args + +def main(): + args = parse_args() + + model = onnx.load(args.model_path) + graph = model.graph + input_names = [input.name for input in model.graph.input] + + dataloader = get_dataloader(args.datasets, batch_size=1, num_workers=args.num_workers) + calibration = CalibrationDataLoader(input_names, dataloader, cnt_limit=20) + + disable_quant_nodes = [node.name for node in graph.node if node.op_type == 'Concat'] + + quantize_static(args.model_path, + args.out_path, + calibration_data_reader=calibration, + quant_format=QuantFormat.QOperator, + per_channel=False, + activation_type=QuantType.QInt8, + weight_type=QuantType.QInt8, + use_external_data_format=False, + calibrate_method=CalibrationMethod.Percentile, + nodes_to_exclude=disable_quant_nodes, + extra_options = { + 'ActivationSymmetric': True, + 'WeightSymmetric': True + } + ) + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh new file mode 100644 index 00000000..d75b0a1a --- /dev/null +++ b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh @@ -0,0 +1,58 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
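+
+# Workflow: quantize the original ONNX model to INT8 (skipped when the
+# quantized file already exists), build an INT8 IGIE engine for the chosen
+# batch size, then run the accuracy evaluation against ${DATASETS_DIR}.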
+ +batchsize=32 +model_path="densenet121.onnx" +quantized_model_path="densenet121_int8.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +if [ ! -e $quantized_model_path ]; then + # quantize model to int8 + python3 quantize.py \ + --model_path ${model_path} \ + --out_path ${quantized_model_path} \ + --datasets ${datasets_path} +fi + +# build engine +python3 ${RUN_DIR}build_engine.py \ + --model_path ${quantized_model_path} \ + --input input:${batchsize},3,224,224 \ + --precision int8 \ + --engine_path densenet121_bs_${batchsize}_int8.so + + +# inference +python3 ${RUN_DIR}inference.py \ + --engine densenet121_bs_${batchsize}_int8.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ No newline at end of file diff --git a/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh new file mode 100644 index 00000000..908097e5 --- /dev/null +++ b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +batchsize=32 +model_path="densenet121.onnx" +quantized_model_path="densenet121_int8.onnx" +datasets_path=${DATASETS_DIR} + +# Update arguments +index=0 +options=$@ +arguments=($options) +for argument in $options +do + index=`expr $index + 1` + case $argument in + --bs) batchsize=${arguments[index]};; + esac +done + +echo "batch size is ${batchsize}" + +if [ ! 
-e $quantized_model_path ]; then + # quantize model to int8 + python3 quantize.py \ + --model_path ${model_path} \ + --out_path ${quantized_model_path} \ + --datasets ${datasets_path} +fi + +# build engine +python3 ${RUN_DIR}build_engine.py \ + --model_path ${quantized_model_path} \ + --input input:${batchsize},3,224,224 \ + --precision int8 \ + --engine_path densenet121_bs_${batchsize}_int8.so + + +# inference +python3 ${RUN_DIR}inference.py \ + --engine densenet121_bs_${batchsize}_int8.so \ + --batchsize ${batchsize} \ + --input_name input \ + --datasets ${datasets_path} \ + --perf_only True \ No newline at end of file -- Gitee From 56cbce198f8ef8b1d819348db5f8a49926211a58 Mon Sep 17 00:00:00 2001 From: "dun.zhang" Date: Wed, 11 Mar 2026 06:24:01 +0000 Subject: [PATCH 5/6] add detr fp16 --- .../cv/object_detection/detr/igie/Dockerfile | 13 + models/cv/object_detection/detr/igie/LICENSE | 201 ++++++++ .../cv/object_detection/detr/igie/README.md | 94 ++++ .../detr/igie/build_engine.py | 66 +++ .../object_detection/detr/igie/d2/README.md | 39 ++ .../d2/configs/detr_256_6_6_torchvision.yaml | 45 ++ .../detr_segm_256_6_6_torchvision.yaml | 46 ++ .../detr/igie/d2/converter.py | 69 +++ .../detr/igie/d2/detr/__init__.py | 4 + .../detr/igie/d2/detr/config.py | 34 ++ .../detr/igie/d2/detr/dataset_mapper.py | 122 +++++ .../detr/igie/d2/detr/detr.py | 261 ++++++++++ .../detr/igie/d2/train_net.py | 145 ++++++ .../detr/igie/datasets/__init__.py | 25 + .../detr/igie/datasets/coco.py | 158 ++++++ .../detr/igie/datasets/coco_eval.py | 257 ++++++++++ .../detr/igie/datasets/coco_panoptic.py | 99 ++++ .../detr/igie/datasets/panoptic_eval.py | 44 ++ .../detr/igie/datasets/transforms.py | 277 +++++++++++ .../cv/object_detection/detr/igie/engine.py | 151 ++++++ .../cv/object_detection/detr/igie/export.py | 169 +++++++ .../cv/object_detection/detr/igie/hubconf.py | 168 +++++++ .../object_detection/detr/igie/inference.py | 278 +++++++++++ models/cv/object_detection/detr/igie/main.py | 248 ++++++++++ .../detr/igie/models/__init__.py | 6 + .../detr/igie/models/backbone.py | 119 +++++ .../object_detection/detr/igie/models/detr.py | 359 ++++++++++++++ .../detr/igie/models/matcher.py | 86 ++++ .../detr/igie/models/position_encoding.py | 89 ++++ .../detr/igie/models/segmentation.py | 363 ++++++++++++++ .../detr/igie/models/transformer.py | 297 +++++++++++ .../detr/igie/requirements.txt | 9 + .../detr/igie/run_with_submitit.py | 111 +++++ .../igie/scripts/infer_detr_fp16_accuracy.sh | 34 ++ .../scripts/infer_detr_fp16_performance.sh | 35 ++ .../cv/object_detection/detr/igie/test_all.py | 209 ++++++++ models/cv/object_detection/detr/igie/tox.ini | 3 + .../detr/igie/util/__init__.py | 1 + .../detr/igie/util/box_ops.py | 88 ++++ .../object_detection/detr/igie/util/misc.py | 468 ++++++++++++++++++ .../detr/igie/util/plot_utils.py | 107 ++++ 41 files changed, 5397 insertions(+) create mode 100644 models/cv/object_detection/detr/igie/Dockerfile create mode 100644 models/cv/object_detection/detr/igie/LICENSE create mode 100644 models/cv/object_detection/detr/igie/README.md create mode 100644 models/cv/object_detection/detr/igie/build_engine.py create mode 100644 models/cv/object_detection/detr/igie/d2/README.md create mode 100644 models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml create mode 100644 models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml create mode 100644 models/cv/object_detection/detr/igie/d2/converter.py create mode 100644 
models/cv/object_detection/detr/igie/d2/detr/__init__.py create mode 100644 models/cv/object_detection/detr/igie/d2/detr/config.py create mode 100644 models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py create mode 100644 models/cv/object_detection/detr/igie/d2/detr/detr.py create mode 100644 models/cv/object_detection/detr/igie/d2/train_net.py create mode 100644 models/cv/object_detection/detr/igie/datasets/__init__.py create mode 100644 models/cv/object_detection/detr/igie/datasets/coco.py create mode 100644 models/cv/object_detection/detr/igie/datasets/coco_eval.py create mode 100644 models/cv/object_detection/detr/igie/datasets/coco_panoptic.py create mode 100644 models/cv/object_detection/detr/igie/datasets/panoptic_eval.py create mode 100644 models/cv/object_detection/detr/igie/datasets/transforms.py create mode 100644 models/cv/object_detection/detr/igie/engine.py create mode 100644 models/cv/object_detection/detr/igie/export.py create mode 100644 models/cv/object_detection/detr/igie/hubconf.py create mode 100644 models/cv/object_detection/detr/igie/inference.py create mode 100644 models/cv/object_detection/detr/igie/main.py create mode 100644 models/cv/object_detection/detr/igie/models/__init__.py create mode 100644 models/cv/object_detection/detr/igie/models/backbone.py create mode 100644 models/cv/object_detection/detr/igie/models/detr.py create mode 100644 models/cv/object_detection/detr/igie/models/matcher.py create mode 100644 models/cv/object_detection/detr/igie/models/position_encoding.py create mode 100644 models/cv/object_detection/detr/igie/models/segmentation.py create mode 100644 models/cv/object_detection/detr/igie/models/transformer.py create mode 100644 models/cv/object_detection/detr/igie/requirements.txt create mode 100644 models/cv/object_detection/detr/igie/run_with_submitit.py create mode 100644 models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh create mode 100644 models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh create mode 100644 models/cv/object_detection/detr/igie/test_all.py create mode 100644 models/cv/object_detection/detr/igie/tox.ini create mode 100644 models/cv/object_detection/detr/igie/util/__init__.py create mode 100644 models/cv/object_detection/detr/igie/util/box_ops.py create mode 100644 models/cv/object_detection/detr/igie/util/misc.py create mode 100644 models/cv/object_detection/detr/igie/util/plot_utils.py diff --git a/models/cv/object_detection/detr/igie/Dockerfile b/models/cv/object_detection/detr/igie/Dockerfile new file mode 100644 index 00000000..3e6da220 --- /dev/null +++ b/models/cv/object_detection/detr/igie/Dockerfile @@ -0,0 +1,13 @@ +FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime + +ENV DEBIAN_FRONTEND=noninteractive + +RUN apt-get update -qq && \ + apt-get install -y git vim libgtk2.0-dev && \ + rm -rf /var/cache/apk/* + +RUN pip --no-cache-dir install Cython + +COPY requirements.txt /workspace + +RUN pip --no-cache-dir install -r /workspace/requirements.txt diff --git a/models/cv/object_detection/detr/igie/LICENSE b/models/cv/object_detection/detr/igie/LICENSE new file mode 100644 index 00000000..b1395e94 --- /dev/null +++ b/models/cv/object_detection/detr/igie/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 - present, Facebook, Inc + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/models/cv/object_detection/detr/igie/README.md b/models/cv/object_detection/detr/igie/README.md
new file mode 100644
index 00000000..d5964650
--- /dev/null
+++ b/models/cv/object_detection/detr/igie/README.md
@@ -0,0 +1,94 @@
+# DETR (IGIE)
+
+## Model Description
+
+DETR (DEtection TRansformer) is a novel approach that views object detection as a direct set prediction problem. This method streamlines the detection process, eliminating the need for many hand-designed components like non-maximum suppression procedures or anchor generation, which are typically used to explicitly encode prior knowledge about the task.
+
+## Supported Environments
+
+| GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release |
+| :----: | :----: | :----: |
+| MR-V100 | 4.4.0 | 26.03 |
+
+
+## Model Preparation
+
+This directory is a fork of the official DETR repository: https://github.com/facebookresearch/detr.git
+
+### Prepare Resources
+
+Pretrained model: 
+
+Dataset: 
+
+- to download the labels dataset.
+- to download the validation dataset.
+- to download the train dataset.
+
+```bash
+unzip -q -d ./ coco2017labels.zip
+unzip -q -d ./coco/images/ train2017.zip
+unzip -q -d ./coco/images/ val2017.zip
+
+coco
+├── annotations
+│   └── instances_val2017.json
+├── images
+│   ├── train2017
+│   └── val2017
+├── labels
+│   ├── train2017
+│   └── val2017
+├── LICENSE
+├── README.txt
+├── test-dev2017.txt
+├── train2017.cache
+├── train2017.txt
+├── val2017.cache
+└── val2017.txt
+```
+
+### Install Dependencies
+
+Contact the Iluvatar administrator to get the missing packages:
+
+- mmcv-*.whl
+
+```bash
+# Install libGL
+## CentOS
+yum install -y mesa-libGL
+## Ubuntu
+apt install -y libgl1-mesa-glx
+
+pip3 install -r requirements.txt
+```
+
+### Model Conversion
+
+```bash
+python3 export.py --no_aux_loss --eval --resume detr-r50-e632da11.pth --coco_path /path/data/coco
+
+onnxsim detr.onnx detr_opt.onnx
+```
+
+## Model Inference
+
+```bash
+export DATASETS_DIR=/path/to/coco/
+```
+
+### FP16
+
+```bash
+# Accuracy
+bash scripts/infer_detr_fp16_accuracy.sh
+# Performance
+bash scripts/infer_detr_fp16_performance.sh
+```
+
+## Model Results
+
+| Model | BatchSize | Precision | FPS | MAP@0.5 | MAP@0.5:0.95 |
+| :----: | :----: | :----: | :----: | :----: | :----: |
+| DETR | 32 | FP16 | 149.37 | 0.581 | 0.385 |
diff --git a/models/cv/object_detection/detr/igie/build_engine.py b/models/cv/object_detection/detr/igie/build_engine.py
new file mode 100644
index 00000000..9692cc52
--- /dev/null
+++ b/models/cv/object_detection/detr/igie/build_engine.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
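+
+# Builds an IGIE engine from the exported DETR ONNX model: the graph takes two
+# inputs, the padded image tensor [batch_size, 3, 800, 800] and the padding
+# mask [batch_size, 800, 800], and is compiled for the Iluvatar MR target with
+# the cudnn/cublas/ixinfer libraries at the requested precision.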
+ +import tvm +import argparse +from tvm import relay +from tvm.relay.import_model import import_model_to_igie +import os + +def parse_args(): + parser = argparse.ArgumentParser() + + parser.add_argument("--model_path", + type=str, + required=True, + help="original model path.") + + parser.add_argument('--batch_size', default=32, type=int) + + parser.add_argument("--precision", + type=str, + choices=["fp32", "fp16", "int8"], + required=True, + help="model inference precision.") + + + parser.add_argument("--engine_path", + type=str, + required=True, + help="igie export engine path.") + + args = parser.parse_args() + return args + +def main(): + + args = parse_args() + + input_dict = {"tensor": [args.batch_size, 3, 800, 800], "mask": [args.batch_size, 800, 800]} + + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + + mod, params = import_model_to_igie(args.model_path, input_dict, backend="igie") + + # build engine + lib = tvm.relay.build(mod, target=target, params=params, precision=args.precision) + + # export engine + lib.export_library(args.engine_path) + print("done.") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/models/cv/object_detection/detr/igie/d2/README.md b/models/cv/object_detection/detr/igie/d2/README.md new file mode 100644 index 00000000..7f1d7531 --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/README.md @@ -0,0 +1,39 @@ +Detectron2 wrapper for DETR +======= + +We provide a Detectron2 wrapper for DETR, thus providing a way to better integrate it in the existing detection ecosystem. It can be used for example to easily leverage datasets or backbones provided in Detectron2. + +This wrapper currently supports only box detection, and is intended to be as close as possible to the original implementation, and we checked that it indeed match the results. Some notable facts and caveats: +- The data augmentation matches DETR's original data augmentation. This required patching the RandomCrop augmentation from Detectron2, so you'll need a version from the master branch from June 24th 2020 or more recent. +- To match DETR's original backbone initialization, we use the weights of a ResNet50 trained on imagenet using torchvision. This network uses a different pixel mean and std than most of the backbones available in Detectron2 by default, so extra care must be taken when switching to another one. Note that no other torchvision models are available in Detectron2 as of now, though it may change in the future. +- The gradient clipping mode is "full_model", which is not the default in Detectron2. + +# Usage + +To install Detectron2, please follow the [official installation instructions](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md). + +## Evaluating a model + +For convenience, we provide a conversion script to convert models trained by the main DETR training loop into the format of this wrapper. 
To download and convert the main Resnet50 model, simply do: + +``` +python converter.py --source_model https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth --output_model converted_model.pth +``` + +You can then evaluate it using: +``` +python train_net.py --eval-only --config configs/detr_256_6_6_torchvision.yaml MODEL.WEIGHTS "converted_model.pth" +``` + + +## Training + +To train DETR on a single node with 8 gpus, simply use: +``` +python train_net.py --config configs/detr_256_6_6_torchvision.yaml --num-gpus 8 +``` + +To fine-tune DETR for instance segmentation on a single node with 8 gpus, simply use: +``` +python train_net.py --config configs/detr_segm_256_6_6_torchvision.yaml --num-gpus 8 MODEL.DETR.FROZEN_WEIGHTS +``` diff --git a/models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml b/models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml new file mode 100644 index 00000000..25d64184 --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml @@ -0,0 +1,45 @@ +MODEL: + META_ARCHITECTURE: "Detr" + WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + MASK_ON: False + RESNETS: + DEPTH: 50 + STRIDE_IN_1X1: False + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + DETR: + GIOU_WEIGHT: 2.0 + L1_WEIGHT: 5.0 + NUM_OBJECT_QUERIES: 100 +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 64 + BASE_LR: 0.0001 + STEPS: (369600,) + MAX_ITER: 554400 + WARMUP_FACTOR: 1.0 + WARMUP_ITERS: 10 + WEIGHT_DECAY: 0.0001 + OPTIMIZER: "ADAMW" + BACKBONE_MULTIPLIER: 0.1 + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "full_model" + CLIP_VALUE: 0.01 + NORM_TYPE: 2.0 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + CROP: + ENABLED: True + TYPE: "absolute_range" + SIZE: (384, 600) + FORMAT: "RGB" +TEST: + EVAL_PERIOD: 4000 +DATALOADER: + FILTER_EMPTY_ANNOTATIONS: False + NUM_WORKERS: 4 +VERSION: 2 diff --git a/models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml b/models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml new file mode 100644 index 00000000..ade490e6 --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml @@ -0,0 +1,46 @@ +MODEL: + META_ARCHITECTURE: "Detr" +# WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" + PIXEL_MEAN: [123.675, 116.280, 103.530] + PIXEL_STD: [58.395, 57.120, 57.375] + MASK_ON: True + RESNETS: + DEPTH: 50 + STRIDE_IN_1X1: False + OUT_FEATURES: ["res2", "res3", "res4", "res5"] + DETR: + GIOU_WEIGHT: 2.0 + L1_WEIGHT: 5.0 + NUM_OBJECT_QUERIES: 100 + FROZEN_WEIGHTS: '' +DATASETS: + TRAIN: ("coco_2017_train",) + TEST: ("coco_2017_val",) +SOLVER: + IMS_PER_BATCH: 64 + BASE_LR: 0.0001 + STEPS: (55440,) + MAX_ITER: 92400 + WARMUP_FACTOR: 1.0 + WARMUP_ITERS: 10 + WEIGHT_DECAY: 0.0001 + OPTIMIZER: "ADAMW" + BACKBONE_MULTIPLIER: 0.1 + CLIP_GRADIENTS: + ENABLED: True + CLIP_TYPE: "full_model" + CLIP_VALUE: 0.01 + NORM_TYPE: 2.0 +INPUT: + MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) + CROP: + ENABLED: True + TYPE: "absolute_range" + SIZE: (384, 600) + FORMAT: "RGB" +TEST: + EVAL_PERIOD: 4000 +DATALOADER: + FILTER_EMPTY_ANNOTATIONS: False + NUM_WORKERS: 4 +VERSION: 2 diff --git a/models/cv/object_detection/detr/igie/d2/converter.py b/models/cv/object_detection/detr/igie/d2/converter.py new file 
mode 100644 index 00000000..6fa5ff4c --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/converter.py @@ -0,0 +1,69 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version. +""" +import json +import argparse + +import numpy as np +import torch + + +def parse_args(): + parser = argparse.ArgumentParser("D2 model converter") + + parser.add_argument("--source_model", default="", type=str, help="Path or url to the DETR model to convert") + parser.add_argument("--output_model", default="", type=str, help="Path where to save the converted model") + return parser.parse_args() + + +def main(): + args = parse_args() + + # D2 expects contiguous classes, so we need to remap the 92 classes from DETR + # fmt: off + coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, + 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91] + # fmt: on + + coco_idx = np.array(coco_idx) + + if args.source_model.startswith("https"): + checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True) + else: + checkpoint = torch.load(args.source_model, map_location="cpu") + model_to_convert = checkpoint["model"] + + model_converted = {} + for k in model_to_convert.keys(): + old_k = k + if "backbone" in k: + k = k.replace("backbone.0.body.", "") + if "layer" not in k: + k = "stem." + k + for t in [1, 2, 3, 4]: + k = k.replace(f"layer{t}", f"res{t + 1}") + for t in [1, 2, 3]: + k = k.replace(f"bn{t}", f"conv{t}.norm") + k = k.replace("downsample.0", "shortcut") + k = k.replace("downsample.1", "shortcut.norm") + k = "backbone.0.backbone." + k + k = "detr." + k + print(old_k, "->", k) + if "class_embed" in old_k: + v = model_to_convert[old_k].detach() + if v.shape[0] == 92: + shape_old = v.shape + model_converted[k] = v[coco_idx] + print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape)) + continue + model_converted[k] = model_to_convert[old_k].detach() + + model_to_save = {"model": model_converted} + torch.save(model_to_save, args.output_model) + + +if __name__ == "__main__": + main() diff --git a/models/cv/object_detection/detr/igie/d2/detr/__init__.py b/models/cv/object_detection/detr/igie/d2/detr/__init__.py new file mode 100644 index 00000000..a618f828 --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/detr/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .config import add_detr_config +from .detr import Detr +from .dataset_mapper import DetrDatasetMapper diff --git a/models/cv/object_detection/detr/igie/d2/detr/config.py b/models/cv/object_detection/detr/igie/d2/detr/config.py new file mode 100644 index 00000000..9ea267dd --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/detr/config.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from detectron2.config import CfgNode as CN + + +def add_detr_config(cfg): + """ + Add config for DETR. 
+ """ + cfg.MODEL.DETR = CN() + cfg.MODEL.DETR.NUM_CLASSES = 80 + + # For Segmentation + cfg.MODEL.DETR.FROZEN_WEIGHTS = '' + + # LOSS + cfg.MODEL.DETR.GIOU_WEIGHT = 2.0 + cfg.MODEL.DETR.L1_WEIGHT = 5.0 + cfg.MODEL.DETR.DEEP_SUPERVISION = True + cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1 + + # TRANSFORMER + cfg.MODEL.DETR.NHEADS = 8 + cfg.MODEL.DETR.DROPOUT = 0.1 + cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048 + cfg.MODEL.DETR.ENC_LAYERS = 6 + cfg.MODEL.DETR.DEC_LAYERS = 6 + cfg.MODEL.DETR.PRE_NORM = False + + cfg.MODEL.DETR.HIDDEN_DIM = 256 + cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100 + + cfg.SOLVER.OPTIMIZER = "ADAMW" + cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1 diff --git a/models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py b/models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py new file mode 100644 index 00000000..f428a493 --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py @@ -0,0 +1,122 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import copy +import logging + +import numpy as np +import torch + +from detectron2.data import detection_utils as utils +from detectron2.data import transforms as T +from detectron2.data.transforms import TransformGen + +__all__ = ["DetrDatasetMapper"] + + +def build_transform_gen(cfg, is_train): + """ + Create a list of :class:`TransformGen` from config. + Returns: + list[TransformGen] + """ + if is_train: + min_size = cfg.INPUT.MIN_SIZE_TRAIN + max_size = cfg.INPUT.MAX_SIZE_TRAIN + sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING + else: + min_size = cfg.INPUT.MIN_SIZE_TEST + max_size = cfg.INPUT.MAX_SIZE_TEST + sample_style = "choice" + if sample_style == "range": + assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) + + logger = logging.getLogger(__name__) + tfm_gens = [] + if is_train: + tfm_gens.append(T.RandomFlip()) + tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) + if is_train: + logger.info("TransformGens used in training: " + str(tfm_gens)) + return tfm_gens + + +class DetrDatasetMapper: + """ + A callable which takes a dataset dict in Detectron2 Dataset format, + and map it into a format used by DETR. + + The callable currently does the following: + + 1. Read the image from "file_name" + 2. Applies geometric transforms to the image and annotation + 3. Find and applies suitable cropping to the image and annotation + 4. Prepare image and annotation to Tensors + """ + + def __init__(self, cfg, is_train=True): + if cfg.INPUT.CROP.ENABLED and is_train: + self.crop_gen = [ + T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), + T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), + ] + else: + self.crop_gen = None + + self.mask_on = cfg.MODEL.MASK_ON + self.tfm_gens = build_transform_gen(cfg, is_train) + logging.getLogger(__name__).info( + "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) + ) + + self.img_format = cfg.INPUT.FORMAT + self.is_train = is_train + + def __call__(self, dataset_dict): + """ + Args: + dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 
+ + Returns: + dict: a format that builtin models in detectron2 accept + """ + dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below + image = utils.read_image(dataset_dict["file_name"], format=self.img_format) + utils.check_image_size(dataset_dict, image) + + if self.crop_gen is None: + image, transforms = T.apply_transform_gens(self.tfm_gens, image) + else: + if np.random.rand() > 0.5: + image, transforms = T.apply_transform_gens(self.tfm_gens, image) + else: + image, transforms = T.apply_transform_gens( + self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image + ) + + image_shape = image.shape[:2] # h, w + + # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, + # but not efficient on large generic data structures due to the use of pickle & mp.Queue. + # Therefore it's important to use torch.Tensor. + dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) + + if not self.is_train: + # USER: Modify this if you want to keep them for some reason. + dataset_dict.pop("annotations", None) + return dataset_dict + + if "annotations" in dataset_dict: + # USER: Modify this if you want to keep them for some reason. + for anno in dataset_dict["annotations"]: + if not self.mask_on: + anno.pop("segmentation", None) + anno.pop("keypoints", None) + + # USER: Implement additional transformations if you have other types of data + annos = [ + utils.transform_instance_annotations(obj, transforms, image_shape) + for obj in dataset_dict.pop("annotations") + if obj.get("iscrowd", 0) == 0 + ] + instances = utils.annotations_to_instances(annos, image_shape) + dataset_dict["instances"] = utils.filter_empty_instances(instances) + return dataset_dict diff --git a/models/cv/object_detection/detr/igie/d2/detr/detr.py b/models/cv/object_detection/detr/igie/d2/detr/detr.py new file mode 100644 index 00000000..95f89dff --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/detr/detr.py @@ -0,0 +1,261 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import logging +import math +from typing import List + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn.functional as F +from scipy.optimize import linear_sum_assignment +from torch import nn + +from detectron2.layers import ShapeSpec +from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess +from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks +from detectron2.utils.logger import log_first_n +from fvcore.nn import giou_loss, smooth_l1_loss +from models.backbone import Joiner +from models.detr import DETR, SetCriterion +from models.matcher import HungarianMatcher +from models.position_encoding import PositionEmbeddingSine +from models.transformer import Transformer +from models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm +from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh +from util.misc import NestedTensor +from datasets.coco import convert_coco_poly_to_mask + +__all__ = ["Detr"] + + +class MaskedBackbone(nn.Module): + """ This is a thin wrapper around D2's backbone to provide padding masking""" + + def __init__(self, cfg): + super().__init__() + self.backbone = build_backbone(cfg) + backbone_shape = self.backbone.output_shape() + self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()] + self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels + + def forward(self, images): + features = self.backbone(images.tensor) + masks = self.mask_out_padding( + [features_per_level.shape for features_per_level in features.values()], + images.image_sizes, + images.tensor.device, + ) + assert len(features) == len(masks) + for i, k in enumerate(features.keys()): + features[k] = NestedTensor(features[k], masks[i]) + return features + + def mask_out_padding(self, feature_shapes, image_sizes, device): + masks = [] + assert len(feature_shapes) == len(self.feature_strides) + for idx, shape in enumerate(feature_shapes): + N, _, H, W = shape + masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device) + for img_idx, (h, w) in enumerate(image_sizes): + masks_per_feature_level[ + img_idx, + : int(np.ceil(float(h) / self.feature_strides[idx])), + : int(np.ceil(float(w) / self.feature_strides[idx])), + ] = 0 + masks.append(masks_per_feature_level) + return masks + + +@META_ARCH_REGISTRY.register() +class Detr(nn.Module): + """ + Implement Detr + """ + + def __init__(self, cfg): + super().__init__() + + self.device = torch.device(cfg.MODEL.DEVICE) + + self.num_classes = cfg.MODEL.DETR.NUM_CLASSES + self.mask_on = cfg.MODEL.MASK_ON + hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM + num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES + # Transformer parameters: + nheads = cfg.MODEL.DETR.NHEADS + dropout = cfg.MODEL.DETR.DROPOUT + dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD + enc_layers = cfg.MODEL.DETR.ENC_LAYERS + dec_layers = cfg.MODEL.DETR.DEC_LAYERS + pre_norm = cfg.MODEL.DETR.PRE_NORM + + # Loss parameters: + giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT + l1_weight = cfg.MODEL.DETR.L1_WEIGHT + deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION + no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT + + N_steps = hidden_dim // 2 + d2_backbone = MaskedBackbone(cfg) + backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True)) + backbone.num_channels = d2_backbone.num_channels + + transformer = Transformer( + d_model=hidden_dim, + dropout=dropout, + nhead=nheads, + 
dim_feedforward=dim_feedforward, + num_encoder_layers=enc_layers, + num_decoder_layers=dec_layers, + normalize_before=pre_norm, + return_intermediate_dec=deep_supervision, + ) + + self.detr = DETR( + backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision + ) + if self.mask_on: + frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS + if frozen_weights != '': + print("LOAD pre-trained weights") + weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model'] + new_weight = {} + for k, v in weight.items(): + if 'detr.' in k: + new_weight[k.replace('detr.', '')] = v + else: + print(f"Skipping loading weight {k} from frozen model") + del weight + self.detr.load_state_dict(new_weight) + del new_weight + self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != '')) + self.seg_postprocess = PostProcessSegm + + self.detr.to(self.device) + + # building criterion + matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight) + weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight} + weight_dict["loss_giou"] = giou_weight + if deep_supervision: + aux_weight_dict = {} + for i in range(dec_layers - 1): + aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + losses = ["labels", "boxes", "cardinality"] + if self.mask_on: + losses += ["masks"] + self.criterion = SetCriterion( + self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, + ) + self.criterion.to(self.device) + + pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) + pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) + self.normalizer = lambda x: (x - pixel_mean) / pixel_std + self.to(self.device) + + def forward(self, batched_inputs): + """ + Args: + batched_inputs: a list, batched outputs of :class:`DatasetMapper` . + Each item in the list contains the inputs for one image. + For now, each item in the list is a dict that contains: + + * image: Tensor, image in (C, H, W) format. + * instances: Instances + + Other information that's included in the original dicts, such as: + + * "height", "width" (int): the output resolution of the model, used in inference. + See :meth:`postprocess` for details. + Returns: + dict[str: Tensor]: + mapping from a named loss to a tensor storing the loss. Used during training only. 
+ """ + images = self.preprocess_image(batched_inputs) + output = self.detr(images) + + if self.training: + gt_instances = [x["instances"].to(self.device) for x in batched_inputs] + + targets = self.prepare_targets(gt_instances) + loss_dict = self.criterion(output, targets) + weight_dict = self.criterion.weight_dict + for k in loss_dict.keys(): + if k in weight_dict: + loss_dict[k] *= weight_dict[k] + return loss_dict + else: + box_cls = output["pred_logits"] + box_pred = output["pred_boxes"] + mask_pred = output["pred_masks"] if self.mask_on else None + results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes) + processed_results = [] + for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): + height = input_per_image.get("height", image_size[0]) + width = input_per_image.get("width", image_size[1]) + r = detector_postprocess(results_per_image, height, width) + processed_results.append({"instances": r}) + return processed_results + + def prepare_targets(self, targets): + new_targets = [] + for targets_per_image in targets: + h, w = targets_per_image.image_size + image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device) + gt_classes = targets_per_image.gt_classes + gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy + gt_boxes = box_xyxy_to_cxcywh(gt_boxes) + new_targets.append({"labels": gt_classes, "boxes": gt_boxes}) + if self.mask_on and hasattr(targets_per_image, 'gt_masks'): + gt_masks = targets_per_image.gt_masks + gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w) + new_targets[-1].update({'masks': gt_masks}) + return new_targets + + def inference(self, box_cls, box_pred, mask_pred, image_sizes): + """ + Arguments: + box_cls (Tensor): tensor of shape (batch_size, num_queries, K). + The tensor predicts the classification probability for each query. + box_pred (Tensor): tensors of shape (batch_size, num_queries, 4). + The tensor predicts 4-vector (x,y,w,h) box + regression values for every queryx + image_sizes (List[torch.Size]): the input image sizes + + Returns: + results (List[Instances]): a list of #images elements. + """ + assert len(box_cls) == len(image_sizes) + results = [] + + # For each box we assign the best class or the second best if the best on is `no_object`. + scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1) + + for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip( + scores, labels, box_pred, image_sizes + )): + result = Instances(image_size) + result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image)) + + result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0]) + if self.mask_on: + mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False) + mask = mask[0].sigmoid() > 0.5 + B, N, H, W = mask_pred.shape + mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32) + result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device) + + result.scores = scores_per_image + result.pred_classes = labels_per_image + results.append(result) + return results + + def preprocess_image(self, batched_inputs): + """ + Normalize, pad and batch the input images. 
+ """ + images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs] + images = ImageList.from_tensors(images) + return images diff --git a/models/cv/object_detection/detr/igie/d2/train_net.py b/models/cv/object_detection/detr/igie/d2/train_net.py new file mode 100644 index 00000000..82f69292 --- /dev/null +++ b/models/cv/object_detection/detr/igie/d2/train_net.py @@ -0,0 +1,145 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Training Script. + +This script is a simplified version of the training script in detectron2/tools. +""" +import os +import sys +import itertools + +# fmt: off +sys.path.insert(1, os.path.join(sys.path[0], '..')) +# fmt: on + +import time +from typing import Any, Dict, List, Set + +import torch + +import detectron2.utils.comm as comm +from d2.detr import DetrDatasetMapper, add_detr_config +from detectron2.checkpoint import DetectionCheckpointer +from detectron2.config import get_cfg +from detectron2.data import MetadataCatalog, build_detection_train_loader +from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch +from detectron2.evaluation import COCOEvaluator, verify_results + +from detectron2.solver.build import maybe_add_gradient_clipping + + +class Trainer(DefaultTrainer): + """ + Extension of the Trainer class adapted to DETR. + """ + + @classmethod + def build_evaluator(cls, cfg, dataset_name, output_folder=None): + """ + Create evaluator(s) for a given dataset. + This uses the special metadata "evaluator_type" associated with each builtin dataset. + For your own dataset, you can simply create an evaluator manually in your + script and do not have to worry about the hacky if-else logic here. + """ + if output_folder is None: + output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") + return COCOEvaluator(dataset_name, cfg, True, output_folder) + + @classmethod + def build_train_loader(cls, cfg): + if "Detr" == cfg.MODEL.META_ARCHITECTURE: + mapper = DetrDatasetMapper(cfg, True) + else: + mapper = None + return build_detection_train_loader(cfg, mapper=mapper) + + @classmethod + def build_optimizer(cls, cfg, model): + params: List[Dict[str, Any]] = [] + memo: Set[torch.nn.parameter.Parameter] = set() + for key, value in model.named_parameters(recurse=True): + if not value.requires_grad: + continue + # Avoid duplicating parameters + if value in memo: + continue + memo.add(value) + lr = cfg.SOLVER.BASE_LR + weight_decay = cfg.SOLVER.WEIGHT_DECAY + if "backbone" in key: + lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER + params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] + + def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class + # detectron2 doesn't have full model gradient clipping now + clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE + enable = ( + cfg.SOLVER.CLIP_GRADIENTS.ENABLED + and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" + and clip_norm_val > 0.0 + ) + + class FullModelGradientClippingOptimizer(optim): + def step(self, closure=None): + all_params = itertools.chain(*[x["params"] for x in self.param_groups]) + torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) + super().step(closure=closure) + + return FullModelGradientClippingOptimizer if enable else optim + + optimizer_type = cfg.SOLVER.OPTIMIZER + if optimizer_type == "SGD": + optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( + params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM + ) + elif optimizer_type == "ADAMW": + optimizer 
= maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( + params, cfg.SOLVER.BASE_LR + ) + else: + raise NotImplementedError(f"no optimizer type {optimizer_type}") + if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": + optimizer = maybe_add_gradient_clipping(cfg, optimizer) + return optimizer + + +def setup(args): + """ + Create configs and perform basic setups. + """ + cfg = get_cfg() + add_detr_config(cfg) + cfg.merge_from_file(args.config_file) + cfg.merge_from_list(args.opts) + cfg.freeze() + default_setup(cfg, args) + return cfg + + +def main(args): + cfg = setup(args) + + if args.eval_only: + model = Trainer.build_model(cfg) + DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume) + res = Trainer.test(cfg, model) + if comm.is_main_process(): + verify_results(cfg, res) + return res + + trainer = Trainer(cfg) + trainer.resume_or_load(resume=args.resume) + return trainer.train() + + +if __name__ == "__main__": + args = default_argument_parser().parse_args() + print("Command Line Args:", args) + launch( + main, + args.num_gpus, + num_machines=args.num_machines, + machine_rank=args.machine_rank, + dist_url=args.dist_url, + args=(args,), + ) diff --git a/models/cv/object_detection/detr/igie/datasets/__init__.py b/models/cv/object_detection/detr/igie/datasets/__init__.py new file mode 100644 index 00000000..571b126e --- /dev/null +++ b/models/cv/object_detection/detr/igie/datasets/__init__.py @@ -0,0 +1,25 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import torch.utils.data +import torchvision + +from .coco import build as build_coco + + +def get_coco_api_from_dataset(dataset): + for _ in range(10): + # if isinstance(dataset, torchvision.datasets.CocoDetection): + # break + if isinstance(dataset, torch.utils.data.Subset): + dataset = dataset.dataset + if isinstance(dataset, torchvision.datasets.CocoDetection): + return dataset.coco + + +def build_dataset(image_set, args): + if args.dataset_file == 'coco': + return build_coco(image_set, args) + if args.dataset_file == 'coco_panoptic': + # to avoid making panopticapi required for coco + from .coco_panoptic import build as build_coco_panoptic + return build_coco_panoptic(image_set, args) + raise ValueError(f'dataset {args.dataset_file} not supported') diff --git a/models/cv/object_detection/detr/igie/datasets/coco.py b/models/cv/object_detection/detr/igie/datasets/coco.py new file mode 100644 index 00000000..93a436ba --- /dev/null +++ b/models/cv/object_detection/detr/igie/datasets/coco.py @@ -0,0 +1,158 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +COCO dataset which returns image_id for evaluation. 
+ +Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py +""" +from pathlib import Path + +import torch +import torch.utils.data +import torchvision +from pycocotools import mask as coco_mask + +import datasets.transforms as T + + +class CocoDetection(torchvision.datasets.CocoDetection): + def __init__(self, img_folder, ann_file, transforms, return_masks): + super(CocoDetection, self).__init__(img_folder, ann_file) + self._transforms = transforms + self.prepare = ConvertCocoPolysToMask(return_masks) + + def __getitem__(self, idx): + img, target = super(CocoDetection, self).__getitem__(idx) + image_id = self.ids[idx] + target = {'image_id': image_id, 'annotations': target} + img, target = self.prepare(img, target) + if self._transforms is not None: + img, target = self._transforms(img, target) + return img, target + + +def convert_coco_poly_to_mask(segmentations, height, width): + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = torch.as_tensor(mask, dtype=torch.uint8) + mask = mask.any(dim=2) + masks.append(mask) + if masks: + masks = torch.stack(masks, dim=0) + else: + masks = torch.zeros((0, height, width), dtype=torch.uint8) + return masks + + +class ConvertCocoPolysToMask(object): + def __init__(self, return_masks=False): + self.return_masks = return_masks + + def __call__(self, image, target): + w, h = image.size + + image_id = target["image_id"] + image_id = torch.tensor([image_id]) + + anno = target["annotations"] + + anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0] + + boxes = [obj["bbox"] for obj in anno] + # guard against no boxes via resizing + boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) + boxes[:, 2:] += boxes[:, :2] + boxes[:, 0::2].clamp_(min=0, max=w) + boxes[:, 1::2].clamp_(min=0, max=h) + + classes = [obj["category_id"] for obj in anno] + classes = torch.tensor(classes, dtype=torch.int64) + + if self.return_masks: + segmentations = [obj["segmentation"] for obj in anno] + masks = convert_coco_poly_to_mask(segmentations, h, w) + + keypoints = None + if anno and "keypoints" in anno[0]: + keypoints = [obj["keypoints"] for obj in anno] + keypoints = torch.as_tensor(keypoints, dtype=torch.float32) + num_keypoints = keypoints.shape[0] + if num_keypoints: + keypoints = keypoints.view(num_keypoints, -1, 3) + + keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) + boxes = boxes[keep] + classes = classes[keep] + if self.return_masks: + masks = masks[keep] + if keypoints is not None: + keypoints = keypoints[keep] + + target = {} + target["boxes"] = boxes + target["labels"] = classes + if self.return_masks: + target["masks"] = masks + target["image_id"] = image_id + if keypoints is not None: + target["keypoints"] = keypoints + + # for conversion to coco api + area = torch.tensor([obj["area"] for obj in anno]) + iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]) + target["area"] = area[keep] + target["iscrowd"] = iscrowd[keep] + + target["orig_size"] = torch.as_tensor([int(h), int(w)]) + target["size"] = torch.as_tensor([int(h), int(w)]) + + return image, target + + +def make_coco_transforms(image_set): + + normalize = T.Compose([ + T.ToTensor(), + T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ]) + + scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800] + + if image_set == 
'train': + return T.Compose([ + T.RandomHorizontalFlip(), + T.RandomSelect( + T.RandomResize(scales, max_size=1333), + T.Compose([ + T.RandomResize([400, 500, 600]), + T.RandomSizeCrop(384, 600), + T.RandomResize(scales, max_size=1333), + ]) + ), + normalize, + ]) + + if image_set == 'val': + return T.Compose([ + T.RandomResize([800], max_size=1333), + normalize, + ]) + + raise ValueError(f'unknown {image_set}') + + +def build(image_set, args): + root = Path(args.coco_path) + assert root.exists(), f'provided COCO path {root} does not exist' + mode = 'instances' + PATHS = { + "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'), + "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'), + } + + img_folder, ann_file = PATHS[image_set] + dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks) + return dataset diff --git a/models/cv/object_detection/detr/igie/datasets/coco_eval.py b/models/cv/object_detection/detr/igie/datasets/coco_eval.py new file mode 100644 index 00000000..9487c08f --- /dev/null +++ b/models/cv/object_detection/detr/igie/datasets/coco_eval.py @@ -0,0 +1,257 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +COCO evaluator that works in distributed mode. + +Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py +The difference is that there is less copy-pasting from pycocotools +in the end of the file, as python3 can suppress prints with contextlib +""" +import os +import contextlib +import copy +import numpy as np +import torch + +from pycocotools.cocoeval import COCOeval +from pycocotools.coco import COCO +import pycocotools.mask as mask_util + +from util.misc import all_gather + + +class CocoEvaluator(object): + def __init__(self, coco_gt, iou_types): + assert isinstance(iou_types, (list, tuple)) + coco_gt = copy.deepcopy(coco_gt) + self.coco_gt = coco_gt + + self.iou_types = iou_types + self.coco_eval = {} + for iou_type in iou_types: + self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) + + self.img_ids = [] + self.eval_imgs = {k: [] for k in iou_types} + + def update(self, predictions): + img_ids = list(np.unique(list(predictions.keys()))) + self.img_ids.extend(img_ids) + + for iou_type in self.iou_types: + results = self.prepare(predictions, iou_type) + + # suppress pycocotools prints + with open(os.devnull, 'w') as devnull: + with contextlib.redirect_stdout(devnull): + coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO() + coco_eval = self.coco_eval[iou_type] + + coco_eval.cocoDt = coco_dt + coco_eval.params.imgIds = list(img_ids) + img_ids, eval_imgs = evaluate(coco_eval) + + self.eval_imgs[iou_type].append(eval_imgs) + + def synchronize_between_processes(self): + for iou_type in self.iou_types: + self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) + create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) + + def accumulate(self): + for coco_eval in self.coco_eval.values(): + coco_eval.accumulate() + + def summarize(self): + for iou_type, coco_eval in self.coco_eval.items(): + print("IoU metric: {}".format(iou_type)) + coco_eval.summarize() + + def prepare(self, predictions, iou_type): + if iou_type == "bbox": + return self.prepare_for_coco_detection(predictions) + elif iou_type == "segm": + return self.prepare_for_coco_segmentation(predictions) + elif iou_type == "keypoints": + return 
self.prepare_for_coco_keypoint(predictions) + else: + raise ValueError("Unknown iou type {}".format(iou_type)) + + def prepare_for_coco_detection(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + boxes = prediction["boxes"] + boxes = convert_to_xywh(boxes).tolist() + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + "bbox": box, + "score": scores[k], + } + for k, box in enumerate(boxes) + ] + ) + return coco_results + + def prepare_for_coco_segmentation(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + scores = prediction["scores"] + labels = prediction["labels"] + masks = prediction["masks"] + + masks = masks > 0.5 + + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + + rles = [ + mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] + for mask in masks + ] + for rle in rles: + rle["counts"] = rle["counts"].decode("utf-8") + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + "segmentation": rle, + "score": scores[k], + } + for k, rle in enumerate(rles) + ] + ) + return coco_results + + def prepare_for_coco_keypoint(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + boxes = prediction["boxes"] + boxes = convert_to_xywh(boxes).tolist() + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + keypoints = prediction["keypoints"] + keypoints = keypoints.flatten(start_dim=1).tolist() + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + 'keypoints': keypoint, + "score": scores[k], + } + for k, keypoint in enumerate(keypoints) + ] + ) + return coco_results + + +def convert_to_xywh(boxes): + xmin, ymin, xmax, ymax = boxes.unbind(1) + return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) + + +def merge(img_ids, eval_imgs): + all_img_ids = all_gather(img_ids) + all_eval_imgs = all_gather(eval_imgs) + + merged_img_ids = [] + for p in all_img_ids: + merged_img_ids.extend(p) + + merged_eval_imgs = [] + for p in all_eval_imgs: + merged_eval_imgs.append(p) + + merged_img_ids = np.array(merged_img_ids) + merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) + + # keep only unique (and in sorted order) images + merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) + merged_eval_imgs = merged_eval_imgs[..., idx] + + return merged_img_ids, merged_eval_imgs + + +def create_common_coco_eval(coco_eval, img_ids, eval_imgs): + img_ids, eval_imgs = merge(img_ids, eval_imgs) + img_ids = list(img_ids) + eval_imgs = list(eval_imgs.flatten()) + + coco_eval.evalImgs = eval_imgs + coco_eval.params.imgIds = img_ids + coco_eval._paramsEval = copy.deepcopy(coco_eval.params) + + +################################################################# +# From pycocotools, just removed the prints and fixed +# a Python3 bug about unicode not defined +################################################################# + + +def evaluate(self): + ''' + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + :return: None + ''' + # tic = time.time() + # print('Running per image evaluation...') + p = self.params + # add backward compatibility if 
useSegm is specified in params + if p.useSegm is not None: + p.iouType = 'segm' if p.useSegm == 1 else 'bbox' + print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType)) + # print('Evaluate annotation type *{}*'.format(p.iouType)) + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType == 'segm' or p.iouType == 'bbox': + computeIoU = self.computeIoU + elif p.iouType == 'keypoints': + computeIoU = self.computeOks + self.ious = { + (imgId, catId): computeIoU(imgId, catId) + for imgId in p.imgIds + for catId in catIds} + + evaluateImg = self.evaluateImg + maxDet = p.maxDets[-1] + evalImgs = [ + evaluateImg(imgId, catId, areaRng, maxDet) + for catId in catIds + for areaRng in p.areaRng + for imgId in p.imgIds + ] + # this is NOT in the pycocotools code, but could be done outside + evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) + self._paramsEval = copy.deepcopy(self.params) + # toc = time.time() + # print('DONE (t={:0.2f}s).'.format(toc-tic)) + return p.imgIds, evalImgs + +################################################################# +# end of straight copy from pycocotools, just removing the prints +################################################################# diff --git a/models/cv/object_detection/detr/igie/datasets/coco_panoptic.py b/models/cv/object_detection/detr/igie/datasets/coco_panoptic.py new file mode 100644 index 00000000..b24f615c --- /dev/null +++ b/models/cv/object_detection/detr/igie/datasets/coco_panoptic.py @@ -0,0 +1,99 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import json +from pathlib import Path + +import numpy as np +import torch +from PIL import Image + +from panopticapi.utils import rgb2id +from util.box_ops import masks_to_boxes + +from .coco import make_coco_transforms + + +class CocoPanoptic: + def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): + with open(ann_file, 'r') as f: + self.coco = json.load(f) + + # sort 'images' field so that they are aligned with 'annotations' + # i.e., in alphabetical order + self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id']) + # sanity check + if "annotations" in self.coco: + for img, ann in zip(self.coco['images'], self.coco['annotations']): + assert img['file_name'][:-4] == ann['file_name'][:-4] + + self.img_folder = img_folder + self.ann_folder = ann_folder + self.ann_file = ann_file + self.transforms = transforms + self.return_masks = return_masks + + def __getitem__(self, idx): + ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx] + img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg') + ann_path = Path(self.ann_folder) / ann_info['file_name'] + + img = Image.open(img_path).convert('RGB') + w, h = img.size + if "segments_info" in ann_info: + masks = np.asarray(Image.open(ann_path), dtype=np.uint32) + masks = rgb2id(masks) + + ids = np.array([ann['id'] for ann in ann_info['segments_info']]) + masks = masks == ids[:, None, None] + + masks = torch.as_tensor(masks, dtype=torch.uint8) + labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64) + + target = {} + target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) + if self.return_masks: + target['masks'] = masks + target['labels'] = labels + + target["boxes"] = masks_to_boxes(masks) + + target['size'] = torch.as_tensor([int(h), int(w)]) + target['orig_size'] = torch.as_tensor([int(h), int(w)]) + if "segments_info" in ann_info: + for name in ['iscrowd', 'area']: + target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def __len__(self): + return len(self.coco['images']) + + def get_height_and_width(self, idx): + img_info = self.coco['images'][idx] + height = img_info['height'] + width = img_info['width'] + return height, width + + +def build(image_set, args): + img_folder_root = Path(args.coco_path) + ann_folder_root = Path(args.coco_panoptic_path) + assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist' + assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist' + mode = 'panoptic' + PATHS = { + "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'), + "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'), + } + + img_folder, ann_file = PATHS[image_set] + img_folder_path = img_folder_root / img_folder + ann_folder = ann_folder_root / f'{mode}_{img_folder}' + ann_file = ann_folder_root / ann_file + + dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file, + transforms=make_coco_transforms(image_set), return_masks=args.masks) + + return dataset diff --git a/models/cv/object_detection/detr/igie/datasets/panoptic_eval.py b/models/cv/object_detection/detr/igie/datasets/panoptic_eval.py new file mode 100644 index 00000000..9cb4f834 --- /dev/null +++ 
b/models/cv/object_detection/detr/igie/datasets/panoptic_eval.py @@ -0,0 +1,44 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import json +import os + +import util.misc as utils + +try: + from panopticapi.evaluation import pq_compute +except ImportError: + pass + + +class PanopticEvaluator(object): + def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"): + self.gt_json = ann_file + self.gt_folder = ann_folder + if utils.is_main_process(): + if not os.path.exists(output_dir): + os.mkdir(output_dir) + self.output_dir = output_dir + self.predictions = [] + + def update(self, predictions): + for p in predictions: + with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f: + f.write(p.pop("png_string")) + + self.predictions += predictions + + def synchronize_between_processes(self): + all_predictions = utils.all_gather(self.predictions) + merged_predictions = [] + for p in all_predictions: + merged_predictions += p + self.predictions = merged_predictions + + def summarize(self): + if utils.is_main_process(): + json_data = {"annotations": self.predictions} + predictions_json = os.path.join(self.output_dir, "predictions.json") + with open(predictions_json, "w") as f: + f.write(json.dumps(json_data)) + return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir) + return None diff --git a/models/cv/object_detection/detr/igie/datasets/transforms.py b/models/cv/object_detection/detr/igie/datasets/transforms.py new file mode 100644 index 00000000..25ba9362 --- /dev/null +++ b/models/cv/object_detection/detr/igie/datasets/transforms.py @@ -0,0 +1,277 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Transforms and data augmentation for both image + bbox. +""" +import random + +import PIL +import torch +import torchvision.transforms as T +import torchvision.transforms.functional as F + +from util.box_ops import box_xyxy_to_cxcywh +from util.misc import interpolate + + +def crop(image, target, region): + cropped_image = F.crop(image, *region) + + target = target.copy() + i, j, h, w = region + + # should we do something wrt the original size? + target["size"] = torch.tensor([h, w]) + + fields = ["labels", "area", "iscrowd"] + + if "boxes" in target: + boxes = target["boxes"] + max_size = torch.as_tensor([w, h], dtype=torch.float32) + cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) + cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) + cropped_boxes = cropped_boxes.clamp(min=0) + area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) + target["boxes"] = cropped_boxes.reshape(-1, 4) + target["area"] = area + fields.append("boxes") + + if "masks" in target: + # FIXME should we update the area here if there are no boxes? 
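+        # crop the masks to the same window as the image: rows i:i+h, columns j:j+w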
+ target['masks'] = target['masks'][:, i:i + h, j:j + w] + fields.append("masks") + + # remove elements for which the boxes or masks that have zero area + if "boxes" in target or "masks" in target: + # favor boxes selection when defining which elements to keep + # this is compatible with previous implementation + if "boxes" in target: + cropped_boxes = target['boxes'].reshape(-1, 2, 2) + keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) + else: + keep = target['masks'].flatten(1).any(1) + + for field in fields: + target[field] = target[field][keep] + + return cropped_image, target + + +def hflip(image, target): + flipped_image = F.hflip(image) + + w, h = image.size + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) + target["boxes"] = boxes + + if "masks" in target: + target['masks'] = target['masks'].flip(-1) + + return flipped_image, target + + +def resize(image, target, size, max_size=None): + # size can be min_size (scalar) or (w, h) tuple + + def get_size_with_aspect_ratio(image_size, size, max_size=None): + w, h = image_size + if max_size is not None: + min_original_size = float(min((w, h))) + max_original_size = float(max((w, h))) + if max_original_size / min_original_size * size > max_size: + size = int(round(max_size * min_original_size / max_original_size)) + + if (w <= h and w == size) or (h <= w and h == size): + return (h, w) + + if w < h: + ow = size + oh = int(size * h / w) + else: + oh = size + ow = int(size * w / h) + + return (oh, ow) + + def get_size(image_size, size, max_size=None): + if isinstance(size, (list, tuple)): + return size[::-1] + else: + return get_size_with_aspect_ratio(image_size, size, max_size) + + # size = get_size(image.size, size, max_size) + size = (800, 800) + rescaled_image = F.resize(image, size) + + if target is None: + return rescaled_image, None + + ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) + ratio_width, ratio_height = ratios + + target = target.copy() + if "boxes" in target: + boxes = target["boxes"] + scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) + target["boxes"] = scaled_boxes + + if "area" in target: + area = target["area"] + scaled_area = area * (ratio_width * ratio_height) + target["area"] = scaled_area + + h, w = size + target["size"] = torch.tensor([h, w]) + + if "masks" in target: + target['masks'] = interpolate( + target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 + + return rescaled_image, target + + +def pad(image, target, padding): + # assumes that we only pad on the bottom right corners + padded_image = F.pad(image, (0, 0, padding[0], padding[1])) + if target is None: + return padded_image, None + target = target.copy() + # should we do something wrt the original size? 
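+    # PIL's Image.size is (width, height); reverse it so target["size"] stores (height, width)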
+ target["size"] = torch.tensor(padded_image.size[::-1]) + if "masks" in target: + target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) + return padded_image, target + + +class RandomCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + region = T.RandomCrop.get_params(img, self.size) + return crop(img, target, region) + + +class RandomSizeCrop(object): + def __init__(self, min_size: int, max_size: int): + self.min_size = min_size + self.max_size = max_size + + def __call__(self, img: PIL.Image.Image, target: dict): + w = random.randint(self.min_size, min(img.width, self.max_size)) + h = random.randint(self.min_size, min(img.height, self.max_size)) + region = T.RandomCrop.get_params(img, [h, w]) + return crop(img, target, region) + + +class CenterCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, img, target): + image_width, image_height = img.size + crop_height, crop_width = self.size + crop_top = int(round((image_height - crop_height) / 2.)) + crop_left = int(round((image_width - crop_width) / 2.)) + return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) + + +class RandomHorizontalFlip(object): + def __init__(self, p=0.5): + self.p = p + + def __call__(self, img, target): + if random.random() < self.p: + return hflip(img, target) + return img, target + + +class RandomResize(object): + def __init__(self, sizes, max_size=None): + assert isinstance(sizes, (list, tuple)) + self.sizes = sizes + self.max_size = max_size + + def __call__(self, img, target=None): + size = random.choice(self.sizes) + return resize(img, target, size, self.max_size) + + +class RandomPad(object): + def __init__(self, max_pad): + self.max_pad = max_pad + + def __call__(self, img, target): + pad_x = random.randint(0, self.max_pad) + pad_y = random.randint(0, self.max_pad) + return pad(img, target, (pad_x, pad_y)) + + +class RandomSelect(object): + """ + Randomly selects between transforms1 and transforms2, + with probability p for transforms1 and (1 - p) for transforms2 + """ + def __init__(self, transforms1, transforms2, p=0.5): + self.transforms1 = transforms1 + self.transforms2 = transforms2 + self.p = p + + def __call__(self, img, target): + if random.random() < self.p: + return self.transforms1(img, target) + return self.transforms2(img, target) + + +class ToTensor(object): + def __call__(self, img, target): + return F.to_tensor(img), target + + +class RandomErasing(object): + + def __init__(self, *args, **kwargs): + self.eraser = T.RandomErasing(*args, **kwargs) + + def __call__(self, img, target): + return self.eraser(img), target + + +class Normalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, image, target=None): + image = F.normalize(image, mean=self.mean, std=self.std) + if target is None: + return image, None + target = target.copy() + h, w = image.shape[-2:] + if "boxes" in target: + boxes = target["boxes"] + boxes = box_xyxy_to_cxcywh(boxes) + boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) + target["boxes"] = boxes + return image, target + + +class Compose(object): + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, target): + for t in self.transforms: + image, target = t(image, target) + return image, target + + def __repr__(self): + format_string = self.__class__.__name__ + "(" + for t in self.transforms: + format_string += "\n" + format_string += " 
{0}".format(t) + format_string += "\n)" + return format_string diff --git a/models/cv/object_detection/detr/igie/engine.py b/models/cv/object_detection/detr/igie/engine.py new file mode 100644 index 00000000..ac5ea6ff --- /dev/null +++ b/models/cv/object_detection/detr/igie/engine.py @@ -0,0 +1,151 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Train and eval functions used in main.py +""" +import math +import os +import sys +from typing import Iterable + +import torch + +import util.misc as utils +from datasets.coco_eval import CocoEvaluator +from datasets.panoptic_eval import PanopticEvaluator + + +def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, + data_loader: Iterable, optimizer: torch.optim.Optimizer, + device: torch.device, epoch: int, max_norm: float = 0): + model.train() + criterion.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) + header = 'Epoch: [{}]'.format(epoch) + print_freq = 10 + + for samples, targets in metric_logger.log_every(data_loader, print_freq, header): + samples = samples.to(device) + targets = [{k: v.to(device) for k, v in t.items()} for t in targets] + + outputs = model(samples) + loss_dict = criterion(outputs, targets) + weight_dict = criterion.weight_dict + losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) + + # reduce losses over all GPUs for logging purposes + loss_dict_reduced = utils.reduce_dict(loss_dict) + loss_dict_reduced_unscaled = {f'{k}_unscaled': v + for k, v in loss_dict_reduced.items()} + loss_dict_reduced_scaled = {k: v * weight_dict[k] + for k, v in loss_dict_reduced.items() if k in weight_dict} + losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) + + loss_value = losses_reduced_scaled.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + print(loss_dict_reduced) + sys.exit(1) + + optimizer.zero_grad() + losses.backward() + if max_norm > 0: + torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) + optimizer.step() + + metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) + metric_logger.update(class_error=loss_dict_reduced['class_error']) + metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + return {k: meter.global_avg for k, meter in metric_logger.meters.items()} + + +@torch.no_grad() +def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir): + model.eval() + criterion.eval() + + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) + header = 'Test:' + + iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) + coco_evaluator = CocoEvaluator(base_ds, iou_types) + # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] + + panoptic_evaluator = None + if 'panoptic' in postprocessors.keys(): + panoptic_evaluator = PanopticEvaluator( + data_loader.dataset.ann_file, + data_loader.dataset.ann_folder, + output_dir=os.path.join(output_dir, "panoptic_eval"), + ) + + for samples, targets in metric_logger.log_every(data_loader, 10, header): + samples = 
samples.to(device) + targets = [{k: v.to(device) for k, v in t.items()} for t in targets] + + outputs = model(samples) + loss_dict = criterion(outputs, targets) + weight_dict = criterion.weight_dict + + # reduce losses over all GPUs for logging purposes + loss_dict_reduced = utils.reduce_dict(loss_dict) + loss_dict_reduced_scaled = {k: v * weight_dict[k] + for k, v in loss_dict_reduced.items() if k in weight_dict} + loss_dict_reduced_unscaled = {f'{k}_unscaled': v + for k, v in loss_dict_reduced.items()} + metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), + **loss_dict_reduced_scaled, + **loss_dict_reduced_unscaled) + metric_logger.update(class_error=loss_dict_reduced['class_error']) + + orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) + results = postprocessors['bbox'](outputs, orig_target_sizes) + if 'segm' in postprocessors.keys(): + target_sizes = torch.stack([t["size"] for t in targets], dim=0) + results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) + res = {target['image_id'].item(): output for target, output in zip(targets, results)} + if coco_evaluator is not None: + coco_evaluator.update(res) + + if panoptic_evaluator is not None: + res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) + for i, target in enumerate(targets): + image_id = target["image_id"].item() + file_name = f"{image_id:012d}.png" + res_pano[i]["image_id"] = image_id + res_pano[i]["file_name"] = file_name + + panoptic_evaluator.update(res_pano) + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + if coco_evaluator is not None: + coco_evaluator.synchronize_between_processes() + if panoptic_evaluator is not None: + panoptic_evaluator.synchronize_between_processes() + + # accumulate predictions from all images + if coco_evaluator is not None: + coco_evaluator.accumulate() + coco_evaluator.summarize() + panoptic_res = None + if panoptic_evaluator is not None: + panoptic_res = panoptic_evaluator.summarize() + stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} + if coco_evaluator is not None: + if 'bbox' in postprocessors.keys(): + stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() + if 'segm' in postprocessors.keys(): + stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() + if panoptic_res is not None: + stats['PQ_all'] = panoptic_res["All"] + stats['PQ_th'] = panoptic_res["Things"] + stats['PQ_st'] = panoptic_res["Stuff"] + return stats, coco_evaluator diff --git a/models/cv/object_detection/detr/igie/export.py b/models/cv/object_detection/detr/igie/export.py new file mode 100644 index 00000000..dc284ec9 --- /dev/null +++ b/models/cv/object_detection/detr/igie/export.py @@ -0,0 +1,169 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import argparse +import datetime +import json +import random +import time +from pathlib import Path + +import numpy as np +import torch +from torch.utils.data import DataLoader, DistributedSampler + +import datasets +import util.misc as utils +from datasets import build_dataset, get_coco_api_from_dataset +from engine import evaluate, train_one_epoch +from models import build_model + + +def get_args_parser(): + parser = argparse.ArgumentParser('Set transformer detector', add_help=False) + parser.add_argument('--lr', default=1e-4, type=float) + parser.add_argument('--lr_backbone', default=1e-5, type=float) + parser.add_argument('--batch_size', default=32, type=int) + parser.add_argument('--weight_decay', default=1e-4, type=float) + parser.add_argument('--epochs', default=300, type=int) + parser.add_argument('--lr_drop', default=200, type=int) + parser.add_argument('--clip_max_norm', default=0.1, type=float, + help='gradient clipping max norm') + + # Model parameters + parser.add_argument('--frozen_weights', type=str, default=None, + help="Path to the pretrained model. If set, only the mask head will be trained") + # * Backbone + parser.add_argument('--backbone', default='resnet50', type=str, + help="Name of the convolutional backbone to use") + parser.add_argument('--dilation', action='store_true', + help="If true, we replace stride with dilation in the last convolutional block (DC5)") + parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), + help="Type of positional embedding to use on top of the image features") + + # * Transformer + parser.add_argument('--enc_layers', default=6, type=int, + help="Number of encoding layers in the transformer") + parser.add_argument('--dec_layers', default=6, type=int, + help="Number of decoding layers in the transformer") + parser.add_argument('--dim_feedforward', default=2048, type=int, + help="Intermediate size of the feedforward layers in the transformer blocks") + parser.add_argument('--hidden_dim', default=256, type=int, + help="Size of the embeddings (dimension of the transformer)") + parser.add_argument('--dropout', default=0.1, type=float, + help="Dropout applied in the transformer") + parser.add_argument('--nheads', default=8, type=int, + help="Number of attention heads inside the transformer's attentions") + parser.add_argument('--num_queries', default=100, type=int, + help="Number of query slots") + parser.add_argument('--pre_norm', action='store_true') + + # * Segmentation + parser.add_argument('--masks', action='store_true', + help="Train segmentation head if the flag is provided") + + # Loss + parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', + help="Disables auxiliary decoding losses (loss at each layer)") + # * Matcher + parser.add_argument('--set_cost_class', default=1, type=float, + help="Class coefficient in the matching cost") + parser.add_argument('--set_cost_bbox', default=5, type=float, + help="L1 box coefficient in the matching cost") + parser.add_argument('--set_cost_giou', default=2, type=float, + help="giou box coefficient in the matching cost") + # * Loss coefficients + parser.add_argument('--mask_loss_coef', default=1, type=float) + parser.add_argument('--dice_loss_coef', default=1, type=float) + parser.add_argument('--bbox_loss_coef', default=5, type=float) + parser.add_argument('--giou_loss_coef', default=2, type=float) + parser.add_argument('--eos_coef', default=0.1, type=float, + help="Relative classification weight of the no-object 
class") + + # dataset parameters + parser.add_argument('--dataset_file', default='coco') + parser.add_argument('--coco_path', type=str) + parser.add_argument('--coco_panoptic_path', type=str) + parser.add_argument('--remove_difficult', action='store_true') + parser.add_argument("--output", type=str, required=False, default="detr.onnx", help="export onnx model path.") + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=42, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true') + parser.add_argument('--num_workers', default=2, type=int) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') + + return parser + +from util.misc import NestedTensor +class DETRForExport(torch.nn.Module): + def __init__(self, original_model): + super().__init__() + self.model = original_model + + def forward(self, tensor, mask): + nested_input = NestedTensor(tensor, mask) + return self.model(nested_input) + +def main(args): + utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + + if args.frozen_weights is not None: + assert args.masks, "Frozen training is meant for segmentation only" + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + model, criterion, postprocessors = build_model(args) + model.to(device) + + model_without_ddp = model + + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + + export_model = DETRForExport(model_without_ddp) + export_model.eval() + + batch_size = args.batch_size + channels = 3 + height, width = 800, 800 + + dummy_input = torch.randn(batch_size, channels, height, width, device=device) + dummy_mask = torch.zeros(batch_size, height, width, dtype=torch.bool, device=device) + + output_file = args.output + + torch.onnx.export( + export_model, + (dummy_input, dummy_mask), + output_file, + input_names=['tensor', 'mask'], + output_names=['pred_logits', 'pred_boxes'], + opset_version=13, + do_constant_folding=True + ) + print(f"onnx file exported to {output_file}") + +if __name__ == '__main__': + parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) + args = parser.parse_args() + + main(args) diff --git a/models/cv/object_detection/detr/igie/hubconf.py b/models/cv/object_detection/detr/igie/hubconf.py new file mode 100644 index 00000000..328c3306 --- /dev/null +++ b/models/cv/object_detection/detr/igie/hubconf.py @@ -0,0 +1,168 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import torch + +from models.backbone import Backbone, Joiner +from models.detr import DETR, PostProcess +from models.position_encoding import PositionEmbeddingSine +from models.segmentation import DETRsegm, PostProcessPanoptic +from models.transformer import Transformer + +dependencies = ["torch", "torchvision"] + + +def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False): + hidden_dim = 256 + backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation) + pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True) + backbone_with_pos_enc = Joiner(backbone, pos_enc) + backbone_with_pos_enc.num_channels = backbone.num_channels + transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True) + detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100) + if mask: + return DETRsegm(detr) + return detr + + +def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False): + """ + DETR R50 with 6 encoder and 6 decoder layers. + + Achieves 42/62.4 AP/AP50 on COCO val5k. + """ + model = _make_detr("resnet50", dilation=False, num_classes=num_classes) + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcess() + return model + + +def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False): + """ + DETR-DC5 R50 with 6 encoder and 6 decoder layers. + + The last block of ResNet-50 has dilation to increase + output resolution. + Achieves 43.3/63.1 AP/AP50 on COCO val5k. + """ + model = _make_detr("resnet50", dilation=True, num_classes=num_classes) + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcess() + return model + + +def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False): + """ + DETR-DC5 R101 with 6 encoder and 6 decoder layers. + + Achieves 43.5/63.8 AP/AP50 on COCO val5k. + """ + model = _make_detr("resnet101", dilation=False, num_classes=num_classes) + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcess() + return model + + +def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False): + """ + DETR-DC5 R101 with 6 encoder and 6 decoder layers. + + The last block of ResNet-101 has dilation to increase + output resolution. + Achieves 44.9/64.7 AP/AP50 on COCO val5k. + """ + model = _make_detr("resnet101", dilation=True, num_classes=num_classes) + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcess() + return model + + +def detr_resnet50_panoptic( + pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False +): + """ + DETR R50 with 6 encoder and 6 decoder layers. 
+ Achieves 43.4 PQ on COCO val5k. + + threshold is the minimum confidence required for keeping segments in the prediction + """ + model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True) + is_thing_map = {i: i <= 90 for i in range(250)} + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth", + map_location="cpu", + check_hash=True, + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcessPanoptic(is_thing_map, threshold=threshold) + return model + + +def detr_resnet50_dc5_panoptic( + pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False +): + """ + DETR-DC5 R50 with 6 encoder and 6 decoder layers. + + The last block of ResNet-50 has dilation to increase + output resolution. + Achieves 44.6 on COCO val5k. + + threshold is the minimum confidence required for keeping segments in the prediction + """ + model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True) + is_thing_map = {i: i <= 90 for i in range(250)} + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth", + map_location="cpu", + check_hash=True, + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcessPanoptic(is_thing_map, threshold=threshold) + return model + + +def detr_resnet101_panoptic( + pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False +): + """ + DETR-DC5 R101 with 6 encoder and 6 decoder layers. + + Achieves 45.1 PQ on COCO val5k. + + threshold is the minimum confidence required for keeping segments in the prediction + """ + model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True) + is_thing_map = {i: i <= 90 for i in range(250)} + if pretrained: + checkpoint = torch.hub.load_state_dict_from_url( + url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth", + map_location="cpu", + check_hash=True, + ) + model.load_state_dict(checkpoint["model"]) + if return_postprocessor: + return model, PostProcessPanoptic(is_thing_map, threshold=threshold) + return model diff --git a/models/cv/object_detection/detr/igie/inference.py b/models/cv/object_detection/detr/igie/inference.py new file mode 100644 index 00000000..d71d830a --- /dev/null +++ b/models/cv/object_detection/detr/igie/inference.py @@ -0,0 +1,278 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import argparse +import datetime +import json +import random +import time +from pathlib import Path +import os + +import numpy as np +import torch +from torch.utils.data import DataLoader, DistributedSampler + +import datasets +import util.misc as utils +from datasets import build_dataset, get_coco_api_from_dataset +from models import build_model + +from datasets.coco_eval import CocoEvaluator +from datasets.panoptic_eval import PanopticEvaluator + +import tvm + +def get_args_parser(): + parser = argparse.ArgumentParser('Set transformer detector', add_help=False) + parser.add_argument('--engine_path', type=str) + parser.add_argument("--perf_only", type=bool, default=False, help="Run performance test only") + parser.add_argument('--lr', default=1e-4, type=float) + parser.add_argument('--lr_backbone', default=1e-5, type=float) + parser.add_argument('--batch_size', default=2, type=int) + parser.add_argument('--weight_decay', default=1e-4, type=float) + parser.add_argument('--epochs', default=300, type=int) + parser.add_argument('--lr_drop', default=200, type=int) + parser.add_argument('--clip_max_norm', default=0.1, type=float, + help='gradient clipping max norm') + + # Model parameters + parser.add_argument('--frozen_weights', type=str, default=None, + help="Path to the pretrained model. If set, only the mask head will be trained") + # * Backbone + parser.add_argument('--backbone', default='resnet50', type=str, + help="Name of the convolutional backbone to use") + parser.add_argument('--dilation', action='store_true', + help="If true, we replace stride with dilation in the last convolutional block (DC5)") + parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), + help="Type of positional embedding to use on top of the image features") + + # * Transformer + parser.add_argument('--enc_layers', default=6, type=int, + help="Number of encoding layers in the transformer") + parser.add_argument('--dec_layers', default=6, type=int, + help="Number of decoding layers in the transformer") + parser.add_argument('--dim_feedforward', default=2048, type=int, + help="Intermediate size of the feedforward layers in the transformer blocks") + parser.add_argument('--hidden_dim', default=256, type=int, + help="Size of the embeddings (dimension of the transformer)") + parser.add_argument('--dropout', default=0.1, type=float, + help="Dropout applied in the transformer") + parser.add_argument('--nheads', default=8, type=int, + help="Number of attention heads inside the transformer's attentions") + parser.add_argument('--num_queries', default=100, type=int, + help="Number of query slots") + parser.add_argument('--pre_norm', action='store_true') + + # * Segmentation + parser.add_argument('--masks', action='store_true', + help="Train segmentation head if the flag is provided") + + # Loss + parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', + help="Disables auxiliary decoding losses (loss at each layer)") + # * Matcher + parser.add_argument('--set_cost_class', default=1, type=float, + help="Class coefficient in the matching cost") + parser.add_argument('--set_cost_bbox', default=5, type=float, + help="L1 box coefficient in the matching cost") + parser.add_argument('--set_cost_giou', default=2, type=float, + help="giou box coefficient in the matching cost") + # * Loss coefficients + parser.add_argument('--mask_loss_coef', default=1, type=float) + parser.add_argument('--dice_loss_coef', default=1, type=float) + 
parser.add_argument('--bbox_loss_coef', default=5, type=float) + parser.add_argument('--giou_loss_coef', default=2, type=float) + parser.add_argument('--eos_coef', default=0.1, type=float, + help="Relative classification weight of the no-object class") + + # dataset parameters + parser.add_argument('--dataset_file', default='coco') + parser.add_argument('--coco_path', type=str) + parser.add_argument('--coco_panoptic_path', type=str) + parser.add_argument('--remove_difficult', action='store_true') + + parser.add_argument('--output_dir', default='', + help='path where to save, empty for no saving') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=42, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true') + parser.add_argument('--num_workers', default=2, type=int) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') + return parser + +def evaluate(batch_size, module, criterion, postprocessors, data_loader, base_ds, cuda_device, iluvatar_device, output_dir): + + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) + header = 'Test:' + + iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) + coco_evaluator = CocoEvaluator(base_ds, iou_types) + # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] + + panoptic_evaluator = None + if 'panoptic' in postprocessors.keys(): + panoptic_evaluator = PanopticEvaluator( + data_loader.dataset.ann_file, + data_loader.dataset.ann_folder, + output_dir=os.path.join(output_dir, "panoptic_eval"), + ) + + tvm_infer = True + for samples, targets in metric_logger.log_every(data_loader, 10, header): + samples = samples.to(cuda_device) + targets = [{k: v.to(cuda_device) for k, v in t.items()} for t in targets] + + if samples.tensors.shape[0] < batch_size: + pad_len = batch_size - samples.tensors.shape[0] + + tensors = samples.tensors.cpu().numpy() + mask = samples.mask.cpu().numpy() + + tensors = np.concatenate([tensors, np.zeros((pad_len, samples.tensors.shape[1], samples.tensors.shape[2], samples.tensors.shape[3]), dtype=tensors.dtype)], axis=0) + mask = np.concatenate([mask, np.zeros((pad_len, samples.mask.shape[1], samples.mask.shape[2]), dtype=mask.dtype)], axis=0) + + + module.set_input("tensor", tvm.nd.array(tensors, iluvatar_device)) + module.set_input("mask", tvm.nd.array(mask, iluvatar_device)) + + module.run() + + pred_logits = module.get_output(0).asnumpy()[:samples.tensors.shape[0]] + pred_boxes = module.get_output(1).asnumpy()[:samples.tensors.shape[0]] + + else: + tensors = samples.tensors.cpu().numpy() + mask = samples.mask.cpu().numpy() + + module.set_input("tensor", tvm.nd.array(samples.tensors.cpu().numpy(), iluvatar_device)) + module.set_input("mask", tvm.nd.array(samples.mask.cpu().numpy(), iluvatar_device)) + + module.run() + + pred_logits = module.get_output(0).asnumpy() + pred_boxes = module.get_output(1).asnumpy() + + + outputs = {"pred_logits": torch.from_numpy(pred_logits).cuda(), "pred_boxes": torch.from_numpy(pred_boxes).cuda()} + loss_dict = 
criterion(outputs, targets) + weight_dict = criterion.weight_dict + + # reduce losses over all GPUs for logging purposes + loss_dict_reduced = utils.reduce_dict(loss_dict) + loss_dict_reduced_scaled = {k: v * weight_dict[k] + for k, v in loss_dict_reduced.items() if k in weight_dict} + loss_dict_reduced_unscaled = {f'{k}_unscaled': v + for k, v in loss_dict_reduced.items()} + metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), + **loss_dict_reduced_scaled, + **loss_dict_reduced_unscaled) + metric_logger.update(class_error=loss_dict_reduced['class_error']) + + orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) + results = postprocessors['bbox'](outputs, orig_target_sizes) + if 'segm' in postprocessors.keys(): + target_sizes = torch.stack([t["size"] for t in targets], dim=0) + results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) + res = {target['image_id'].item(): output for target, output in zip(targets, results)} + if coco_evaluator is not None: + coco_evaluator.update(res) + + if panoptic_evaluator is not None: + res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) + for i, target in enumerate(targets): + image_id = target["image_id"].item() + file_name = f"{image_id:012d}.png" + res_pano[i]["image_id"] = image_id + res_pano[i]["file_name"] = file_name + + panoptic_evaluator.update(res_pano) + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + if coco_evaluator is not None: + coco_evaluator.synchronize_between_processes() + if panoptic_evaluator is not None: + panoptic_evaluator.synchronize_between_processes() + + # accumulate predictions from all images + if coco_evaluator is not None: + coco_evaluator.accumulate() + coco_evaluator.summarize() + panoptic_res = None + if panoptic_evaluator is not None: + panoptic_res = panoptic_evaluator.summarize() + stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} + if coco_evaluator is not None: + if 'bbox' in postprocessors.keys(): + stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() + if 'segm' in postprocessors.keys(): + stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() + if panoptic_res is not None: + stats['PQ_all'] = panoptic_res["All"] + stats['PQ_th'] = panoptic_res["Things"] + stats['PQ_st'] = panoptic_res["Stuff"] + return stats, coco_evaluator + +def main(args): + utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + + print(args) + + cuda_device = torch.device(args.device) + + target = tvm.target.iluvatar(model="MR", options="-libs=cudnn,cublas,ixinfer") + iluvatar_device = tvm.device(target.kind.name, 0) + + _, criterion, postprocessors = build_model(args) + dataset_val = build_dataset(image_set='val', args=args) + + if args.distributed: + sampler_val = DistributedSampler(dataset_val, shuffle=False) + else: + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, + drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers) + + if args.dataset_file == "coco_panoptic": + # We also evaluate AP during panoptic training, on original coco DS + coco_val = datasets.coco.build("val", args) + base_ds = get_coco_api_from_dataset(coco_val) + else: + base_ds = get_coco_api_from_dataset(dataset_val) + + + output_dir = Path(args.output_dir) + + lib = 
tvm.runtime.load_module(args.engine_path) + module = tvm.contrib.graph_executor.GraphModule(lib["default"](iluvatar_device)) + + if args.perf_only: + ftimer = module.module.time_evaluator("run", iluvatar_device, number=100, repeat=1) + prof_res = np.array(ftimer().results) * 1000 + fps = args.batch_size * 1000 / np.mean(prof_res) + print(f"\n* Mean inference time: {np.mean(prof_res):.3f} ms, Mean fps: {fps:.3f}") + return + + + test_stats, coco_evaluator = evaluate(args.batch_size, module, criterion, postprocessors, + data_loader_val, base_ds, cuda_device, iluvatar_device, args.output_dir) + if args.output_dir: + utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth") + +if __name__ == '__main__': + parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) + args = parser.parse_args() + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + main(args) diff --git a/models/cv/object_detection/detr/igie/main.py b/models/cv/object_detection/detr/igie/main.py new file mode 100644 index 00000000..e5f9eff8 --- /dev/null +++ b/models/cv/object_detection/detr/igie/main.py @@ -0,0 +1,248 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import argparse +import datetime +import json +import random +import time +from pathlib import Path + +import numpy as np +import torch +from torch.utils.data import DataLoader, DistributedSampler + +import datasets +import util.misc as utils +from datasets import build_dataset, get_coco_api_from_dataset +from engine import evaluate, train_one_epoch +from models import build_model + + +def get_args_parser(): + parser = argparse.ArgumentParser('Set transformer detector', add_help=False) + parser.add_argument('--lr', default=1e-4, type=float) + parser.add_argument('--lr_backbone', default=1e-5, type=float) + parser.add_argument('--batch_size', default=2, type=int) + parser.add_argument('--weight_decay', default=1e-4, type=float) + parser.add_argument('--epochs', default=300, type=int) + parser.add_argument('--lr_drop', default=200, type=int) + parser.add_argument('--clip_max_norm', default=0.1, type=float, + help='gradient clipping max norm') + + # Model parameters + parser.add_argument('--frozen_weights', type=str, default=None, + help="Path to the pretrained model. 
If set, only the mask head will be trained") + # * Backbone + parser.add_argument('--backbone', default='resnet50', type=str, + help="Name of the convolutional backbone to use") + parser.add_argument('--dilation', action='store_true', + help="If true, we replace stride with dilation in the last convolutional block (DC5)") + parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), + help="Type of positional embedding to use on top of the image features") + + # * Transformer + parser.add_argument('--enc_layers', default=6, type=int, + help="Number of encoding layers in the transformer") + parser.add_argument('--dec_layers', default=6, type=int, + help="Number of decoding layers in the transformer") + parser.add_argument('--dim_feedforward', default=2048, type=int, + help="Intermediate size of the feedforward layers in the transformer blocks") + parser.add_argument('--hidden_dim', default=256, type=int, + help="Size of the embeddings (dimension of the transformer)") + parser.add_argument('--dropout', default=0.1, type=float, + help="Dropout applied in the transformer") + parser.add_argument('--nheads', default=8, type=int, + help="Number of attention heads inside the transformer's attentions") + parser.add_argument('--num_queries', default=100, type=int, + help="Number of query slots") + parser.add_argument('--pre_norm', action='store_true') + + # * Segmentation + parser.add_argument('--masks', action='store_true', + help="Train segmentation head if the flag is provided") + + # Loss + parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', + help="Disables auxiliary decoding losses (loss at each layer)") + # * Matcher + parser.add_argument('--set_cost_class', default=1, type=float, + help="Class coefficient in the matching cost") + parser.add_argument('--set_cost_bbox', default=5, type=float, + help="L1 box coefficient in the matching cost") + parser.add_argument('--set_cost_giou', default=2, type=float, + help="giou box coefficient in the matching cost") + # * Loss coefficients + parser.add_argument('--mask_loss_coef', default=1, type=float) + parser.add_argument('--dice_loss_coef', default=1, type=float) + parser.add_argument('--bbox_loss_coef', default=5, type=float) + parser.add_argument('--giou_loss_coef', default=2, type=float) + parser.add_argument('--eos_coef', default=0.1, type=float, + help="Relative classification weight of the no-object class") + + # dataset parameters + parser.add_argument('--dataset_file', default='coco') + parser.add_argument('--coco_path', type=str) + parser.add_argument('--coco_panoptic_path', type=str) + parser.add_argument('--remove_difficult', action='store_true') + + parser.add_argument('--output_dir', default='', + help='path where to save, empty for no saving') + parser.add_argument('--device', default='cuda', + help='device to use for training / testing') + parser.add_argument('--seed', default=42, type=int) + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start_epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument('--eval', action='store_true') + parser.add_argument('--num_workers', default=2, type=int) + + # distributed training parameters + parser.add_argument('--world_size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') + return parser + + +def main(args): + 
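+    # Overview of the reference DETR training/evaluation flow implemented below:
+    #   1. initialize distributed mode and seed the RNGs for reproducibility,
+    #   2. build the model, criterion and postprocessors, wrapping the model in DDP when distributed,
+    #   3. create AdamW parameter groups (the backbone uses the lower --lr_backbone) with a StepLR drop,
+    #   4. build the COCO train/val dataloaders and optionally resume from a checkpoint,
+    #   5. either run a single evaluation pass (--eval) or the epoch loop with periodic
+    #      checkpointing and per-epoch COCO evaluation.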
utils.init_distributed_mode(args) + print("git:\n {}\n".format(utils.get_sha())) + + if args.frozen_weights is not None: + assert args.masks, "Frozen training is meant for segmentation only" + print(args) + + device = torch.device(args.device) + + # fix the seed for reproducibility + seed = args.seed + utils.get_rank() + torch.manual_seed(seed) + np.random.seed(seed) + random.seed(seed) + + model, criterion, postprocessors = build_model(args) + model.to(device) + + model_without_ddp = model + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) + print('number of params:', n_parameters) + + param_dicts = [ + {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]}, + { + "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad], + "lr": args.lr_backbone, + }, + ] + optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, + weight_decay=args.weight_decay) + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) + + dataset_train = build_dataset(image_set='train', args=args) + dataset_val = build_dataset(image_set='val', args=args) + + if args.distributed: + sampler_train = DistributedSampler(dataset_train) + sampler_val = DistributedSampler(dataset_val, shuffle=False) + else: + sampler_train = torch.utils.data.RandomSampler(dataset_train) + sampler_val = torch.utils.data.SequentialSampler(dataset_val) + + batch_sampler_train = torch.utils.data.BatchSampler( + sampler_train, args.batch_size, drop_last=True) + + data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, + collate_fn=utils.collate_fn, num_workers=args.num_workers) + data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, + drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers) + + if args.dataset_file == "coco_panoptic": + # We also evaluate AP during panoptic training, on original coco DS + coco_val = datasets.coco.build("val", args) + base_ds = get_coco_api_from_dataset(coco_val) + else: + base_ds = get_coco_api_from_dataset(dataset_val) + + if args.frozen_weights is not None: + checkpoint = torch.load(args.frozen_weights, map_location='cpu') + model_without_ddp.detr.load_state_dict(checkpoint['model']) + + output_dir = Path(args.output_dir) + if args.resume: + if args.resume.startswith('https'): + checkpoint = torch.hub.load_state_dict_from_url( + args.resume, map_location='cpu', check_hash=True) + else: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint: + optimizer.load_state_dict(checkpoint['optimizer']) + lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + args.start_epoch = checkpoint['epoch'] + 1 + + if args.eval: + test_stats, coco_evaluator = evaluate(model, criterion, postprocessors, + data_loader_val, base_ds, device, args.output_dir) + if args.output_dir: + utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth") + return + + print("Start training") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + sampler_train.set_epoch(epoch) + train_stats = train_one_epoch( + model, criterion, data_loader_train, optimizer, device, 
epoch, + args.clip_max_norm) + lr_scheduler.step() + if args.output_dir: + checkpoint_paths = [output_dir / 'checkpoint.pth'] + # extra checkpoint before LR drop and every 100 epochs + if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0: + checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth') + for checkpoint_path in checkpoint_paths: + utils.save_on_master({ + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'epoch': epoch, + 'args': args, + }, checkpoint_path) + + test_stats, coco_evaluator = evaluate( + model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir + ) + + log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, + **{f'test_{k}': v for k, v in test_stats.items()}, + 'epoch': epoch, + 'n_parameters': n_parameters} + + if args.output_dir and utils.is_main_process(): + with (output_dir / "log.txt").open("a") as f: + f.write(json.dumps(log_stats) + "\n") + + # for evaluation logs + if coco_evaluator is not None: + (output_dir / 'eval').mkdir(exist_ok=True) + if "bbox" in coco_evaluator.coco_eval: + filenames = ['latest.pth'] + if epoch % 50 == 0: + filenames.append(f'{epoch:03}.pth') + for name in filenames: + torch.save(coco_evaluator.coco_eval["bbox"].eval, + output_dir / "eval" / name) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) + args = parser.parse_args() + if args.output_dir: + Path(args.output_dir).mkdir(parents=True, exist_ok=True) + main(args) diff --git a/models/cv/object_detection/detr/igie/models/__init__.py b/models/cv/object_detection/detr/igie/models/__init__.py new file mode 100644 index 00000000..a3f26531 --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +from .detr import build + + +def build_model(args): + return build(args) diff --git a/models/cv/object_detection/detr/igie/models/backbone.py b/models/cv/object_detection/detr/igie/models/backbone.py new file mode 100644 index 00000000..96680932 --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/backbone.py @@ -0,0 +1,119 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Backbone modules. +""" +from collections import OrderedDict + +import torch +import torch.nn.functional as F +import torchvision +from torch import nn +from torchvision.models._utils import IntermediateLayerGetter +from typing import Dict, List + +from util.misc import NestedTensor, is_main_process + +from .position_encoding import build_position_encoding + + +class FrozenBatchNorm2d(torch.nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters are fixed. + + Copy-paste from torchvision.misc.ops with added eps before rqsrt, + without which any other models than torchvision.models.resnet[18,34,50,101] + produce nans. 
+ """ + + def __init__(self, n): + super(FrozenBatchNorm2d, self).__init__() + self.register_buffer("weight", torch.ones(n)) + self.register_buffer("bias", torch.zeros(n)) + self.register_buffer("running_mean", torch.zeros(n)) + self.register_buffer("running_var", torch.ones(n)) + + def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs): + num_batches_tracked_key = prefix + 'num_batches_tracked' + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super(FrozenBatchNorm2d, self)._load_from_state_dict( + state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + def forward(self, x): + # move reshapes to the beginning + # to make it fuser-friendly + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + rv = self.running_var.reshape(1, -1, 1, 1) + rm = self.running_mean.reshape(1, -1, 1, 1) + eps = 1e-5 + scale = w * (rv + eps).rsqrt() + bias = b - rm * scale + return x * scale + bias + + +class BackboneBase(nn.Module): + + def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): + super().__init__() + for name, parameter in backbone.named_parameters(): + if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: + parameter.requires_grad_(False) + if return_interm_layers: + return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} + else: + return_layers = {'layer4': "0"} + self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) + self.num_channels = num_channels + + def forward(self, tensor_list: NestedTensor): + xs = self.body(tensor_list.tensors) + out: Dict[str, NestedTensor] = {} + for name, x in xs.items(): + m = tensor_list.mask + assert m is not None + mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] + out[name] = NestedTensor(x, mask) + return out + + +class Backbone(BackboneBase): + """ResNet backbone with frozen BatchNorm.""" + def __init__(self, name: str, + train_backbone: bool, + return_interm_layers: bool, + dilation: bool): + backbone = getattr(torchvision.models, name)( + replace_stride_with_dilation=[False, False, dilation], + pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) + num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 + super().__init__(backbone, train_backbone, num_channels, return_interm_layers) + + +class Joiner(nn.Sequential): + def __init__(self, backbone, position_embedding): + super().__init__(backbone, position_embedding) + + def forward(self, tensor_list: NestedTensor): + xs = self[0](tensor_list) + out: List[NestedTensor] = [] + pos = [] + for name, x in xs.items(): + out.append(x) + # position encoding + pos.append(self[1](x).to(x.tensors.dtype)) + + return out, pos + + +def build_backbone(args): + position_embedding = build_position_encoding(args) + train_backbone = args.lr_backbone > 0 + return_interm_layers = args.masks + backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) + model = Joiner(backbone, position_embedding) + model.num_channels = backbone.num_channels + return model diff --git a/models/cv/object_detection/detr/igie/models/detr.py b/models/cv/object_detection/detr/igie/models/detr.py new file mode 100644 index 00000000..23c2376d --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/detr.py @@ -0,0 +1,359 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +""" +DETR model and criterion classes. +""" +import torch +import torch.nn.functional as F +from torch import nn + +from util import box_ops +from util.misc import (NestedTensor, nested_tensor_from_tensor_list, + accuracy, get_world_size, interpolate, + is_dist_avail_and_initialized) + +from .backbone import build_backbone +from .matcher import build_matcher +from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, + dice_loss, sigmoid_focal_loss) +from .transformer import build_transformer + + +class DETR(nn.Module): + """ This is the DETR module that performs object detection """ + def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False): + """ Initializes the model. + Parameters: + backbone: torch module of the backbone to be used. See backbone.py + transformer: torch module of the transformer architecture. See transformer.py + num_classes: number of object classes + num_queries: number of object queries, ie detection slot. This is the maximal number of objects + DETR can detect in a single image. For COCO, we recommend 100 queries. + aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. + """ + super().__init__() + self.num_queries = num_queries + self.transformer = transformer + hidden_dim = transformer.d_model + self.class_embed = nn.Linear(hidden_dim, num_classes + 1) + self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) + self.query_embed = nn.Embedding(num_queries, hidden_dim) + self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) + self.backbone = backbone + self.aux_loss = aux_loss + + def forward(self, samples: NestedTensor): + """ The forward expects a NestedTensor, which consists of: + - samples.tensor: batched images, of shape [batch_size x 3 x H x W] + - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels + + It returns a dict with the following elements: + - "pred_logits": the classification logits (including no-object) for all queries. + Shape= [batch_size x num_queries x (num_classes + 1)] + - "pred_boxes": The normalized boxes coordinates for all queries, represented as + (center_x, center_y, height, width). These values are normalized in [0, 1], + relative to the size of each individual image (disregarding possible padding). + See PostProcess for information on how to retrieve the unnormalized bounding box. + - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of + dictionnaries containing the two above keys for each decoder layer. + """ + if isinstance(samples, (list, torch.Tensor)): + samples = nested_tensor_from_tensor_list(samples) + features, pos = self.backbone(samples) + + src, mask = features[-1].decompose() + assert mask is not None + hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0] + + outputs_class = self.class_embed(hs) + outputs_coord = self.bbox_embed(hs).sigmoid() + out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} + if self.aux_loss: + out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) + return out + + @torch.jit.unused + def _set_aux_loss(self, outputs_class, outputs_coord): + # this is a workaround to make torchscript happy, as torchscript + # doesn't support dictionary with non-homogeneous values, such + # as a dict having both a Tensor and a list. 
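+        # Only the intermediate decoder layers ([:-1]) are kept here; the final layer's
+        # predictions are already returned as 'pred_logits'/'pred_boxes' in forward().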
+ return [{'pred_logits': a, 'pred_boxes': b} + for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] + + +class SetCriterion(nn.Module): + """ This class computes the loss for DETR. + The process happens in two steps: + 1) we compute hungarian assignment between ground truth boxes and the outputs of the model + 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) + """ + def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): + """ Create the criterion. + Parameters: + num_classes: number of object categories, omitting the special no-object category + matcher: module able to compute a matching between targets and proposals + weight_dict: dict containing as key the names of the losses and as values their relative weight. + eos_coef: relative classification weight applied to the no-object category + losses: list of all the losses to be applied. See get_loss for list of available losses. + """ + super().__init__() + self.num_classes = num_classes + self.matcher = matcher + self.weight_dict = weight_dict + self.eos_coef = eos_coef + self.losses = losses + empty_weight = torch.ones(self.num_classes + 1) + empty_weight[-1] = self.eos_coef + self.register_buffer('empty_weight', empty_weight) + + def loss_labels(self, outputs, targets, indices, num_boxes, log=True): + """Classification loss (NLL) + targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] + """ + assert 'pred_logits' in outputs + src_logits = outputs['pred_logits'] + + idx = self._get_src_permutation_idx(indices) + target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) + target_classes = torch.full(src_logits.shape[:2], self.num_classes, + dtype=torch.int64, device=src_logits.device) + target_classes[idx] = target_classes_o + + loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) + losses = {'loss_ce': loss_ce} + + if log: + # TODO this should probably be a separate loss, not hacked in this one here + losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] + return losses + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, indices, num_boxes): + """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes + This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients + """ + pred_logits = outputs['pred_logits'] + device = pred_logits.device + tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) + # Count the number of predictions that are NOT "no-object" (which is the last class) + card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) + card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) + losses = {'cardinality_error': card_err} + return losses + + def loss_boxes(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss + targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] + The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
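+           Both the L1 and the GIoU terms below are summed and divided by num_boxes, i.e. averaged
+           over the total number of target boxes in the batch.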
+ """ + assert 'pred_boxes' in outputs + idx = self._get_src_permutation_idx(indices) + src_boxes = outputs['pred_boxes'][idx] + target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) + + loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') + + losses = {} + losses['loss_bbox'] = loss_bbox.sum() / num_boxes + + loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( + box_ops.box_cxcywh_to_xyxy(src_boxes), + box_ops.box_cxcywh_to_xyxy(target_boxes))) + losses['loss_giou'] = loss_giou.sum() / num_boxes + return losses + + def loss_masks(self, outputs, targets, indices, num_boxes): + """Compute the losses related to the masks: the focal loss and the dice loss. + targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] + """ + assert "pred_masks" in outputs + + src_idx = self._get_src_permutation_idx(indices) + tgt_idx = self._get_tgt_permutation_idx(indices) + src_masks = outputs["pred_masks"] + src_masks = src_masks[src_idx] + masks = [t["masks"] for t in targets] + # TODO use valid to mask invalid areas due to padding in loss + target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() + target_masks = target_masks.to(src_masks) + target_masks = target_masks[tgt_idx] + + # upsample predictions to the target size + src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], + mode="bilinear", align_corners=False) + src_masks = src_masks[:, 0].flatten(1) + + target_masks = target_masks.flatten(1) + target_masks = target_masks.view(src_masks.shape) + losses = { + "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), + "loss_dice": dice_loss(src_masks, target_masks, num_boxes), + } + return losses + + def _get_src_permutation_idx(self, indices): + # permute predictions following indices + batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) + src_idx = torch.cat([src for (src, _) in indices]) + return batch_idx, src_idx + + def _get_tgt_permutation_idx(self, indices): + # permute targets following indices + batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) + tgt_idx = torch.cat([tgt for (_, tgt) in indices]) + return batch_idx, tgt_idx + + def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): + loss_map = { + 'labels': self.loss_labels, + 'cardinality': self.loss_cardinality, + 'boxes': self.loss_boxes, + 'masks': self.loss_masks + } + assert loss in loss_map, f'do you really want to compute {loss} loss?' + return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) + + def forward(self, outputs, targets): + """ This performs the loss computation. + Parameters: + outputs: dict of tensors, see the output specification of the model for the format + targets: list of dicts, such that len(targets) == batch_size. 
+ The expected keys in each dict depends on the losses applied, see each loss' doc + """ + outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} + + # Retrieve the matching between the outputs of the last layer and the targets + indices = self.matcher(outputs_without_aux, targets) + + # Compute the average number of target boxes accross all nodes, for normalization purposes + num_boxes = sum(len(t["labels"]) for t in targets) + num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) + if is_dist_avail_and_initialized(): + torch.distributed.all_reduce(num_boxes) + num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() + + # Compute all the requested losses + losses = {} + for loss in self.losses: + losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) + + # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. + if 'aux_outputs' in outputs: + for i, aux_outputs in enumerate(outputs['aux_outputs']): + indices = self.matcher(aux_outputs, targets) + for loss in self.losses: + if loss == 'masks': + # Intermediate masks losses are too costly to compute, we ignore them. + continue + kwargs = {} + if loss == 'labels': + # Logging is enabled only for the last layer + kwargs = {'log': False} + l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) + l_dict = {k + f'_{i}': v for k, v in l_dict.items()} + losses.update(l_dict) + + return losses + + +class PostProcess(nn.Module): + """ This module converts the model's output into the format expected by the coco api""" + @torch.no_grad() + def forward(self, outputs, target_sizes): + """ Perform the computation + Parameters: + outputs: raw outputs of the model + target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch + For evaluation, this must be the original image size (before any data augmentation) + For visualization, this should be the image size after data augment, but before padding + """ + out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] + + assert len(out_logits) == len(target_sizes) + assert target_sizes.shape[1] == 2 + + prob = F.softmax(out_logits, -1) + scores, labels = prob[..., :-1].max(-1) + + # convert to [x0, y0, x1, y1] format + boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) + # and from relative [0, 1] to absolute [0, height] coordinates + img_h, img_w = target_sizes.unbind(1) + scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) + boxes = boxes * scale_fct[:, None, :] + + results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] + + return results + + +class MLP(nn.Module): + """ Very simple multi-layer perceptron (also called FFN)""" + + def __init__(self, input_dim, hidden_dim, output_dim, num_layers): + super().__init__() + self.num_layers = num_layers + h = [hidden_dim] * (num_layers - 1) + self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) + + def forward(self, x): + for i, layer in enumerate(self.layers): + x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) + return x + + +def build(args): + # the `num_classes` naming here is somewhat misleading. + # it indeed corresponds to `max_obj_id + 1`, where max_obj_id + # is the maximum id for a class in your dataset. For example, + # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. 
+ # As another example, for a dataset that has a single class with id 1, + # you should pass `num_classes` to be 2 (max_obj_id + 1). + # For more details on this, check the following discussion + # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 + num_classes = 20 if args.dataset_file != 'coco' else 91 + if args.dataset_file == "coco_panoptic": + # for panoptic, we just add a num_classes that is large enough to hold + # max_obj_id + 1, but the exact value doesn't really matter + num_classes = 250 + device = torch.device(args.device) + + backbone = build_backbone(args) + + transformer = build_transformer(args) + + model = DETR( + backbone, + transformer, + num_classes=num_classes, + num_queries=args.num_queries, + aux_loss=args.aux_loss, + ) + if args.masks: + model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) + matcher = build_matcher(args) + weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef} + weight_dict['loss_giou'] = args.giou_loss_coef + if args.masks: + weight_dict["loss_mask"] = args.mask_loss_coef + weight_dict["loss_dice"] = args.dice_loss_coef + # TODO this is a hack + if args.aux_loss: + aux_weight_dict = {} + for i in range(args.dec_layers - 1): + aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()}) + weight_dict.update(aux_weight_dict) + + losses = ['labels', 'boxes', 'cardinality'] + if args.masks: + losses += ["masks"] + criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, + eos_coef=args.eos_coef, losses=losses) + criterion.to(device) + postprocessors = {'bbox': PostProcess()} + if args.masks: + postprocessors['segm'] = PostProcessSegm() + if args.dataset_file == "coco_panoptic": + is_thing_map = {i: i <= 90 for i in range(201)} + postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85) + + return model, criterion, postprocessors diff --git a/models/cv/object_detection/detr/igie/models/matcher.py b/models/cv/object_detection/detr/igie/models/matcher.py new file mode 100644 index 00000000..0c291473 --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/matcher.py @@ -0,0 +1,86 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Modules to compute the matching cost and solve the corresponding LSAP. +""" +import torch +from scipy.optimize import linear_sum_assignment +from torch import nn + +from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou + + +class HungarianMatcher(nn.Module): + """This class computes an assignment between the targets and the predictions of the network + + For efficiency reasons, the targets don't include the no_object. Because of this, in general, + there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, + while the others are un-matched (and thus treated as non-objects). 
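+    In practice the assignment is solved exactly with scipy.optimize.linear_sum_assignment on a
+    cost matrix that combines the classification, L1 box and GIoU costs (see forward below).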
+ """ + + def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): + """Creates the matcher + + Params: + cost_class: This is the relative weight of the classification error in the matching cost + cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost + cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost + """ + super().__init__() + self.cost_class = cost_class + self.cost_bbox = cost_bbox + self.cost_giou = cost_giou + assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" + + @torch.no_grad() + def forward(self, outputs, targets): + """ Performs the matching + + Params: + outputs: This is a dict that contains at least these entries: + "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits + "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates + + targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: + "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth + objects in the target) containing the class labels + "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates + + Returns: + A list of size batch_size, containing tuples of (index_i, index_j) where: + - index_i is the indices of the selected predictions (in order) + - index_j is the indices of the corresponding selected targets (in order) + For each batch element, it holds: + len(index_i) = len(index_j) = min(num_queries, num_target_boxes) + """ + bs, num_queries = outputs["pred_logits"].shape[:2] + + # We flatten to compute the cost matrices in a batch + out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] + out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] + + # Also concat the target labels and boxes + tgt_ids = torch.cat([v["labels"] for v in targets]) + tgt_bbox = torch.cat([v["boxes"] for v in targets]) + + # Compute the classification cost. Contrary to the loss, we don't use the NLL, + # but approximate it in 1 - proba[target class]. + # The 1 is a constant that doesn't change the matching, it can be ommitted. + cost_class = -out_prob[:, tgt_ids] + + # Compute the L1 cost between boxes + cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) + + # Compute the giou cost betwen boxes + cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) + + # Final cost matrix + C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou + C = C.view(bs, num_queries, -1).cpu() + + sizes = [len(v["boxes"]) for v in targets] + indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] + return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] + + +def build_matcher(args): + return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou) diff --git a/models/cv/object_detection/detr/igie/models/position_encoding.py b/models/cv/object_detection/detr/igie/models/position_encoding.py new file mode 100644 index 00000000..73ae39ed --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/position_encoding.py @@ -0,0 +1,89 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +""" +Various positional encodings for the transformer. +""" +import math +import torch +from torch import nn + +from util.misc import NestedTensor + + +class PositionEmbeddingSine(nn.Module): + """ + This is a more standard version of the position embedding, very similar to the one + used by the Attention is all you need paper, generalized to work on images. + """ + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + mask = tensor_list.mask + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(1, dtype=torch.float32) + x_embed = not_mask.cumsum(2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) + dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) + + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + return pos + + +class PositionEmbeddingLearned(nn.Module): + """ + Absolute pos embedding, learned. + """ + def __init__(self, num_pos_feats=256): + super().__init__() + self.row_embed = nn.Embedding(50, num_pos_feats) + self.col_embed = nn.Embedding(50, num_pos_feats) + self.reset_parameters() + + def reset_parameters(self): + nn.init.uniform_(self.row_embed.weight) + nn.init.uniform_(self.col_embed.weight) + + def forward(self, tensor_list: NestedTensor): + x = tensor_list.tensors + h, w = x.shape[-2:] + i = torch.arange(w, device=x.device) + j = torch.arange(h, device=x.device) + x_emb = self.col_embed(i) + y_emb = self.row_embed(j) + pos = torch.cat([ + x_emb.unsqueeze(0).repeat(h, 1, 1), + y_emb.unsqueeze(1).repeat(1, w, 1), + ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) + return pos + + +def build_position_encoding(args): + N_steps = args.hidden_dim // 2 + if args.position_embedding in ('v2', 'sine'): + # TODO find a better way of exposing other arguments + position_embedding = PositionEmbeddingSine(N_steps, normalize=True) + elif args.position_embedding in ('v3', 'learned'): + position_embedding = PositionEmbeddingLearned(N_steps) + else: + raise ValueError(f"not supported {args.position_embedding}") + + return position_embedding diff --git a/models/cv/object_detection/detr/igie/models/segmentation.py b/models/cv/object_detection/detr/igie/models/segmentation.py new file mode 100644 index 00000000..01faa885 --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/segmentation.py @@ -0,0 +1,363 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +""" +This file provides the definition of the convolutional heads used to predict masks, as well as the losses +""" +import io +from collections import defaultdict +from typing import List, Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from PIL import Image + +import util.box_ops as box_ops +from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list + +try: + from panopticapi.utils import id2rgb, rgb2id +except ImportError: + pass + + +class DETRsegm(nn.Module): + def __init__(self, detr, freeze_detr=False): + super().__init__() + self.detr = detr + + if freeze_detr: + for p in self.parameters(): + p.requires_grad_(False) + + hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead + self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) + self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) + + def forward(self, samples: NestedTensor): + if isinstance(samples, (list, torch.Tensor)): + samples = nested_tensor_from_tensor_list(samples) + features, pos = self.detr.backbone(samples) + + bs = features[-1].tensors.shape[0] + + src, mask = features[-1].decompose() + assert mask is not None + src_proj = self.detr.input_proj(src) + hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) + + outputs_class = self.detr.class_embed(hs) + outputs_coord = self.detr.bbox_embed(hs).sigmoid() + out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} + if self.detr.aux_loss: + out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) + + # FIXME h_boxes takes the last one computed, keep this in mind + bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) + + seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) + outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) + + out["pred_masks"] = outputs_seg_masks + return out + + +def _expand(tensor, length: int): + return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) + + +class MaskHeadSmallConv(nn.Module): + """ + Simple convolutional head, using group norm. 
+ Upsampling is done using a FPN approach + """ + + def __init__(self, dim, fpn_dims, context_dim): + super().__init__() + + inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] + self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) + self.gn1 = torch.nn.GroupNorm(8, dim) + self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) + self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) + self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) + self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) + self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) + self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) + self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) + self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) + self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) + + self.dim = dim + + self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) + self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) + self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_uniform_(m.weight, a=1) + nn.init.constant_(m.bias, 0) + + def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): + x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) + + x = self.lay1(x) + x = self.gn1(x) + x = F.relu(x) + x = self.lay2(x) + x = self.gn2(x) + x = F.relu(x) + + cur_fpn = self.adapter1(fpns[0]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay3(x) + x = self.gn3(x) + x = F.relu(x) + + cur_fpn = self.adapter2(fpns[1]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay4(x) + x = self.gn4(x) + x = F.relu(x) + + cur_fpn = self.adapter3(fpns[2]) + if cur_fpn.size(0) != x.size(0): + cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) + x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") + x = self.lay5(x) + x = self.gn5(x) + x = F.relu(x) + + x = self.out_lay(x) + return x + + +class MHAttentionMap(nn.Module): + """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" + + def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): + super().__init__() + self.num_heads = num_heads + self.hidden_dim = hidden_dim + self.dropout = nn.Dropout(dropout) + + self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) + self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) + + nn.init.zeros_(self.k_linear.bias) + nn.init.zeros_(self.q_linear.bias) + nn.init.xavier_uniform_(self.k_linear.weight) + nn.init.xavier_uniform_(self.q_linear.weight) + self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 + + def forward(self, q, k, mask: Optional[Tensor] = None): + q = self.q_linear(q) + k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) + qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) + kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) + weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) + + if mask is not None: + weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) + weights 
= F.softmax(weights.flatten(2), dim=-1).view(weights.size()) + weights = self.dropout(weights) + return weights + + +def dice_loss(inputs, targets, num_boxes): + """ + Compute the DICE loss, similar to generalized IOU for masks + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + """ + inputs = inputs.sigmoid() + inputs = inputs.flatten(1) + numerator = 2 * (inputs * targets).sum(1) + denominator = inputs.sum(-1) + targets.sum(-1) + loss = 1 - (numerator + 1) / (denominator + 1) + return loss.sum() / num_boxes + + +def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): + """ + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples. Default = -1 (no weighting). + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. + Returns: + Loss tensor + """ + prob = inputs.sigmoid() + ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") + p_t = prob * targets + (1 - prob) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + return loss.mean(1).sum() / num_boxes + + +class PostProcessSegm(nn.Module): + def __init__(self, threshold=0.5): + super().__init__() + self.threshold = threshold + + @torch.no_grad() + def forward(self, results, outputs, orig_target_sizes, max_target_sizes): + assert len(orig_target_sizes) == len(max_target_sizes) + max_h, max_w = max_target_sizes.max(0)[0].tolist() + outputs_masks = outputs["pred_masks"].squeeze(2) + outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False) + outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() + + for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): + img_h, img_w = t[0], t[1] + results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) + results[i]["masks"] = F.interpolate( + results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" + ).byte() + + return results + + +class PostProcessPanoptic(nn.Module): + """This class converts the output of the model to the final panoptic result, in the format expected by the + coco panoptic API """ + + def __init__(self, is_thing_map, threshold=0.85): + """ + Parameters: + is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether + the class is a thing (True) or a stuff (False) class + threshold: confidence threshold: segments with confidence lower than this will be deleted + """ + super().__init__() + self.threshold = threshold + self.is_thing_map = is_thing_map + + def forward(self, outputs, processed_sizes, target_sizes=None): + """ This function computes the panoptic prediction from the model's predictions. + Parameters: + outputs: This is a dict coming directly from the model. See the model doc for the content. 
+ processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the + model, ie the size after data augmentation but before batching. + target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size + of each prediction. If left to None, it will default to the processed_sizes + """ + if target_sizes is None: + target_sizes = processed_sizes + assert len(processed_sizes) == len(target_sizes) + out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"] + assert len(out_logits) == len(raw_masks) == len(target_sizes) + preds = [] + + def to_tuple(tup): + if isinstance(tup, tuple): + return tup + return tuple(tup.cpu().tolist()) + + for cur_logits, cur_masks, cur_boxes, size, target_size in zip( + out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes + ): + # we filter empty queries and detection below threshold + scores, labels = cur_logits.softmax(-1).max(-1) + keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold) + cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) + cur_scores = cur_scores[keep] + cur_classes = cur_classes[keep] + cur_masks = cur_masks[keep] + cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) + cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) + + h, w = cur_masks.shape[-2:] + assert len(cur_boxes) == len(cur_classes) + + # It may be that we have several predicted masks for the same stuff class. + # In the following, we track the list of masks ids for each stuff class (they are merged later on) + cur_masks = cur_masks.flatten(1) + stuff_equiv_classes = defaultdict(lambda: []) + for k, label in enumerate(cur_classes): + if not self.is_thing_map[label.item()]: + stuff_equiv_classes[label.item()].append(k) + + def get_ids_area(masks, scores, dedup=False): + # This helper function creates the final panoptic segmentation image + # It also returns the area of the masks that appears on the image + + m_id = masks.transpose(0, 1).softmax(-1) + + if m_id.shape[-1] == 0: + # We didn't detect any mask :( + m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) + else: + m_id = m_id.argmax(-1).view(h, w) + + if dedup: + # Merge the masks corresponding to the same stuff class + for equiv in stuff_equiv_classes.values(): + if len(equiv) > 1: + for eq_id in equiv: + m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) + + final_h, final_w = to_tuple(target_size) + + seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) + seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) + + np_seg_img = ( + torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy() + ) + m_id = torch.from_numpy(rgb2id(np_seg_img)) + + area = [] + for i in range(len(scores)): + area.append(m_id.eq(i).sum().item()) + return area, seg_img + + area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) + if cur_classes.numel() > 0: + # We know filter empty masks as long as we find some + while True: + filtered_small = torch.as_tensor( + [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device + ) + if filtered_small.any().item(): + cur_scores = cur_scores[~filtered_small] + cur_classes = cur_classes[~filtered_small] + cur_masks = cur_masks[~filtered_small] + area, seg_img = get_ids_area(cur_masks, cur_scores) + else: + break + + else: + cur_classes = torch.ones(1, dtype=torch.long, 
device=cur_classes.device) + + segments_info = [] + for i, a in enumerate(area): + cat = cur_classes[i].item() + segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a}) + del cur_classes + + with io.BytesIO() as out: + seg_img.save(out, format="PNG") + predictions = {"png_string": out.getvalue(), "segments_info": segments_info} + preds.append(predictions) + return preds diff --git a/models/cv/object_detection/detr/igie/models/transformer.py b/models/cv/object_detection/detr/igie/models/transformer.py new file mode 100644 index 00000000..dcd53675 --- /dev/null +++ b/models/cv/object_detection/detr/igie/models/transformer.py @@ -0,0 +1,297 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +DETR Transformer class. + +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +import copy +from typing import Optional, List + +import torch +import torch.nn.functional as F +from torch import nn, Tensor + + +class Transformer(nn.Module): + + def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, + num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False, + return_intermediate_dec=False): + super().__init__() + + encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + encoder_norm = nn.LayerNorm(d_model) if normalize_before else None + self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) + + decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, + dropout, activation, normalize_before) + decoder_norm = nn.LayerNorm(d_model) + self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, + return_intermediate=return_intermediate_dec) + + self._reset_parameters() + + self.d_model = d_model + self.nhead = nhead + + def _reset_parameters(self): + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_uniform_(p) + + def forward(self, src, mask, query_embed, pos_embed): + # flatten NxCxHxW to HWxNxC + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + pos_embed = pos_embed.flatten(2).permute(2, 0, 1) + query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) + mask = mask.flatten(1) + + tgt = torch.zeros_like(query_embed) + memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) + hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, + pos=pos_embed, query_pos=query_embed) + return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) + + +class TransformerEncoder(nn.Module): + + def __init__(self, encoder_layer, num_layers, norm=None): + super().__init__() + self.layers = _get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + + def forward(self, src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + output = src + + for layer in self.layers: + output = layer(output, src_mask=mask, + src_key_padding_mask=src_key_padding_mask, pos=pos) + + if self.norm is not None: + output = self.norm(output) + + return output + + +class TransformerDecoder(nn.Module): + + def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): + super().__init__() + self.layers = _get_clones(decoder_layer, num_layers) + self.num_layers = 
num_layers + self.norm = norm + self.return_intermediate = return_intermediate + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + output = tgt + + intermediate = [] + + for layer in self.layers: + output = layer(output, memory, tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + memory_key_padding_mask=memory_key_padding_mask, + pos=pos, query_pos=query_pos) + if self.return_intermediate: + intermediate.append(self.norm(output)) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if self.return_intermediate: + return torch.stack(intermediate) + + return output.unsqueeze(0) + + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(src, pos) + src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src = self.norm1(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) + src = src + self.dropout2(src2) + src = self.norm2(src) + return src + + def forward_pre(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + src2 = self.norm1(src) + q = k = self.with_pos_embed(src2, pos) + src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, + key_padding_mask=src_key_padding_mask)[0] + src = src + self.dropout1(src2) + src2 = self.norm2(src) + src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) + src = src + self.dropout2(src2) + return src + + def forward(self, src, + src_mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(src, src_mask, src_key_padding_mask, pos) + return self.forward_post(src, src_mask, src_key_padding_mask, pos) + + +class TransformerDecoderLayer(nn.Module): + + def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, + activation="relu", normalize_before=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = 
nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = _get_activation_fn(activation) + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + return tgt + + def forward_pre(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask)[0] + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt + + def forward(self, tgt, memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None): + if self.normalize_before: + return self.forward_pre(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + return self.forward_post(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) + + +def _get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) + + +def build_transformer(args): + return Transformer( + d_model=args.hidden_dim, + dropout=args.dropout, + nhead=args.nheads, + dim_feedforward=args.dim_feedforward, + num_encoder_layers=args.enc_layers, + num_decoder_layers=args.dec_layers, + normalize_before=args.pre_norm, + return_intermediate_dec=True, + ) + + +def _get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "relu": + return F.relu + if activation == "gelu": + return F.gelu + if activation == "glu": + return F.glu + raise RuntimeError(F"activation should be relu/gelu, not {activation}.") diff 
--git a/models/cv/object_detection/detr/igie/requirements.txt b/models/cv/object_detection/detr/igie/requirements.txt new file mode 100644 index 00000000..bb8f7823 --- /dev/null +++ b/models/cv/object_detection/detr/igie/requirements.txt @@ -0,0 +1,9 @@ +cython +git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI&egg=pycocotools +submitit +torch>=1.5.0 +torchvision>=0.6.0 +git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi +scipy +onnx +onnxruntime diff --git a/models/cv/object_detection/detr/igie/run_with_submitit.py b/models/cv/object_detection/detr/igie/run_with_submitit.py new file mode 100644 index 00000000..b6780def --- /dev/null +++ b/models/cv/object_detection/detr/igie/run_with_submitit.py @@ -0,0 +1,111 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +A script to run multinode training with submitit. +""" +import argparse +import os +import uuid +from pathlib import Path + +import main as detection +import submitit + + +def parse_args(): + detection_parser = detection.get_args_parser() + parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser]) + parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") + parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request") + parser.add_argument("--timeout", default=60, type=int, help="Duration of the job") + parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") + return parser.parse_args() + + +def get_shared_folder() -> Path: + user = os.getenv("USER") + if Path("/checkpoint/").is_dir(): + p = Path(f"/checkpoint/{user}/experiments") + p.mkdir(exist_ok=True) + return p + raise RuntimeError("No shared folder available") + + +def get_init_file(): + # Init file must not exist, but it's parent dir must exist. 
+ os.makedirs(str(get_shared_folder()), exist_ok=True) + init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" + if init_file.exists(): + os.remove(str(init_file)) + return init_file + + +class Trainer(object): + def __init__(self, args): + self.args = args + + def __call__(self): + import main as detection + + self._setup_gpu_args() + detection.main(self.args) + + def checkpoint(self): + import os + import submitit + from pathlib import Path + + self.args.dist_url = get_init_file().as_uri() + checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") + if os.path.exists(checkpoint_file): + self.args.resume = checkpoint_file + print("Requeuing ", self.args) + empty_trainer = type(self)(self.args) + return submitit.helpers.DelayedSubmission(empty_trainer) + + def _setup_gpu_args(self): + import submitit + from pathlib import Path + + job_env = submitit.JobEnvironment() + self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) + self.args.gpu = job_env.local_rank + self.args.rank = job_env.global_rank + self.args.world_size = job_env.num_tasks + print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") + + +def main(): + args = parse_args() + if args.job_dir == "": + args.job_dir = get_shared_folder() / "%j" + + # Note that the folder will depend on the job_id, to easily track experiments + executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) + + # cluster setup is defined by environment variables + num_gpus_per_node = args.ngpus + nodes = args.nodes + timeout_min = args.timeout + + executor.update_parameters( + mem_gb=40 * num_gpus_per_node, + gpus_per_node=num_gpus_per_node, + tasks_per_node=num_gpus_per_node, # one task per GPU + cpus_per_task=10, + nodes=nodes, + timeout_min=timeout_min, # max is 60 * 72 + ) + + executor.update_parameters(name="detr") + + args.dist_url = get_init_file().as_uri() + args.output_dir = args.job_dir + + trainer = Trainer(args) + job = executor.submit(trainer) + + print("Submitted job_id:", job.job_id) + + +if __name__ == "__main__": + main() diff --git a/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh new file mode 100644 index 00000000..e9fcf541 --- /dev/null +++ b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh @@ -0,0 +1,34 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +batchsize=32 +model_path="detr_opt.onnx" +datasets_path=${DATASETS_DIR} + +# build engine +python3 build_engine.py \ + --model_path ${model_path} \ + --batch_size ${batchsize} \ + --precision fp16 \ + --engine_path detr_bs_${batchsize}_fp16.so + +# inference +python3 inference.py \ + --engine_path detr_bs_${batchsize}_fp16.so \ + --batch_size ${batchsize} \ + --coco_path ${datasets_path} \ No newline at end of file diff --git a/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh new file mode 100644 index 00000000..5c614f39 --- /dev/null +++ b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +batchsize=32 +model_path="detr_opt.onnx" +datasets_path=${DATASETS_DIR} + +# build engine +python3 build_engine.py \ + --model_path ${model_path} \ + --batch_size ${batchsize} \ + --precision fp16 \ + --engine_path detr_bs_${batchsize}_fp16.so + +# inference +python3 inference.py \ + --engine_path detr_bs_${batchsize}_fp16.so \ + --batch_size ${batchsize} \ + --coco_path ${datasets_path} \ + --perf_only True \ No newline at end of file diff --git a/models/cv/object_detection/detr/igie/test_all.py b/models/cv/object_detection/detr/igie/test_all.py new file mode 100644 index 00000000..7153892f --- /dev/null +++ b/models/cv/object_detection/detr/igie/test_all.py @@ -0,0 +1,209 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved +import io +import unittest + +import torch +from torch import nn, Tensor +from typing import List + +from models.matcher import HungarianMatcher +from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned +from models.backbone import Backbone, Joiner, BackboneBase +from util import box_ops +from util.misc import nested_tensor_from_tensor_list +from hubconf import detr_resnet50, detr_resnet50_panoptic + +# onnxruntime requires python 3.5 or above +try: + import onnxruntime +except ImportError: + onnxruntime = None + + +class Tester(unittest.TestCase): + + def test_box_cxcywh_to_xyxy(self): + t = torch.rand(10, 4) + r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t)) + self.assertLess((t - r).abs().max(), 1e-5) + + @staticmethod + def indices_torch2python(indices): + return [(i.tolist(), j.tolist()) for i, j in indices] + + def test_hungarian(self): + n_queries, n_targets, n_classes = 100, 15, 91 + logits = torch.rand(1, n_queries, n_classes + 1) + boxes = torch.rand(1, n_queries, 4) + tgt_labels = torch.randint(high=n_classes, size=(n_targets,)) + tgt_boxes = torch.rand(n_targets, 4) + matcher = HungarianMatcher() + targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}] + indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets) + indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1), + 'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2) + self.assertEqual(len(indices_single[0][0]), n_targets) + self.assertEqual(len(indices_single[0][1]), n_targets) + self.assertEqual(self.indices_torch2python(indices_single), + self.indices_torch2python([indices_batched[0]])) + self.assertEqual(self.indices_torch2python(indices_single), + self.indices_torch2python([indices_batched[1]])) + + # test with empty targets + tgt_labels_empty = torch.randint(high=n_classes, size=(0,)) + tgt_boxes_empty = torch.rand(0, 4) + targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}] + indices = matcher({'pred_logits': logits.repeat(2, 1, 1), + 'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty) + self.assertEqual(len(indices[1][0]), 0) + indices = matcher({'pred_logits': logits.repeat(2, 1, 1), + 'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2) + self.assertEqual(len(indices[0][0]), 0) + + def test_position_encoding_script(self): + m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned() + mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa + + def test_backbone_script(self): + backbone = Backbone('resnet50', True, False, False) + torch.jit.script(backbone) # noqa + + def test_model_script_detection(self): + model = detr_resnet50(pretrained=False).eval() + scripted_model = torch.jit.script(model) + x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) + out = model(x) + out_script = scripted_model(x) + self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) + self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) + + def test_model_script_panoptic(self): + model = detr_resnet50_panoptic(pretrained=False).eval() + scripted_model = torch.jit.script(model) + x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) + out = model(x) + out_script = scripted_model(x) + self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) + self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) + self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"])) + + def 
test_model_detection_different_inputs(self): + model = detr_resnet50(pretrained=False).eval() + # support NestedTensor + x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) + out = model(x) + self.assertIn('pred_logits', out) + # and 4d Tensor + x = torch.rand(1, 3, 200, 200) + out = model(x) + self.assertIn('pred_logits', out) + # and List[Tensor[C, H, W]] + x = torch.rand(3, 200, 200) + out = model([x]) + self.assertIn('pred_logits', out) + + def test_warpped_model_script_detection(self): + class WrappedDETR(nn.Module): + def __init__(self, model): + super().__init__() + self.model = model + + def forward(self, inputs: List[Tensor]): + sample = nested_tensor_from_tensor_list(inputs) + return self.model(sample) + + model = detr_resnet50(pretrained=False) + wrapped_model = WrappedDETR(model) + wrapped_model.eval() + scripted_model = torch.jit.script(wrapped_model) + x = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)] + out = wrapped_model(x) + out_script = scripted_model(x) + self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) + self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) + + +@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable') +class ONNXExporterTester(unittest.TestCase): + @classmethod + def setUpClass(cls): + torch.manual_seed(123) + + def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None, + output_names=None, input_names=None): + model.eval() + + onnx_io = io.BytesIO() + # export to onnx with the first input + torch.onnx.export(model, inputs_list[0], onnx_io, + do_constant_folding=do_constant_folding, opset_version=12, + dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names) + # validate the exported model with onnx runtime + for test_inputs in inputs_list: + with torch.no_grad(): + if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list): + test_inputs = (nested_tensor_from_tensor_list(test_inputs),) + test_ouputs = model(*test_inputs) + if isinstance(test_ouputs, torch.Tensor): + test_ouputs = (test_ouputs,) + self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch) + + def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False): + + inputs, _ = torch.jit._flatten(inputs) + outputs, _ = torch.jit._flatten(outputs) + + def to_numpy(tensor): + if tensor.requires_grad: + return tensor.detach().cpu().numpy() + else: + return tensor.cpu().numpy() + + inputs = list(map(to_numpy, inputs)) + outputs = list(map(to_numpy, outputs)) + + ort_session = onnxruntime.InferenceSession(onnx_io.getvalue()) + # compute onnxruntime output prediction + ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)) + ort_outs = ort_session.run(None, ort_inputs) + for i, element in enumerate(outputs): + try: + torch.testing.assert_allclose(element, ort_outs[i], rtol=1e-03, atol=1e-05) + except AssertionError as error: + if tolerate_small_mismatch: + self.assertIn("(0.00%)", str(error), str(error)) + else: + raise + + def test_model_onnx_detection(self): + model = detr_resnet50(pretrained=False).eval() + dummy_image = torch.ones(1, 3, 800, 800) * 0.3 + model(dummy_image) + + # Test exported model on images of different size, or dummy input + self.run_model( + model, + [(torch.rand(1, 3, 750, 800),)], + input_names=["inputs"], + output_names=["pred_logits", "pred_boxes"], + tolerate_small_mismatch=True, + ) + + @unittest.skip("CI doesn't have enough 
memory") + def test_model_onnx_detection_panoptic(self): + model = detr_resnet50_panoptic(pretrained=False).eval() + dummy_image = torch.ones(1, 3, 800, 800) * 0.3 + model(dummy_image) + + # Test exported model on images of different size, or dummy input + self.run_model( + model, + [(torch.rand(1, 3, 750, 800),)], + input_names=["inputs"], + output_names=["pred_logits", "pred_boxes", "pred_masks"], + tolerate_small_mismatch=True, + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/models/cv/object_detection/detr/igie/tox.ini b/models/cv/object_detection/detr/igie/tox.ini new file mode 100644 index 00000000..5554a882 --- /dev/null +++ b/models/cv/object_detection/detr/igie/tox.ini @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 120 +ignore = F401,E402,F403,W503,W504 diff --git a/models/cv/object_detection/detr/igie/util/__init__.py b/models/cv/object_detection/detr/igie/util/__init__.py new file mode 100644 index 00000000..168f9979 --- /dev/null +++ b/models/cv/object_detection/detr/igie/util/__init__.py @@ -0,0 +1 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/models/cv/object_detection/detr/igie/util/box_ops.py b/models/cv/object_detection/detr/igie/util/box_ops.py new file mode 100644 index 00000000..9c088e5b --- /dev/null +++ b/models/cv/object_detection/detr/igie/util/box_ops.py @@ -0,0 +1,88 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Utilities for bounding box manipulation and GIoU. +""" +import torch +from torchvision.ops.boxes import box_area + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), + (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, + (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +# modified from torchvision to also return the union +def box_iou(boxes1, boxes2): + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = (rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + iou = inter / union + return iou, union + + +def generalized_box_iou(boxes1, boxes2): + """ + Generalized IoU from https://giou.stanford.edu/ + + The boxes should be in [x0, y0, x1, y1] format + + Returns a [N, M] pairwise matrix, where N = len(boxes1) + and M = len(boxes2) + """ + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + iou, union = box_iou(boxes1, boxes2) + + lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + wh = (rb - lt).clamp(min=0) # [N,M,2] + area = wh[:, :, 0] * wh[:, :, 1] + + return iou - (area - union) / area + + +def masks_to_boxes(masks): + """Compute the bounding boxes around the provided masks + + The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. 
+ + Returns a [N, 4] tensors, with the boxes in xyxy format + """ + if masks.numel() == 0: + return torch.zeros((0, 4), device=masks.device) + + h, w = masks.shape[-2:] + + y = torch.arange(0, h, dtype=torch.float) + x = torch.arange(0, w, dtype=torch.float) + y, x = torch.meshgrid(y, x) + + x_mask = (masks * x.unsqueeze(0)) + x_max = x_mask.flatten(1).max(-1)[0] + x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + y_mask = (masks * y.unsqueeze(0)) + y_max = y_mask.flatten(1).max(-1)[0] + y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] + + return torch.stack([x_min, y_min, x_max, y_max], 1) diff --git a/models/cv/object_detection/detr/igie/util/misc.py b/models/cv/object_detection/detr/igie/util/misc.py new file mode 100644 index 00000000..dfa9fb5b --- /dev/null +++ b/models/cv/object_detection/detr/igie/util/misc.py @@ -0,0 +1,468 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +""" +Misc functions, including distributed helpers. + +Mostly copy-paste from torchvision references. +""" +import os +import subprocess +import time +from collections import defaultdict, deque +import datetime +import pickle +from packaging import version +from typing import Optional, List + +import torch +import torch.distributed as dist +from torch import Tensor + +# needed due to empty tensor bug in pytorch and torchvision 0.5 +import torchvision +if version.parse(torchvision.__version__) < version.parse('0.7'): + from torchvision.ops import _new_empty_tensor + from torchvision.ops.misc import _output_size + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device="cuda") + size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. 
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def get_sha(): + cwd = os.path.dirname(os.path.abspath(__file__)) + + def _run(command): + return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() + sha = 'N/A' + diff = "clean" + branch = 'N/A' + try: + sha = _run(['git', 'rev-parse', 'HEAD']) + subprocess.check_output(['git', 'diff'], cwd=cwd) + diff = _run(['git', 'diff-index', 'HEAD']) + diff = "has uncommited changes" if diff else "clean" + branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) + except Exception: + pass + message = f"sha: {sha}, status: {diff}, branch: {branch}" + return message + + +def collate_fn(batch): + batch = 
list(zip(*batch)) + batch[0] = nested_tensor_from_tensor_list(batch[0]) + return tuple(batch) + + +def _max_by_axis(the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + +class NestedTensor(object): + def __init__(self, tensors, mask: Optional[Tensor]): + self.tensors = tensors + self.mask = mask + + def to(self, device): + # type: (Device) -> NestedTensor # noqa + cast_tensor = self.tensors.to(device) + mask = self.mask + if mask is not None: + assert mask is not None + cast_mask = mask.to(device) + else: + cast_mask = None + return NestedTensor(cast_tensor, cast_mask) + + def decompose(self): + return self.tensors, self.mask + + def __repr__(self): + return str(self.tensors) + + +def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): + # TODO make this more general + if tensor_list[0].ndim == 3: + if torchvision._is_tracing(): + # nested_tensor_from_tensor_list() does not export well to ONNX + # call _onnx_nested_tensor_from_tensor_list() instead + return _onnx_nested_tensor_from_tensor_list(tensor_list) + + # TODO make it support different-sized images + max_size = _max_by_axis([list(img.shape) for img in tensor_list]) + # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) + batch_shape = [len(tensor_list)] + max_size + b, c, h, w = batch_shape + dtype = tensor_list[0].dtype + device = tensor_list[0].device + tensor = torch.zeros(batch_shape, dtype=dtype, device=device) + mask = torch.ones((b, h, w), dtype=torch.bool, device=device) + for img, pad_img, m in zip(tensor_list, tensor, mask): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + m[: img.shape[1], :img.shape[2]] = False + else: + raise ValueError('not supported') + return NestedTensor(tensor, mask) + + +# _onnx_nested_tensor_from_tensor_list() is an implementation of +# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
+@torch.jit.unused +def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: + max_size = [] + for i in range(tensor_list[0].dim()): + max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) + max_size.append(max_size_i) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # m[: img.shape[1], :img.shape[2]] = False + # which is not yet supported in onnx + padded_imgs = [] + padded_masks = [] + for img in tensor_list: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) + padded_imgs.append(padded_img) + + m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) + padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) + padded_masks.append(padded_mask.to(torch.bool)) + + tensor = torch.stack(padded_imgs) + mask = torch.stack(padded_masks) + + return NestedTensor(tensor, mask=mask) + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) + + +@torch.no_grad() +def accuracy(output, target, topk=(1,)): + """Computes the precision@k for the specified values of k""" + if target.numel() == 0: + return [torch.zeros([], device=output.device)] + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target.view(1, -1).expand_as(pred)) + + res = [] + for k in topk: + correct_k = correct[:k].view(-1).float().sum(0) + res.append(correct_k.mul_(100.0 / batch_size)) + return res + + +def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): + # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor + """ + Equivalent to nn.functional.interpolate, but with support for empty 
batch sizes. + This will eventually be supported natively by PyTorch, and this + class can go away. + """ + if version.parse(torchvision.__version__) < version.parse('0.7'): + if input.numel() > 0: + return torch.nn.functional.interpolate( + input, size, scale_factor, mode, align_corners + ) + + output_shape = _output_size(2, input, size, scale_factor) + output_shape = list(input.shape[:-2]) + list(output_shape) + return _new_empty_tensor(input, output_shape) + else: + return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/models/cv/object_detection/detr/igie/util/plot_utils.py b/models/cv/object_detection/detr/igie/util/plot_utils.py new file mode 100644 index 00000000..0f24bed0 --- /dev/null +++ b/models/cv/object_detection/detr/igie/util/plot_utils.py @@ -0,0 +1,107 @@ +""" +Plotting utilities to visualize training logs. +""" +import torch +import pandas as pd +import numpy as np +import seaborn as sns +import matplotlib.pyplot as plt + +from pathlib import Path, PurePath + + +def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): + ''' + Function to plot specific fields from training log(s). Plots both training and test results. + + :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file + - fields = which results to plot from each log file - plots both training and test for each field. + - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots + - log_name = optional, name of log file if different than default 'log.txt'. + + :: Outputs - matplotlib plots of results in fields, color coded for each log file. + - solid lines are training results, dashed lines are test results. + + ''' + func_name = "plot_utils.py::plot_logs" + + # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path, + # convert single Path to list to avoid 'not iterable' error + + if not isinstance(logs, list): + if isinstance(logs, PurePath): + logs = [logs] + print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") + else: + raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ + Expect list[Path] or single Path obj, received {type(logs)}") + + # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir + for i, dir in enumerate(logs): + if not isinstance(dir, PurePath): + raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") + if not dir.exists(): + raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") + # verify log_name exists + fn = Path(dir / log_name) + if not fn.exists(): + print(f"-> missing {log_name}. 
Have you gotten to Epoch 1 in training?") + print(f"--> full path of missing log file: {fn}") + return + + # load log file(s) and plot + dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] + + fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) + + for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): + for j, field in enumerate(fields): + if field == 'mAP': + coco_eval = pd.DataFrame( + np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1] + ).ewm(com=ewm_col).mean() + axs[j].plot(coco_eval, c=color) + else: + df.interpolate().ewm(com=ewm_col).mean().plot( + y=[f'train_{field}', f'test_{field}'], + ax=axs[j], + color=[color] * 2, + style=['-', '--'] + ) + for ax, field in zip(axs, fields): + ax.legend([Path(p).name for p in logs]) + ax.set_title(field) + + +def plot_precision_recall(files, naming_scheme='iter'): + if naming_scheme == 'exp_id': + # name becomes exp_id + names = [f.parts[-3] for f in files] + elif naming_scheme == 'iter': + names = [f.stem for f in files] + else: + raise ValueError(f'not supported {naming_scheme}') + fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) + for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): + data = torch.load(f) + # precision is n_iou, n_points, n_cat, n_area, max_det + precision = data['precision'] + recall = data['params'].recThrs + scores = data['scores'] + # take precision for all classes, all areas and 100 detections + precision = precision[0, :, :, 0, -1].mean(1) + scores = scores[0, :, :, 0, -1].mean(1) + prec = precision.mean() + rec = data['recall'][0, :, 0, -1].mean() + print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + + f'score={scores.mean():0.3f}, ' + + f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' + ) + axs[0].plot(recall, precision, c=color) + axs[1].plot(recall, scores, c=color) + + axs[0].set_title('Precision / Recall') + axs[0].legend(names) + axs[1].set_title('Scores / Recall') + axs[1].legend(names) + return fig, axs -- Gitee From 87fcb0909313c87db4e5f0bd3bc624dbffe46cf4 Mon Sep 17 00:00:00 2001 From: "hongliang.yuan" Date: Wed, 11 Mar 2026 15:58:12 +0800 Subject: [PATCH 6/6] update --- README.md | 15 +- README_en.md | 13 +- .../densenet121/igie/quantize.py | 107 ---- .../infer_densenet121_int8_accuracy.sh | 4 +- .../infer_densenet121_int8_performance.sh | 4 +- .../{mobilevit => mobilevit_s}/igie/README.md | 8 +- .../igie/ci/prepare.sh | 3 +- .../{mobilevit => mobilevit_s}/igie/export.py | 0 .../igie/inference.py | 0 .../infer_mobilevit_s_fp16_accuracy.sh | 0 .../infer_mobilevit_s_fp16_performance.sh | 0 .../cv/classification/vit_b_32/igie/README.md | 9 +- .../vit_b_32/igie/ci/prepare.sh | 5 + .../classification/vit_b_32/igie/inference.py | 2 +- .../cv/classification/vit_l_14/igie/README.md | 5 + .../vit_l_14/igie/ci/prepare.sh | 7 +- .../classification/vit_l_14/igie/inference.py | 2 +- .../cv/object_detection/detr/igie/Dockerfile | 13 - models/cv/object_detection/detr/igie/LICENSE | 201 -------- .../cv/object_detection/detr/igie/README.md | 16 +- .../detr/igie/build_engine.py | 4 +- .../object_detection/detr/igie/ci/prepare.sh | 30 ++ .../object_detection/detr/igie/d2/README.md | 39 -- .../d2/configs/detr_256_6_6_torchvision.yaml | 45 -- .../detr_segm_256_6_6_torchvision.yaml | 46 -- .../detr/igie/d2/converter.py | 69 --- .../detr/igie/d2/detr/__init__.py | 4 - .../detr/igie/d2/detr/config.py | 34 -- .../detr/igie/d2/detr/dataset_mapper.py | 122 ----- .../detr/igie/d2/detr/detr.py | 261 ---------- .../detr/igie/d2/train_net.py | 145 
------ .../detr/igie/datasets/__init__.py | 25 - .../detr/igie/datasets/coco.py | 158 ------ .../detr/igie/datasets/coco_eval.py | 257 ---------- .../detr/igie/datasets/coco_panoptic.py | 99 ---- .../detr/igie/datasets/panoptic_eval.py | 44 -- .../detr/igie/datasets/transforms.py | 277 ----------- .../cv/object_detection/detr/igie/engine.py | 151 ------ .../cv/object_detection/detr/igie/hubconf.py | 168 ------- models/cv/object_detection/detr/igie/main.py | 248 ---------- .../detr/igie/models/__init__.py | 6 - .../detr/igie/models/backbone.py | 119 ----- .../object_detection/detr/igie/models/detr.py | 359 -------------- .../detr/igie/models/matcher.py | 86 ---- .../detr/igie/models/position_encoding.py | 89 ---- .../detr/igie/models/segmentation.py | 363 -------------- .../detr/igie/models/transformer.py | 297 ----------- .../detr/igie/requirements.txt | 9 - .../detr/igie/run_with_submitit.py | 111 ----- .../igie/scripts/infer_detr_fp16_accuracy.sh | 2 +- .../scripts/infer_detr_fp16_performance.sh | 2 +- .../cv/object_detection/detr/igie/test_all.py | 209 -------- models/cv/object_detection/detr/igie/tox.ini | 3 - .../detr/igie/util/__init__.py | 1 - .../detr/igie/util/box_ops.py | 88 ---- .../object_detection/detr/igie/util/misc.py | 468 ------------------ .../detr/igie/util/plot_utils.py | 107 ---- .../object_detection/yolov11m/igie/README.md | 7 +- .../object_detection/yolov11s/igie/README.md | 11 +- .../yolov26n/igie/ci/prepare.sh | 11 +- .../object_detection/yolov26n/igie/export.py | 2 +- .../yolov26n/igie/inference.py | 2 +- .../yolov26n/igie/requirements.txt | 1 + .../scripts/infer_yolov26n_fp16_accuracy.sh | 2 +- .../infer_yolov26n_fp16_performance.sh | 2 +- .../object_detection/yolov26n/igie/utils.py | 2 +- .../yolov26n/igie/validator.py | 2 +- .../yolov5s/igie/ci/prepare.sh | 2 +- .../object_detection/yolov5s/igie/export.py | 2 +- .../yolov5s/igie/inference.py | 2 +- .../object_detection/yolov5s/igie/quantize.py | 2 +- .../scripts/infer_yolov5s_fp16_accuracy.sh | 2 +- .../scripts/infer_yolov5s_fp16_performance.sh | 2 +- .../scripts/infer_yolov5s_int8_accuracy.sh | 2 +- .../scripts/infer_yolov5s_int8_performance.sh | 2 +- .../cv/object_detection/yolov5s/igie/utils.py | 2 +- tests/model_info.json | 270 +++++++++- 77 files changed, 395 insertions(+), 4894 deletions(-) delete mode 100644 models/cv/classification/densenet121/igie/quantize.py rename models/cv/classification/{mobilevit => mobilevit_s}/igie/README.md (88%) rename models/cv/classification/{mobilevit => mobilevit_s}/igie/ci/prepare.sh (85%) rename models/cv/classification/{mobilevit => mobilevit_s}/igie/export.py (100%) rename models/cv/classification/{mobilevit => mobilevit_s}/igie/inference.py (100%) rename models/cv/classification/{mobilevit => mobilevit_s}/igie/scripts/infer_mobilevit_s_fp16_accuracy.sh (100%) rename models/cv/classification/{mobilevit => mobilevit_s}/igie/scripts/infer_mobilevit_s_fp16_performance.sh (100%) delete mode 100644 models/cv/object_detection/detr/igie/Dockerfile delete mode 100644 models/cv/object_detection/detr/igie/LICENSE create mode 100644 models/cv/object_detection/detr/igie/ci/prepare.sh delete mode 100644 models/cv/object_detection/detr/igie/d2/README.md delete mode 100644 models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml delete mode 100644 models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml delete mode 100644 models/cv/object_detection/detr/igie/d2/converter.py delete mode 100644 
models/cv/object_detection/detr/igie/d2/detr/__init__.py delete mode 100644 models/cv/object_detection/detr/igie/d2/detr/config.py delete mode 100644 models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py delete mode 100644 models/cv/object_detection/detr/igie/d2/detr/detr.py delete mode 100644 models/cv/object_detection/detr/igie/d2/train_net.py delete mode 100644 models/cv/object_detection/detr/igie/datasets/__init__.py delete mode 100644 models/cv/object_detection/detr/igie/datasets/coco.py delete mode 100644 models/cv/object_detection/detr/igie/datasets/coco_eval.py delete mode 100644 models/cv/object_detection/detr/igie/datasets/coco_panoptic.py delete mode 100644 models/cv/object_detection/detr/igie/datasets/panoptic_eval.py delete mode 100644 models/cv/object_detection/detr/igie/datasets/transforms.py delete mode 100644 models/cv/object_detection/detr/igie/engine.py delete mode 100644 models/cv/object_detection/detr/igie/hubconf.py delete mode 100644 models/cv/object_detection/detr/igie/main.py delete mode 100644 models/cv/object_detection/detr/igie/models/__init__.py delete mode 100644 models/cv/object_detection/detr/igie/models/backbone.py delete mode 100644 models/cv/object_detection/detr/igie/models/detr.py delete mode 100644 models/cv/object_detection/detr/igie/models/matcher.py delete mode 100644 models/cv/object_detection/detr/igie/models/position_encoding.py delete mode 100644 models/cv/object_detection/detr/igie/models/segmentation.py delete mode 100644 models/cv/object_detection/detr/igie/models/transformer.py delete mode 100644 models/cv/object_detection/detr/igie/requirements.txt delete mode 100644 models/cv/object_detection/detr/igie/run_with_submitit.py delete mode 100644 models/cv/object_detection/detr/igie/test_all.py delete mode 100644 models/cv/object_detection/detr/igie/tox.ini delete mode 100644 models/cv/object_detection/detr/igie/util/__init__.py delete mode 100644 models/cv/object_detection/detr/igie/util/box_ops.py delete mode 100644 models/cv/object_detection/detr/igie/util/misc.py delete mode 100644 models/cv/object_detection/detr/igie/util/plot_utils.py diff --git a/README.md b/README.md index 7083f71e..0af03f66 100644 --- a/README.md +++ b/README.md @@ -98,6 +98,7 @@ | CSPResNeXt50 | FP16 | [✅](models/cv/classification/cspresnext50/igie) | [✅](models/cv/classification/cspresnext50/ixrt) | 4.3.0 | | DeiT-tiny | FP16 | [✅](models/cv/classification/deit_tiny/igie) | [✅](models/cv/classification/deit_tiny/ixrt) | 4.3.0 | | DenseNet121 | FP16 | [✅](models/cv/classification/densenet121/igie) | [✅](models/cv/classification/densenet121/ixrt) | 4.3.0 | +| | INT8 | [✅](models/cv/classification/densenet121/igie) | | 4.4.0 | | DenseNet161 | FP16 | [✅](models/cv/classification/densenet161/igie) | [✅](models/cv/classification/densenet161/ixrt) | 4.3.0 | | DenseNet169 | FP16 | [✅](models/cv/classification/densenet169/igie) | [✅](models/cv/classification/densenet169/ixrt) | 4.3.0 | | DenseNet201 | FP16 | [✅](models/cv/classification/densenet201/igie) | [✅](models/cv/classification/densenet201/ixrt) | 4.3.0 | @@ -132,6 +133,7 @@ | | INT8 | [✅](models/cv/classification/mobilenet_v2/igie) | [✅](models/cv/classification/mobilenet_v2/ixrt) | 4.3.0 | | MobileNetV3_Large | FP16 | [✅](models/cv/classification/mobilenet_v3_large/igie) | | 4.3.0 | | MobileNetV3_Small | FP16 | [✅](models/cv/classification/mobilenet_v3/igie) | [✅](models/cv/classification/mobilenet_v3/ixrt) | 4.3.0 | +| Mobilevit_s | FP16 | [✅](models/cv/classification/mobilevit_s/igie) | | 4.4.0 | | 
MViTv2_base | FP16 | [✅](models/cv/classification/mvitv2_base/igie) | | dev-only | | RegNet_x_16gf | FP16 | [✅](models/cv/classification/regnet_x_16gf/igie) | | 4.3.0 | | RegNet_x_1_6gf | FP16 | [✅](models/cv/classification/regnet_x_1_6gf/igie) | | 4.3.0 | @@ -187,6 +189,8 @@ | VGG19 | FP16 | [✅](models/cv/classification/vgg19/igie) | | 4.3.0 | | VGG19_BN | FP16 | [✅](models/cv/classification/vgg19_bn/igie) | | 4.3.0 | | ViT | FP16 | [✅](models/cv/classification/vit/igie) | | 4.3.0 | +| ViT-B-32 | FP16 | [✅](models/cv/classification/vit_b_32/igie) | | 4.4.0 | +| ViT-L-14 | FP16 | [✅](models/cv/classification/vit_l_14/igie) | | 4.4.0 | | Wide ResNet50 | FP16 | [✅](models/cv/classification/wide_resnet50/igie) | [✅](models/cv/classification/wide_resnet50/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/classification/wide_resnet50/igie) | [✅](models/cv/classification/wide_resnet50/ixrt) | 4.3.0 | | Wide ResNet101 | FP16 | [✅](models/cv/classification/wide_resnet101/igie) | | 4.3.0 | @@ -197,7 +201,7 @@ |------------|-------|-------------------------------------------------|-------------------------------------------------|-----------| | ATSS | FP16 | [✅](models/cv/object_detection/atss/igie) | [✅](models/cv/object_detection/atss/ixrt) | 4.3.0 | | CenterNet | FP16 | [✅](models/cv/object_detection/centernet/igie) | [✅](models/cv/object_detection/centernet/ixrt) | 4.3.0 | -| DETR | FP16 | | [✅](models/cv/object_detection/detr/ixrt) | 4.3.0 | +| DETR | FP16 | [✅](models/cv/object_detection/detr/igie) | [✅](models/cv/object_detection/detr/ixrt) | 4.3.0 | | FCOS | FP16 | [✅](models/cv/object_detection/fcos/igie) | [✅](models/cv/object_detection/fcos/ixrt) | 4.3.0 | | FoveaBox | FP16 | [✅](models/cv/object_detection/foveabox/igie) | [✅](models/cv/object_detection/foveabox/ixrt) | 4.3.0 | | FSAF | FP16 | [✅](models/cv/object_detection/fsaf/igie) | [✅](models/cv/object_detection/fsaf/ixrt) | 4.3.0 | @@ -219,8 +223,8 @@ | | INT8 | [✅](models/cv/object_detection/yolov4/igie16) | [✅](models/cv/object_detection/yolov4/ixrt16) | 4.3.0 | | YOLOv5m | FP16 | [✅](models/cv/object_detection/yolov5m/igie) | [✅](models/cv/object_detection/yolov5m/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov5m/igie) | [✅](models/cv/object_detection/yolov5m/ixrt) | 4.3.0 | -| YOLOv5s | FP16 | | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | -| | INT8 | | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | +| YOLOv5s | FP16 | [✅](models/cv/object_detection/yolov5s/igie) | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | +| | INT8 | [✅](models/cv/object_detection/yolov5s/igie) | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | | YOLOv6s | FP16 | [✅](models/cv/object_detection/yolov6s/igie) | [✅](models/cv/object_detection/yolov6s/ixrt) | 4.3.0 | | | INT8 | | [✅](models/cv/object_detection/yolov6s/ixrt) | 4.3.0 | | YOLOv7 | FP16 | [✅](models/cv/object_detection/yolov7/igie) | [✅](models/cv/object_detection/yolov7/ixrt) | 4.3.0 | @@ -232,12 +236,17 @@ | YOLOv9s | FP16 | [✅](models/cv/object_detection/yolov9s/igie) | [✅](models/cv/object_detection/yolov9s/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov9s/igie) | | 4.3.0 | | YOLOv10s | FP16 | [✅](models/cv/object_detection/yolov10s/igie) | [✅](models/cv/object_detection/yolov10s/ixrt) | 4.3.0 | +| YOLOv11m | FP16 | [✅](models/cv/object_detection/yolov11m/igie) | | 4.4.0 | +| | INT8 | [✅](models/cv/object_detection/yolov11m/igie) | | 4.4.0 | | YOLOv11n | FP16 | [✅](models/cv/object_detection/yolov11n/igie) | 
[✅](models/cv/object_detection/yolov11n/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov11n/igie) | | 4.3.0 | +| YOLOv11s | FP16 | [✅](models/cv/object_detection/yolov11s/igie) | | 4.4.0 | +| | INT8 | [✅](models/cv/object_detection/yolov11s/igie) | | 4.4.0 | | YOLOv12n | FP16 | [✅](models/cv/object_detection/yolov12n/igie) | [✅](models/cv/object_detection/yolov12n/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov12n/igie) | | 4.3.0 | | YOLOv13n | FP16 | [✅](models/cv/object_detection/yolov13n/igie) | [✅](models/cv/object_detection/yolov13n/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov13n/igie) | | 4.3.0 | +| YOLOv26n | FP16 | [✅](models/cv/object_detection/yolov26n/igie) | | 4.4.0 | | YOLOXm | FP16 | [✅](models/cv/object_detection/yoloxm/igie) | [✅](models/cv/object_detection/yoloxm/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yoloxm/igie) | [✅](models/cv/object_detection/yoloxm/ixrt) | 4.3.0 | diff --git a/README_en.md b/README_en.md index e6b5d7ee..631736f9 100644 --- a/README_en.md +++ b/README_en.md @@ -108,6 +108,7 @@ inference to be expanded in the future. | CSPResNeXt50 | FP16 | [✅](models/cv/classification/cspresnext50/igie) | [✅](models/cv/classification/cspresnext50/ixrt) | 4.3.0 | | DeiT-tiny | FP16 | [✅](models/cv/classification/deit_tiny/igie) | [✅](models/cv/classification/deit_tiny/ixrt) | 4.3.0 | | DenseNet121 | FP16 | [✅](models/cv/classification/densenet121/igie) | [✅](models/cv/classification/densenet121/ixrt) | 4.3.0 | +| | INT8 | [✅](models/cv/classification/densenet121/igie) | | 4.4.0 | | DenseNet161 | FP16 | [✅](models/cv/classification/densenet161/igie) | [✅](models/cv/classification/densenet161/ixrt) | 4.3.0 | | DenseNet169 | FP16 | [✅](models/cv/classification/densenet169/igie) | [✅](models/cv/classification/densenet169/ixrt) | 4.3.0 | | DenseNet201 | FP16 | [✅](models/cv/classification/densenet201/igie) | [✅](models/cv/classification/densenet201/ixrt) | 4.3.0 | @@ -142,6 +143,7 @@ inference to be expanded in the future. | | INT8 | [✅](models/cv/classification/mobilenet_v2/igie) | [✅](models/cv/classification/mobilenet_v2/ixrt) | 4.3.0 | | MobileNetV3_Large | FP16 | [✅](models/cv/classification/mobilenet_v3_large/igie) | | 4.3.0 | | MobileNetV3_Small | FP16 | [✅](models/cv/classification/mobilenet_v3/igie) | [✅](models/cv/classification/mobilenet_v3/ixrt) | 4.3.0 | +| Mobilevit_s | FP16 | [✅](models/cv/classification/mobilevit_s/igie) | | 4.4.0 | | MViTv2_base | FP16 | [✅](models/cv/classification/mvitv2_base/igie) | | dev-only | | RegNet_x_16gf | FP16 | [✅](models/cv/classification/regnet_x_16gf/igie) | | 4.3.0 | | RegNet_x_1_6gf | FP16 | [✅](models/cv/classification/regnet_x_1_6gf/igie) | | 4.3.0 | @@ -197,6 +199,8 @@ inference to be expanded in the future. 
| VGG19 | FP16 | [✅](models/cv/classification/vgg19/igie) | | 4.3.0 | | VGG19_BN | FP16 | [✅](models/cv/classification/vgg19_bn/igie) | | 4.3.0 | | ViT | FP16 | [✅](models/cv/classification/vit/igie) | | 4.3.0 | +| ViT-B-32 | FP16 | [✅](models/cv/classification/vit_b_32/igie) | | 4.4.0 | +| ViT-L-14 | FP16 | [✅](models/cv/classification/vit_l_14/igie) | | 4.4.0 | | Wide ResNet50 | FP16 | [✅](models/cv/classification/wide_resnet50/igie) | [✅](models/cv/classification/wide_resnet50/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/classification/wide_resnet50/igie) | [✅](models/cv/classification/wide_resnet50/ixrt) | 4.3.0 | | Wide ResNet101 | FP16 | [✅](models/cv/classification/wide_resnet101/igie) | | 4.3.0 | @@ -229,8 +233,8 @@ inference to be expanded in the future. | | INT8 | [✅](models/cv/object_detection/yolov4/igie16) | [✅](models/cv/object_detection/yolov4/ixrt16) | 4.3.0 | | YOLOv5m | FP16 | [✅](models/cv/object_detection/yolov5m/igie) | [✅](models/cv/object_detection/yolov5m/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov5m/igie) | [✅](models/cv/object_detection/yolov5m/ixrt) | 4.3.0 | -| YOLOv5s | FP16 | | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | -| | INT8 | | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | +| YOLOv5s | FP16 | [✅](models/cv/object_detection/yolov5s/igie) | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | +| | INT8 | [✅](models/cv/object_detection/yolov5s/igie) | [✅](models/cv/object_detection/yolov5s/ixrt) | 4.3.0 | | YOLOv6s | FP16 | [✅](models/cv/object_detection/yolov6s/igie) | [✅](models/cv/object_detection/yolov6s/ixrt) | 4.3.0 | | | INT8 | | [✅](models/cv/object_detection/yolov6s/ixrt) | 4.3.0 | | YOLOv7 | FP16 | [✅](models/cv/object_detection/yolov7/igie) | [✅](models/cv/object_detection/yolov7/ixrt) | 4.3.0 | @@ -242,12 +246,17 @@ inference to be expanded in the future. 
| YOLOv9s | FP16 | [✅](models/cv/object_detection/yolov9s/igie) | [✅](models/cv/object_detection/yolov9s/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov9s/igie) | | 4.3.0 | | YOLOv10s | FP16 | [✅](models/cv/object_detection/yolov10s/igie) | [✅](models/cv/object_detection/yolov10s/ixrt) | 4.3.0 | +| YOLOv11m | FP16 | [✅](models/cv/object_detection/yolov11m/igie) | | 4.4.0 | +| | INT8 | [✅](models/cv/object_detection/yolov11m/igie) | | 4.4.0 | | YOLOv11n | FP16 | [✅](models/cv/object_detection/yolov11n/igie) | [✅](models/cv/object_detection/yolov11n/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov11n/igie) | | 4.3.0 | +| YOLOv11s | FP16 | [✅](models/cv/object_detection/yolov11s/igie) | | 4.4.0 | +| | INT8 | [✅](models/cv/object_detection/yolov11s/igie) | | 4.4.0 | | YOLOv12n | FP16 | [✅](models/cv/object_detection/yolov12n/igie) | [✅](models/cv/object_detection/yolov12n/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov12n/igie) | | 4.3.0 | | YOLOv13n | FP16 | [✅](models/cv/object_detection/yolov13n/igie) | [✅](models/cv/object_detection/yolov13n/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yolov13n/igie) | | 4.3.0 | +| YOLOv26n | FP16 | [✅](models/cv/object_detection/yolov26n/igie) | | 4.4.0 | | YOLOXm | FP16 | [✅](models/cv/object_detection/yoloxm/igie) | [✅](models/cv/object_detection/yoloxm/ixrt) | 4.3.0 | | | INT8 | [✅](models/cv/object_detection/yoloxm/igie) | [✅](models/cv/object_detection/yoloxm/ixrt) | 4.3.0 | diff --git a/models/cv/classification/densenet121/igie/quantize.py b/models/cv/classification/densenet121/igie/quantize.py deleted file mode 100644 index d079be70..00000000 --- a/models/cv/classification/densenet121/igie/quantize.py +++ /dev/null @@ -1,107 +0,0 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import onnx -import psutil -import argparse -import numpy as np -from inference import get_dataloader -from onnxruntime.quantization import (CalibrationDataReader, QuantFormat, - quantize_static, QuantType, - CalibrationMethod) - -class CalibrationDataLoader(CalibrationDataReader): - def __init__(self, input_name, dataloader, cnt_limit=100): - self.cnt = 0 - self.input_name = input_name - self.cnt_limit = cnt_limit - self.iter = iter(dataloader) - - # avoid oom - @staticmethod - def _exceed_memory_upper_bound(upper_bound=80): - info = psutil.virtual_memory() - total_percent = info.percent - if total_percent >= upper_bound: - return True - return False - - def get_next(self): - if self._exceed_memory_upper_bound() or self.cnt >= self.cnt_limit: - return None - self.cnt += 1 - print(f"onnx calibration data count: {self.cnt}") - input_info = next(self.iter) - - ort_input = {k: np.array(v) for k, v in zip(self.input_name, input_info)} - return ort_input - -def parse_args(): - parser = argparse.ArgumentParser() - - parser.add_argument("--model_path", - type=str, - required=True, - help="original model path.") - - parser.add_argument("--out_path", - type=str, - required=True, - help="igie export engine path.") - - parser.add_argument("--datasets", - type=str, - required=True, - help="calibration datasets path.") - - parser.add_argument("--num_workers", - type=int, - default=16, - help="number of workers used in pytorch dataloader.") - - args = parser.parse_args() - - return args - -def main(): - args = parse_args() - - model = onnx.load(args.model_path) - graph = model.graph - input_names = [input.name for input in model.graph.input] - - dataloader = get_dataloader(args.datasets, batch_size=1, num_workers=args.num_workers) - calibration = CalibrationDataLoader(input_names, dataloader, cnt_limit=20) - - disable_quant_nodes = [node.name for node in graph.node if node.op_type == 'Concat'] - - quantize_static(args.model_path, - args.out_path, - calibration_data_reader=calibration, - quant_format=QuantFormat.QOperator, - per_channel=False, - activation_type=QuantType.QInt8, - weight_type=QuantType.QInt8, - use_external_data_format=False, - calibrate_method=CalibrationMethod.Percentile, - nodes_to_exclude=disable_quant_nodes, - extra_options = { - 'ActivationSymmetric': True, - 'WeightSymmetric': True - } - ) - -if __name__ == "__main__": - main() \ No newline at end of file diff --git a/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh index d75b0a1a..26af1b53 100644 --- a/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh +++ b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -36,7 +36,7 @@ echo "batch size is ${batchsize}" if [ ! 
-e $quantized_model_path ]; then # quantize model to int8 - python3 quantize.py \ + python3 ${RUN_DIR}quantize.py \ --model_path ${model_path} \ --out_path ${quantized_model_path} \ --datasets ${datasets_path} diff --git a/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh index 908097e5..b8362830 100644 --- a/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh +++ b/models/cv/classification/densenet121/igie/scripts/infer_densenet121_int8_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -36,7 +36,7 @@ echo "batch size is ${batchsize}" if [ ! -e $quantized_model_path ]; then # quantize model to int8 - python3 quantize.py \ + python3 ${RUN_DIR}quantize.py \ --model_path ${model_path} \ --out_path ${quantized_model_path} \ --datasets ${datasets_path} diff --git a/models/cv/classification/mobilevit/igie/README.md b/models/cv/classification/mobilevit_s/igie/README.md similarity index 88% rename from models/cv/classification/mobilevit/igie/README.md rename to models/cv/classification/mobilevit_s/igie/README.md index ff0461dc..4c5c8de0 100644 --- a/models/cv/classification/mobilevit/igie/README.md +++ b/models/cv/classification/mobilevit_s/igie/README.md @@ -1,4 +1,4 @@ -# Mobilevit_s +# Mobilevit_s (IGIE) ## Model Description @@ -14,7 +14,7 @@ The MobileViT-S model is a light-weight, general-purpose vision transformer desi ### Prepare Resources -Pretrained model: +Pretrained model: Dataset: to download the validation dataset. 
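The `${RUN_DIR}quantize.py` change in the two densenet121 scripts above assumes the caller exports `RUN_DIR` pointing at the model directory and ending with a trailing slash, since the variable is concatenated directly with the script name. A minimal sketch of a wrapper invocation under that assumption (`DATASETS_DIR` follows the convention used in the other IGIE READMEs; the exact variables consumed by the accuracy script are not shown in this patch):

```bash
# Hypothetical wrapper: run the densenet121 INT8 accuracy script from the repo root.
# RUN_DIR must end with "/" because the script expands ${RUN_DIR}quantize.py
# with no separator in between.
export RUN_DIR=models/cv/classification/densenet121/igie/
export DATASETS_DIR=/path/to/imagenet_val/   # assumed dataset variable name
bash ${RUN_DIR}scripts/infer_densenet121_int8_accuracy.sh
```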
@@ -28,9 +28,9 @@ pip3 install timm
 ### Model Conversion
 
 ```bash
-
+# download mobilevit_s.cvnets_in1k from huggingface into ./mobilevit_s.cvnets_in1k
 # export onnxmodel from timm
-python3 export.py --model-name mobilevit_s --output mobilevit_s.onnx
+python3 export.py --model-name mobilevit_s.cvnets_in1k --output mobilevit_s.onnx
 
 # use onnxsim optimize onnx model
 onnxsim mobilevit_s.onnx mobilevit_s_opt.onnx
diff --git a/models/cv/classification/mobilevit/igie/ci/prepare.sh b/models/cv/classification/mobilevit_s/igie/ci/prepare.sh
similarity index 85%
rename from models/cv/classification/mobilevit/igie/ci/prepare.sh
rename to models/cv/classification/mobilevit_s/igie/ci/prepare.sh
index 0651c29d..cac0fbcf 100644
--- a/models/cv/classification/mobilevit/igie/ci/prepare.sh
+++ b/models/cv/classification/mobilevit_s/igie/ci/prepare.sh
@@ -18,4 +18,5 @@ set -x
 pip3 install -r ../../igie_common/requirements.txt
 pip3 install timm
 
-python3 export.py --model-name mobilevit_s --output mobilevit_s.onnx
\ No newline at end of file
+python3 export.py --model-name mobilevit_s.cvnets_in1k --output mobilevit_s.onnx
+onnxsim mobilevit_s.onnx mobilevit_s_opt.onnx
\ No newline at end of file
diff --git a/models/cv/classification/mobilevit/igie/export.py b/models/cv/classification/mobilevit_s/igie/export.py
similarity index 100%
rename from models/cv/classification/mobilevit/igie/export.py
rename to models/cv/classification/mobilevit_s/igie/export.py
diff --git a/models/cv/classification/mobilevit/igie/inference.py b/models/cv/classification/mobilevit_s/igie/inference.py
similarity index 100%
rename from models/cv/classification/mobilevit/igie/inference.py
rename to models/cv/classification/mobilevit_s/igie/inference.py
diff --git a/models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_accuracy.sh b/models/cv/classification/mobilevit_s/igie/scripts/infer_mobilevit_s_fp16_accuracy.sh
similarity index 100%
rename from models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_accuracy.sh
rename to models/cv/classification/mobilevit_s/igie/scripts/infer_mobilevit_s_fp16_accuracy.sh
diff --git a/models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_performance.sh b/models/cv/classification/mobilevit_s/igie/scripts/infer_mobilevit_s_fp16_performance.sh
similarity index 100%
rename from models/cv/classification/mobilevit/igie/scripts/infer_mobilevit_s_fp16_performance.sh
rename to models/cv/classification/mobilevit_s/igie/scripts/infer_mobilevit_s_fp16_performance.sh
diff --git a/models/cv/classification/vit_b_32/igie/README.md b/models/cv/classification/vit_b_32/igie/README.md
index 27b1452a..0601e175 100644
--- a/models/cv/classification/vit_b_32/igie/README.md
+++ b/models/cv/classification/vit_b_32/igie/README.md
@@ -30,10 +30,15 @@ pip3 install timm
 ### Model Conversion
 
 ```bash
+# set weights_only=False to be compatible with pytorch 2.7
+sed -i '164 s/weights_only=weights_only)/weights_only=False)/' /usr/local/lib/python3.10/site-packages/open_clip/factory.py
+
 python3 export.py --model-name ViT-B-32 --weight ViT-B-32.pt --output vit_b_32.onnx
 
 # Use onnxsim optimize onnx model
 onnxsim vit_b_32.onnx vit_b_32_opt.onnx
+
+# download https://huggingface.co/timm/vit_base_patch32_clip_224.openai into ./vit_base_patch32_clip_224.openai
 ```
 
 ## Model Inference
@@ -46,9 +51,9 @@ export DATASETS_DIR=/Path/to/imagenet_val/
 
 ```bash
 # Accuracy
-bash scripts/infer_vit_fp16_accuracy.sh
+bash scripts/infer_vit_b_32_fp16_accuracy.sh
 # Performance
-bash scripts/infer_vit_fp16_performance.sh
+bash scripts/infer_vit_b_32_fp16_performance.sh
 ```
 
 ## Model Results
diff --git a/models/cv/classification/vit_b_32/igie/ci/prepare.sh b/models/cv/classification/vit_b_32/igie/ci/prepare.sh
index 84234a6d..ee1e1274 100644
--- a/models/cv/classification/vit_b_32/igie/ci/prepare.sh
+++ b/models/cv/classification/vit_b_32/igie/ci/prepare.sh
@@ -20,6 +20,11 @@ pip3 install -r requirements.txt
 pip3 install open_clip_torch
 pip3 install timm
 
+# set weights_only=False to be compatible with pytorch 2.7
+sed -i '164 s/weights_only=weights_only)/weights_only=False)/' /usr/local/lib/python3.10/site-packages/open_clip/factory.py
+
 python3 export.py --model-name ViT-B-32 --weight ViT-B-32.pt --output vit_b_32.onnx
 onnxsim vit_b_32.onnx vit_b_32_opt.onnx
+
+ln -s /mnt/deepspark/data/checkpoints/vit_base_patch32_clip_224.openai ./
diff --git a/models/cv/classification/vit_b_32/igie/inference.py b/models/cv/classification/vit_b_32/igie/inference.py
index 5a87832e..de2784a7 100644
--- a/models/cv/classification/vit_b_32/igie/inference.py
+++ b/models/cv/classification/vit_b_32/igie/inference.py
@@ -125,7 +125,7 @@ def main():
         module.run()
 
     model_name = "ViT-B-32"
-    model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained="openai")
+    model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained="./vit_base_patch32_clip_224.openai/open_clip_model.safetensors")
     tokenizer = open_clip.get_tokenizer(model_name)
 
     from open_clip import IMAGENET_CLASSNAMES as imagenet_classnames
diff --git a/models/cv/classification/vit_l_14/igie/README.md b/models/cv/classification/vit_l_14/igie/README.md
index f612ddc3..835a8d05 100644
--- a/models/cv/classification/vit_l_14/igie/README.md
+++ b/models/cv/classification/vit_l_14/igie/README.md
@@ -30,10 +30,15 @@ pip3 install timm
 ### Model Conversion
 
 ```bash
+# set weights_only=False to be compatible with pytorch 2.7
+sed -i '164 s/weights_only=weights_only)/weights_only=False)/' /usr/local/lib/python3.10/site-packages/open_clip/factory.py
+
 python3 export.py --model-name ViT-L-14 --weight ViT-L-14.pt --output vit_l_14.onnx
 
 # Use onnxsim optimize onnx model
 onnxsim vit_l_14.onnx vit_l_14_opt.onnx
+
+# download https://huggingface.co/timm/vit_large_patch14_clip_224.openai into ./vit_large_patch14_clip_224.openai
 ```
 
 ## Model Inference
diff --git a/models/cv/classification/vit_l_14/igie/ci/prepare.sh b/models/cv/classification/vit_l_14/igie/ci/prepare.sh
index af8ca988..00f6a3e1 100644
--- a/models/cv/classification/vit_l_14/igie/ci/prepare.sh
+++ b/models/cv/classification/vit_l_14/igie/ci/prepare.sh
@@ -20,6 +20,11 @@ pip3 install -r requirements.txt
 pip3 install open_clip_torch
 pip3 install timm
 
+# set weights_only=False to be compatible with pytorch 2.7
+sed -i '164 s/weights_only=weights_only)/weights_only=False)/' /usr/local/lib/python3.10/site-packages/open_clip/factory.py
+
 python3 export.py --model-name ViT-L-14 --weight ViT-L-14.pt --output vit_l_14.onnx
-onnxsim vit_l_14.onnx vit_l_14_opt.onnx
\ No newline at end of file
+onnxsim vit_l_14.onnx vit_l_14_opt.onnx
+
+ln -s /mnt/deepspark/data/checkpoints/vit_large_patch14_clip_224.openai ./
\ No newline at end of file
diff --git a/models/cv/classification/vit_l_14/igie/inference.py b/models/cv/classification/vit_l_14/igie/inference.py
index 7bea25fd..f6b74062 100644
--- a/models/cv/classification/vit_l_14/igie/inference.py
+++ b/models/cv/classification/vit_l_14/igie/inference.py
@@ -125,7 +125,7 @@ def main():
         module.run()
 
     model_name = 
"ViT-L-14" - model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained="openai") + model, _, preprocess = open_clip.create_model_and_transforms(model_name, pretrained="./vit_large_patch14_clip_224.openai/open_clip_model.safetensors") tokenizer = open_clip.get_tokenizer(model_name) from open_clip import IMAGENET_CLASSNAMES as imagenet_classnames diff --git a/models/cv/object_detection/detr/igie/Dockerfile b/models/cv/object_detection/detr/igie/Dockerfile deleted file mode 100644 index 3e6da220..00000000 --- a/models/cv/object_detection/detr/igie/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -FROM pytorch/pytorch:1.5-cuda10.1-cudnn7-runtime - -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt-get update -qq && \ - apt-get install -y git vim libgtk2.0-dev && \ - rm -rf /var/cache/apk/* - -RUN pip --no-cache-dir install Cython - -COPY requirements.txt /workspace - -RUN pip --no-cache-dir install -r /workspace/requirements.txt diff --git a/models/cv/object_detection/detr/igie/LICENSE b/models/cv/object_detection/detr/igie/LICENSE deleted file mode 100644 index b1395e94..00000000 --- a/models/cv/object_detection/detr/igie/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2020 - present, Facebook, Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/models/cv/object_detection/detr/igie/README.md b/models/cv/object_detection/detr/igie/README.md
index d5964650..cd826657 100644
--- a/models/cv/object_detection/detr/igie/README.md
+++ b/models/cv/object_detection/detr/igie/README.md
@@ -13,8 +13,6 @@ DETR (DEtection TRansformer) is a novel approach that views object detection as
 
 ## Model Preparation
 
-this is a fork version of official detr https://github.com/facebookresearch/detr.git
-
 ### Prepare Resources
 
 Pretrained model: 
@@ -55,13 +53,15 @@ Contact the Iluvatar administrator to get the missing packages:
 - mmcv-*.whl
 
 ```bash
-# Install libGL
-## CentOS
-yum install -y mesa-libGL
-## Ubuntu
-apt install -y libgl1-mesa-glx
+git clone https://github.com/facebookresearch/detr.git
+cp -r detr/* ./
+
+# change image size
+sed -i '105 s/size = get_size(image.size, size, max_size)/size = (800, 800)/' ./datasets/transforms.py
 
-pip3 install -r requirements.txt
+pip3 install --no-build-isolation -r requirements.txt
+pip3 install onnxsim
+pip3 install -U pycocotools
 ```
 
 ### Model Conversion
diff --git a/models/cv/object_detection/detr/igie/build_engine.py b/models/cv/object_detection/detr/igie/build_engine.py
index 9692cc52..869a6904 100644
--- a/models/cv/object_detection/detr/igie/build_engine.py
+++ b/models/cv/object_detection/detr/igie/build_engine.py
@@ -1,4 +1,4 @@
-# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
 # All Rights Reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -47,6 +47,8 @@ def parse_args():
 def main():
     args = parse_args()
 
+    if os.path.exists(args.engine_path):
+        return
     input_dict = {"tensor": [args.batch_size, 3, 800, 800],
                   "mask": [args.batch_size, 800, 800]}
 
diff --git a/models/cv/object_detection/detr/igie/ci/prepare.sh b/models/cv/object_detection/detr/igie/ci/prepare.sh
new file mode 100644
index 00000000..c8318bc2
--- /dev/null
+++ b/models/cv/object_detection/detr/igie/ci/prepare.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -x
+cp -r /mnt/deepspark/data/repos/detr/* ./
+
+# change image size
+sed -i '105 s/size = get_size(image.size, size, max_size)/size = (800, 800)/' ./datasets/transforms.py
+
+pip3 install --no-build-isolation -r requirements.txt
+pip3 install onnxsim
+pip3 install -U pycocotools
+mkdir -p /root/.cache/torch/hub/checkpoints/
+ln -s /mnt/deepspark/data/checkpoints/resnet50-0676ba61.pth /root/.cache/torch/hub/checkpoints/
+python3 export.py --no_aux_loss --eval --resume detr-r50-e632da11.pth --coco_path ./coco
+
+onnxsim detr.onnx detr_opt.onnx
\ No newline at end of file
diff --git a/models/cv/object_detection/detr/igie/d2/README.md b/models/cv/object_detection/detr/igie/d2/README.md
deleted file mode 100644
index 7f1d7531..00000000
--- a/models/cv/object_detection/detr/igie/d2/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-Detectron2 wrapper for DETR
-=======
-
-We provide a Detectron2 wrapper for DETR, thus providing a way to better integrate it in the existing detection ecosystem. It can be used for example to easily leverage datasets or backbones provided in Detectron2.
-
-This wrapper currently supports only box detection, and is intended to be as close as possible to the original implementation, and we checked that it indeed match the results. Some notable facts and caveats:
-- The data augmentation matches DETR's original data augmentation. This required patching the RandomCrop augmentation from Detectron2, so you'll need a version from the master branch from June 24th 2020 or more recent.
-- To match DETR's original backbone initialization, we use the weights of a ResNet50 trained on imagenet using torchvision. This network uses a different pixel mean and std than most of the backbones available in Detectron2 by default, so extra care must be taken when switching to another one. Note that no other torchvision models are available in Detectron2 as of now, though it may change in the future.
-- The gradient clipping mode is "full_model", which is not the default in Detectron2.
-
-# Usage
-
-To install Detectron2, please follow the [official installation instructions](https://github.com/facebookresearch/detectron2/blob/master/INSTALL.md).
-
-## Evaluating a model
-
-For convenience, we provide a conversion script to convert models trained by the main DETR training loop into the format of this wrapper. 
To download and convert the main Resnet50 model, simply do: - -``` -python converter.py --source_model https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth --output_model converted_model.pth -``` - -You can then evaluate it using: -``` -python train_net.py --eval-only --config configs/detr_256_6_6_torchvision.yaml MODEL.WEIGHTS "converted_model.pth" -``` - - -## Training - -To train DETR on a single node with 8 gpus, simply use: -``` -python train_net.py --config configs/detr_256_6_6_torchvision.yaml --num-gpus 8 -``` - -To fine-tune DETR for instance segmentation on a single node with 8 gpus, simply use: -``` -python train_net.py --config configs/detr_segm_256_6_6_torchvision.yaml --num-gpus 8 MODEL.DETR.FROZEN_WEIGHTS -``` diff --git a/models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml b/models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml deleted file mode 100644 index 25d64184..00000000 --- a/models/cv/object_detection/detr/igie/d2/configs/detr_256_6_6_torchvision.yaml +++ /dev/null @@ -1,45 +0,0 @@ -MODEL: - META_ARCHITECTURE: "Detr" - WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" - PIXEL_MEAN: [123.675, 116.280, 103.530] - PIXEL_STD: [58.395, 57.120, 57.375] - MASK_ON: False - RESNETS: - DEPTH: 50 - STRIDE_IN_1X1: False - OUT_FEATURES: ["res2", "res3", "res4", "res5"] - DETR: - GIOU_WEIGHT: 2.0 - L1_WEIGHT: 5.0 - NUM_OBJECT_QUERIES: 100 -DATASETS: - TRAIN: ("coco_2017_train",) - TEST: ("coco_2017_val",) -SOLVER: - IMS_PER_BATCH: 64 - BASE_LR: 0.0001 - STEPS: (369600,) - MAX_ITER: 554400 - WARMUP_FACTOR: 1.0 - WARMUP_ITERS: 10 - WEIGHT_DECAY: 0.0001 - OPTIMIZER: "ADAMW" - BACKBONE_MULTIPLIER: 0.1 - CLIP_GRADIENTS: - ENABLED: True - CLIP_TYPE: "full_model" - CLIP_VALUE: 0.01 - NORM_TYPE: 2.0 -INPUT: - MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) - CROP: - ENABLED: True - TYPE: "absolute_range" - SIZE: (384, 600) - FORMAT: "RGB" -TEST: - EVAL_PERIOD: 4000 -DATALOADER: - FILTER_EMPTY_ANNOTATIONS: False - NUM_WORKERS: 4 -VERSION: 2 diff --git a/models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml b/models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml deleted file mode 100644 index ade490e6..00000000 --- a/models/cv/object_detection/detr/igie/d2/configs/detr_segm_256_6_6_torchvision.yaml +++ /dev/null @@ -1,46 +0,0 @@ -MODEL: - META_ARCHITECTURE: "Detr" -# WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl" - PIXEL_MEAN: [123.675, 116.280, 103.530] - PIXEL_STD: [58.395, 57.120, 57.375] - MASK_ON: True - RESNETS: - DEPTH: 50 - STRIDE_IN_1X1: False - OUT_FEATURES: ["res2", "res3", "res4", "res5"] - DETR: - GIOU_WEIGHT: 2.0 - L1_WEIGHT: 5.0 - NUM_OBJECT_QUERIES: 100 - FROZEN_WEIGHTS: '' -DATASETS: - TRAIN: ("coco_2017_train",) - TEST: ("coco_2017_val",) -SOLVER: - IMS_PER_BATCH: 64 - BASE_LR: 0.0001 - STEPS: (55440,) - MAX_ITER: 92400 - WARMUP_FACTOR: 1.0 - WARMUP_ITERS: 10 - WEIGHT_DECAY: 0.0001 - OPTIMIZER: "ADAMW" - BACKBONE_MULTIPLIER: 0.1 - CLIP_GRADIENTS: - ENABLED: True - CLIP_TYPE: "full_model" - CLIP_VALUE: 0.01 - NORM_TYPE: 2.0 -INPUT: - MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) - CROP: - ENABLED: True - TYPE: "absolute_range" - SIZE: (384, 600) - FORMAT: "RGB" -TEST: - EVAL_PERIOD: 4000 -DATALOADER: - FILTER_EMPTY_ANNOTATIONS: False - NUM_WORKERS: 4 -VERSION: 2 diff --git a/models/cv/object_detection/detr/igie/d2/converter.py b/models/cv/object_detection/detr/igie/d2/converter.py 
deleted file mode 100644 index 6fa5ff4c..00000000 --- a/models/cv/object_detection/detr/igie/d2/converter.py +++ /dev/null @@ -1,69 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Helper script to convert models trained with the main version of DETR to be used with the Detectron2 version. -""" -import json -import argparse - -import numpy as np -import torch - - -def parse_args(): - parser = argparse.ArgumentParser("D2 model converter") - - parser.add_argument("--source_model", default="", type=str, help="Path or url to the DETR model to convert") - parser.add_argument("--output_model", default="", type=str, help="Path where to save the converted model") - return parser.parse_args() - - -def main(): - args = parse_args() - - # D2 expects contiguous classes, so we need to remap the 92 classes from DETR - # fmt: off - coco_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, - 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, - 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90, 91] - # fmt: on - - coco_idx = np.array(coco_idx) - - if args.source_model.startswith("https"): - checkpoint = torch.hub.load_state_dict_from_url(args.source_model, map_location="cpu", check_hash=True) - else: - checkpoint = torch.load(args.source_model, map_location="cpu") - model_to_convert = checkpoint["model"] - - model_converted = {} - for k in model_to_convert.keys(): - old_k = k - if "backbone" in k: - k = k.replace("backbone.0.body.", "") - if "layer" not in k: - k = "stem." + k - for t in [1, 2, 3, 4]: - k = k.replace(f"layer{t}", f"res{t + 1}") - for t in [1, 2, 3]: - k = k.replace(f"bn{t}", f"conv{t}.norm") - k = k.replace("downsample.0", "shortcut") - k = k.replace("downsample.1", "shortcut.norm") - k = "backbone.0.backbone." + k - k = "detr." + k - print(old_k, "->", k) - if "class_embed" in old_k: - v = model_to_convert[old_k].detach() - if v.shape[0] == 92: - shape_old = v.shape - model_converted[k] = v[coco_idx] - print("Head conversion: changing shape from {} to {}".format(shape_old, model_converted[k].shape)) - continue - model_converted[k] = model_to_convert[old_k].detach() - - model_to_save = {"model": model_converted} - torch.save(model_to_save, args.output_model) - - -if __name__ == "__main__": - main() diff --git a/models/cv/object_detection/detr/igie/d2/detr/__init__.py b/models/cv/object_detection/detr/igie/d2/detr/__init__.py deleted file mode 100644 index a618f828..00000000 --- a/models/cv/object_detection/detr/igie/d2/detr/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from .config import add_detr_config -from .detr import Detr -from .dataset_mapper import DetrDatasetMapper diff --git a/models/cv/object_detection/detr/igie/d2/detr/config.py b/models/cv/object_detection/detr/igie/d2/detr/config.py deleted file mode 100644 index 9ea267dd..00000000 --- a/models/cv/object_detection/detr/igie/d2/detr/config.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -from detectron2.config import CfgNode as CN - - -def add_detr_config(cfg): - """ - Add config for DETR. 
- """ - cfg.MODEL.DETR = CN() - cfg.MODEL.DETR.NUM_CLASSES = 80 - - # For Segmentation - cfg.MODEL.DETR.FROZEN_WEIGHTS = '' - - # LOSS - cfg.MODEL.DETR.GIOU_WEIGHT = 2.0 - cfg.MODEL.DETR.L1_WEIGHT = 5.0 - cfg.MODEL.DETR.DEEP_SUPERVISION = True - cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1 - - # TRANSFORMER - cfg.MODEL.DETR.NHEADS = 8 - cfg.MODEL.DETR.DROPOUT = 0.1 - cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048 - cfg.MODEL.DETR.ENC_LAYERS = 6 - cfg.MODEL.DETR.DEC_LAYERS = 6 - cfg.MODEL.DETR.PRE_NORM = False - - cfg.MODEL.DETR.HIDDEN_DIM = 256 - cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100 - - cfg.SOLVER.OPTIMIZER = "ADAMW" - cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1 diff --git a/models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py b/models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py deleted file mode 100644 index f428a493..00000000 --- a/models/cv/object_detection/detr/igie/d2/detr/dataset_mapper.py +++ /dev/null @@ -1,122 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import copy -import logging - -import numpy as np -import torch - -from detectron2.data import detection_utils as utils -from detectron2.data import transforms as T -from detectron2.data.transforms import TransformGen - -__all__ = ["DetrDatasetMapper"] - - -def build_transform_gen(cfg, is_train): - """ - Create a list of :class:`TransformGen` from config. - Returns: - list[TransformGen] - """ - if is_train: - min_size = cfg.INPUT.MIN_SIZE_TRAIN - max_size = cfg.INPUT.MAX_SIZE_TRAIN - sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING - else: - min_size = cfg.INPUT.MIN_SIZE_TEST - max_size = cfg.INPUT.MAX_SIZE_TEST - sample_style = "choice" - if sample_style == "range": - assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size)) - - logger = logging.getLogger(__name__) - tfm_gens = [] - if is_train: - tfm_gens.append(T.RandomFlip()) - tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style)) - if is_train: - logger.info("TransformGens used in training: " + str(tfm_gens)) - return tfm_gens - - -class DetrDatasetMapper: - """ - A callable which takes a dataset dict in Detectron2 Dataset format, - and map it into a format used by DETR. - - The callable currently does the following: - - 1. Read the image from "file_name" - 2. Applies geometric transforms to the image and annotation - 3. Find and applies suitable cropping to the image and annotation - 4. Prepare image and annotation to Tensors - """ - - def __init__(self, cfg, is_train=True): - if cfg.INPUT.CROP.ENABLED and is_train: - self.crop_gen = [ - T.ResizeShortestEdge([400, 500, 600], sample_style="choice"), - T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE), - ] - else: - self.crop_gen = None - - self.mask_on = cfg.MODEL.MASK_ON - self.tfm_gens = build_transform_gen(cfg, is_train) - logging.getLogger(__name__).info( - "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen)) - ) - - self.img_format = cfg.INPUT.FORMAT - self.is_train = is_train - - def __call__(self, dataset_dict): - """ - Args: - dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format. 
- - Returns: - dict: a format that builtin models in detectron2 accept - """ - dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below - image = utils.read_image(dataset_dict["file_name"], format=self.img_format) - utils.check_image_size(dataset_dict, image) - - if self.crop_gen is None: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - if np.random.rand() > 0.5: - image, transforms = T.apply_transform_gens(self.tfm_gens, image) - else: - image, transforms = T.apply_transform_gens( - self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image - ) - - image_shape = image.shape[:2] # h, w - - # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory, - # but not efficient on large generic data structures due to the use of pickle & mp.Queue. - # Therefore it's important to use torch.Tensor. - dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1))) - - if not self.is_train: - # USER: Modify this if you want to keep them for some reason. - dataset_dict.pop("annotations", None) - return dataset_dict - - if "annotations" in dataset_dict: - # USER: Modify this if you want to keep them for some reason. - for anno in dataset_dict["annotations"]: - if not self.mask_on: - anno.pop("segmentation", None) - anno.pop("keypoints", None) - - # USER: Implement additional transformations if you have other types of data - annos = [ - utils.transform_instance_annotations(obj, transforms, image_shape) - for obj in dataset_dict.pop("annotations") - if obj.get("iscrowd", 0) == 0 - ] - instances = utils.annotations_to_instances(annos, image_shape) - dataset_dict["instances"] = utils.filter_empty_instances(instances) - return dataset_dict diff --git a/models/cv/object_detection/detr/igie/d2/detr/detr.py b/models/cv/object_detection/detr/igie/d2/detr/detr.py deleted file mode 100644 index 95f89dff..00000000 --- a/models/cv/object_detection/detr/igie/d2/detr/detr.py +++ /dev/null @@ -1,261 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import logging -import math -from typing import List - -import numpy as np -import torch -import torch.distributed as dist -import torch.nn.functional as F -from scipy.optimize import linear_sum_assignment -from torch import nn - -from detectron2.layers import ShapeSpec -from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, detector_postprocess -from detectron2.structures import Boxes, ImageList, Instances, BitMasks, PolygonMasks -from detectron2.utils.logger import log_first_n -from fvcore.nn import giou_loss, smooth_l1_loss -from models.backbone import Joiner -from models.detr import DETR, SetCriterion -from models.matcher import HungarianMatcher -from models.position_encoding import PositionEmbeddingSine -from models.transformer import Transformer -from models.segmentation import DETRsegm, PostProcessPanoptic, PostProcessSegm -from util.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh -from util.misc import NestedTensor -from datasets.coco import convert_coco_poly_to_mask - -__all__ = ["Detr"] - - -class MaskedBackbone(nn.Module): - """ This is a thin wrapper around D2's backbone to provide padding masking""" - - def __init__(self, cfg): - super().__init__() - self.backbone = build_backbone(cfg) - backbone_shape = self.backbone.output_shape() - self.feature_strides = [backbone_shape[f].stride for f in backbone_shape.keys()] - self.num_channels = backbone_shape[list(backbone_shape.keys())[-1]].channels - - def forward(self, images): - features = self.backbone(images.tensor) - masks = self.mask_out_padding( - [features_per_level.shape for features_per_level in features.values()], - images.image_sizes, - images.tensor.device, - ) - assert len(features) == len(masks) - for i, k in enumerate(features.keys()): - features[k] = NestedTensor(features[k], masks[i]) - return features - - def mask_out_padding(self, feature_shapes, image_sizes, device): - masks = [] - assert len(feature_shapes) == len(self.feature_strides) - for idx, shape in enumerate(feature_shapes): - N, _, H, W = shape - masks_per_feature_level = torch.ones((N, H, W), dtype=torch.bool, device=device) - for img_idx, (h, w) in enumerate(image_sizes): - masks_per_feature_level[ - img_idx, - : int(np.ceil(float(h) / self.feature_strides[idx])), - : int(np.ceil(float(w) / self.feature_strides[idx])), - ] = 0 - masks.append(masks_per_feature_level) - return masks - - -@META_ARCH_REGISTRY.register() -class Detr(nn.Module): - """ - Implement Detr - """ - - def __init__(self, cfg): - super().__init__() - - self.device = torch.device(cfg.MODEL.DEVICE) - - self.num_classes = cfg.MODEL.DETR.NUM_CLASSES - self.mask_on = cfg.MODEL.MASK_ON - hidden_dim = cfg.MODEL.DETR.HIDDEN_DIM - num_queries = cfg.MODEL.DETR.NUM_OBJECT_QUERIES - # Transformer parameters: - nheads = cfg.MODEL.DETR.NHEADS - dropout = cfg.MODEL.DETR.DROPOUT - dim_feedforward = cfg.MODEL.DETR.DIM_FEEDFORWARD - enc_layers = cfg.MODEL.DETR.ENC_LAYERS - dec_layers = cfg.MODEL.DETR.DEC_LAYERS - pre_norm = cfg.MODEL.DETR.PRE_NORM - - # Loss parameters: - giou_weight = cfg.MODEL.DETR.GIOU_WEIGHT - l1_weight = cfg.MODEL.DETR.L1_WEIGHT - deep_supervision = cfg.MODEL.DETR.DEEP_SUPERVISION - no_object_weight = cfg.MODEL.DETR.NO_OBJECT_WEIGHT - - N_steps = hidden_dim // 2 - d2_backbone = MaskedBackbone(cfg) - backbone = Joiner(d2_backbone, PositionEmbeddingSine(N_steps, normalize=True)) - backbone.num_channels = d2_backbone.num_channels - - transformer = Transformer( - d_model=hidden_dim, - dropout=dropout, - nhead=nheads, - 
dim_feedforward=dim_feedforward, - num_encoder_layers=enc_layers, - num_decoder_layers=dec_layers, - normalize_before=pre_norm, - return_intermediate_dec=deep_supervision, - ) - - self.detr = DETR( - backbone, transformer, num_classes=self.num_classes, num_queries=num_queries, aux_loss=deep_supervision - ) - if self.mask_on: - frozen_weights = cfg.MODEL.DETR.FROZEN_WEIGHTS - if frozen_weights != '': - print("LOAD pre-trained weights") - weight = torch.load(frozen_weights, map_location=lambda storage, loc: storage)['model'] - new_weight = {} - for k, v in weight.items(): - if 'detr.' in k: - new_weight[k.replace('detr.', '')] = v - else: - print(f"Skipping loading weight {k} from frozen model") - del weight - self.detr.load_state_dict(new_weight) - del new_weight - self.detr = DETRsegm(self.detr, freeze_detr=(frozen_weights != '')) - self.seg_postprocess = PostProcessSegm - - self.detr.to(self.device) - - # building criterion - matcher = HungarianMatcher(cost_class=1, cost_bbox=l1_weight, cost_giou=giou_weight) - weight_dict = {"loss_ce": 1, "loss_bbox": l1_weight} - weight_dict["loss_giou"] = giou_weight - if deep_supervision: - aux_weight_dict = {} - for i in range(dec_layers - 1): - aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - losses = ["labels", "boxes", "cardinality"] - if self.mask_on: - losses += ["masks"] - self.criterion = SetCriterion( - self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight, losses=losses, - ) - self.criterion.to(self.device) - - pixel_mean = torch.Tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(3, 1, 1) - pixel_std = torch.Tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(3, 1, 1) - self.normalizer = lambda x: (x - pixel_mean) / pixel_std - self.to(self.device) - - def forward(self, batched_inputs): - """ - Args: - batched_inputs: a list, batched outputs of :class:`DatasetMapper` . - Each item in the list contains the inputs for one image. - For now, each item in the list is a dict that contains: - - * image: Tensor, image in (C, H, W) format. - * instances: Instances - - Other information that's included in the original dicts, such as: - - * "height", "width" (int): the output resolution of the model, used in inference. - See :meth:`postprocess` for details. - Returns: - dict[str: Tensor]: - mapping from a named loss to a tensor storing the loss. Used during training only. 
- """ - images = self.preprocess_image(batched_inputs) - output = self.detr(images) - - if self.training: - gt_instances = [x["instances"].to(self.device) for x in batched_inputs] - - targets = self.prepare_targets(gt_instances) - loss_dict = self.criterion(output, targets) - weight_dict = self.criterion.weight_dict - for k in loss_dict.keys(): - if k in weight_dict: - loss_dict[k] *= weight_dict[k] - return loss_dict - else: - box_cls = output["pred_logits"] - box_pred = output["pred_boxes"] - mask_pred = output["pred_masks"] if self.mask_on else None - results = self.inference(box_cls, box_pred, mask_pred, images.image_sizes) - processed_results = [] - for results_per_image, input_per_image, image_size in zip(results, batched_inputs, images.image_sizes): - height = input_per_image.get("height", image_size[0]) - width = input_per_image.get("width", image_size[1]) - r = detector_postprocess(results_per_image, height, width) - processed_results.append({"instances": r}) - return processed_results - - def prepare_targets(self, targets): - new_targets = [] - for targets_per_image in targets: - h, w = targets_per_image.image_size - image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device) - gt_classes = targets_per_image.gt_classes - gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy - gt_boxes = box_xyxy_to_cxcywh(gt_boxes) - new_targets.append({"labels": gt_classes, "boxes": gt_boxes}) - if self.mask_on and hasattr(targets_per_image, 'gt_masks'): - gt_masks = targets_per_image.gt_masks - gt_masks = convert_coco_poly_to_mask(gt_masks.polygons, h, w) - new_targets[-1].update({'masks': gt_masks}) - return new_targets - - def inference(self, box_cls, box_pred, mask_pred, image_sizes): - """ - Arguments: - box_cls (Tensor): tensor of shape (batch_size, num_queries, K). - The tensor predicts the classification probability for each query. - box_pred (Tensor): tensors of shape (batch_size, num_queries, 4). - The tensor predicts 4-vector (x,y,w,h) box - regression values for every queryx - image_sizes (List[torch.Size]): the input image sizes - - Returns: - results (List[Instances]): a list of #images elements. - """ - assert len(box_cls) == len(image_sizes) - results = [] - - # For each box we assign the best class or the second best if the best on is `no_object`. - scores, labels = F.softmax(box_cls, dim=-1)[:, :, :-1].max(-1) - - for i, (scores_per_image, labels_per_image, box_pred_per_image, image_size) in enumerate(zip( - scores, labels, box_pred, image_sizes - )): - result = Instances(image_size) - result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image)) - - result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0]) - if self.mask_on: - mask = F.interpolate(mask_pred[i].unsqueeze(0), size=image_size, mode='bilinear', align_corners=False) - mask = mask[0].sigmoid() > 0.5 - B, N, H, W = mask_pred.shape - mask = BitMasks(mask.cpu()).crop_and_resize(result.pred_boxes.tensor.cpu(), 32) - result.pred_masks = mask.unsqueeze(1).to(mask_pred[0].device) - - result.scores = scores_per_image - result.pred_classes = labels_per_image - results.append(result) - return results - - def preprocess_image(self, batched_inputs): - """ - Normalize, pad and batch the input images. 
- """ - images = [self.normalizer(x["image"].to(self.device)) for x in batched_inputs] - images = ImageList.from_tensors(images) - return images diff --git a/models/cv/object_detection/detr/igie/d2/train_net.py b/models/cv/object_detection/detr/igie/d2/train_net.py deleted file mode 100644 index 82f69292..00000000 --- a/models/cv/object_detection/detr/igie/d2/train_net.py +++ /dev/null @@ -1,145 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR Training Script. - -This script is a simplified version of the training script in detectron2/tools. -""" -import os -import sys -import itertools - -# fmt: off -sys.path.insert(1, os.path.join(sys.path[0], '..')) -# fmt: on - -import time -from typing import Any, Dict, List, Set - -import torch - -import detectron2.utils.comm as comm -from d2.detr import DetrDatasetMapper, add_detr_config -from detectron2.checkpoint import DetectionCheckpointer -from detectron2.config import get_cfg -from detectron2.data import MetadataCatalog, build_detection_train_loader -from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch -from detectron2.evaluation import COCOEvaluator, verify_results - -from detectron2.solver.build import maybe_add_gradient_clipping - - -class Trainer(DefaultTrainer): - """ - Extension of the Trainer class adapted to DETR. - """ - - @classmethod - def build_evaluator(cls, cfg, dataset_name, output_folder=None): - """ - Create evaluator(s) for a given dataset. - This uses the special metadata "evaluator_type" associated with each builtin dataset. - For your own dataset, you can simply create an evaluator manually in your - script and do not have to worry about the hacky if-else logic here. - """ - if output_folder is None: - output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") - return COCOEvaluator(dataset_name, cfg, True, output_folder) - - @classmethod - def build_train_loader(cls, cfg): - if "Detr" == cfg.MODEL.META_ARCHITECTURE: - mapper = DetrDatasetMapper(cfg, True) - else: - mapper = None - return build_detection_train_loader(cfg, mapper=mapper) - - @classmethod - def build_optimizer(cls, cfg, model): - params: List[Dict[str, Any]] = [] - memo: Set[torch.nn.parameter.Parameter] = set() - for key, value in model.named_parameters(recurse=True): - if not value.requires_grad: - continue - # Avoid duplicating parameters - if value in memo: - continue - memo.add(value) - lr = cfg.SOLVER.BASE_LR - weight_decay = cfg.SOLVER.WEIGHT_DECAY - if "backbone" in key: - lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER - params += [{"params": [value], "lr": lr, "weight_decay": weight_decay}] - - def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class - # detectron2 doesn't have full model gradient clipping now - clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE - enable = ( - cfg.SOLVER.CLIP_GRADIENTS.ENABLED - and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" - and clip_norm_val > 0.0 - ) - - class FullModelGradientClippingOptimizer(optim): - def step(self, closure=None): - all_params = itertools.chain(*[x["params"] for x in self.param_groups]) - torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) - super().step(closure=closure) - - return FullModelGradientClippingOptimizer if enable else optim - - optimizer_type = cfg.SOLVER.OPTIMIZER - if optimizer_type == "SGD": - optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( - params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM - ) - elif optimizer_type == "ADAMW": - 
optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( - params, cfg.SOLVER.BASE_LR - ) - else: - raise NotImplementedError(f"no optimizer type {optimizer_type}") - if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": - optimizer = maybe_add_gradient_clipping(cfg, optimizer) - return optimizer - - -def setup(args): - """ - Create configs and perform basic setups. - """ - cfg = get_cfg() - add_detr_config(cfg) - cfg.merge_from_file(args.config_file) - cfg.merge_from_list(args.opts) - cfg.freeze() - default_setup(cfg, args) - return cfg - - -def main(args): - cfg = setup(args) - - if args.eval_only: - model = Trainer.build_model(cfg) - DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume) - res = Trainer.test(cfg, model) - if comm.is_main_process(): - verify_results(cfg, res) - return res - - trainer = Trainer(cfg) - trainer.resume_or_load(resume=args.resume) - return trainer.train() - - -if __name__ == "__main__": - args = default_argument_parser().parse_args() - print("Command Line Args:", args) - launch( - main, - args.num_gpus, - num_machines=args.num_machines, - machine_rank=args.machine_rank, - dist_url=args.dist_url, - args=(args,), - ) diff --git a/models/cv/object_detection/detr/igie/datasets/__init__.py b/models/cv/object_detection/detr/igie/datasets/__init__.py deleted file mode 100644 index 571b126e..00000000 --- a/models/cv/object_detection/detr/igie/datasets/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import torch.utils.data -import torchvision - -from .coco import build as build_coco - - -def get_coco_api_from_dataset(dataset): - for _ in range(10): - # if isinstance(dataset, torchvision.datasets.CocoDetection): - # break - if isinstance(dataset, torch.utils.data.Subset): - dataset = dataset.dataset - if isinstance(dataset, torchvision.datasets.CocoDetection): - return dataset.coco - - -def build_dataset(image_set, args): - if args.dataset_file == 'coco': - return build_coco(image_set, args) - if args.dataset_file == 'coco_panoptic': - # to avoid making panopticapi required for coco - from .coco_panoptic import build as build_coco_panoptic - return build_coco_panoptic(image_set, args) - raise ValueError(f'dataset {args.dataset_file} not supported') diff --git a/models/cv/object_detection/detr/igie/datasets/coco.py b/models/cv/object_detection/detr/igie/datasets/coco.py deleted file mode 100644 index 93a436ba..00000000 --- a/models/cv/object_detection/detr/igie/datasets/coco.py +++ /dev/null @@ -1,158 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -COCO dataset which returns image_id for evaluation. 
- -Mostly copy-paste from https://github.com/pytorch/vision/blob/13b35ff/references/detection/coco_utils.py -""" -from pathlib import Path - -import torch -import torch.utils.data -import torchvision -from pycocotools import mask as coco_mask - -import datasets.transforms as T - - -class CocoDetection(torchvision.datasets.CocoDetection): - def __init__(self, img_folder, ann_file, transforms, return_masks): - super(CocoDetection, self).__init__(img_folder, ann_file) - self._transforms = transforms - self.prepare = ConvertCocoPolysToMask(return_masks) - - def __getitem__(self, idx): - img, target = super(CocoDetection, self).__getitem__(idx) - image_id = self.ids[idx] - target = {'image_id': image_id, 'annotations': target} - img, target = self.prepare(img, target) - if self._transforms is not None: - img, target = self._transforms(img, target) - return img, target - - -def convert_coco_poly_to_mask(segmentations, height, width): - masks = [] - for polygons in segmentations: - rles = coco_mask.frPyObjects(polygons, height, width) - mask = coco_mask.decode(rles) - if len(mask.shape) < 3: - mask = mask[..., None] - mask = torch.as_tensor(mask, dtype=torch.uint8) - mask = mask.any(dim=2) - masks.append(mask) - if masks: - masks = torch.stack(masks, dim=0) - else: - masks = torch.zeros((0, height, width), dtype=torch.uint8) - return masks - - -class ConvertCocoPolysToMask(object): - def __init__(self, return_masks=False): - self.return_masks = return_masks - - def __call__(self, image, target): - w, h = image.size - - image_id = target["image_id"] - image_id = torch.tensor([image_id]) - - anno = target["annotations"] - - anno = [obj for obj in anno if 'iscrowd' not in obj or obj['iscrowd'] == 0] - - boxes = [obj["bbox"] for obj in anno] - # guard against no boxes via resizing - boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) - boxes[:, 2:] += boxes[:, :2] - boxes[:, 0::2].clamp_(min=0, max=w) - boxes[:, 1::2].clamp_(min=0, max=h) - - classes = [obj["category_id"] for obj in anno] - classes = torch.tensor(classes, dtype=torch.int64) - - if self.return_masks: - segmentations = [obj["segmentation"] for obj in anno] - masks = convert_coco_poly_to_mask(segmentations, h, w) - - keypoints = None - if anno and "keypoints" in anno[0]: - keypoints = [obj["keypoints"] for obj in anno] - keypoints = torch.as_tensor(keypoints, dtype=torch.float32) - num_keypoints = keypoints.shape[0] - if num_keypoints: - keypoints = keypoints.view(num_keypoints, -1, 3) - - keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) - boxes = boxes[keep] - classes = classes[keep] - if self.return_masks: - masks = masks[keep] - if keypoints is not None: - keypoints = keypoints[keep] - - target = {} - target["boxes"] = boxes - target["labels"] = classes - if self.return_masks: - target["masks"] = masks - target["image_id"] = image_id - if keypoints is not None: - target["keypoints"] = keypoints - - # for conversion to coco api - area = torch.tensor([obj["area"] for obj in anno]) - iscrowd = torch.tensor([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in anno]) - target["area"] = area[keep] - target["iscrowd"] = iscrowd[keep] - - target["orig_size"] = torch.as_tensor([int(h), int(w)]) - target["size"] = torch.as_tensor([int(h), int(w)]) - - return image, target - - -def make_coco_transforms(image_set): - - normalize = T.Compose([ - T.ToTensor(), - T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) - ]) - - scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800] - - if image_set == 
'train': - return T.Compose([ - T.RandomHorizontalFlip(), - T.RandomSelect( - T.RandomResize(scales, max_size=1333), - T.Compose([ - T.RandomResize([400, 500, 600]), - T.RandomSizeCrop(384, 600), - T.RandomResize(scales, max_size=1333), - ]) - ), - normalize, - ]) - - if image_set == 'val': - return T.Compose([ - T.RandomResize([800], max_size=1333), - normalize, - ]) - - raise ValueError(f'unknown {image_set}') - - -def build(image_set, args): - root = Path(args.coco_path) - assert root.exists(), f'provided COCO path {root} does not exist' - mode = 'instances' - PATHS = { - "train": (root / "train2017", root / "annotations" / f'{mode}_train2017.json'), - "val": (root / "val2017", root / "annotations" / f'{mode}_val2017.json'), - } - - img_folder, ann_file = PATHS[image_set] - dataset = CocoDetection(img_folder, ann_file, transforms=make_coco_transforms(image_set), return_masks=args.masks) - return dataset diff --git a/models/cv/object_detection/detr/igie/datasets/coco_eval.py b/models/cv/object_detection/detr/igie/datasets/coco_eval.py deleted file mode 100644 index 9487c08f..00000000 --- a/models/cv/object_detection/detr/igie/datasets/coco_eval.py +++ /dev/null @@ -1,257 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -COCO evaluator that works in distributed mode. - -Mostly copy-paste from https://github.com/pytorch/vision/blob/edfd5a7/references/detection/coco_eval.py -The difference is that there is less copy-pasting from pycocotools -in the end of the file, as python3 can suppress prints with contextlib -""" -import os -import contextlib -import copy -import numpy as np -import torch - -from pycocotools.cocoeval import COCOeval -from pycocotools.coco import COCO -import pycocotools.mask as mask_util - -from util.misc import all_gather - - -class CocoEvaluator(object): - def __init__(self, coco_gt, iou_types): - assert isinstance(iou_types, (list, tuple)) - coco_gt = copy.deepcopy(coco_gt) - self.coco_gt = coco_gt - - self.iou_types = iou_types - self.coco_eval = {} - for iou_type in iou_types: - self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) - - self.img_ids = [] - self.eval_imgs = {k: [] for k in iou_types} - - def update(self, predictions): - img_ids = list(np.unique(list(predictions.keys()))) - self.img_ids.extend(img_ids) - - for iou_type in self.iou_types: - results = self.prepare(predictions, iou_type) - - # suppress pycocotools prints - with open(os.devnull, 'w') as devnull: - with contextlib.redirect_stdout(devnull): - coco_dt = COCO.loadRes(self.coco_gt, results) if results else COCO() - coco_eval = self.coco_eval[iou_type] - - coco_eval.cocoDt = coco_dt - coco_eval.params.imgIds = list(img_ids) - img_ids, eval_imgs = evaluate(coco_eval) - - self.eval_imgs[iou_type].append(eval_imgs) - - def synchronize_between_processes(self): - for iou_type in self.iou_types: - self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) - create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) - - def accumulate(self): - for coco_eval in self.coco_eval.values(): - coco_eval.accumulate() - - def summarize(self): - for iou_type, coco_eval in self.coco_eval.items(): - print("IoU metric: {}".format(iou_type)) - coco_eval.summarize() - - def prepare(self, predictions, iou_type): - if iou_type == "bbox": - return self.prepare_for_coco_detection(predictions) - elif iou_type == "segm": - return self.prepare_for_coco_segmentation(predictions) - elif iou_type == "keypoints": - return 
self.prepare_for_coco_keypoint(predictions) - else: - raise ValueError("Unknown iou type {}".format(iou_type)) - - def prepare_for_coco_detection(self, predictions): - coco_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "bbox": box, - "score": scores[k], - } - for k, box in enumerate(boxes) - ] - ) - return coco_results - - def prepare_for_coco_segmentation(self, predictions): - coco_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - scores = prediction["scores"] - labels = prediction["labels"] - masks = prediction["masks"] - - masks = masks > 0.5 - - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - rles = [ - mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] - for mask in masks - ] - for rle in rles: - rle["counts"] = rle["counts"].decode("utf-8") - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "segmentation": rle, - "score": scores[k], - } - for k, rle in enumerate(rles) - ] - ) - return coco_results - - def prepare_for_coco_keypoint(self, predictions): - coco_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - keypoints = prediction["keypoints"] - keypoints = keypoints.flatten(start_dim=1).tolist() - - coco_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - 'keypoints': keypoint, - "score": scores[k], - } - for k, keypoint in enumerate(keypoints) - ] - ) - return coco_results - - -def convert_to_xywh(boxes): - xmin, ymin, xmax, ymax = boxes.unbind(1) - return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) - - -def merge(img_ids, eval_imgs): - all_img_ids = all_gather(img_ids) - all_eval_imgs = all_gather(eval_imgs) - - merged_img_ids = [] - for p in all_img_ids: - merged_img_ids.extend(p) - - merged_eval_imgs = [] - for p in all_eval_imgs: - merged_eval_imgs.append(p) - - merged_img_ids = np.array(merged_img_ids) - merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) - - # keep only unique (and in sorted order) images - merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) - merged_eval_imgs = merged_eval_imgs[..., idx] - - return merged_img_ids, merged_eval_imgs - - -def create_common_coco_eval(coco_eval, img_ids, eval_imgs): - img_ids, eval_imgs = merge(img_ids, eval_imgs) - img_ids = list(img_ids) - eval_imgs = list(eval_imgs.flatten()) - - coco_eval.evalImgs = eval_imgs - coco_eval.params.imgIds = img_ids - coco_eval._paramsEval = copy.deepcopy(coco_eval.params) - - -################################################################# -# From pycocotools, just removed the prints and fixed -# a Python3 bug about unicode not defined -################################################################# - - -def evaluate(self): - ''' - Run per image evaluation on given images and store results (a list of dict) in self.evalImgs - :return: None - ''' - # tic = time.time() - # print('Running per image evaluation...') - p = self.params - # add backward compatibility if 
useSegm is specified in params - if p.useSegm is not None: - p.iouType = 'segm' if p.useSegm == 1 else 'bbox' - print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType)) - # print('Evaluate annotation type *{}*'.format(p.iouType)) - p.imgIds = list(np.unique(p.imgIds)) - if p.useCats: - p.catIds = list(np.unique(p.catIds)) - p.maxDets = sorted(p.maxDets) - self.params = p - - self._prepare() - # loop through images, area range, max detection number - catIds = p.catIds if p.useCats else [-1] - - if p.iouType == 'segm' or p.iouType == 'bbox': - computeIoU = self.computeIoU - elif p.iouType == 'keypoints': - computeIoU = self.computeOks - self.ious = { - (imgId, catId): computeIoU(imgId, catId) - for imgId in p.imgIds - for catId in catIds} - - evaluateImg = self.evaluateImg - maxDet = p.maxDets[-1] - evalImgs = [ - evaluateImg(imgId, catId, areaRng, maxDet) - for catId in catIds - for areaRng in p.areaRng - for imgId in p.imgIds - ] - # this is NOT in the pycocotools code, but could be done outside - evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) - self._paramsEval = copy.deepcopy(self.params) - # toc = time.time() - # print('DONE (t={:0.2f}s).'.format(toc-tic)) - return p.imgIds, evalImgs - -################################################################# -# end of straight copy from pycocotools, just removing the prints -################################################################# diff --git a/models/cv/object_detection/detr/igie/datasets/coco_panoptic.py b/models/cv/object_detection/detr/igie/datasets/coco_panoptic.py deleted file mode 100644 index b24f615c..00000000 --- a/models/cv/object_detection/detr/igie/datasets/coco_panoptic.py +++ /dev/null @@ -1,99 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import json -from pathlib import Path - -import numpy as np -import torch -from PIL import Image - -from panopticapi.utils import rgb2id -from util.box_ops import masks_to_boxes - -from .coco import make_coco_transforms - - -class CocoPanoptic: - def __init__(self, img_folder, ann_folder, ann_file, transforms=None, return_masks=True): - with open(ann_file, 'r') as f: - self.coco = json.load(f) - - # sort 'images' field so that they are aligned with 'annotations' - # i.e., in alphabetical order - self.coco['images'] = sorted(self.coco['images'], key=lambda x: x['id']) - # sanity check - if "annotations" in self.coco: - for img, ann in zip(self.coco['images'], self.coco['annotations']): - assert img['file_name'][:-4] == ann['file_name'][:-4] - - self.img_folder = img_folder - self.ann_folder = ann_folder - self.ann_file = ann_file - self.transforms = transforms - self.return_masks = return_masks - - def __getitem__(self, idx): - ann_info = self.coco['annotations'][idx] if "annotations" in self.coco else self.coco['images'][idx] - img_path = Path(self.img_folder) / ann_info['file_name'].replace('.png', '.jpg') - ann_path = Path(self.ann_folder) / ann_info['file_name'] - - img = Image.open(img_path).convert('RGB') - w, h = img.size - if "segments_info" in ann_info: - masks = np.asarray(Image.open(ann_path), dtype=np.uint32) - masks = rgb2id(masks) - - ids = np.array([ann['id'] for ann in ann_info['segments_info']]) - masks = masks == ids[:, None, None] - - masks = torch.as_tensor(masks, dtype=torch.uint8) - labels = torch.tensor([ann['category_id'] for ann in ann_info['segments_info']], dtype=torch.int64) - - target = {} - target['image_id'] = torch.tensor([ann_info['image_id'] if "image_id" in ann_info else ann_info["id"]]) - if self.return_masks: - target['masks'] = masks - target['labels'] = labels - - target["boxes"] = masks_to_boxes(masks) - - target['size'] = torch.as_tensor([int(h), int(w)]) - target['orig_size'] = torch.as_tensor([int(h), int(w)]) - if "segments_info" in ann_info: - for name in ['iscrowd', 'area']: - target[name] = torch.tensor([ann[name] for ann in ann_info['segments_info']]) - - if self.transforms is not None: - img, target = self.transforms(img, target) - - return img, target - - def __len__(self): - return len(self.coco['images']) - - def get_height_and_width(self, idx): - img_info = self.coco['images'][idx] - height = img_info['height'] - width = img_info['width'] - return height, width - - -def build(image_set, args): - img_folder_root = Path(args.coco_path) - ann_folder_root = Path(args.coco_panoptic_path) - assert img_folder_root.exists(), f'provided COCO path {img_folder_root} does not exist' - assert ann_folder_root.exists(), f'provided COCO path {ann_folder_root} does not exist' - mode = 'panoptic' - PATHS = { - "train": ("train2017", Path("annotations") / f'{mode}_train2017.json'), - "val": ("val2017", Path("annotations") / f'{mode}_val2017.json'), - } - - img_folder, ann_file = PATHS[image_set] - img_folder_path = img_folder_root / img_folder - ann_folder = ann_folder_root / f'{mode}_{img_folder}' - ann_file = ann_folder_root / ann_file - - dataset = CocoPanoptic(img_folder_path, ann_folder, ann_file, - transforms=make_coco_transforms(image_set), return_masks=args.masks) - - return dataset diff --git a/models/cv/object_detection/detr/igie/datasets/panoptic_eval.py b/models/cv/object_detection/detr/igie/datasets/panoptic_eval.py deleted file mode 100644 index 9cb4f834..00000000 --- 
a/models/cv/object_detection/detr/igie/datasets/panoptic_eval.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import json -import os - -import util.misc as utils - -try: - from panopticapi.evaluation import pq_compute -except ImportError: - pass - - -class PanopticEvaluator(object): - def __init__(self, ann_file, ann_folder, output_dir="panoptic_eval"): - self.gt_json = ann_file - self.gt_folder = ann_folder - if utils.is_main_process(): - if not os.path.exists(output_dir): - os.mkdir(output_dir) - self.output_dir = output_dir - self.predictions = [] - - def update(self, predictions): - for p in predictions: - with open(os.path.join(self.output_dir, p["file_name"]), "wb") as f: - f.write(p.pop("png_string")) - - self.predictions += predictions - - def synchronize_between_processes(self): - all_predictions = utils.all_gather(self.predictions) - merged_predictions = [] - for p in all_predictions: - merged_predictions += p - self.predictions = merged_predictions - - def summarize(self): - if utils.is_main_process(): - json_data = {"annotations": self.predictions} - predictions_json = os.path.join(self.output_dir, "predictions.json") - with open(predictions_json, "w") as f: - f.write(json.dumps(json_data)) - return pq_compute(self.gt_json, predictions_json, gt_folder=self.gt_folder, pred_folder=self.output_dir) - return None diff --git a/models/cv/object_detection/detr/igie/datasets/transforms.py b/models/cv/object_detection/detr/igie/datasets/transforms.py deleted file mode 100644 index 25ba9362..00000000 --- a/models/cv/object_detection/detr/igie/datasets/transforms.py +++ /dev/null @@ -1,277 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Transforms and data augmentation for both image + bbox. -""" -import random - -import PIL -import torch -import torchvision.transforms as T -import torchvision.transforms.functional as F - -from util.box_ops import box_xyxy_to_cxcywh -from util.misc import interpolate - - -def crop(image, target, region): - cropped_image = F.crop(image, *region) - - target = target.copy() - i, j, h, w = region - - # should we do something wrt the original size? - target["size"] = torch.tensor([h, w]) - - fields = ["labels", "area", "iscrowd"] - - if "boxes" in target: - boxes = target["boxes"] - max_size = torch.as_tensor([w, h], dtype=torch.float32) - cropped_boxes = boxes - torch.as_tensor([j, i, j, i]) - cropped_boxes = torch.min(cropped_boxes.reshape(-1, 2, 2), max_size) - cropped_boxes = cropped_boxes.clamp(min=0) - area = (cropped_boxes[:, 1, :] - cropped_boxes[:, 0, :]).prod(dim=1) - target["boxes"] = cropped_boxes.reshape(-1, 4) - target["area"] = area - fields.append("boxes") - - if "masks" in target: - # FIXME should we update the area here if there are no boxes? 
- target['masks'] = target['masks'][:, i:i + h, j:j + w] - fields.append("masks") - - # remove elements for which the boxes or masks that have zero area - if "boxes" in target or "masks" in target: - # favor boxes selection when defining which elements to keep - # this is compatible with previous implementation - if "boxes" in target: - cropped_boxes = target['boxes'].reshape(-1, 2, 2) - keep = torch.all(cropped_boxes[:, 1, :] > cropped_boxes[:, 0, :], dim=1) - else: - keep = target['masks'].flatten(1).any(1) - - for field in fields: - target[field] = target[field][keep] - - return cropped_image, target - - -def hflip(image, target): - flipped_image = F.hflip(image) - - w, h = image.size - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - boxes = boxes[:, [2, 1, 0, 3]] * torch.as_tensor([-1, 1, -1, 1]) + torch.as_tensor([w, 0, w, 0]) - target["boxes"] = boxes - - if "masks" in target: - target['masks'] = target['masks'].flip(-1) - - return flipped_image, target - - -def resize(image, target, size, max_size=None): - # size can be min_size (scalar) or (w, h) tuple - - def get_size_with_aspect_ratio(image_size, size, max_size=None): - w, h = image_size - if max_size is not None: - min_original_size = float(min((w, h))) - max_original_size = float(max((w, h))) - if max_original_size / min_original_size * size > max_size: - size = int(round(max_size * min_original_size / max_original_size)) - - if (w <= h and w == size) or (h <= w and h == size): - return (h, w) - - if w < h: - ow = size - oh = int(size * h / w) - else: - oh = size - ow = int(size * w / h) - - return (oh, ow) - - def get_size(image_size, size, max_size=None): - if isinstance(size, (list, tuple)): - return size[::-1] - else: - return get_size_with_aspect_ratio(image_size, size, max_size) - - # size = get_size(image.size, size, max_size) - size = (800, 800) - rescaled_image = F.resize(image, size) - - if target is None: - return rescaled_image, None - - ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(rescaled_image.size, image.size)) - ratio_width, ratio_height = ratios - - target = target.copy() - if "boxes" in target: - boxes = target["boxes"] - scaled_boxes = boxes * torch.as_tensor([ratio_width, ratio_height, ratio_width, ratio_height]) - target["boxes"] = scaled_boxes - - if "area" in target: - area = target["area"] - scaled_area = area * (ratio_width * ratio_height) - target["area"] = scaled_area - - h, w = size - target["size"] = torch.tensor([h, w]) - - if "masks" in target: - target['masks'] = interpolate( - target['masks'][:, None].float(), size, mode="nearest")[:, 0] > 0.5 - - return rescaled_image, target - - -def pad(image, target, padding): - # assumes that we only pad on the bottom right corners - padded_image = F.pad(image, (0, 0, padding[0], padding[1])) - if target is None: - return padded_image, None - target = target.copy() - # should we do something wrt the original size? 
- target["size"] = torch.tensor(padded_image.size[::-1]) - if "masks" in target: - target['masks'] = torch.nn.functional.pad(target['masks'], (0, padding[0], 0, padding[1])) - return padded_image, target - - -class RandomCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - region = T.RandomCrop.get_params(img, self.size) - return crop(img, target, region) - - -class RandomSizeCrop(object): - def __init__(self, min_size: int, max_size: int): - self.min_size = min_size - self.max_size = max_size - - def __call__(self, img: PIL.Image.Image, target: dict): - w = random.randint(self.min_size, min(img.width, self.max_size)) - h = random.randint(self.min_size, min(img.height, self.max_size)) - region = T.RandomCrop.get_params(img, [h, w]) - return crop(img, target, region) - - -class CenterCrop(object): - def __init__(self, size): - self.size = size - - def __call__(self, img, target): - image_width, image_height = img.size - crop_height, crop_width = self.size - crop_top = int(round((image_height - crop_height) / 2.)) - crop_left = int(round((image_width - crop_width) / 2.)) - return crop(img, target, (crop_top, crop_left, crop_height, crop_width)) - - -class RandomHorizontalFlip(object): - def __init__(self, p=0.5): - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return hflip(img, target) - return img, target - - -class RandomResize(object): - def __init__(self, sizes, max_size=None): - assert isinstance(sizes, (list, tuple)) - self.sizes = sizes - self.max_size = max_size - - def __call__(self, img, target=None): - size = random.choice(self.sizes) - return resize(img, target, size, self.max_size) - - -class RandomPad(object): - def __init__(self, max_pad): - self.max_pad = max_pad - - def __call__(self, img, target): - pad_x = random.randint(0, self.max_pad) - pad_y = random.randint(0, self.max_pad) - return pad(img, target, (pad_x, pad_y)) - - -class RandomSelect(object): - """ - Randomly selects between transforms1 and transforms2, - with probability p for transforms1 and (1 - p) for transforms2 - """ - def __init__(self, transforms1, transforms2, p=0.5): - self.transforms1 = transforms1 - self.transforms2 = transforms2 - self.p = p - - def __call__(self, img, target): - if random.random() < self.p: - return self.transforms1(img, target) - return self.transforms2(img, target) - - -class ToTensor(object): - def __call__(self, img, target): - return F.to_tensor(img), target - - -class RandomErasing(object): - - def __init__(self, *args, **kwargs): - self.eraser = T.RandomErasing(*args, **kwargs) - - def __call__(self, img, target): - return self.eraser(img), target - - -class Normalize(object): - def __init__(self, mean, std): - self.mean = mean - self.std = std - - def __call__(self, image, target=None): - image = F.normalize(image, mean=self.mean, std=self.std) - if target is None: - return image, None - target = target.copy() - h, w = image.shape[-2:] - if "boxes" in target: - boxes = target["boxes"] - boxes = box_xyxy_to_cxcywh(boxes) - boxes = boxes / torch.tensor([w, h, w, h], dtype=torch.float32) - target["boxes"] = boxes - return image, target - - -class Compose(object): - def __init__(self, transforms): - self.transforms = transforms - - def __call__(self, image, target): - for t in self.transforms: - image, target = t(image, target) - return image, target - - def __repr__(self): - format_string = self.__class__.__name__ + "(" - for t in self.transforms: - format_string += "\n" - format_string += " 
{0}".format(t) - format_string += "\n)" - return format_string diff --git a/models/cv/object_detection/detr/igie/engine.py b/models/cv/object_detection/detr/igie/engine.py deleted file mode 100644 index ac5ea6ff..00000000 --- a/models/cv/object_detection/detr/igie/engine.py +++ /dev/null @@ -1,151 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Train and eval functions used in main.py -""" -import math -import os -import sys -from typing import Iterable - -import torch - -import util.misc as utils -from datasets.coco_eval import CocoEvaluator -from datasets.panoptic_eval import PanopticEvaluator - - -def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, - data_loader: Iterable, optimizer: torch.optim.Optimizer, - device: torch.device, epoch: int, max_norm: float = 0): - model.train() - criterion.train() - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Epoch: [{}]'.format(epoch) - print_freq = 10 - - for samples, targets in metric_logger.log_every(data_loader, print_freq, header): - samples = samples.to(device) - targets = [{k: v.to(device) for k, v in t.items()} for t in targets] - - outputs = model(samples) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) - - # reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) - - loss_value = losses_reduced_scaled.item() - - if not math.isfinite(loss_value): - print("Loss is {}, stopping training".format(loss_value)) - print(loss_dict_reduced) - sys.exit(1) - - optimizer.zero_grad() - losses.backward() - if max_norm > 0: - torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) - optimizer.step() - - metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - return {k: meter.global_avg for k, meter in metric_logger.meters.items()} - - -@torch.no_grad() -def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir): - model.eval() - criterion.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Test:' - - iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) - coco_evaluator = CocoEvaluator(base_ds, iou_types) - # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] - - panoptic_evaluator = None - if 'panoptic' in postprocessors.keys(): - panoptic_evaluator = PanopticEvaluator( - data_loader.dataset.ann_file, - data_loader.dataset.ann_folder, - output_dir=os.path.join(output_dir, "panoptic_eval"), - ) - - for samples, targets in metric_logger.log_every(data_loader, 10, header): - samples = 
samples.to(device) - targets = [{k: v.to(device) for k, v in t.items()} for t in targets] - - outputs = model(samples) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - - # reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), - **loss_dict_reduced_scaled, - **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - - orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) - results = postprocessors['bbox'](outputs, orig_target_sizes) - if 'segm' in postprocessors.keys(): - target_sizes = torch.stack([t["size"] for t in targets], dim=0) - results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) - res = {target['image_id'].item(): output for target, output in zip(targets, results)} - if coco_evaluator is not None: - coco_evaluator.update(res) - - if panoptic_evaluator is not None: - res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) - for i, target in enumerate(targets): - image_id = target["image_id"].item() - file_name = f"{image_id:012d}.png" - res_pano[i]["image_id"] = image_id - res_pano[i]["file_name"] = file_name - - panoptic_evaluator.update(res_pano) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - if coco_evaluator is not None: - coco_evaluator.synchronize_between_processes() - if panoptic_evaluator is not None: - panoptic_evaluator.synchronize_between_processes() - - # accumulate predictions from all images - if coco_evaluator is not None: - coco_evaluator.accumulate() - coco_evaluator.summarize() - panoptic_res = None - if panoptic_evaluator is not None: - panoptic_res = panoptic_evaluator.summarize() - stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} - if coco_evaluator is not None: - if 'bbox' in postprocessors.keys(): - stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() - if 'segm' in postprocessors.keys(): - stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() - if panoptic_res is not None: - stats['PQ_all'] = panoptic_res["All"] - stats['PQ_th'] = panoptic_res["Things"] - stats['PQ_st'] = panoptic_res["Stuff"] - return stats, coco_evaluator diff --git a/models/cv/object_detection/detr/igie/hubconf.py b/models/cv/object_detection/detr/igie/hubconf.py deleted file mode 100644 index 328c3306..00000000 --- a/models/cv/object_detection/detr/igie/hubconf.py +++ /dev/null @@ -1,168 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import torch - -from models.backbone import Backbone, Joiner -from models.detr import DETR, PostProcess -from models.position_encoding import PositionEmbeddingSine -from models.segmentation import DETRsegm, PostProcessPanoptic -from models.transformer import Transformer - -dependencies = ["torch", "torchvision"] - - -def _make_detr(backbone_name: str, dilation=False, num_classes=91, mask=False): - hidden_dim = 256 - backbone = Backbone(backbone_name, train_backbone=True, return_interm_layers=mask, dilation=dilation) - pos_enc = PositionEmbeddingSine(hidden_dim // 2, normalize=True) - backbone_with_pos_enc = Joiner(backbone, pos_enc) - backbone_with_pos_enc.num_channels = backbone.num_channels - transformer = Transformer(d_model=hidden_dim, return_intermediate_dec=True) - detr = DETR(backbone_with_pos_enc, transformer, num_classes=num_classes, num_queries=100) - if mask: - return DETRsegm(detr) - return detr - - -def detr_resnet50(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR R50 with 6 encoder and 6 decoder layers. - - Achieves 42/62.4 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet50", dilation=False, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet50_dc5(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR-DC5 R50 with 6 encoder and 6 decoder layers. - - The last block of ResNet-50 has dilation to increase - output resolution. - Achieves 43.3/63.1 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet50", dilation=True, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-f0fb7ef5.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet101(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR-DC5 R101 with 6 encoder and 6 decoder layers. - - Achieves 43.5/63.8 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet101", dilation=False, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r101-2c7b67e5.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet101_dc5(pretrained=False, num_classes=91, return_postprocessor=False): - """ - DETR-DC5 R101 with 6 encoder and 6 decoder layers. - - The last block of ResNet-101 has dilation to increase - output resolution. - Achieves 44.9/64.7 AP/AP50 on COCO val5k. - """ - model = _make_detr("resnet101", dilation=True, num_classes=num_classes) - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r101-dc5-a2e86def.pth", map_location="cpu", check_hash=True - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcess() - return model - - -def detr_resnet50_panoptic( - pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False -): - """ - DETR R50 with 6 encoder and 6 decoder layers. 
- Achieves 43.4 PQ on COCO val5k. - - threshold is the minimum confidence required for keeping segments in the prediction - """ - model = _make_detr("resnet50", dilation=False, num_classes=num_classes, mask=True) - is_thing_map = {i: i <= 90 for i in range(250)} - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-panoptic-00ce5173.pth", - map_location="cpu", - check_hash=True, - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcessPanoptic(is_thing_map, threshold=threshold) - return model - - -def detr_resnet50_dc5_panoptic( - pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False -): - """ - DETR-DC5 R50 with 6 encoder and 6 decoder layers. - - The last block of ResNet-50 has dilation to increase - output resolution. - Achieves 44.6 on COCO val5k. - - threshold is the minimum confidence required for keeping segments in the prediction - """ - model = _make_detr("resnet50", dilation=True, num_classes=num_classes, mask=True) - is_thing_map = {i: i <= 90 for i in range(250)} - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r50-dc5-panoptic-da08f1b1.pth", - map_location="cpu", - check_hash=True, - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcessPanoptic(is_thing_map, threshold=threshold) - return model - - -def detr_resnet101_panoptic( - pretrained=False, num_classes=250, threshold=0.85, return_postprocessor=False -): - """ - DETR-DC5 R101 with 6 encoder and 6 decoder layers. - - Achieves 45.1 PQ on COCO val5k. - - threshold is the minimum confidence required for keeping segments in the prediction - """ - model = _make_detr("resnet101", dilation=False, num_classes=num_classes, mask=True) - is_thing_map = {i: i <= 90 for i in range(250)} - if pretrained: - checkpoint = torch.hub.load_state_dict_from_url( - url="https://dl.fbaipublicfiles.com/detr/detr-r101-panoptic-40021d53.pth", - map_location="cpu", - check_hash=True, - ) - model.load_state_dict(checkpoint["model"]) - if return_postprocessor: - return model, PostProcessPanoptic(is_thing_map, threshold=threshold) - return model diff --git a/models/cv/object_detection/detr/igie/main.py b/models/cv/object_detection/detr/igie/main.py deleted file mode 100644 index e5f9eff8..00000000 --- a/models/cv/object_detection/detr/igie/main.py +++ /dev/null @@ -1,248 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -import argparse -import datetime -import json -import random -import time -from pathlib import Path - -import numpy as np -import torch -from torch.utils.data import DataLoader, DistributedSampler - -import datasets -import util.misc as utils -from datasets import build_dataset, get_coco_api_from_dataset -from engine import evaluate, train_one_epoch -from models import build_model - - -def get_args_parser(): - parser = argparse.ArgumentParser('Set transformer detector', add_help=False) - parser.add_argument('--lr', default=1e-4, type=float) - parser.add_argument('--lr_backbone', default=1e-5, type=float) - parser.add_argument('--batch_size', default=2, type=int) - parser.add_argument('--weight_decay', default=1e-4, type=float) - parser.add_argument('--epochs', default=300, type=int) - parser.add_argument('--lr_drop', default=200, type=int) - parser.add_argument('--clip_max_norm', default=0.1, type=float, - help='gradient clipping max norm') - - # Model parameters - parser.add_argument('--frozen_weights', type=str, default=None, - help="Path to the pretrained model. If set, only the mask head will be trained") - # * Backbone - parser.add_argument('--backbone', default='resnet50', type=str, - help="Name of the convolutional backbone to use") - parser.add_argument('--dilation', action='store_true', - help="If true, we replace stride with dilation in the last convolutional block (DC5)") - parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'), - help="Type of positional embedding to use on top of the image features") - - # * Transformer - parser.add_argument('--enc_layers', default=6, type=int, - help="Number of encoding layers in the transformer") - parser.add_argument('--dec_layers', default=6, type=int, - help="Number of decoding layers in the transformer") - parser.add_argument('--dim_feedforward', default=2048, type=int, - help="Intermediate size of the feedforward layers in the transformer blocks") - parser.add_argument('--hidden_dim', default=256, type=int, - help="Size of the embeddings (dimension of the transformer)") - parser.add_argument('--dropout', default=0.1, type=float, - help="Dropout applied in the transformer") - parser.add_argument('--nheads', default=8, type=int, - help="Number of attention heads inside the transformer's attentions") - parser.add_argument('--num_queries', default=100, type=int, - help="Number of query slots") - parser.add_argument('--pre_norm', action='store_true') - - # * Segmentation - parser.add_argument('--masks', action='store_true', - help="Train segmentation head if the flag is provided") - - # Loss - parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false', - help="Disables auxiliary decoding losses (loss at each layer)") - # * Matcher - parser.add_argument('--set_cost_class', default=1, type=float, - help="Class coefficient in the matching cost") - parser.add_argument('--set_cost_bbox', default=5, type=float, - help="L1 box coefficient in the matching cost") - parser.add_argument('--set_cost_giou', default=2, type=float, - help="giou box coefficient in the matching cost") - # * Loss coefficients - parser.add_argument('--mask_loss_coef', default=1, type=float) - parser.add_argument('--dice_loss_coef', default=1, type=float) - parser.add_argument('--bbox_loss_coef', default=5, type=float) - parser.add_argument('--giou_loss_coef', default=2, type=float) - parser.add_argument('--eos_coef', default=0.1, type=float, - help="Relative classification weight of the no-object 
class") - - # dataset parameters - parser.add_argument('--dataset_file', default='coco') - parser.add_argument('--coco_path', type=str) - parser.add_argument('--coco_panoptic_path', type=str) - parser.add_argument('--remove_difficult', action='store_true') - - parser.add_argument('--output_dir', default='', - help='path where to save, empty for no saving') - parser.add_argument('--device', default='cuda', - help='device to use for training / testing') - parser.add_argument('--seed', default=42, type=int) - parser.add_argument('--resume', default='', help='resume from checkpoint') - parser.add_argument('--start_epoch', default=0, type=int, metavar='N', - help='start epoch') - parser.add_argument('--eval', action='store_true') - parser.add_argument('--num_workers', default=2, type=int) - - # distributed training parameters - parser.add_argument('--world_size', default=1, type=int, - help='number of distributed processes') - parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training') - return parser - - -def main(args): - utils.init_distributed_mode(args) - print("git:\n {}\n".format(utils.get_sha())) - - if args.frozen_weights is not None: - assert args.masks, "Frozen training is meant for segmentation only" - print(args) - - device = torch.device(args.device) - - # fix the seed for reproducibility - seed = args.seed + utils.get_rank() - torch.manual_seed(seed) - np.random.seed(seed) - random.seed(seed) - - model, criterion, postprocessors = build_model(args) - model.to(device) - - model_without_ddp = model - if args.distributed: - model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) - model_without_ddp = model.module - n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad) - print('number of params:', n_parameters) - - param_dicts = [ - {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]}, - { - "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad], - "lr": args.lr_backbone, - }, - ] - optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, - weight_decay=args.weight_decay) - lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop) - - dataset_train = build_dataset(image_set='train', args=args) - dataset_val = build_dataset(image_set='val', args=args) - - if args.distributed: - sampler_train = DistributedSampler(dataset_train) - sampler_val = DistributedSampler(dataset_val, shuffle=False) - else: - sampler_train = torch.utils.data.RandomSampler(dataset_train) - sampler_val = torch.utils.data.SequentialSampler(dataset_val) - - batch_sampler_train = torch.utils.data.BatchSampler( - sampler_train, args.batch_size, drop_last=True) - - data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, - collate_fn=utils.collate_fn, num_workers=args.num_workers) - data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val, - drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers) - - if args.dataset_file == "coco_panoptic": - # We also evaluate AP during panoptic training, on original coco DS - coco_val = datasets.coco.build("val", args) - base_ds = get_coco_api_from_dataset(coco_val) - else: - base_ds = get_coco_api_from_dataset(dataset_val) - - if args.frozen_weights is not None: - checkpoint = torch.load(args.frozen_weights, map_location='cpu') - model_without_ddp.detr.load_state_dict(checkpoint['model']) - - output_dir = 
Path(args.output_dir) - if args.resume: - if args.resume.startswith('https'): - checkpoint = torch.hub.load_state_dict_from_url( - args.resume, map_location='cpu', check_hash=True) - else: - checkpoint = torch.load(args.resume, map_location='cpu') - model_without_ddp.load_state_dict(checkpoint['model']) - if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint: - optimizer.load_state_dict(checkpoint['optimizer']) - lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) - args.start_epoch = checkpoint['epoch'] + 1 - - if args.eval: - test_stats, coco_evaluator = evaluate(model, criterion, postprocessors, - data_loader_val, base_ds, device, args.output_dir) - if args.output_dir: - utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth") - return - - print("Start training") - start_time = time.time() - for epoch in range(args.start_epoch, args.epochs): - if args.distributed: - sampler_train.set_epoch(epoch) - train_stats = train_one_epoch( - model, criterion, data_loader_train, optimizer, device, epoch, - args.clip_max_norm) - lr_scheduler.step() - if args.output_dir: - checkpoint_paths = [output_dir / 'checkpoint.pth'] - # extra checkpoint before LR drop and every 100 epochs - if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 100 == 0: - checkpoint_paths.append(output_dir / f'checkpoint{epoch:04}.pth') - for checkpoint_path in checkpoint_paths: - utils.save_on_master({ - 'model': model_without_ddp.state_dict(), - 'optimizer': optimizer.state_dict(), - 'lr_scheduler': lr_scheduler.state_dict(), - 'epoch': epoch, - 'args': args, - }, checkpoint_path) - - test_stats, coco_evaluator = evaluate( - model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir - ) - - log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, - **{f'test_{k}': v for k, v in test_stats.items()}, - 'epoch': epoch, - 'n_parameters': n_parameters} - - if args.output_dir and utils.is_main_process(): - with (output_dir / "log.txt").open("a") as f: - f.write(json.dumps(log_stats) + "\n") - - # for evaluation logs - if coco_evaluator is not None: - (output_dir / 'eval').mkdir(exist_ok=True) - if "bbox" in coco_evaluator.coco_eval: - filenames = ['latest.pth'] - if epoch % 50 == 0: - filenames.append(f'{epoch:03}.pth') - for name in filenames: - torch.save(coco_evaluator.coco_eval["bbox"].eval, - output_dir / "eval" / name) - - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('Training time {}'.format(total_time_str)) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()]) - args = parser.parse_args() - if args.output_dir: - Path(args.output_dir).mkdir(parents=True, exist_ok=True) - main(args) diff --git a/models/cv/object_detection/detr/igie/models/__init__.py b/models/cv/object_detection/detr/igie/models/__init__.py deleted file mode 100644 index a3f26531..00000000 --- a/models/cv/object_detection/detr/igie/models/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -from .detr import build - - -def build_model(args): - return build(args) diff --git a/models/cv/object_detection/detr/igie/models/backbone.py b/models/cv/object_detection/detr/igie/models/backbone.py deleted file mode 100644 index 96680932..00000000 --- a/models/cv/object_detection/detr/igie/models/backbone.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Backbone modules. -""" -from collections import OrderedDict - -import torch -import torch.nn.functional as F -import torchvision -from torch import nn -from torchvision.models._utils import IntermediateLayerGetter -from typing import Dict, List - -from util.misc import NestedTensor, is_main_process - -from .position_encoding import build_position_encoding - - -class FrozenBatchNorm2d(torch.nn.Module): - """ - BatchNorm2d where the batch statistics and the affine parameters are fixed. - - Copy-paste from torchvision.misc.ops with added eps before rqsrt, - without which any other models than torchvision.models.resnet[18,34,50,101] - produce nans. - """ - - def __init__(self, n): - super(FrozenBatchNorm2d, self).__init__() - self.register_buffer("weight", torch.ones(n)) - self.register_buffer("bias", torch.zeros(n)) - self.register_buffer("running_mean", torch.zeros(n)) - self.register_buffer("running_var", torch.ones(n)) - - def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs): - num_batches_tracked_key = prefix + 'num_batches_tracked' - if num_batches_tracked_key in state_dict: - del state_dict[num_batches_tracked_key] - - super(FrozenBatchNorm2d, self)._load_from_state_dict( - state_dict, prefix, local_metadata, strict, - missing_keys, unexpected_keys, error_msgs) - - def forward(self, x): - # move reshapes to the beginning - # to make it fuser-friendly - w = self.weight.reshape(1, -1, 1, 1) - b = self.bias.reshape(1, -1, 1, 1) - rv = self.running_var.reshape(1, -1, 1, 1) - rm = self.running_mean.reshape(1, -1, 1, 1) - eps = 1e-5 - scale = w * (rv + eps).rsqrt() - bias = b - rm * scale - return x * scale + bias - - -class BackboneBase(nn.Module): - - def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool): - super().__init__() - for name, parameter in backbone.named_parameters(): - if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name: - parameter.requires_grad_(False) - if return_interm_layers: - return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"} - else: - return_layers = {'layer4': "0"} - self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) - self.num_channels = num_channels - - def forward(self, tensor_list: NestedTensor): - xs = self.body(tensor_list.tensors) - out: Dict[str, NestedTensor] = {} - for name, x in xs.items(): - m = tensor_list.mask - assert m is not None - mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0] - out[name] = NestedTensor(x, mask) - return out - - -class Backbone(BackboneBase): - """ResNet backbone with frozen BatchNorm.""" - def __init__(self, name: str, - train_backbone: bool, - return_interm_layers: bool, - dilation: bool): - backbone = getattr(torchvision.models, name)( - replace_stride_with_dilation=[False, False, dilation], - pretrained=is_main_process(), norm_layer=FrozenBatchNorm2d) - num_channels = 512 if name in ('resnet18', 'resnet34') else 2048 - super().__init__(backbone, 
train_backbone, num_channels, return_interm_layers) - - -class Joiner(nn.Sequential): - def __init__(self, backbone, position_embedding): - super().__init__(backbone, position_embedding) - - def forward(self, tensor_list: NestedTensor): - xs = self[0](tensor_list) - out: List[NestedTensor] = [] - pos = [] - for name, x in xs.items(): - out.append(x) - # position encoding - pos.append(self[1](x).to(x.tensors.dtype)) - - return out, pos - - -def build_backbone(args): - position_embedding = build_position_encoding(args) - train_backbone = args.lr_backbone > 0 - return_interm_layers = args.masks - backbone = Backbone(args.backbone, train_backbone, return_interm_layers, args.dilation) - model = Joiner(backbone, position_embedding) - model.num_channels = backbone.num_channels - return model diff --git a/models/cv/object_detection/detr/igie/models/detr.py b/models/cv/object_detection/detr/igie/models/detr.py deleted file mode 100644 index 23c2376d..00000000 --- a/models/cv/object_detection/detr/igie/models/detr.py +++ /dev/null @@ -1,359 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR model and criterion classes. -""" -import torch -import torch.nn.functional as F -from torch import nn - -from util import box_ops -from util.misc import (NestedTensor, nested_tensor_from_tensor_list, - accuracy, get_world_size, interpolate, - is_dist_avail_and_initialized) - -from .backbone import build_backbone -from .matcher import build_matcher -from .segmentation import (DETRsegm, PostProcessPanoptic, PostProcessSegm, - dice_loss, sigmoid_focal_loss) -from .transformer import build_transformer - - -class DETR(nn.Module): - """ This is the DETR module that performs object detection """ - def __init__(self, backbone, transformer, num_classes, num_queries, aux_loss=False): - """ Initializes the model. - Parameters: - backbone: torch module of the backbone to be used. See backbone.py - transformer: torch module of the transformer architecture. See transformer.py - num_classes: number of object classes - num_queries: number of object queries, ie detection slot. This is the maximal number of objects - DETR can detect in a single image. For COCO, we recommend 100 queries. - aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used. - """ - super().__init__() - self.num_queries = num_queries - self.transformer = transformer - hidden_dim = transformer.d_model - self.class_embed = nn.Linear(hidden_dim, num_classes + 1) - self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3) - self.query_embed = nn.Embedding(num_queries, hidden_dim) - self.input_proj = nn.Conv2d(backbone.num_channels, hidden_dim, kernel_size=1) - self.backbone = backbone - self.aux_loss = aux_loss - - def forward(self, samples: NestedTensor): - """ The forward expects a NestedTensor, which consists of: - - samples.tensor: batched images, of shape [batch_size x 3 x H x W] - - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels - - It returns a dict with the following elements: - - "pred_logits": the classification logits (including no-object) for all queries. - Shape= [batch_size x num_queries x (num_classes + 1)] - - "pred_boxes": The normalized boxes coordinates for all queries, represented as - (center_x, center_y, height, width). These values are normalized in [0, 1], - relative to the size of each individual image (disregarding possible padding). - See PostProcess for information on how to retrieve the unnormalized bounding box. 
- - "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of - dictionnaries containing the two above keys for each decoder layer. - """ - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, pos = self.backbone(samples) - - src, mask = features[-1].decompose() - assert mask is not None - hs = self.transformer(self.input_proj(src), mask, self.query_embed.weight, pos[-1])[0] - - outputs_class = self.class_embed(hs) - outputs_coord = self.bbox_embed(hs).sigmoid() - out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1]} - if self.aux_loss: - out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord) - return out - - @torch.jit.unused - def _set_aux_loss(self, outputs_class, outputs_coord): - # this is a workaround to make torchscript happy, as torchscript - # doesn't support dictionary with non-homogeneous values, such - # as a dict having both a Tensor and a list. - return [{'pred_logits': a, 'pred_boxes': b} - for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] - - -class SetCriterion(nn.Module): - """ This class computes the loss for DETR. - The process happens in two steps: - 1) we compute hungarian assignment between ground truth boxes and the outputs of the model - 2) we supervise each pair of matched ground-truth / prediction (supervise class and box) - """ - def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses): - """ Create the criterion. - Parameters: - num_classes: number of object categories, omitting the special no-object category - matcher: module able to compute a matching between targets and proposals - weight_dict: dict containing as key the names of the losses and as values their relative weight. - eos_coef: relative classification weight applied to the no-object category - losses: list of all the losses to be applied. See get_loss for list of available losses. - """ - super().__init__() - self.num_classes = num_classes - self.matcher = matcher - self.weight_dict = weight_dict - self.eos_coef = eos_coef - self.losses = losses - empty_weight = torch.ones(self.num_classes + 1) - empty_weight[-1] = self.eos_coef - self.register_buffer('empty_weight', empty_weight) - - def loss_labels(self, outputs, targets, indices, num_boxes, log=True): - """Classification loss (NLL) - targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes] - """ - assert 'pred_logits' in outputs - src_logits = outputs['pred_logits'] - - idx = self._get_src_permutation_idx(indices) - target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)]) - target_classes = torch.full(src_logits.shape[:2], self.num_classes, - dtype=torch.int64, device=src_logits.device) - target_classes[idx] = target_classes_o - - loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight) - losses = {'loss_ce': loss_ce} - - if log: - # TODO this should probably be a separate loss, not hacked in this one here - losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0] - return losses - - @torch.no_grad() - def loss_cardinality(self, outputs, targets, indices, num_boxes): - """ Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes - This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients - """ - pred_logits = outputs['pred_logits'] - device = pred_logits.device - tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device) - # Count the number of predictions that are NOT "no-object" (which is the last class) - card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1) - card_err = F.l1_loss(card_pred.float(), tgt_lengths.float()) - losses = {'cardinality_error': card_err} - return losses - - def loss_boxes(self, outputs, targets, indices, num_boxes): - """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss - targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4] - The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. - """ - assert 'pred_boxes' in outputs - idx = self._get_src_permutation_idx(indices) - src_boxes = outputs['pred_boxes'][idx] - target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0) - - loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none') - - losses = {} - losses['loss_bbox'] = loss_bbox.sum() / num_boxes - - loss_giou = 1 - torch.diag(box_ops.generalized_box_iou( - box_ops.box_cxcywh_to_xyxy(src_boxes), - box_ops.box_cxcywh_to_xyxy(target_boxes))) - losses['loss_giou'] = loss_giou.sum() / num_boxes - return losses - - def loss_masks(self, outputs, targets, indices, num_boxes): - """Compute the losses related to the masks: the focal loss and the dice loss. - targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w] - """ - assert "pred_masks" in outputs - - src_idx = self._get_src_permutation_idx(indices) - tgt_idx = self._get_tgt_permutation_idx(indices) - src_masks = outputs["pred_masks"] - src_masks = src_masks[src_idx] - masks = [t["masks"] for t in targets] - # TODO use valid to mask invalid areas due to padding in loss - target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() - target_masks = target_masks.to(src_masks) - target_masks = target_masks[tgt_idx] - - # upsample predictions to the target size - src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:], - mode="bilinear", align_corners=False) - src_masks = src_masks[:, 0].flatten(1) - - target_masks = target_masks.flatten(1) - target_masks = target_masks.view(src_masks.shape) - losses = { - "loss_mask": sigmoid_focal_loss(src_masks, target_masks, num_boxes), - "loss_dice": dice_loss(src_masks, target_masks, num_boxes), - } - return losses - - def _get_src_permutation_idx(self, indices): - # permute predictions following indices - batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)]) - src_idx = torch.cat([src for (src, _) in indices]) - return batch_idx, src_idx - - def _get_tgt_permutation_idx(self, indices): - # permute targets following indices - batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)]) - tgt_idx = torch.cat([tgt for (_, tgt) in indices]) - return batch_idx, tgt_idx - - def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs): - loss_map = { - 'labels': self.loss_labels, - 'cardinality': self.loss_cardinality, - 'boxes': self.loss_boxes, - 'masks': self.loss_masks - } - assert loss in loss_map, f'do you really want to compute {loss} loss?' - return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs) - - def forward(self, outputs, targets): - """ This performs the loss computation. 
- Parameters: - outputs: dict of tensors, see the output specification of the model for the format - targets: list of dicts, such that len(targets) == batch_size. - The expected keys in each dict depends on the losses applied, see each loss' doc - """ - outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'} - - # Retrieve the matching between the outputs of the last layer and the targets - indices = self.matcher(outputs_without_aux, targets) - - # Compute the average number of target boxes accross all nodes, for normalization purposes - num_boxes = sum(len(t["labels"]) for t in targets) - num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) - if is_dist_avail_and_initialized(): - torch.distributed.all_reduce(num_boxes) - num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item() - - # Compute all the requested losses - losses = {} - for loss in self.losses: - losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) - - # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. - if 'aux_outputs' in outputs: - for i, aux_outputs in enumerate(outputs['aux_outputs']): - indices = self.matcher(aux_outputs, targets) - for loss in self.losses: - if loss == 'masks': - # Intermediate masks losses are too costly to compute, we ignore them. - continue - kwargs = {} - if loss == 'labels': - # Logging is enabled only for the last layer - kwargs = {'log': False} - l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs) - l_dict = {k + f'_{i}': v for k, v in l_dict.items()} - losses.update(l_dict) - - return losses - - -class PostProcess(nn.Module): - """ This module converts the model's output into the format expected by the coco api""" - @torch.no_grad() - def forward(self, outputs, target_sizes): - """ Perform the computation - Parameters: - outputs: raw outputs of the model - target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch - For evaluation, this must be the original image size (before any data augmentation) - For visualization, this should be the image size after data augment, but before padding - """ - out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes'] - - assert len(out_logits) == len(target_sizes) - assert target_sizes.shape[1] == 2 - - prob = F.softmax(out_logits, -1) - scores, labels = prob[..., :-1].max(-1) - - # convert to [x0, y0, x1, y1] format - boxes = box_ops.box_cxcywh_to_xyxy(out_bbox) - # and from relative [0, 1] to absolute [0, height] coordinates - img_h, img_w = target_sizes.unbind(1) - scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) - boxes = boxes * scale_fct[:, None, :] - - results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)] - - return results - - -class MLP(nn.Module): - """ Very simple multi-layer perceptron (also called FFN)""" - - def __init__(self, input_dim, hidden_dim, output_dim, num_layers): - super().__init__() - self.num_layers = num_layers - h = [hidden_dim] * (num_layers - 1) - self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) - - def forward(self, x): - for i, layer in enumerate(self.layers): - x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x) - return x - - -def build(args): - # the `num_classes` naming here is somewhat misleading. 
- # it indeed corresponds to `max_obj_id + 1`, where max_obj_id - # is the maximum id for a class in your dataset. For example, - # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91. - # As another example, for a dataset that has a single class with id 1, - # you should pass `num_classes` to be 2 (max_obj_id + 1). - # For more details on this, check the following discussion - # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223 - num_classes = 20 if args.dataset_file != 'coco' else 91 - if args.dataset_file == "coco_panoptic": - # for panoptic, we just add a num_classes that is large enough to hold - # max_obj_id + 1, but the exact value doesn't really matter - num_classes = 250 - device = torch.device(args.device) - - backbone = build_backbone(args) - - transformer = build_transformer(args) - - model = DETR( - backbone, - transformer, - num_classes=num_classes, - num_queries=args.num_queries, - aux_loss=args.aux_loss, - ) - if args.masks: - model = DETRsegm(model, freeze_detr=(args.frozen_weights is not None)) - matcher = build_matcher(args) - weight_dict = {'loss_ce': 1, 'loss_bbox': args.bbox_loss_coef} - weight_dict['loss_giou'] = args.giou_loss_coef - if args.masks: - weight_dict["loss_mask"] = args.mask_loss_coef - weight_dict["loss_dice"] = args.dice_loss_coef - # TODO this is a hack - if args.aux_loss: - aux_weight_dict = {} - for i in range(args.dec_layers - 1): - aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()}) - weight_dict.update(aux_weight_dict) - - losses = ['labels', 'boxes', 'cardinality'] - if args.masks: - losses += ["masks"] - criterion = SetCriterion(num_classes, matcher=matcher, weight_dict=weight_dict, - eos_coef=args.eos_coef, losses=losses) - criterion.to(device) - postprocessors = {'bbox': PostProcess()} - if args.masks: - postprocessors['segm'] = PostProcessSegm() - if args.dataset_file == "coco_panoptic": - is_thing_map = {i: i <= 90 for i in range(201)} - postprocessors["panoptic"] = PostProcessPanoptic(is_thing_map, threshold=0.85) - - return model, criterion, postprocessors diff --git a/models/cv/object_detection/detr/igie/models/matcher.py b/models/cv/object_detection/detr/igie/models/matcher.py deleted file mode 100644 index 0c291473..00000000 --- a/models/cv/object_detection/detr/igie/models/matcher.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Modules to compute the matching cost and solve the corresponding LSAP. -""" -import torch -from scipy.optimize import linear_sum_assignment -from torch import nn - -from util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou - - -class HungarianMatcher(nn.Module): - """This class computes an assignment between the targets and the predictions of the network - - For efficiency reasons, the targets don't include the no_object. Because of this, in general, - there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, - while the others are un-matched (and thus treated as non-objects). 
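Editorial aside (not part of the patch hunks): the 1-to-1 assignment described in the docstring above is solved with `scipy.optimize.linear_sum_assignment`, as the matcher's forward pass below does. A toy sketch with a random stand-in for the weighted class/L1/GIoU cost matrix; the sizes and cost values here are invented purely for illustration:

```python
import torch
from scipy.optimize import linear_sum_assignment

num_queries, num_targets = 5, 3               # more predictions than targets, as noted above
cost = torch.rand(num_queries, num_targets)   # stand-in for the combined matching cost

pred_idx, tgt_idx = linear_sum_assignment(cost.numpy())
# len(pred_idx) == len(tgt_idx) == min(num_queries, num_targets) == 3;
# the two unmatched queries are later supervised as "no-object" by the criterion.
print(list(zip(pred_idx.tolist(), tgt_idx.tolist())))
```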
- """ - - def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): - """Creates the matcher - - Params: - cost_class: This is the relative weight of the classification error in the matching cost - cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost - cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost - """ - super().__init__() - self.cost_class = cost_class - self.cost_bbox = cost_bbox - self.cost_giou = cost_giou - assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs cant be 0" - - @torch.no_grad() - def forward(self, outputs, targets): - """ Performs the matching - - Params: - outputs: This is a dict that contains at least these entries: - "pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits - "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates - - targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing: - "labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth - objects in the target) containing the class labels - "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates - - Returns: - A list of size batch_size, containing tuples of (index_i, index_j) where: - - index_i is the indices of the selected predictions (in order) - - index_j is the indices of the corresponding selected targets (in order) - For each batch element, it holds: - len(index_i) = len(index_j) = min(num_queries, num_target_boxes) - """ - bs, num_queries = outputs["pred_logits"].shape[:2] - - # We flatten to compute the cost matrices in a batch - out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] - out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] - - # Also concat the target labels and boxes - tgt_ids = torch.cat([v["labels"] for v in targets]) - tgt_bbox = torch.cat([v["boxes"] for v in targets]) - - # Compute the classification cost. Contrary to the loss, we don't use the NLL, - # but approximate it in 1 - proba[target class]. - # The 1 is a constant that doesn't change the matching, it can be ommitted. - cost_class = -out_prob[:, tgt_ids] - - # Compute the L1 cost between boxes - cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1) - - # Compute the giou cost betwen boxes - cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)) - - # Final cost matrix - C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou - C = C.view(bs, num_queries, -1).cpu() - - sizes = [len(v["boxes"]) for v in targets] - indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))] - return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] - - -def build_matcher(args): - return HungarianMatcher(cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou) diff --git a/models/cv/object_detection/detr/igie/models/position_encoding.py b/models/cv/object_detection/detr/igie/models/position_encoding.py deleted file mode 100644 index 73ae39ed..00000000 --- a/models/cv/object_detection/detr/igie/models/position_encoding.py +++ /dev/null @@ -1,89 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -""" -Various positional encodings for the transformer. -""" -import math -import torch -from torch import nn - -from util.misc import NestedTensor - - -class PositionEmbeddingSine(nn.Module): - """ - This is a more standard version of the position embedding, very similar to the one - used by the Attention is all you need paper, generalized to work on images. - """ - def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): - super().__init__() - self.num_pos_feats = num_pos_feats - self.temperature = temperature - self.normalize = normalize - if scale is not None and normalize is False: - raise ValueError("normalize should be True if scale is passed") - if scale is None: - scale = 2 * math.pi - self.scale = scale - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - mask = tensor_list.mask - assert mask is not None - not_mask = ~mask - y_embed = not_mask.cumsum(1, dtype=torch.float32) - x_embed = not_mask.cumsum(2, dtype=torch.float32) - if self.normalize: - eps = 1e-6 - y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale - x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale - - dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device) - dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats) - - pos_x = x_embed[:, :, :, None] / dim_t - pos_y = y_embed[:, :, :, None] / dim_t - pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) - pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) - return pos - - -class PositionEmbeddingLearned(nn.Module): - """ - Absolute pos embedding, learned. - """ - def __init__(self, num_pos_feats=256): - super().__init__() - self.row_embed = nn.Embedding(50, num_pos_feats) - self.col_embed = nn.Embedding(50, num_pos_feats) - self.reset_parameters() - - def reset_parameters(self): - nn.init.uniform_(self.row_embed.weight) - nn.init.uniform_(self.col_embed.weight) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - h, w = x.shape[-2:] - i = torch.arange(w, device=x.device) - j = torch.arange(h, device=x.device) - x_emb = self.col_embed(i) - y_emb = self.row_embed(j) - pos = torch.cat([ - x_emb.unsqueeze(0).repeat(h, 1, 1), - y_emb.unsqueeze(1).repeat(1, w, 1), - ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1) - return pos - - -def build_position_encoding(args): - N_steps = args.hidden_dim // 2 - if args.position_embedding in ('v2', 'sine'): - # TODO find a better way of exposing other arguments - position_embedding = PositionEmbeddingSine(N_steps, normalize=True) - elif args.position_embedding in ('v3', 'learned'): - position_embedding = PositionEmbeddingLearned(N_steps) - else: - raise ValueError(f"not supported {args.position_embedding}") - - return position_embedding diff --git a/models/cv/object_detection/detr/igie/models/segmentation.py b/models/cv/object_detection/detr/igie/models/segmentation.py deleted file mode 100644 index 01faa885..00000000 --- a/models/cv/object_detection/detr/igie/models/segmentation.py +++ /dev/null @@ -1,363 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved -""" -This file provides the definition of the convolutional heads used to predict masks, as well as the losses -""" -import io -from collections import defaultdict -from typing import List, Optional - -import torch -import torch.nn as nn -import torch.nn.functional as F -from torch import Tensor -from PIL import Image - -import util.box_ops as box_ops -from util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list - -try: - from panopticapi.utils import id2rgb, rgb2id -except ImportError: - pass - - -class DETRsegm(nn.Module): - def __init__(self, detr, freeze_detr=False): - super().__init__() - self.detr = detr - - if freeze_detr: - for p in self.parameters(): - p.requires_grad_(False) - - hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead - self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0) - self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim) - - def forward(self, samples: NestedTensor): - if isinstance(samples, (list, torch.Tensor)): - samples = nested_tensor_from_tensor_list(samples) - features, pos = self.detr.backbone(samples) - - bs = features[-1].tensors.shape[0] - - src, mask = features[-1].decompose() - assert mask is not None - src_proj = self.detr.input_proj(src) - hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1]) - - outputs_class = self.detr.class_embed(hs) - outputs_coord = self.detr.bbox_embed(hs).sigmoid() - out = {"pred_logits": outputs_class[-1], "pred_boxes": outputs_coord[-1]} - if self.detr.aux_loss: - out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord) - - # FIXME h_boxes takes the last one computed, keep this in mind - bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask) - - seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors]) - outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]) - - out["pred_masks"] = outputs_seg_masks - return out - - -def _expand(tensor, length: int): - return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) - - -class MaskHeadSmallConv(nn.Module): - """ - Simple convolutional head, using group norm. 
- Upsampling is done using a FPN approach - """ - - def __init__(self, dim, fpn_dims, context_dim): - super().__init__() - - inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] - self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1) - self.gn1 = torch.nn.GroupNorm(8, dim) - self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1) - self.gn2 = torch.nn.GroupNorm(8, inter_dims[1]) - self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) - self.gn3 = torch.nn.GroupNorm(8, inter_dims[2]) - self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) - self.gn4 = torch.nn.GroupNorm(8, inter_dims[3]) - self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) - self.gn5 = torch.nn.GroupNorm(8, inter_dims[4]) - self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1) - - self.dim = dim - - self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1) - self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1) - self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1) - - for m in self.modules(): - if isinstance(m, nn.Conv2d): - nn.init.kaiming_uniform_(m.weight, a=1) - nn.init.constant_(m.bias, 0) - - def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): - x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) - - x = self.lay1(x) - x = self.gn1(x) - x = F.relu(x) - x = self.lay2(x) - x = self.gn2(x) - x = F.relu(x) - - cur_fpn = self.adapter1(fpns[0]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay3(x) - x = self.gn3(x) - x = F.relu(x) - - cur_fpn = self.adapter2(fpns[1]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay4(x) - x = self.gn4(x) - x = F.relu(x) - - cur_fpn = self.adapter3(fpns[2]) - if cur_fpn.size(0) != x.size(0): - cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) - x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") - x = self.lay5(x) - x = self.gn5(x) - x = F.relu(x) - - x = self.out_lay(x) - return x - - -class MHAttentionMap(nn.Module): - """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" - - def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True): - super().__init__() - self.num_heads = num_heads - self.hidden_dim = hidden_dim - self.dropout = nn.Dropout(dropout) - - self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) - self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) - - nn.init.zeros_(self.k_linear.bias) - nn.init.zeros_(self.q_linear.bias) - nn.init.xavier_uniform_(self.k_linear.weight) - nn.init.xavier_uniform_(self.q_linear.weight) - self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 - - def forward(self, q, k, mask: Optional[Tensor] = None): - q = self.q_linear(q) - k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) - qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) - kh = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) - weights = torch.einsum("bqnc,bnchw->bqnhw", qh * self.normalize_fact, kh) - - if mask is not None: - weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float("-inf")) - weights 
= F.softmax(weights.flatten(2), dim=-1).view(weights.size()) - weights = self.dropout(weights) - return weights - - -def dice_loss(inputs, targets, num_boxes): - """ - Compute the DICE loss, similar to generalized IOU for masks - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - """ - inputs = inputs.sigmoid() - inputs = inputs.flatten(1) - numerator = 2 * (inputs * targets).sum(1) - denominator = inputs.sum(-1) + targets.sum(-1) - loss = 1 - (numerator + 1) / (denominator + 1) - return loss.sum() / num_boxes - - -def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): - """ - Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. - Args: - inputs: A float tensor of arbitrary shape. - The predictions for each example. - targets: A float tensor with the same shape as inputs. Stores the binary - classification label for each element in inputs - (0 for the negative class and 1 for the positive class). - alpha: (optional) Weighting factor in range (0,1) to balance - positive vs negative examples. Default = -1 (no weighting). - gamma: Exponent of the modulating factor (1 - p_t) to - balance easy vs hard examples. - Returns: - Loss tensor - """ - prob = inputs.sigmoid() - ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none") - p_t = prob * targets + (1 - prob) * (1 - targets) - loss = ce_loss * ((1 - p_t) ** gamma) - - if alpha >= 0: - alpha_t = alpha * targets + (1 - alpha) * (1 - targets) - loss = alpha_t * loss - - return loss.mean(1).sum() / num_boxes - - -class PostProcessSegm(nn.Module): - def __init__(self, threshold=0.5): - super().__init__() - self.threshold = threshold - - @torch.no_grad() - def forward(self, results, outputs, orig_target_sizes, max_target_sizes): - assert len(orig_target_sizes) == len(max_target_sizes) - max_h, max_w = max_target_sizes.max(0)[0].tolist() - outputs_masks = outputs["pred_masks"].squeeze(2) - outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode="bilinear", align_corners=False) - outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu() - - for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)): - img_h, img_w = t[0], t[1] - results[i]["masks"] = cur_mask[:, :img_h, :img_w].unsqueeze(1) - results[i]["masks"] = F.interpolate( - results[i]["masks"].float(), size=tuple(tt.tolist()), mode="nearest" - ).byte() - - return results - - -class PostProcessPanoptic(nn.Module): - """This class converts the output of the model to the final panoptic result, in the format expected by the - coco panoptic API """ - - def __init__(self, is_thing_map, threshold=0.85): - """ - Parameters: - is_thing_map: This is a whose keys are the class ids, and the values a boolean indicating whether - the class is a thing (True) or a stuff (False) class - threshold: confidence threshold: segments with confidence lower than this will be deleted - """ - super().__init__() - self.threshold = threshold - self.is_thing_map = is_thing_map - - def forward(self, outputs, processed_sizes, target_sizes=None): - """ This function computes the panoptic prediction from the model's predictions. - Parameters: - outputs: This is a dict coming directly from the model. See the model doc for the content. 
- processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the - model, ie the size after data augmentation but before batching. - target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size - of each prediction. If left to None, it will default to the processed_sizes - """ - if target_sizes is None: - target_sizes = processed_sizes - assert len(processed_sizes) == len(target_sizes) - out_logits, raw_masks, raw_boxes = outputs["pred_logits"], outputs["pred_masks"], outputs["pred_boxes"] - assert len(out_logits) == len(raw_masks) == len(target_sizes) - preds = [] - - def to_tuple(tup): - if isinstance(tup, tuple): - return tup - return tuple(tup.cpu().tolist()) - - for cur_logits, cur_masks, cur_boxes, size, target_size in zip( - out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes - ): - # we filter empty queries and detection below threshold - scores, labels = cur_logits.softmax(-1).max(-1) - keep = labels.ne(outputs["pred_logits"].shape[-1] - 1) & (scores > self.threshold) - cur_scores, cur_classes = cur_logits.softmax(-1).max(-1) - cur_scores = cur_scores[keep] - cur_classes = cur_classes[keep] - cur_masks = cur_masks[keep] - cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode="bilinear").squeeze(1) - cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep]) - - h, w = cur_masks.shape[-2:] - assert len(cur_boxes) == len(cur_classes) - - # It may be that we have several predicted masks for the same stuff class. - # In the following, we track the list of masks ids for each stuff class (they are merged later on) - cur_masks = cur_masks.flatten(1) - stuff_equiv_classes = defaultdict(lambda: []) - for k, label in enumerate(cur_classes): - if not self.is_thing_map[label.item()]: - stuff_equiv_classes[label.item()].append(k) - - def get_ids_area(masks, scores, dedup=False): - # This helper function creates the final panoptic segmentation image - # It also returns the area of the masks that appears on the image - - m_id = masks.transpose(0, 1).softmax(-1) - - if m_id.shape[-1] == 0: - # We didn't detect any mask :( - m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device) - else: - m_id = m_id.argmax(-1).view(h, w) - - if dedup: - # Merge the masks corresponding to the same stuff class - for equiv in stuff_equiv_classes.values(): - if len(equiv) > 1: - for eq_id in equiv: - m_id.masked_fill_(m_id.eq(eq_id), equiv[0]) - - final_h, final_w = to_tuple(target_size) - - seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy())) - seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST) - - np_seg_img = ( - torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy() - ) - m_id = torch.from_numpy(rgb2id(np_seg_img)) - - area = [] - for i in range(len(scores)): - area.append(m_id.eq(i).sum().item()) - return area, seg_img - - area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True) - if cur_classes.numel() > 0: - # We know filter empty masks as long as we find some - while True: - filtered_small = torch.as_tensor( - [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device - ) - if filtered_small.any().item(): - cur_scores = cur_scores[~filtered_small] - cur_classes = cur_classes[~filtered_small] - cur_masks = cur_masks[~filtered_small] - area, seg_img = get_ids_area(cur_masks, cur_scores) - else: - break - - else: - cur_classes = torch.ones(1, dtype=torch.long, 
device=cur_classes.device) - - segments_info = [] - for i, a in enumerate(area): - cat = cur_classes[i].item() - segments_info.append({"id": i, "isthing": self.is_thing_map[cat], "category_id": cat, "area": a}) - del cur_classes - - with io.BytesIO() as out: - seg_img.save(out, format="PNG") - predictions = {"png_string": out.getvalue(), "segments_info": segments_info} - preds.append(predictions) - return preds diff --git a/models/cv/object_detection/detr/igie/models/transformer.py b/models/cv/object_detection/detr/igie/models/transformer.py deleted file mode 100644 index dcd53675..00000000 --- a/models/cv/object_detection/detr/igie/models/transformer.py +++ /dev/null @@ -1,297 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -DETR Transformer class. - -Copy-paste from torch.nn.Transformer with modifications: - * positional encodings are passed in MHattention - * extra LN at the end of encoder is removed - * decoder returns a stack of activations from all decoding layers -""" -import copy -from typing import Optional, List - -import torch -import torch.nn.functional as F -from torch import nn, Tensor - - -class Transformer(nn.Module): - - def __init__(self, d_model=512, nhead=8, num_encoder_layers=6, - num_decoder_layers=6, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False, - return_intermediate_dec=False): - super().__init__() - - encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - encoder_norm = nn.LayerNorm(d_model) if normalize_before else None - self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm) - - decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward, - dropout, activation, normalize_before) - decoder_norm = nn.LayerNorm(d_model) - self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, - return_intermediate=return_intermediate_dec) - - self._reset_parameters() - - self.d_model = d_model - self.nhead = nhead - - def _reset_parameters(self): - for p in self.parameters(): - if p.dim() > 1: - nn.init.xavier_uniform_(p) - - def forward(self, src, mask, query_embed, pos_embed): - # flatten NxCxHxW to HWxNxC - bs, c, h, w = src.shape - src = src.flatten(2).permute(2, 0, 1) - pos_embed = pos_embed.flatten(2).permute(2, 0, 1) - query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) - mask = mask.flatten(1) - - tgt = torch.zeros_like(query_embed) - memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) - hs = self.decoder(tgt, memory, memory_key_padding_mask=mask, - pos=pos_embed, query_pos=query_embed) - return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w) - - -class TransformerEncoder(nn.Module): - - def __init__(self, encoder_layer, num_layers, norm=None): - super().__init__() - self.layers = _get_clones(encoder_layer, num_layers) - self.num_layers = num_layers - self.norm = norm - - def forward(self, src, - mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - output = src - - for layer in self.layers: - output = layer(output, src_mask=mask, - src_key_padding_mask=src_key_padding_mask, pos=pos) - - if self.norm is not None: - output = self.norm(output) - - return output - - -class TransformerDecoder(nn.Module): - - def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False): - super().__init__() - self.layers = _get_clones(decoder_layer, num_layers) - self.num_layers = 
num_layers - self.norm = norm - self.return_intermediate = return_intermediate - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - output = tgt - - intermediate = [] - - for layer in self.layers: - output = layer(output, memory, tgt_mask=tgt_mask, - memory_mask=memory_mask, - tgt_key_padding_mask=tgt_key_padding_mask, - memory_key_padding_mask=memory_key_padding_mask, - pos=pos, query_pos=query_pos) - if self.return_intermediate: - intermediate.append(self.norm(output)) - - if self.norm is not None: - output = self.norm(output) - if self.return_intermediate: - intermediate.pop() - intermediate.append(output) - - if self.return_intermediate: - return torch.stack(intermediate) - - return output.unsqueeze(0) - - -class TransformerEncoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, - src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(src, pos) - src2 = self.self_attn(q, k, value=src, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src = self.norm1(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src)))) - src = src + self.dropout2(src2) - src = self.norm2(src) - return src - - def forward_pre(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - src2 = self.norm1(src) - q = k = self.with_pos_embed(src2, pos) - src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask, - key_padding_mask=src_key_padding_mask)[0] - src = src + self.dropout1(src2) - src2 = self.norm2(src) - src2 = self.linear2(self.dropout(self.activation(self.linear1(src2)))) - src = src + self.dropout2(src2) - return src - - def forward(self, src, - src_mask: Optional[Tensor] = None, - src_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(src, src_mask, src_key_padding_mask, pos) - return self.forward_post(src, src_mask, src_key_padding_mask, pos) - - -class TransformerDecoderLayer(nn.Module): - - def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, - activation="relu", normalize_before=False): - super().__init__() - self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) - # Implementation of Feedforward model - self.linear1 = nn.Linear(d_model, dim_feedforward) - self.dropout = nn.Dropout(dropout) - self.linear2 = 
nn.Linear(dim_feedforward, d_model) - - self.norm1 = nn.LayerNorm(d_model) - self.norm2 = nn.LayerNorm(d_model) - self.norm3 = nn.LayerNorm(d_model) - self.dropout1 = nn.Dropout(dropout) - self.dropout2 = nn.Dropout(dropout) - self.dropout3 = nn.Dropout(dropout) - - self.activation = _get_activation_fn(activation) - self.normalize_before = normalize_before - - def with_pos_embed(self, tensor, pos: Optional[Tensor]): - return tensor if pos is None else tensor + pos - - def forward_post(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - q = k = self.with_pos_embed(tgt, query_pos) - tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt = self.norm1(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt = self.norm2(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) - tgt = tgt + self.dropout3(tgt2) - tgt = self.norm3(tgt) - return tgt - - def forward_pre(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - tgt2 = self.norm1(tgt) - q = k = self.with_pos_embed(tgt2, query_pos) - tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask, - key_padding_mask=tgt_key_padding_mask)[0] - tgt = tgt + self.dropout1(tgt2) - tgt2 = self.norm2(tgt) - tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos), - key=self.with_pos_embed(memory, pos), - value=memory, attn_mask=memory_mask, - key_padding_mask=memory_key_padding_mask)[0] - tgt = tgt + self.dropout2(tgt2) - tgt2 = self.norm3(tgt) - tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) - tgt = tgt + self.dropout3(tgt2) - return tgt - - def forward(self, tgt, memory, - tgt_mask: Optional[Tensor] = None, - memory_mask: Optional[Tensor] = None, - tgt_key_padding_mask: Optional[Tensor] = None, - memory_key_padding_mask: Optional[Tensor] = None, - pos: Optional[Tensor] = None, - query_pos: Optional[Tensor] = None): - if self.normalize_before: - return self.forward_pre(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - return self.forward_post(tgt, memory, tgt_mask, memory_mask, - tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos) - - -def _get_clones(module, N): - return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) - - -def build_transformer(args): - return Transformer( - d_model=args.hidden_dim, - dropout=args.dropout, - nhead=args.nheads, - dim_feedforward=args.dim_feedforward, - num_encoder_layers=args.enc_layers, - num_decoder_layers=args.dec_layers, - normalize_before=args.pre_norm, - return_intermediate_dec=True, - ) - - -def _get_activation_fn(activation): - """Return an activation function given a string""" - if activation == "relu": - return F.relu - if activation == "gelu": - return F.gelu - if activation == "glu": - return F.glu - raise RuntimeError(F"activation should be relu/gelu, not {activation}.") diff 
--git a/models/cv/object_detection/detr/igie/requirements.txt b/models/cv/object_detection/detr/igie/requirements.txt deleted file mode 100644 index bb8f7823..00000000 --- a/models/cv/object_detection/detr/igie/requirements.txt +++ /dev/null @@ -1,9 +0,0 @@ -cython -git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI&egg=pycocotools -submitit -torch>=1.5.0 -torchvision>=0.6.0 -git+https://github.com/cocodataset/panopticapi.git#egg=panopticapi -scipy -onnx -onnxruntime diff --git a/models/cv/object_detection/detr/igie/run_with_submitit.py b/models/cv/object_detection/detr/igie/run_with_submitit.py deleted file mode 100644 index b6780def..00000000 --- a/models/cv/object_detection/detr/igie/run_with_submitit.py +++ /dev/null @@ -1,111 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -A script to run multinode training with submitit. -""" -import argparse -import os -import uuid -from pathlib import Path - -import main as detection -import submitit - - -def parse_args(): - detection_parser = detection.get_args_parser() - parser = argparse.ArgumentParser("Submitit for detection", parents=[detection_parser]) - parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node") - parser.add_argument("--nodes", default=4, type=int, help="Number of nodes to request") - parser.add_argument("--timeout", default=60, type=int, help="Duration of the job") - parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.") - return parser.parse_args() - - -def get_shared_folder() -> Path: - user = os.getenv("USER") - if Path("/checkpoint/").is_dir(): - p = Path(f"/checkpoint/{user}/experiments") - p.mkdir(exist_ok=True) - return p - raise RuntimeError("No shared folder available") - - -def get_init_file(): - # Init file must not exist, but it's parent dir must exist. 
- os.makedirs(str(get_shared_folder()), exist_ok=True) - init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init" - if init_file.exists(): - os.remove(str(init_file)) - return init_file - - -class Trainer(object): - def __init__(self, args): - self.args = args - - def __call__(self): - import main as detection - - self._setup_gpu_args() - detection.main(self.args) - - def checkpoint(self): - import os - import submitit - from pathlib import Path - - self.args.dist_url = get_init_file().as_uri() - checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth") - if os.path.exists(checkpoint_file): - self.args.resume = checkpoint_file - print("Requeuing ", self.args) - empty_trainer = type(self)(self.args) - return submitit.helpers.DelayedSubmission(empty_trainer) - - def _setup_gpu_args(self): - import submitit - from pathlib import Path - - job_env = submitit.JobEnvironment() - self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id))) - self.args.gpu = job_env.local_rank - self.args.rank = job_env.global_rank - self.args.world_size = job_env.num_tasks - print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}") - - -def main(): - args = parse_args() - if args.job_dir == "": - args.job_dir = get_shared_folder() / "%j" - - # Note that the folder will depend on the job_id, to easily track experiments - executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30) - - # cluster setup is defined by environment variables - num_gpus_per_node = args.ngpus - nodes = args.nodes - timeout_min = args.timeout - - executor.update_parameters( - mem_gb=40 * num_gpus_per_node, - gpus_per_node=num_gpus_per_node, - tasks_per_node=num_gpus_per_node, # one task per GPU - cpus_per_task=10, - nodes=nodes, - timeout_min=timeout_min, # max is 60 * 72 - ) - - executor.update_parameters(name="detr") - - args.dist_url = get_init_file().as_uri() - args.output_dir = args.job_dir - - trainer = Trainer(args) - job = executor.submit(trainer) - - print("Submitted job_id:", job.job_id) - - -if __name__ == "__main__": - main() diff --git a/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh index e9fcf541..bff3c73d 100644 --- a/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh +++ b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh index 5c614f39..86cda7ea 100644 --- a/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh +++ b/models/cv/object_detection/detr/igie/scripts/infer_detr_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/detr/igie/test_all.py b/models/cv/object_detection/detr/igie/test_all.py deleted file mode 100644 index 7153892f..00000000 --- a/models/cv/object_detection/detr/igie/test_all.py +++ /dev/null @@ -1,209 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import io -import unittest - -import torch -from torch import nn, Tensor -from typing import List - -from models.matcher import HungarianMatcher -from models.position_encoding import PositionEmbeddingSine, PositionEmbeddingLearned -from models.backbone import Backbone, Joiner, BackboneBase -from util import box_ops -from util.misc import nested_tensor_from_tensor_list -from hubconf import detr_resnet50, detr_resnet50_panoptic - -# onnxruntime requires python 3.5 or above -try: - import onnxruntime -except ImportError: - onnxruntime = None - - -class Tester(unittest.TestCase): - - def test_box_cxcywh_to_xyxy(self): - t = torch.rand(10, 4) - r = box_ops.box_xyxy_to_cxcywh(box_ops.box_cxcywh_to_xyxy(t)) - self.assertLess((t - r).abs().max(), 1e-5) - - @staticmethod - def indices_torch2python(indices): - return [(i.tolist(), j.tolist()) for i, j in indices] - - def test_hungarian(self): - n_queries, n_targets, n_classes = 100, 15, 91 - logits = torch.rand(1, n_queries, n_classes + 1) - boxes = torch.rand(1, n_queries, 4) - tgt_labels = torch.randint(high=n_classes, size=(n_targets,)) - tgt_boxes = torch.rand(n_targets, 4) - matcher = HungarianMatcher() - targets = [{'labels': tgt_labels, 'boxes': tgt_boxes}] - indices_single = matcher({'pred_logits': logits, 'pred_boxes': boxes}, targets) - indices_batched = matcher({'pred_logits': logits.repeat(2, 1, 1), - 'pred_boxes': boxes.repeat(2, 1, 1)}, targets * 2) - self.assertEqual(len(indices_single[0][0]), n_targets) - self.assertEqual(len(indices_single[0][1]), n_targets) - self.assertEqual(self.indices_torch2python(indices_single), - self.indices_torch2python([indices_batched[0]])) - self.assertEqual(self.indices_torch2python(indices_single), - self.indices_torch2python([indices_batched[1]])) - - # test with empty targets - tgt_labels_empty = torch.randint(high=n_classes, size=(0,)) - tgt_boxes_empty = torch.rand(0, 4) - targets_empty = [{'labels': tgt_labels_empty, 'boxes': tgt_boxes_empty}] - indices = matcher({'pred_logits': logits.repeat(2, 1, 1), - 'pred_boxes': boxes.repeat(2, 1, 1)}, targets + targets_empty) - self.assertEqual(len(indices[1][0]), 0) - indices = matcher({'pred_logits': logits.repeat(2, 1, 1), - 'pred_boxes': boxes.repeat(2, 1, 1)}, targets_empty * 2) - self.assertEqual(len(indices[0][0]), 0) - - def test_position_encoding_script(self): - m1, m2 = PositionEmbeddingSine(), PositionEmbeddingLearned() - mm1, mm2 = torch.jit.script(m1), torch.jit.script(m2) # noqa - - def test_backbone_script(self): - backbone = Backbone('resnet50', True, False, False) - torch.jit.script(backbone) # noqa - - def test_model_script_detection(self): - model = detr_resnet50(pretrained=False).eval() - scripted_model = torch.jit.script(model) - x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) - out = model(x) - out_script = scripted_model(x) - self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) - self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) - - def test_model_script_panoptic(self): - model = detr_resnet50_panoptic(pretrained=False).eval() - scripted_model = torch.jit.script(model) - 
x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) - out = model(x) - out_script = scripted_model(x) - self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) - self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) - self.assertTrue(out["pred_masks"].equal(out_script["pred_masks"])) - - def test_model_detection_different_inputs(self): - model = detr_resnet50(pretrained=False).eval() - # support NestedTensor - x = nested_tensor_from_tensor_list([torch.rand(3, 200, 200), torch.rand(3, 200, 250)]) - out = model(x) - self.assertIn('pred_logits', out) - # and 4d Tensor - x = torch.rand(1, 3, 200, 200) - out = model(x) - self.assertIn('pred_logits', out) - # and List[Tensor[C, H, W]] - x = torch.rand(3, 200, 200) - out = model([x]) - self.assertIn('pred_logits', out) - - def test_warpped_model_script_detection(self): - class WrappedDETR(nn.Module): - def __init__(self, model): - super().__init__() - self.model = model - - def forward(self, inputs: List[Tensor]): - sample = nested_tensor_from_tensor_list(inputs) - return self.model(sample) - - model = detr_resnet50(pretrained=False) - wrapped_model = WrappedDETR(model) - wrapped_model.eval() - scripted_model = torch.jit.script(wrapped_model) - x = [torch.rand(3, 200, 200), torch.rand(3, 200, 250)] - out = wrapped_model(x) - out_script = scripted_model(x) - self.assertTrue(out["pred_logits"].equal(out_script["pred_logits"])) - self.assertTrue(out["pred_boxes"].equal(out_script["pred_boxes"])) - - -@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable') -class ONNXExporterTester(unittest.TestCase): - @classmethod - def setUpClass(cls): - torch.manual_seed(123) - - def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None, - output_names=None, input_names=None): - model.eval() - - onnx_io = io.BytesIO() - # export to onnx with the first input - torch.onnx.export(model, inputs_list[0], onnx_io, - do_constant_folding=do_constant_folding, opset_version=12, - dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names) - # validate the exported model with onnx runtime - for test_inputs in inputs_list: - with torch.no_grad(): - if isinstance(test_inputs, torch.Tensor) or isinstance(test_inputs, list): - test_inputs = (nested_tensor_from_tensor_list(test_inputs),) - test_ouputs = model(*test_inputs) - if isinstance(test_ouputs, torch.Tensor): - test_ouputs = (test_ouputs,) - self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch) - - def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False): - - inputs, _ = torch.jit._flatten(inputs) - outputs, _ = torch.jit._flatten(outputs) - - def to_numpy(tensor): - if tensor.requires_grad: - return tensor.detach().cpu().numpy() - else: - return tensor.cpu().numpy() - - inputs = list(map(to_numpy, inputs)) - outputs = list(map(to_numpy, outputs)) - - ort_session = onnxruntime.InferenceSession(onnx_io.getvalue()) - # compute onnxruntime output prediction - ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)) - ort_outs = ort_session.run(None, ort_inputs) - for i, element in enumerate(outputs): - try: - torch.testing.assert_allclose(element, ort_outs[i], rtol=1e-03, atol=1e-05) - except AssertionError as error: - if tolerate_small_mismatch: - self.assertIn("(0.00%)", str(error), str(error)) - else: - raise - - def test_model_onnx_detection(self): - model = 
detr_resnet50(pretrained=False).eval() - dummy_image = torch.ones(1, 3, 800, 800) * 0.3 - model(dummy_image) - - # Test exported model on images of different size, or dummy input - self.run_model( - model, - [(torch.rand(1, 3, 750, 800),)], - input_names=["inputs"], - output_names=["pred_logits", "pred_boxes"], - tolerate_small_mismatch=True, - ) - - @unittest.skip("CI doesn't have enough memory") - def test_model_onnx_detection_panoptic(self): - model = detr_resnet50_panoptic(pretrained=False).eval() - dummy_image = torch.ones(1, 3, 800, 800) * 0.3 - model(dummy_image) - - # Test exported model on images of different size, or dummy input - self.run_model( - model, - [(torch.rand(1, 3, 750, 800),)], - input_names=["inputs"], - output_names=["pred_logits", "pred_boxes", "pred_masks"], - tolerate_small_mismatch=True, - ) - - -if __name__ == '__main__': - unittest.main() diff --git a/models/cv/object_detection/detr/igie/tox.ini b/models/cv/object_detection/detr/igie/tox.ini deleted file mode 100644 index 5554a882..00000000 --- a/models/cv/object_detection/detr/igie/tox.ini +++ /dev/null @@ -1,3 +0,0 @@ -[flake8] -max-line-length = 120 -ignore = F401,E402,F403,W503,W504 diff --git a/models/cv/object_detection/detr/igie/util/__init__.py b/models/cv/object_detection/detr/igie/util/__init__.py deleted file mode 100644 index 168f9979..00000000 --- a/models/cv/object_detection/detr/igie/util/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved diff --git a/models/cv/object_detection/detr/igie/util/box_ops.py b/models/cv/object_detection/detr/igie/util/box_ops.py deleted file mode 100644 index 9c088e5b..00000000 --- a/models/cv/object_detection/detr/igie/util/box_ops.py +++ /dev/null @@ -1,88 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Utilities for bounding box manipulation and GIoU. 
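Editorial aside (not part of the patch hunks): a short worked example of the `(center_x, center_y, w, h)` to `(x0, y0, x1, y1)` conversion that `box_cxcywh_to_xyxy` below implements; the box values are made up for illustration:

```python
import torch

box_cxcywh = torch.tensor([[0.5, 0.5, 0.2, 0.4]])   # normalized (cx, cy, w, h)
x_c, y_c, w, h = box_cxcywh.unbind(-1)
box_xyxy = torch.stack([x_c - 0.5 * w, y_c - 0.5 * h,
                        x_c + 0.5 * w, y_c + 0.5 * h], dim=-1)
print(box_xyxy)   # tensor([[0.4000, 0.3000, 0.6000, 0.7000]])
```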
-""" -import torch -from torchvision.ops.boxes import box_area - - -def box_cxcywh_to_xyxy(x): - x_c, y_c, w, h = x.unbind(-1) - b = [(x_c - 0.5 * w), (y_c - 0.5 * h), - (x_c + 0.5 * w), (y_c + 0.5 * h)] - return torch.stack(b, dim=-1) - - -def box_xyxy_to_cxcywh(x): - x0, y0, x1, y1 = x.unbind(-1) - b = [(x0 + x1) / 2, (y0 + y1) / 2, - (x1 - x0), (y1 - y0)] - return torch.stack(b, dim=-1) - - -# modified from torchvision to also return the union -def box_iou(boxes1, boxes2): - area1 = box_area(boxes1) - area2 = box_area(boxes2) - - lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] - rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] - - wh = (rb - lt).clamp(min=0) # [N,M,2] - inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] - - union = area1[:, None] + area2 - inter - - iou = inter / union - return iou, union - - -def generalized_box_iou(boxes1, boxes2): - """ - Generalized IoU from https://giou.stanford.edu/ - - The boxes should be in [x0, y0, x1, y1] format - - Returns a [N, M] pairwise matrix, where N = len(boxes1) - and M = len(boxes2) - """ - # degenerate boxes gives inf / nan results - # so do an early check - assert (boxes1[:, 2:] >= boxes1[:, :2]).all() - assert (boxes2[:, 2:] >= boxes2[:, :2]).all() - iou, union = box_iou(boxes1, boxes2) - - lt = torch.min(boxes1[:, None, :2], boxes2[:, :2]) - rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) - - wh = (rb - lt).clamp(min=0) # [N,M,2] - area = wh[:, :, 0] * wh[:, :, 1] - - return iou - (area - union) / area - - -def masks_to_boxes(masks): - """Compute the bounding boxes around the provided masks - - The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions. - - Returns a [N, 4] tensors, with the boxes in xyxy format - """ - if masks.numel() == 0: - return torch.zeros((0, 4), device=masks.device) - - h, w = masks.shape[-2:] - - y = torch.arange(0, h, dtype=torch.float) - x = torch.arange(0, w, dtype=torch.float) - y, x = torch.meshgrid(y, x) - - x_mask = (masks * x.unsqueeze(0)) - x_max = x_mask.flatten(1).max(-1)[0] - x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] - - y_mask = (masks * y.unsqueeze(0)) - y_max = y_mask.flatten(1).max(-1)[0] - y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0] - - return torch.stack([x_min, y_min, x_max, y_max], 1) diff --git a/models/cv/object_detection/detr/igie/util/misc.py b/models/cv/object_detection/detr/igie/util/misc.py deleted file mode 100644 index dfa9fb5b..00000000 --- a/models/cv/object_detection/detr/igie/util/misc.py +++ /dev/null @@ -1,468 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -""" -Misc functions, including distributed helpers. - -Mostly copy-paste from torchvision references. -""" -import os -import subprocess -import time -from collections import defaultdict, deque -import datetime -import pickle -from packaging import version -from typing import Optional, List - -import torch -import torch.distributed as dist -from torch import Tensor - -# needed due to empty tensor bug in pytorch and torchvision 0.5 -import torchvision -if version.parse(torchvision.__version__) < version.parse('0.7'): - from torchvision.ops import _new_empty_tensor - from torchvision.ops.misc import _output_size - - -class SmoothedValue(object): - """Track a series of values and provide access to smoothed values over a - window or the global series average. 
- """ - - def __init__(self, window_size=20, fmt=None): - if fmt is None: - fmt = "{median:.4f} ({global_avg:.4f})" - self.deque = deque(maxlen=window_size) - self.total = 0.0 - self.count = 0 - self.fmt = fmt - - def update(self, value, n=1): - self.deque.append(value) - self.count += n - self.total += value * n - - def synchronize_between_processes(self): - """ - Warning: does not synchronize the deque! - """ - if not is_dist_avail_and_initialized(): - return - t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') - dist.barrier() - dist.all_reduce(t) - t = t.tolist() - self.count = int(t[0]) - self.total = t[1] - - @property - def median(self): - d = torch.tensor(list(self.deque)) - return d.median().item() - - @property - def avg(self): - d = torch.tensor(list(self.deque), dtype=torch.float32) - return d.mean().item() - - @property - def global_avg(self): - return self.total / self.count - - @property - def max(self): - return max(self.deque) - - @property - def value(self): - return self.deque[-1] - - def __str__(self): - return self.fmt.format( - median=self.median, - avg=self.avg, - global_avg=self.global_avg, - max=self.max, - value=self.value) - - -def all_gather(data): - """ - Run all_gather on arbitrary picklable data (not necessarily tensors) - Args: - data: any picklable object - Returns: - list[data]: list of data gathered from each rank - """ - world_size = get_world_size() - if world_size == 1: - return [data] - - # serialized to a Tensor - buffer = pickle.dumps(data) - storage = torch.ByteStorage.from_buffer(buffer) - tensor = torch.ByteTensor(storage).to("cuda") - - # obtain Tensor size of each rank - local_size = torch.tensor([tensor.numel()], device="cuda") - size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)] - dist.all_gather(size_list, local_size) - size_list = [int(size.item()) for size in size_list] - max_size = max(size_list) - - # receiving Tensor from all ranks - # we pad the tensor because torch all_gather does not support - # gathering tensors of different shapes - tensor_list = [] - for _ in size_list: - tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda")) - if local_size != max_size: - padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda") - tensor = torch.cat((tensor, padding), dim=0) - dist.all_gather(tensor_list, tensor) - - data_list = [] - for size, tensor in zip(size_list, tensor_list): - buffer = tensor.cpu().numpy().tobytes()[:size] - data_list.append(pickle.loads(buffer)) - - return data_list - - -def reduce_dict(input_dict, average=True): - """ - Args: - input_dict (dict): all the values will be reduced - average (bool): whether to do average or sum - Reduce the values in the dictionary from all processes so that all processes - have the averaged results. Returns a dict with the same fields as - input_dict, after reduction. 
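Editorial aside (not part of the patch hunks): the reduction described above amounts to an element-wise all-reduce followed by division by the world size. A single-process sketch of the same semantics, faking two ranks' loss dictionaries since no process group is initialized here:

```python
# Two fake ranks' loss dicts; reduce_dict(average=True) would all-reduce and divide.
rank_dicts = [{'loss_ce': 1.0, 'loss_bbox': 0.4},
              {'loss_ce': 3.0, 'loss_bbox': 0.6}]
world_size = len(rank_dicts)
reduced = {k: sum(d[k] for d in rank_dicts) / world_size for k in sorted(rank_dicts[0])}
print(reduced)   # {'loss_bbox': 0.5, 'loss_ce': 2.0}
```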
- """ - world_size = get_world_size() - if world_size < 2: - return input_dict - with torch.no_grad(): - names = [] - values = [] - # sort the keys so that they are consistent across processes - for k in sorted(input_dict.keys()): - names.append(k) - values.append(input_dict[k]) - values = torch.stack(values, dim=0) - dist.all_reduce(values) - if average: - values /= world_size - reduced_dict = {k: v for k, v in zip(names, values)} - return reduced_dict - - -class MetricLogger(object): - def __init__(self, delimiter="\t"): - self.meters = defaultdict(SmoothedValue) - self.delimiter = delimiter - - def update(self, **kwargs): - for k, v in kwargs.items(): - if isinstance(v, torch.Tensor): - v = v.item() - assert isinstance(v, (float, int)) - self.meters[k].update(v) - - def __getattr__(self, attr): - if attr in self.meters: - return self.meters[attr] - if attr in self.__dict__: - return self.__dict__[attr] - raise AttributeError("'{}' object has no attribute '{}'".format( - type(self).__name__, attr)) - - def __str__(self): - loss_str = [] - for name, meter in self.meters.items(): - loss_str.append( - "{}: {}".format(name, str(meter)) - ) - return self.delimiter.join(loss_str) - - def synchronize_between_processes(self): - for meter in self.meters.values(): - meter.synchronize_between_processes() - - def add_meter(self, name, meter): - self.meters[name] = meter - - def log_every(self, iterable, print_freq, header=None): - i = 0 - if not header: - header = '' - start_time = time.time() - end = time.time() - iter_time = SmoothedValue(fmt='{avg:.4f}') - data_time = SmoothedValue(fmt='{avg:.4f}') - space_fmt = ':' + str(len(str(len(iterable)))) + 'd' - if torch.cuda.is_available(): - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}', - 'max mem: {memory:.0f}' - ]) - else: - log_msg = self.delimiter.join([ - header, - '[{0' + space_fmt + '}/{1}]', - 'eta: {eta}', - '{meters}', - 'time: {time}', - 'data: {data}' - ]) - MB = 1024.0 * 1024.0 - for obj in iterable: - data_time.update(time.time() - end) - yield obj - iter_time.update(time.time() - end) - if i % print_freq == 0 or i == len(iterable) - 1: - eta_seconds = iter_time.global_avg * (len(iterable) - i) - eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) - if torch.cuda.is_available(): - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time), - memory=torch.cuda.max_memory_allocated() / MB)) - else: - print(log_msg.format( - i, len(iterable), eta=eta_string, - meters=str(self), - time=str(iter_time), data=str(data_time))) - i += 1 - end = time.time() - total_time = time.time() - start_time - total_time_str = str(datetime.timedelta(seconds=int(total_time))) - print('{} Total time: {} ({:.4f} s / it)'.format( - header, total_time_str, total_time / len(iterable))) - - -def get_sha(): - cwd = os.path.dirname(os.path.abspath(__file__)) - - def _run(command): - return subprocess.check_output(command, cwd=cwd).decode('ascii').strip() - sha = 'N/A' - diff = "clean" - branch = 'N/A' - try: - sha = _run(['git', 'rev-parse', 'HEAD']) - subprocess.check_output(['git', 'diff'], cwd=cwd) - diff = _run(['git', 'diff-index', 'HEAD']) - diff = "has uncommited changes" if diff else "clean" - branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD']) - except Exception: - pass - message = f"sha: {sha}, status: {diff}, branch: {branch}" - return message - - -def collate_fn(batch): - batch = 
list(zip(*batch)) - batch[0] = nested_tensor_from_tensor_list(batch[0]) - return tuple(batch) - - -def _max_by_axis(the_list): - # type: (List[List[int]]) -> List[int] - maxes = the_list[0] - for sublist in the_list[1:]: - for index, item in enumerate(sublist): - maxes[index] = max(maxes[index], item) - return maxes - - -class NestedTensor(object): - def __init__(self, tensors, mask: Optional[Tensor]): - self.tensors = tensors - self.mask = mask - - def to(self, device): - # type: (Device) -> NestedTensor # noqa - cast_tensor = self.tensors.to(device) - mask = self.mask - if mask is not None: - assert mask is not None - cast_mask = mask.to(device) - else: - cast_mask = None - return NestedTensor(cast_tensor, cast_mask) - - def decompose(self): - return self.tensors, self.mask - - def __repr__(self): - return str(self.tensors) - - -def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): - # TODO make this more general - if tensor_list[0].ndim == 3: - if torchvision._is_tracing(): - # nested_tensor_from_tensor_list() does not export well to ONNX - # call _onnx_nested_tensor_from_tensor_list() instead - return _onnx_nested_tensor_from_tensor_list(tensor_list) - - # TODO make it support different-sized images - max_size = _max_by_axis([list(img.shape) for img in tensor_list]) - # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) - batch_shape = [len(tensor_list)] + max_size - b, c, h, w = batch_shape - dtype = tensor_list[0].dtype - device = tensor_list[0].device - tensor = torch.zeros(batch_shape, dtype=dtype, device=device) - mask = torch.ones((b, h, w), dtype=torch.bool, device=device) - for img, pad_img, m in zip(tensor_list, tensor, mask): - pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - m[: img.shape[1], :img.shape[2]] = False - else: - raise ValueError('not supported') - return NestedTensor(tensor, mask) - - -# _onnx_nested_tensor_from_tensor_list() is an implementation of -# nested_tensor_from_tensor_list() that is supported by ONNX tracing. 
-@torch.jit.unused -def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor: - max_size = [] - for i in range(tensor_list[0].dim()): - max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64) - max_size.append(max_size_i) - max_size = tuple(max_size) - - # work around for - # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) - # m[: img.shape[1], :img.shape[2]] = False - # which is not yet supported in onnx - padded_imgs = [] - padded_masks = [] - for img in tensor_list: - padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] - padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) - padded_imgs.append(padded_img) - - m = torch.zeros_like(img[0], dtype=torch.int, device=img.device) - padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1) - padded_masks.append(padded_mask.to(torch.bool)) - - tensor = torch.stack(padded_imgs) - mask = torch.stack(padded_masks) - - return NestedTensor(tensor, mask=mask) - - -def setup_for_distributed(is_master): - """ - This function disables printing when not in master process - """ - import builtins as __builtin__ - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop('force', False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def is_dist_avail_and_initialized(): - if not dist.is_available(): - return False - if not dist.is_initialized(): - return False - return True - - -def get_world_size(): - if not is_dist_avail_and_initialized(): - return 1 - return dist.get_world_size() - - -def get_rank(): - if not is_dist_avail_and_initialized(): - return 0 - return dist.get_rank() - - -def is_main_process(): - return get_rank() == 0 - - -def save_on_master(*args, **kwargs): - if is_main_process(): - torch.save(*args, **kwargs) - - -def init_distributed_mode(args): - if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: - args.rank = int(os.environ["RANK"]) - args.world_size = int(os.environ['WORLD_SIZE']) - args.gpu = int(os.environ['LOCAL_RANK']) - elif 'SLURM_PROCID' in os.environ: - args.rank = int(os.environ['SLURM_PROCID']) - args.gpu = args.rank % torch.cuda.device_count() - else: - print('Not using distributed mode') - args.distributed = False - return - - args.distributed = True - - torch.cuda.set_device(args.gpu) - args.dist_backend = 'nccl' - print('| distributed init (rank {}): {}'.format( - args.rank, args.dist_url), flush=True) - torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, - world_size=args.world_size, rank=args.rank) - torch.distributed.barrier() - setup_for_distributed(args.rank == 0) - - -@torch.no_grad() -def accuracy(output, target, topk=(1,)): - """Computes the precision@k for the specified values of k""" - if target.numel() == 0: - return [torch.zeros([], device=output.device)] - maxk = max(topk) - batch_size = target.size(0) - - _, pred = output.topk(maxk, 1, True, True) - pred = pred.t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - - res = [] - for k in topk: - correct_k = correct[:k].view(-1).float().sum(0) - res.append(correct_k.mul_(100.0 / batch_size)) - return res - - -def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None): - # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor - """ - Equivalent to nn.functional.interpolate, but with support for empty 
batch sizes. - This will eventually be supported natively by PyTorch, and this - class can go away. - """ - if version.parse(torchvision.__version__) < version.parse('0.7'): - if input.numel() > 0: - return torch.nn.functional.interpolate( - input, size, scale_factor, mode, align_corners - ) - - output_shape = _output_size(2, input, size, scale_factor) - output_shape = list(input.shape[:-2]) + list(output_shape) - return _new_empty_tensor(input, output_shape) - else: - return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners) diff --git a/models/cv/object_detection/detr/igie/util/plot_utils.py b/models/cv/object_detection/detr/igie/util/plot_utils.py deleted file mode 100644 index 0f24bed0..00000000 --- a/models/cv/object_detection/detr/igie/util/plot_utils.py +++ /dev/null @@ -1,107 +0,0 @@ -""" -Plotting utilities to visualize training logs. -""" -import torch -import pandas as pd -import numpy as np -import seaborn as sns -import matplotlib.pyplot as plt - -from pathlib import Path, PurePath - - -def plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'): - ''' - Function to plot specific fields from training log(s). Plots both training and test results. - - :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file - - fields = which results to plot from each log file - plots both training and test for each field. - - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots - - log_name = optional, name of log file if different than default 'log.txt'. - - :: Outputs - matplotlib plots of results in fields, color coded for each log file. - - solid lines are training results, dashed lines are test results. - - ''' - func_name = "plot_utils.py::plot_logs" - - # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path, - # convert single Path to list to avoid 'not iterable' error - - if not isinstance(logs, list): - if isinstance(logs, PurePath): - logs = [logs] - print(f"{func_name} info: logs param expects a list argument, converted to list[Path].") - else: - raise ValueError(f"{func_name} - invalid argument for logs parameter.\n \ - Expect list[Path] or single Path obj, received {type(logs)}") - - # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir - for i, dir in enumerate(logs): - if not isinstance(dir, PurePath): - raise ValueError(f"{func_name} - non-Path object in logs argument of {type(dir)}: \n{dir}") - if not dir.exists(): - raise ValueError(f"{func_name} - invalid directory in logs argument:\n{dir}") - # verify log_name exists - fn = Path(dir / log_name) - if not fn.exists(): - print(f"-> missing {log_name}. 
Have you gotten to Epoch 1 in training?") - print(f"--> full path of missing log file: {fn}") - return - - # load log file(s) and plot - dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs] - - fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5)) - - for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))): - for j, field in enumerate(fields): - if field == 'mAP': - coco_eval = pd.DataFrame( - np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1] - ).ewm(com=ewm_col).mean() - axs[j].plot(coco_eval, c=color) - else: - df.interpolate().ewm(com=ewm_col).mean().plot( - y=[f'train_{field}', f'test_{field}'], - ax=axs[j], - color=[color] * 2, - style=['-', '--'] - ) - for ax, field in zip(axs, fields): - ax.legend([Path(p).name for p in logs]) - ax.set_title(field) - - -def plot_precision_recall(files, naming_scheme='iter'): - if naming_scheme == 'exp_id': - # name becomes exp_id - names = [f.parts[-3] for f in files] - elif naming_scheme == 'iter': - names = [f.stem for f in files] - else: - raise ValueError(f'not supported {naming_scheme}') - fig, axs = plt.subplots(ncols=2, figsize=(16, 5)) - for f, color, name in zip(files, sns.color_palette("Blues", n_colors=len(files)), names): - data = torch.load(f) - # precision is n_iou, n_points, n_cat, n_area, max_det - precision = data['precision'] - recall = data['params'].recThrs - scores = data['scores'] - # take precision for all classes, all areas and 100 detections - precision = precision[0, :, :, 0, -1].mean(1) - scores = scores[0, :, :, 0, -1].mean(1) - prec = precision.mean() - rec = data['recall'][0, :, 0, -1].mean() - print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' + - f'score={scores.mean():0.3f}, ' + - f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}' - ) - axs[0].plot(recall, precision, c=color) - axs[1].plot(recall, scores, c=color) - - axs[0].set_title('Precision / Recall') - axs[0].legend(names) - axs[1].set_title('Scores / Recall') - axs[1].legend(names) - return fig, axs diff --git a/models/cv/object_detection/yolov11m/igie/README.md b/models/cv/object_detection/yolov11m/igie/README.md index 57d55a34..4085fc11 100644 --- a/models/cv/object_detection/yolov11m/igie/README.md +++ b/models/cv/object_detection/yolov11m/igie/README.md @@ -8,8 +8,7 @@ YOLOv11 is the latest generation of the YOLO (You Only Look Once) series object | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | | :----: | :----: | :----: | -| MR-V100 | 4.3.0 | 25.12 | -| MR-V100 | 4.2.0 | 25.03 | +| MR-V100 | 4.4.0 | 26.03 | ## Model Preparation @@ -50,11 +49,11 @@ coco Contact the Iluvatar administrator to get the missing packages: -- mmcv-2.1.0+corex.4.3.0-cp310-cp310-linux_x86_64.whl +- mmcv-* ```bash pip3 install -r requirements.txt -pip3 install mmcv-2.1.0+corex.4.3.0-cp310-cp310-linux_x86_64.whl +pip3 install mmcv-*.whl ``` ## Model Conversion diff --git a/models/cv/object_detection/yolov11s/igie/README.md b/models/cv/object_detection/yolov11s/igie/README.md index eb48a671..fd596bbb 100644 --- a/models/cv/object_detection/yolov11s/igie/README.md +++ b/models/cv/object_detection/yolov11s/igie/README.md @@ -1,4 +1,4 @@ -# YOLOv11m (IGIE) +# YOLOv11s (IGIE) ## Model Description @@ -8,8 +8,7 @@ YOLOv11 is the latest generation of the YOLO (You Only Look Once) series object | GPU | [IXUCA SDK](https://gitee.com/deep-spark/deepspark#%E5%A4%A9%E6%95%B0%E6%99%BA%E7%AE%97%E8%BD%AF%E4%BB%B6%E6%A0%88-ixuca) | Release | | :----: | :----: | 
:----: | -| MR-V100 | 4.3.0 | 25.12 | -| MR-V100 | 4.2.0 | 25.03 | +| MR-V100 | 4.4.0 | 26.03 | ## Model Preparation @@ -50,17 +49,17 @@ coco Contact the Iluvatar administrator to get the missing packages: -- mmcv-2.1.0+corex.4.3.0-cp310-cp310-linux_x86_64.whl +- mmcv-* ```bash pip3 install -r requirements.txt -pip3 install mmcv-2.1.0+corex.4.3.0-cp310-cp310-linux_x86_64.whl +pip3 install mmcv-*.whl ``` ## Model Conversion ```bash -python3 export.py --weight yolo11m.pt --batch 32 +python3 export.py --weight yolo11s.pt --batch 32 # Make sure numpy < 2.0 ``` diff --git a/models/cv/object_detection/yolov26n/igie/ci/prepare.sh b/models/cv/object_detection/yolov26n/igie/ci/prepare.sh index d1302eda..7c845324 100644 --- a/models/cv/object_detection/yolov26n/igie/ci/prepare.sh +++ b/models/cv/object_detection/yolov26n/igie/ci/prepare.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may @@ -16,15 +16,6 @@ set -x -ID=$(grep -oP '(?<=^ID=).+' /etc/os-release | tr -d '"') -if [[ ${ID} == "ubuntu" ]]; then - apt install -y libgl1-mesa-glx -elif [[ ${ID} == "centos" ]]; then - yum install -y mesa-libGL -else - echo "Not Support Os" -fi - pip3 install -r requirements.txt python3 export.py --weight yolo26n.pt --batch 32 diff --git a/models/cv/object_detection/yolov26n/igie/export.py b/models/cv/object_detection/yolov26n/igie/export.py index 780b9b2a..0eea848e 100644 --- a/models/cv/object_detection/yolov26n/igie/export.py +++ b/models/cv/object_detection/yolov26n/igie/export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov26n/igie/inference.py b/models/cv/object_detection/yolov26n/igie/inference.py index 5053a00f..7049b930 100644 --- a/models/cv/object_detection/yolov26n/igie/inference.py +++ b/models/cv/object_detection/yolov26n/igie/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov26n/igie/requirements.txt b/models/cv/object_detection/yolov26n/igie/requirements.txt index 9eecef9e..0bbe10c1 100644 --- a/models/cv/object_detection/yolov26n/igie/requirements.txt +++ b/models/cv/object_detection/yolov26n/igie/requirements.txt @@ -1,4 +1,5 @@ tqdm +onnxsim onnx==1.16.0 huggingface_hub ultralytics==8.4.16 diff --git a/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh index ba4b2db9..1a6d893c 100644 --- a/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh +++ b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh index cbae9330..61c9e727 100644 --- a/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh +++ b/models/cv/object_detection/yolov26n/igie/scripts/infer_yolov26n_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov26n/igie/utils.py b/models/cv/object_detection/yolov26n/igie/utils.py index 1b35ad76..ed4c71bd 100644 --- a/models/cv/object_detection/yolov26n/igie/utils.py +++ b/models/cv/object_detection/yolov26n/igie/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov26n/igie/validator.py b/models/cv/object_detection/yolov26n/igie/validator.py index c91fd6f4..66af9721 100644 --- a/models/cv/object_detection/yolov26n/igie/validator.py +++ b/models/cv/object_detection/yolov26n/igie/validator.py @@ -1,4 +1,4 @@ -# Copyright (c) 2025, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/ci/prepare.sh b/models/cv/object_detection/yolov5s/igie/ci/prepare.sh index 5531dec7..5a2195ed 100644 --- a/models/cv/object_detection/yolov5s/igie/ci/prepare.sh +++ b/models/cv/object_detection/yolov5s/igie/ci/prepare.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/export.py b/models/cv/object_detection/yolov5s/igie/export.py index bb0c669a..8fcf3e9b 100644 --- a/models/cv/object_detection/yolov5s/igie/export.py +++ b/models/cv/object_detection/yolov5s/igie/export.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/inference.py b/models/cv/object_detection/yolov5s/igie/inference.py index 42f9407d..645e774a 100644 --- a/models/cv/object_detection/yolov5s/igie/inference.py +++ b/models/cv/object_detection/yolov5s/igie/inference.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/quantize.py b/models/cv/object_detection/yolov5s/igie/quantize.py index 6771ad41..f9559b8c 100644 --- a/models/cv/object_detection/yolov5s/igie/quantize.py +++ b/models/cv/object_detection/yolov5s/igie/quantize.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh index 64a94ad6..373e60a3 100644 --- a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh index 84cef150..9ba35e13 100644 --- a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_fp16_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh index a37bb904..fda73abf 100644 --- a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_accuracy.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh index 47435056..53f1432c 100644 --- a/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh +++ b/models/cv/object_detection/yolov5s/igie/scripts/infer_yolov5s_int8_performance.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. +# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/models/cv/object_detection/yolov5s/igie/utils.py b/models/cv/object_detection/yolov5s/igie/utils.py index 8ab517b2..23fe2c3a 100644 --- a/models/cv/object_detection/yolov5s/igie/utils.py +++ b/models/cv/object_detection/yolov5s/igie/utils.py @@ -1,4 +1,4 @@ -# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. 
+# Copyright (c) 2026, Shanghai Iluvatar CoreX Semiconductor Co., Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/tests/model_info.json b/tests/model_info.json index 3fb2e0cd..1d90c802 100644 --- a/tests/model_info.json +++ b/tests/model_info.json @@ -625,7 +625,8 @@ "download_url": "https://download.pytorch.org/models/densenet121-a639ec97.pth", "need_third_part": false, "precisions": [ - "fp16" + "fp16", + "int8" ], "type": "inference", "hasDemo": false, @@ -9792,6 +9793,273 @@ "type": "inference", "hasDemo": false, "demoType": "" + }, + { + "display_name": "Mobilevit_s", + "model_name": "mobilevit_s", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/classification", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/classification/mobilevit_s/igie", + "readme_file": "models/cv/classification/mobilevit_s/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/imagenet", + "download_url": "https://huggingface.co/timm/mobilevit_s.cvnets_in1k", + "need_third_part": false, + "precisions": [ + "fp16" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "ViT_B_32", + "model_name": "vit_b_32", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/classification", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/classification/vit_b_32/igie", + "readme_file": "models/cv/classification/vit_b_32/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/imagenet", + "download_url": "https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt", + "need_third_part": false, + "precisions": [ + "fp16" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "ViT_L_14", + "model_name": "vit_l_14", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/classification", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/classification/vit_l_14/igie", + "readme_file": "models/cv/classification/vit_l_14/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/imagenet", + "download_url": "https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt", + "need_third_part": false, + "precisions": [ + "fp16" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "YOLOv11m", + "model_name": "yolov11m", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": 
"cv/object_detection", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/object_detection/yolov11m/igie", + "readme_file": "models/cv/object_detection/yolov11m/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/coco", + "download_url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11m.pt", + "need_third_part": false, + "precisions": [ + "fp16", + "int8" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "YOLOv11s", + "model_name": "yolov11s", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/object_detection", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/object_detection/yolov11s/igie", + "readme_file": "models/cv/object_detection/yolov11s/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/coco", + "download_url": "https://github.com/ultralytics/assets/releases/download/v8.3.0/yolo11s.pt", + "need_third_part": false, + "precisions": [ + "fp16", + "int8" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "YOLOv26n", + "model_name": "yolov26n", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/object_detection", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/object_detection/yolov26n/igie", + "readme_file": "models/cv/object_detection/yolov26n/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/coco", + "download_url": "https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n.pt", + "need_third_part": false, + "precisions": [ + "fp16" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "YOLOv5s", + "model_name": "yolov5s", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/object_detection", + "toolbox": "", + "mdims": "", + "dataset": "", + "license": "", + "model_path": "models/cv/object_detection/yolov5s/igie", + "readme_file": "models/cv/object_detection/yolov5s/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/coco", + "download_url": "https://github.com/ultralytics/yolov5/releases/download/v7.0/yolov5s.pt", + "need_third_part": false, + "precisions": [ + "fp16", + "int8" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" + }, + { + "display_name": "DETR", + "model_name": "detr", + "framework": "igie", + "release_version": "26.03", + "release_sdk": "4.4.0", + "release_gpgpu": "MR-V100", + "latest_sdk": "4.4.0", + "latest_gpgpu": "MR-V100", + "category": "cv/object_detection", + "toolbox": "", + "mdims": "", + 
"dataset": "", + "license": "", + "model_path": "models/cv/object_detection/detr/igie", + "readme_file": "models/cv/object_detection/detr/igie/README.md", + "bitbucket_repo": "", + "bitbucket_branch": "", + "bitbucket_path": "", + "develop_owner": "", + "github_repo": "", + "github_branch": "", + "github_path": "", + "datasets": "local/coco", + "download_url": "https://dl.fbaipublicfiles.com/detr/detr-r50-e632da11.pth", + "need_third_part": false, + "precisions": [ + "fp16" + ], + "type": "inference", + "hasDemo": false, + "demoType": "" } ] } \ No newline at end of file -- Gitee