{
"github_url":"https://github.com/sophgo/tdl_models/tree/main/",
"_comment":"model_list specify the model maintained in tdl_models,each model should at least have file name and rgb_order(choice:rgb,bgr,gray)",
"model_list":{
"MBV2_DET_PERSON":{
"types":["person"],
"file_name":"mbv2_det_person_256_448_INT8",
"rgb_order":"rgb"
},
"YOLOV8N_DET_HAND":{
"types":["hand"],
"file_name":"yolov8n_det_hand_384_640_INT8"
},
"YOLOV8N_DET_PET_PERSON":{
"types":["cat","dog","person"],
"file_name":"yolov8n_det_pet_person_384_640_INT8"
},
"YOLOV8N_DET_BICYCLE_MOTOR_EBICYCLE":{
"types":["bicycle","motorcycle","ebicycle"],
"file_name":"yolov8n_det_bicycle_motor_ebicycle_384_640_INT8"
},
"YOLOV8N_DET_PERSON_VEHICLE":{
"types":["car","bus","truck","rider with motorcycle","person","bike","motorcycle"],
"file_name":"yolov8n_det_person_vehicle_384_640_INT8"
},
"YOLOV8N_DET_HAND_FACE_PERSON":{
"types":["hand","face","person"],
"file_name":"yolov8n_det_hand_face_person_384_640_INT8"
},
"YOLOV8N_DET_FACE_HEAD_PERSON_PET":{
"types":["face","head","person","pet"],
"file_name":"yolov8n_det_face_head_person_pet_384_640_INT8"
},
"YOLOV8N_DET_HEAD_PERSON":{
"types":["head","person"],
"file_name":"yolov8n_det_head_person_384_640_INT8"
},
"YOLOV8N_DET_HEAD_HARDHAT":{
"types":["head","hardhat"],
"file_name":"yolov8n_det_head_hardhat_576_960_INT8"
},
"YOLOV8N_DET_FIRE_SMOKE":{
"types":["fire","smoke"],
"file_name":"yolov8n_det_fire_smoke_384_640_INT8"
},
"YOLOV8N_DET_FIRE":{
"types":["fire"],
"file_name":"yolov8n_det_fire_384_640_INT8"
},
"YOLOV8N_DET_HEAD_SHOULDER":{
"types":["head shoulder"],
"file_name":"yolov8n_det_head_shoulder_384_640_INT8"
},
"YOLOV8N_DET_LICENSE_PLATE":{
"types":["license plate"],
"file_name":"yolov8n_det_license_plate_384_640_INT8"
},
"YOLOV8N_DET_TRAFFIC_LIGHT":{
"types":["red","yellow","green","off","wait on"],
"file_name":"yolov8n_det_traffic_light_384_640_INT8"
},
"YOLOV8N_DET_MONITOR_PERSON":{
"types":["person"],
"file_name":"yolov8n_det_monitor_person_256_448_INT8"
},
"YOLOV11N_DET_MONITOR_PERSON":{
"types":["person"],
"file_name":"yolov11n_det_monitor_person_384_640_INT8"
},
"YOLOV11N_DET_BICYCLE_MOTOR_EBICYCLE":{
"types":["bicycle","motorcycle","ebicycle"],
"file_name":"yolov11n_det_bicycle_motor_ebicycle_384_640_INT8"
},
"YOLOV5_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolov5s_det_coco80_640_640_INT8"
},
"YOLOV6_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolov6n_det_coco80_640_640_INT8"
},
"YOLOV7_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolov7_tiny_det_coco80_640_640_INT8"
},
"YOLOV8_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolov8n_det_coco80_640_640_INT8"
},
"YOLOV10_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolov10n_det_coco80_640_640_INT8"
},
"YOLOV11N_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolov11n_det_coco80_640_640_INT8"
},
"PPYOLOE_DET_COCO80":{
"is_coco_types":true,
"file_name":"ppyoloe_det_coco80_640_640_INT8"
},
"YOLOX_DET_COCO80":{
"is_coco_types":true,
"file_name":"yolox_m_det_coco80_640_640_INT8"
},
"YOLOV5":{
"_comment":"custom model, specify num_cls or branch string",
"file_name":""
},
"YOLOV6":{
"_comment":"custom model, specify num_cls or branch string",
"file_name":""
},
"YOLOV8":{
"_comment":"custom model, specify num_cls or branch string",
"file_name":"best",
"model_path": "/root/best.cvimodel",
"model_type": "yolov8",
"input_width": 640,
"input_height": 640,
"num_classes": 3,
"threshold": 0.5,
"nms_threshold": 0.45,
"mean": [0.0, 0.0, 0.0],
"scale": [0.00392157, 0.00392157, 0.00392157],
"format": "RGB",
"labels": ["Ades", "Barbie", "Bauer"]
},
"YOLOV10":{
"_comment":"custom model, specify num_cls or branch string",
"file_name":""
},
"PPYOLOE":{
"_comment":"custom model, specify num_cls or branch string",
"file_name":""
},
"YOLOX":{
"_comment":"custom model, specify num_cls or branch string",
"file_name":""
},
"SCRFD_DET_FACE":{
"_comment":"output face and 5 landmarks",
"types":["face"],
"file_name":"scrfd_det_face_432_768_INT8"
},
"CLS_ATTRIBUTE_GENDER_AGE_GLASS":{
"_comment":"output age,gender(0:male,1:female),glass(0:no glass,1:glass)",
"types":["age","gender","glass"],
"file_name":"cls_attribute_gender_age_glass_112_112_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[255.0,255.0,255.0]
},
"CLS_ATTRIBUTE_GENDER_AGE_GLASS_MASK":{
"_comment":"output age,gender(0:male,1:female),glass(0:no glass,1:glass),mask(0:no mask,1:mask)",
"types":["age","gender","glass","mask"],
"file_name":"cls_attribute_gender_age_glass_mask_112_112_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[255.0,255.0,255.0]
},
"CLS_ATTRIBUTE_GENDER_AGE_GLASS_EMOTION":{
"_comment":"output age,gender(0:male,1:female),glass(0:no glass,1:glass),emotion(0:anger,1:disgut,2:fear,3:happy,4:neutral,5:sad;6:surprise)",
"types":["age","gender","glass","emotion"],
"file_name":"cls_attribute_gender_age_glass_emotion_112_112_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[255.0,255.0,255.0]
},
"CLS_RGBLIVENESS":{
"_comment":"output 0:live or 1:spoof",
"types":["live","spoof"],
"file_name":"cls_rgbliveness_256_256_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[255.0,255.0,255.0]
},
"CLS_YOLOV8":{
"file_name":"yolov8_cls_384_640_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[255.0,255.0,255.0]
},
"CLS_HAND_GESTURE":{
"_comment":"output hand gesture(0:fist,1:five,2:none,3:two)",
"types":["fist","five","none","two"],
"file_name":"cls_hand_gesture_128_128_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[255.0,255.0,255.0]
},
"CLS_KEYPOINT_HAND_GESTURE":{
"_comment":"output hand gesture(0:fist,1:five,2:four,3:none,4:ok,5:one,6:three,7:three2,8:two)",
"types":["fist","five","four","none","ok","one","three","three2","two"],
"file_name":"cls_keypoint_hand_gesture_1_42_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[1.0,1.0,1.0]
},
"CLS_SOUND_BABAY_CRY":{
"_comment":"output 0:background or 1:cry,single channel",
"types":["background","cry"],
"file_name":"cls_sound_babay_cry_188_40_INT8",
"rgb_order":"gray"
},
"CLS_SOUND_COMMAND_NIHAOSHIYUN":{
"_comment":"single channel,TODO:add types",
"types":["background","nihaoshiyun"],
"file_name":"cls_sound_nihaoshiyun_126_40_INT8",
"rgb_order":"gray",
"hop_len":128,
"fix":1
},
"CLS_SOUND_COMMAND_NIHAOSUANNENG":{
"_comment":"single channel,TODO:add types",
"types":["background","nihaosuanneng"],
"file_name":"my_custom_sound_command",
"rgb_order":"gray",
"hop_len":128,
"fix":1
},
"CLS_SOUND_COMMAND_XIAOAIXIAOAI":{
"_comment":"single channel,TODO:add types",
"types":["background","xiaoaixiaoai"],
"file_name":"cls_sound_xiaoaixiaoai_126_40_INT8",
"rgb_order":"gray",
"hop_len":128,
"fix":1
},
"CLS_IMG":{
"_comment":"custom classification, set types,file_name,specify rgb order and mean/std",
"types":["custom"],
"file_name":"",
"rgb_order":"rgb"
},
"KEYPOINT_LICENSE_PLATE":{
"_comment":"output 4 license plate keypoints",
"types":["top_left","top_right","bottom_left","bottom_right"],
"file_name":"keypoint_license_plate_64_128_INT8",
"rgb_order":"rgb"
},
"KEYPOINT_HAND":{
"_comment":"output 21 hand keypoints",
"file_name":"keypoint_hand_128_128_INT8",
"rgb_order":"rgb"
},
"KEYPOINT_YOLOV8POSE_PERSON17":{
"_comment":"output 17 person keypoints and box",
"file_name":"keypoint_yolov8pose_person17_384_640_INT8",
"rgb_order":"rgb"
},
"KEYPOINT_SIMCC_PERSON17":{
"_comment":"output 17 person keypoints from cropped image",
"file_name":"keypoint_simcc_person17_256_192_INT8",
"rgb_order":"rgb"
},
"KEYPOINT_FACE_V2": {
"_comment": "KEYPOINT_FACE_V2",
"file_name": "keypoint_face_v2_64_64_INT8"
},
"LSTR_DET_LANE":{
"_comment":"output lane keypoints",
"file_name":"lstr_det_lane_360_640_MIX",
"rgb_order":"rgb"
},
"RECOGNITION_LICENSE_PLATE":{
"_comment":"output 7 license plate characters",
"file_name":"recognition_license_plate_24_96_MIX",
"rgb_order":"bgr"
},
"YOLOV8_SEG":{
"_comment":"custom segmentation,set types,file_name,specify rgb order",
"types":[],
"file_name":"yolov8_seg_384_640_INT8"
},
"YOLOV8_SEG_COCO80":{
"is_coco_types":true,
"_comment":"output 80 segmentation mask",
"file_name":"yolov8n_seg_coco80_640_640_INT8"
},
"TOPFORMER_SEG_PERSON_FACE_VEHICLE":{
"_comment":"output mask",
"types":["background","person","face","vehicle","license plate"],
"file_name":"topformer_seg_person_face_vehicle_384_640_INT8",
"rgb_order":"rgb"
},
"FEATURE_IMG":{
"_comment":"custom segmentation,set file_name,specify rgb order,set mean/std",
"file_name":"",
"rgb_order":"rgb"
},
"FEATURE_CLIP_IMG":{
"_comment":"clip image feature extraction",
"file_name":"feature_clip_image_224_224_W4BF16",
"rgb_order":"rgb"
},
"FEATURE_CLIP_TEXT":{
"_comment":"clip text feature extraction",
"file_name":"feature_clip_text_1_77_W4BF16",
"rgb_order":"rgb"
},
"FEATURE_MOBILECLIP2_IMG":{
"_comment":"mobileclip2 image feature extraction",
"file_name":"feature_mobileclip2_B_img_224_224_INT8",
"rgb_order":"rgb"
},
"FEATURE_MOBILECLIP2_TEXT":{
"_comment":"mobileclip2 text feature extraction",
"file_name":"feature_mobileclip2_B_text_1_77_INT8",
"rgb_order":"rgb"
},
"FEATURE_CVIFACE":{
"_comment":"cviface 256-dimensional feature",
"file_name":"feature_cviface_112_112_INT8",
"rgb_order":"rgb",
"mean":[127.5,127.5,127.5],
"std":[128,128,128]
},
"FEATURE_BMFACE_R34":{
"_comment":"output 512 dim feature",
"file_name":"feature_bmface_r34_112_112_INT8",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[1,1,1]
},
"FEATURE_BMFACE_R50":{
"_comment":"output 512 dim feature",
"file_name":"bmface_r50_v1_bmnetp.bmodel",
"rgb_order":"rgb",
"mean":[0,0,0],
"std":[1,1,1]
},
"TRACKING_FEARTRACK":{
"_comment":"single object tracking",
"file_name":"tracking_feartrack_128_128_256_256_INT8",
"rgb_order":"rgb",
"mean":[123.675,116.28,103.53],
"std":[58.395,57.12,57.375]
},
"RECOGNITION_SPEECH_ZIPFORMER_ENCODER":{
"file_name":"recognition_speech_zipformer_encoder-s_71_80_BF16"
},
"RECOGNITION_SPEECH_ZIPFORMER_DECODER":{
"file_name":"recognition_speech_zipformer_decoder-s_1_2_BF16"
},
"RECOGNITION_SPEECH_ZIPFORMER_JOINER":{
"file_name":"recognition_speech_zipformer_joiner-s_1_512_1_512_BF16"
}
}
}
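For reference, a minimal Python sketch of consuming this registry; the file name model_config.json and the fallback handling are assumptions, not part of tdl_sdk:

import json

# Load the model registry above ("model_config.json" is an assumed path).
with open("model_config.json", encoding="utf-8") as f:
    cfg = json.load(f)

for name, entry in cfg["model_list"].items():
    # Entries carry at least file_name; rgb_order and types are optional,
    # and COCO-80 models set is_coco_types instead of listing 80 types.
    types = "coco80" if entry.get("is_coco_types") else entry.get("types", [])
    print(name, entry.get("file_name"), entry.get("rgb_order", "-"), types)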
=================
!!!!!!!!!!!!!!!!!!!!!!!! tdl_sdk solution !!!!!!!!!!!!!!!!!!!!!!!
In
https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-obb.ipynb#scrollTo=tdSMcABDNKW-
!pip install ultralytics==8.2.103 -q
upgrade it (pip install --upgrade --force-reinstall ultralytics)
train:
from ultralytics import YOLO
model = YOLO('yolov8n.pt')
results = model.train(data=f"data.yaml", epochs=100, imgsz=640)
https://github.com/milkv-duo/duo-buildroot-sdk-v2/blob/develop/tdl_sdk/tool/yolo_export/yolov8_export.py
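Between training and model_transform.py, the weights have to be exported to ONNX. For deployment the linked yolov8_export.py should be used, since it prepares the output layout the TDL SDK expects; as a baseline, the plain Ultralytics export (standard API, weights path assumed) looks like:

from ultralytics import YOLO

# Plain ONNX export via the standard Ultralytics API; the weights path
# is an assumption (default location after model.train above).
model = YOLO("runs/detect/train/weights/best.pt")
model.export(format="onnx", imgsz=640, opset=14)  # writes best.onnx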
model_transform.py \
--model_name yolov8n \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mean 0.0,0.0,0.0 \
--scale 0.0039216,0.0039216,0.0039216 \
--keep_aspect_ratio \
--pixel_format rgb \
--mlir yolov8n.mlir
the photos in the BUGGIO folder were 640x640
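To back that up before calibrating (run_calibration.py samples images from this folder), a quick sweep with OpenCV; the .jpg glob pattern is an assumption:

import glob
import cv2

# Sanity-check the calibration folder: flag unreadable images and any
# whose size differs from the expected 640x640.
for path in glob.glob("../../BUGGIO/*.jpg"):
    img = cv2.imread(path)
    if img is None:
        print("unreadable:", path)
    elif img.shape[:2] != (640, 640):
        print("unexpected size", img.shape[:2], path)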
run_calibration.py yolov8n.mlir \
--dataset ../../BUGGIO \
--input_num 100 \
-o yolov8n_cali_table
model_deploy.py \
--mlir yolov8n.mlir \
--quant_input --quant_output \
--quantize INT8 \
--calibration_table yolov8n_cali_table \
--processor cv181x \
--model yolov8n_cv181x_int8_sym.cvimodel
Python TDL SDK (sophgo) example
tdl_sophgo.py
python tdl_sophgo.py /root/cv181x/yolov8n_cv181x_int8_sym_cv181x.cvimodel Barbie_5-4_jpg.rf.64feb144416c82dc7c58b335d7143774.jpg
import sys
from tdl import nn, image
# import cv2  # only needed for the cv2.imread alternative below

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: python tdl_sophgo.py <model_path> <image_path>")
        sys.exit(1)
    model_path = sys.argv[1]
    img_path = sys.argv[2]
    # Generic YOLOV8 detector (not face-specific, despite the sample's origin)
    detector = nn.get_model(nn.ModelType.YOLOV8, model_path)
    img = image.read(img_path)
    # img = cv2.imread(img_path)
    bboxes = detector.inference(img)
    print(bboxes)
#https://github.com/sophgo/tdl_sdk/tree/master
Python TDL SDK (Milk-V Duo S) example
sample_img_object_detection.py
python sample_img_object_detection.py YOLOV8 /root/cv181x/yolov8n_cv181x_int8_sym_cv181x.cvimodel /root/Bauer_9-4_jpg.rf.1ee8c79f82e5c4ed6b2ba3b7d5340d2c.jpg
import sys
import os
from tdl import nn, image
import cv2

def visualize_objects(img_path, bboxes, save_path="object_detection.jpg"):
    """Visualize object detection results."""
    img = cv2.imread(img_path)
    print(f"Detected {len(bboxes)} objects")
    for i, obj in enumerate(bboxes):
        x1, y1, x2, y2 = map(int, [obj['x1'], obj['y1'], obj['x2'], obj['y2']])
        class_id = obj['class_id']
        score = obj['score']
        class_name = obj.get('class_name', f'class_{class_id}')
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
        label = f"{class_id}:{score:.2f}"
        center_x = (x1 + x2) // 2
        center_y = (y1 + y2) // 2
        cv2.putText(img, label, (center_x, center_y),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    cv2.imwrite(save_path, img)
    print(f"Saved image to: {save_path}")

if __name__ == "__main__":
    if len(sys.argv) < 4 or len(sys.argv) > 5:
        print("Usage: python3 sample_img_object_detection.py <model_id_name> <model_dir> <image_path> [threshold]")
        # Handling for models that are not detection models has not been added yet
        sys.exit(1)
    model_id_name = sys.argv[1]
    model_dir = sys.argv[2]
    image_path = sys.argv[3]
    # Note: threshold is parsed here but not applied in this sample
    threshold = float(sys.argv[4]) if len(sys.argv) == 5 else 0.5
    if not os.path.exists(image_path):
        print(f"Image file does not exist: {image_path}")
        sys.exit(1)
    model_type = getattr(nn.ModelType, model_id_name)
    model = nn.get_model(model_type, model_dir, device_id=0)
    # Read the image
    img = image.read(image_path)
    # Run inference
    outdatas = model.inference(img)
    expected_keys = {"class_id", "class_name", "score", "x1", "y1", "x2", "y2"}
    is_detection = (
        isinstance(outdatas, list) and
        len(outdatas) > 0 and  # guard against empty results
        isinstance(outdatas[0], dict) and
        set(outdatas[0].keys()) == expected_keys
    )
    if not is_detection:
        print("The current model is not an object detection model; raw output:")
        print(outdatas)
        sys.exit(1)
    print(f"out_datas.size: {len(outdatas)}")
    for i, obj in enumerate(outdatas):
        print(f"obj_meta_index: {i} "
              f"class: {obj['class_id']} "
              f"score: {obj['score']:.2f} "
              f"bbox: {obj['x1']:.2f} {obj['y1']:.2f} {obj['x2']:.2f} {obj['y2']:.2f}")
    visualize_objects(image_path, outdatas)
# input: python3 sample_img_object_detection.py <model_id_name> <model_dir> <image_path> [threshold]
# output: obj_meta_index: <index> class: <class_id> score: <score_value> bbox: <x1> <y1> <x2> <y2>
Using the complete script:
./pt_to_cvimodel_tdl_sdk.sh
python yolov8_export.py
python export_tdl_sdk.py --dataset ../../BUGGIO --test_input ../../BUGGIO/Ades_2-3_jpg.rf.de3d17a6dcc748c6642882198a1c1c76.jpg best.onnx
!!!!!!!!!!!!!!!!!!!!!!!! reCamera solution !!!!!!!!!!!!!!!!!!!!!!!
In
https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb#scrollTo=tdSMcABDNKW-yolo11s
!pip install ultralytics==8.2.103 -q
upgrade it:
pip install --upgrade --force-reinstall ultralytics
copy the BUGGIO zip:
!unzip /context/buggio.v1i.yolov11.zip
train:
!yolo task=detect mode=train model=yolo11n.pt data=data.yaml epochs=200 imgsz=640
./pt_to_cvimodel_recamera.sh
yolo export model=best.pt format=onnx imgsz=640,640
python export_recamera.py --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" --dataset ../../BUGGIO --test_input ../../BUGGIO/Barbie_7-10_jpg.rf.502fbff248ff3b2336a9e60317de843b.jpg best.onnx --quantize INT8
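Those --output_names are the Conv outputs of the box (cv2.*) and class (cv3.*) branches at the three strides of the detection head. Before running model_transform they can be checked against the exported graph; a sketch with the onnx package (the /model.23/ prefix matches this particular export):

import onnx

# Print the detection-head Conv outputs so the names passed via
# --output_names can be verified against the actual graph.
m = onnx.load("best.onnx")
for node in m.graph.node:
    if node.op_type == "Conv" and node.output[0].startswith("/model.23/"):
        print(node.output[0])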
test the cvimodel on the reCamera:
model_transform \
--model_name best \
--model_def ./best.onnx \
--input_shapes '[[1,3,640,640]]' \
--mean 0.0,0.0,0.0 \
--scale 0.0039216,0.0039216,0.0039216 \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names /model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0 \
--test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
--test_result best_top_outputs.npz \
--mlir best.mlir
run_calibration \
best.mlir \
--dataset ../../BUGGIO \
--input_num 200 \
-o best_calib_table
model_deploy \
--mlir best.mlir \
--quantize INT8 \
--quant_input \
--processor cv181x \
--calibration_table best_calib_table \
--test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
--test_reference best_top_outputs.npz \
--customization_format RGB_PACKED \
--fuse_preprocess \
--aligned_input \
--model best_cv181x_int8.cvimodel
model_transform \
--model_name yolo11n \
--model_def best.onnx \
--input_shapes "[[1,3,640,640]]" \
--mean "0.0,0.0,0.0" \
--scale "0.0039216,0.0039216,0.0039216" \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
--test_input ../../BUGGIO/Ades_2-3_jpg.rf.de3d17a6dcc748c6642882198a1c1c76.jpg \
--test_result yolo11n_top_outputs.npz \
--mlir yolo11n.mlir
run_calibration \
yolo11n.mlir \
--dataset ../../BUGGIO \
--input_num 100 \
-o yolo11n_calib_table
model_deploy \
--mlir yolo11n.mlir \
--quantize INT8 \
--quant_input \
--processor cv181x \
--calibration_table yolo11n_calib_table \
--test_input ../../BUGGIO/Ades_2-3_jpg.rf.de3d17a6dcc748c6642882198a1c1c76.jpg \
--test_reference yolo11n_top_outputs.npz \
--customization_format RGB_PACKED \
--fuse_preprocess \
--aligned_input \
--model yolo11n_1684x_int8_sym.cvimodel
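model_deploy already validates the INT8 model against the reference that model_transform produced; to inspect that reference by hand (e.g. to confirm all six head tensors were captured), a small numpy sketch:

import numpy as np

# List the tensors stored by model_transform's --test_result; there
# should be one entry per requested output name.
ref = np.load("yolo11n_top_outputs.npz")
for key in ref.files:
    print(key, ref[key].shape, ref[key].dtype)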
======= car counting
https://github.com/Seeed-Studio/sscma-example-sg200x/tree/main/solutions/sscma-model/main
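The sscma-example solution above does the counting on-device; purely as an illustration (not that implementation), a naive line-crossing counter over per-frame detections could look like this, with LINE_Y and the index pairing being hypothetical choices:

# Illustration only: count boxes whose centre crosses a horizontal line
# between consecutive frames. Boxes are naively paired by index; a real
# counter needs a tracker (e.g. SORT) for the frame-to-frame association.
LINE_Y = 360  # assumed counting-line height in pixels

def count_crossings(prev_cy, detections, total):
    # detections: list of dicts with x1, y1, x2, y2 keys, as printed by
    # the detection samples above.
    cur_cy = [(d["y1"] + d["y2"]) / 2 for d in detections]
    for before, after in zip(prev_cy, cur_cy):
        if before < LINE_Y <= after:  # centre moved down across the line
            total += 1
    return cur_cy, total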
yolo export model=best.pt format=onnx opset=14
python export_recamera.py --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" --dataset ../../CARS --test_input ../../CARS/DOH_3-video-converter_com-_mp4-26_jpg.rf.a6a631199f4152b1ab619e3e3cf6e8ee.jpg best.onnx --quantize INT8
model_transform \
--model_name yolo11n \
--model_def best.onnx \
--input_shapes "[[1,3,640,640]]" \
--mean "0.0,0.0,0.0" \
--scale "0.0039216,0.0039216,0.0039216" \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
--test_input ../../CARS/DOH_3-video-converter_com-_mp4-26_jpg.rf.a6a631199f4152b1ab619e3e3cf6e8ee.jpg \
--test_result yolo11n_top_outputs.npz \
--mlir yolo11n.mlir
run_calibration \
yolo11n.mlir \
--dataset ../../CARS \
--input_num 100 \
-o yolo11n_calib_table
model_deploy \
--mlir yolo11n.mlir \
--quantize INT8 \
--quant_input \
--processor cv181x \
--calibration_table yolo11n_calib_table \
--test_input ../../CARS/DOH_3-video-converter_com-_mp4-26_jpg.rf.a6a631199f4152b1ab619e3e3cf6e8ee.jpg \
--test_reference yolo11n_top_outputs.npz \
--customization_format RGB_PACKED \
--fuse_preprocess \
--aligned_input \
--model yolo11n_1684x_int8_sym.cvimodel
yolo26n (same pipeline, using best_26.onnx):
model_transform \
--model_name yolo26n \
--model_def best_26.onnx \
--input_shapes "[[1,3,640,640]]" \
--mean "0.0,0.0,0.0" \
--scale "0.0039216,0.0039216,0.0039216" \
--keep_aspect_ratio \
--pixel_format rgb \
--output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
--test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
--test_result yolo26n_top_outputs.npz \
--mlir yolo26n.mlir
run_calibration \
yolo26n.mlir \
--dataset ../../BUGGIO \
--input_num 100 \
-o yolo26n_calib_table
model_deploy \
--mlir yolo26n.mlir \
--quantize INT8 \
--quant_input \
--processor cv181x \
--calibration_table yolo26n_calib_table \
--test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
--test_reference yolo26n_top_outputs.npz \
--customization_format RGB_PACKED \
--fuse_preprocess \
--aligned_input \
--model yolo26n_1684x_int8_sym.cvimodel
https://github.com/ultralytics/ultralytics/blob/ee2ac9e43491e5ca61a158fb3a42e621a6710ee1/docs/en/integrations/seeedstudio-recamera.md?plain=1#L62
https://github.com/Seeed-Studio/SSCMA-Micro
https://docs.ultralytics.com/modes/predict/#key-features-of-predict-mode
https://github.com/Seeed-Studio/reCamera-OS/tree/sg200x-reCamera/external/br2-external