Monday, January 19, 2026

C++ PYTHON NODE

{ "github_url":"https://github.com/sophgo/tdl_models/tree/main/", "_comment":"model_list specify the model maintained in tdl_models,each model should at least have file name and rgb_order(choice:rgb,bgr,gray)", "model_list":{ "MBV2_DET_PERSON":{ "types":["person"], "file_name":"mbv2_det_person_256_448_INT8", "rgb_order":"rgb" }, "YOLOV8N_DET_HAND":{ "types":["hand"], "file_name":"yolov8n_det_hand_384_640_INT8" }, "YOLOV8N_DET_PET_PERSON":{ "types":["cat","dog","person"], "file_name":"yolov8n_det_pet_person_384_640_INT8" }, "YOLOV8N_DET_BICYCLE_MOTOR_EBICYCLE":{ "types":["bicycle","motorcycle","ebicycle"], "file_name":"yolov8n_det_bicycle_motor_ebicycle_384_640_INT8" }, "YOLOV8N_DET_PERSON_VEHICLE":{ "types":["car","bus","truck","rider with motorcycle","person","bike","motorcycle"], "file_name":"yolov8n_det_person_vehicle_384_640_INT8" }, "YOLOV8N_DET_HAND_FACE_PERSON":{ "types":["hand","face","person"], "file_name":"yolov8n_det_hand_face_person_384_640_INT8" }, "YOLOV8N_DET_FACE_HEAD_PERSON_PET":{ "types":["face","head","person","pet"], "file_name":"yolov8n_det_face_head_person_pet_384_640_INT8" }, "YOLOV8N_DET_HEAD_PERSON":{ "types":["head","person"], "file_name":"yolov8n_det_head_person_384_640_INT8" }, "YOLOV8N_DET_HEAD_HARDHAT":{ "types":["head","hardhat"], "file_name":"yolov8n_det_head_hardhat_576_960_INT8" }, "YOLOV8N_DET_FIRE_SMOKE":{ "types":["fire","smoke"], "file_name":"yolov8n_det_fire_smoke_384_640_INT8" }, "YOLOV8N_DET_FIRE":{ "types":["fire"], "file_name":"yolov8n_det_fire_384_640_INT8" }, "YOLOV8N_DET_HEAD_SHOULDER":{ "types":["head shoulder"], "file_name":"yolov8n_det_head_shoulder_384_640_INT8" }, "YOLOV8N_DET_LICENSE_PLATE":{ "types":["license plate"], "file_name":"yolov8n_det_license_plate_384_640_INT8" }, "YOLOV8N_DET_TRAFFIC_LIGHT":{ "types":["red","yellow","green","off","wait on"], "file_name":"yolov8n_det_traffic_light_384_640_INT8" }, "YOLOV8N_DET_MONITOR_PERSON":{ "types":["person"], "file_name":"yolov8n_det_monitor_person_256_448_INT8" }, "YOLOV11N_DET_MONITOR_PERSON":{ "types":["person"], "file_name":"yolov11n_det_monitor_person_384_640_INT8" }, "YOLOV11N_DET_BICYCLE_MOTOR_EBICYCLE":{ "types":["bicycle","motorcycle","ebicycle"], "file_name":"yolov11n_det_bicycle_motor_ebicycle_384_640_INT8" }, "YOLOV5_DET_COCO80":{ "is_coco_types":true, "file_name":"yolov5s_det_coco80_640_640_INT8" }, "YOLOV6_DET_COCO80":{ "is_coco_types":true, "file_name":"yolov6n_det_coco80_640_640_INT8" }, "YOLOV7_DET_COCO80":{ "is_coco_types":true, "file_name":"yolov7_tiny_det_coco80_640_640_INT8" }, "YOLOV8_DET_COCO80":{ "is_coco_types":true, "file_name":"yolov8n_det_coco80_640_640_INT8" }, "YOLOV10_DET_COCO80":{ "is_coco_types":true, "file_name":"yolov10n_det_coco80_640_640_INT8" }, "YOLOV11N_DET_COCO80":{ "is_coco_types":true, "file_name":"yolov11n_det_coco80_640_640_INT8" }, "PPYOLOE_DET_COCO80":{ "is_coco_types":true, "file_name":"ppyoloe_det_coco80_640_640_INT8" }, "YOLOX_DET_COCO80":{ "is_coco_types":true, "file_name":"yolox_m_det_coco80_640_640_INT8" }, "YOLOV5":{ "_comment":"custom model, specify num_cls or branch string", "file_name":"" }, "YOLOV6":{ "_comment":"custom model, specify num_cls or branch string", "file_name":"" }, "YOLOV8":{ "_comment":"custom model, specify num_cls or branch string", "file_name":"yolov8n_cv181x_int8_sym" }, "YOLOV10":{ "_comment":"custom model, specify num_cls or branch string", "file_name":"" }, "PPYOLOE":{ "_comment":"custom model, specify num_cls or branch string", "file_name":"" }, "YOLOX":{ "_comment":"custom model, specify num_cls or branch 
string", "file_name":"" }, "SCRFD_DET_FACE":{ "_comment":"output face and 5 landmarks", "types":["face"], "file_name":"scrfd_det_face_432_768_INT8" }, "CLS_ATTRIBUTE_GENDER_AGE_GLASS":{ "_comment":"output age,gender(0:male,1:female),glass(0:no glass,1:glass)", "types":["age","gender","glass"], "file_name":"cls_attribute_gender_age_glass_112_112_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[255.0,255.0,255.0] }, "CLS_ATTRIBUTE_GENDER_AGE_GLASS_MASK":{ "_comment":"output age,gender(0:male,1:female),glass(0:no glass,1:glass),mask(0:no mask,1:mask)", "types":["age","gender","glass","mask"], "file_name":"cls_attribute_gender_age_glass_mask_112_112_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[255.0,255.0,255.0] }, "CLS_ATTRIBUTE_GENDER_AGE_GLASS_EMOTION":{ "_comment":"output age,gender(0:male,1:female),glass(0:no glass,1:glass),emotion(0:anger,1:disgut,2:fear,3:happy,4:neutral,5:sad;6:surprise)", "types":["age","gender","glass","emotion"], "file_name":"cls_attribute_gender_age_glass_emotion_112_112_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[255.0,255.0,255.0] }, "CLS_RGBLIVENESS":{ "_comment":"output 0:live or 1:spoof", "types":["live","spoof"], "file_name":"cls_rgbliveness_256_256_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[255.0,255.0,255.0] }, "CLS_YOLOV8":{ "file_name":"yolov8n_cv181x_int8_sym", "rgb_order":"rgb", "mean":[0,0,0], "std":[255.0,255.0,255.0] }, "CLS_HAND_GESTURE":{ "_comment":"output hand gesture(0:fist,1:five,2:none,3:two)", "types":["fist","five","none","two"], "file_name":"cls_hand_gesture_128_128_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[255.0,255.0,255.0] }, "CLS_KEYPOINT_HAND_GESTURE":{ "_comment":"output hand gesture(0:fist,1:five,2:four,3:none,4:ok,5:one,6:three,7:three2,8:two)", "types":["fist","five","four","none","ok","one","three","three2","two"], "file_name":"cls_keypoint_hand_gesture_1_42_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[1.0,1.0,1.0] }, "CLS_SOUND_BABAY_CRY":{ "_comment":"output 0:background or 1:cry,single channel", "types":["background","cry"], "file_name":"cls_sound_babay_cry_188_40_INT8", "rgb_order":"gray" }, "CLS_SOUND_COMMAND_NIHAOSHIYUN":{ "_comment":"single channel,TODO:add types", "types":["background","nihaoshiyun"], "file_name":"cls_sound_nihaoshiyun_126_40_INT8", "rgb_order":"gray", "hop_len":128, "fix":1 }, "CLS_SOUND_COMMAND_NIHAOSUANNENG":{ "_comment":"single channel,TODO:add types", "types":["background","nihaosuanneng"], "file_name":"my_custom_sound_command", "rgb_order":"gray", "hop_len":128, "fix":1 }, "CLS_SOUND_COMMAND_XIAOAIXIAOAI":{ "_comment":"single channel,TODO:add types", "types":["background","xiaoaixiaoai"], "file_name":"cls_sound_xiaoaixiaoai_126_40_INT8", "rgb_order":"gray", "hop_len":128, "fix":1 }, "CLS_IMG":{ "_comment":"custom classification, set types,file_name,specify rgb order and mean/std", "types":["custom"], "file_name":"", "rgb_order":"rgb" }, "KEYPOINT_LICENSE_PLATE":{ "_comment":"output 4 license plate keypoints", "types":["top_left","top_right","bottom_left","bottom_right"], "file_name":"keypoint_license_plate_64_128_INT8", "rgb_order":"rgb" }, "KEYPOINT_HAND":{ "_comment":"output 21 hand keypoints", "file_name":"keypoint_hand_128_128_INT8", "rgb_order":"rgb" }, "KEYPOINT_YOLOV8POSE_PERSON17":{ "_comment":"output 17 person keypoints and box", "file_name":"keypoint_yolov8pose_person17_384_640_INT8", "rgb_order":"rgb" }, "KEYPOINT_SIMCC_PERSON17":{ "_comment":"output 17 person keypoints from cropped image", "file_name":"keypoint_simcc_person17_256_192_INT8", "rgb_order":"rgb" }, 
"KEYPOINT_FACE_V2": { "_comment": "KEYPOINT_FACE_V2", "file_name": "keypoint_face_v2_64_64_INT8" }, "LSTR_DET_LANE":{ "_comment":"output lane keypoints", "file_name":"lstr_det_lane_360_640_MIX", "rgb_order":"rgb" }, "RECOGNITION_LICENSE_PLATE":{ "_comment":"output 7 license plate characters", "file_name":"recognition_license_plate_24_96_MIX", "rgb_order":"bgr" }, "YOLOV8_SEG":{ "_comment":"custom segmentation,set types,file_name,specify rgb order", "types":[], "file_name":"yolov8_seg_384_640_INT8" }, "YOLOV8_SEG_COCO80":{ "is_coco_types":true, "_comment":"output 80 segmentation mask", "file_name":"yolov8n_seg_coco80_640_640_INT8" }, "TOPFORMER_SEG_PERSON_FACE_VEHICLE":{ "_comment":"output mask", "types":["background","person","face","vehicle","license plate"], "file_name":"topformer_seg_person_face_vehicle_384_640_INT8", "rgb_order":"rgb" }, "FEATURE_IMG":{ "_comment":"custom segmentation,set file_name,specify rgb order,set mean/std", "file_name":"", "rgb_order":"rgb" }, "FEATURE_CLIP_IMG":{ "_comment":"clip image feature extraction", "file_name":"feature_clip_image_224_224_W4BF16", "rgb_order":"rgb" }, "FEATURE_CLIP_TEXT":{ "_comment":"clip text feature extraction", "file_name":"feature_clip_text_1_77_W4BF16", "rgb_order":"rgb" }, "FEATURE_MOBILECLIP2_IMG":{ "_comment":"mobileclip2 image feature extraction", "file_name":"feature_mobileclip2_B_img_224_224_INT8", "rgb_order":"rgb" }, "FEATURE_MOBILECLIP2_TEXT":{ "_comment":"mobileclip2 text feature extraction", "file_name":"feature_mobileclip2_B_text_1_77_INT8", "rgb_order":"rgb" }, "FEATURE_CVIFACE":{ "_comment":"cviface 256-dimensional feature", "file_name":"feature_cviface_112_112_INT8", "rgb_order":"rgb", "mean":[127.5,127.5,127.5], "std":[128,128,128] }, "FEATURE_BMFACE_R34":{ "_comment":"output 512 dim feature", "file_name":"feature_bmface_r34_112_112_INT8", "rgb_order":"rgb", "mean":[0,0,0], "std":[1,1,1] }, "FEATURE_BMFACE_R50":{ "_comment":"output 512 dim feature", "file_name":"bmface_r50_v1_bmnetp.bmodel", "rgb_order":"rgb", "mean":[0,0,0], "std":[1,1,1] }, "TRACKING_FEARTRACK":{ "_comment":"single object tracking", "file_name":"tracking_feartrack_128_128_256_256_INT8", "rgb_order":"rgb", "mean":[123.675,116.28,103.53], "std":[58.395,57.12,57.375] }, "RECOGNITION_SPEECH_ZIPFORMER_ENCODER":{ "file_name":"recognition_speech_zipformer_encoder-s_71_80_BF16" }, "RECOGNITION_SPEECH_ZIPFORMER_DECODER":{ "file_name":"recognition_speech_zipformer_decoder-s_1_2_BF16" }, "RECOGNITION_SPEECH_ZIPFORMER_JOINER":{ "file_name":"recognition_speech_zipformer_joiner-s_1_512_1_512_BF16" } } } ================= !!!!!!!!!!!!!!!!!!!!!!!! solucao tdl_sdk !!!!!!!!!!!!!!!!!!!!!!! 
=================
!!!!!!!!!!!!!!!!!!!!!!!! tdl_sdk solution !!!!!!!!!!!!!!!!!!!!!!!

at https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-obb.ipynb#scrollTo=tdSMcABDNKW-

!pip install ultralytics==8.2.103 -q
!pip install ultralytics==8.3.0   # the trick so YOLO doesn't pull a newer .pt

train:
from ultralytics import YOLO
model = YOLO('yolov8n.pt')
results = model.train(data="data.yaml", epochs=100, imgsz=640)

https://github.com/milkv-duo/duo-buildroot-sdk-v2/blob/develop/tdl_sdk/tool/yolo_export/yolov8_export.py

model_transform \
  --model_name yolov8n \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --mlir yolov8n.mlir

The photos in the BUGGIO folder were already 640x640.

run_calibration.py yolov8n.mlir \
  --dataset ../../BUGGIO \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8n.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --model yolov8n_cv181x_int8_sym.cvimodel
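run_calibration simply walks --dataset, and the note above says the BUGGIO photos were already 640x640; the preprocessing resizes anyway, but it is worth confirming the calibration images match the deployment resolution. A minimal sketch, assuming Pillow is installed and the folder path used above:

import os
from PIL import Image

DATASET = "../../BUGGIO"  # calibration folder passed to run_calibration
for name in sorted(os.listdir(DATASET)):
    if not name.lower().endswith((".jpg", ".jpeg", ".png")):
        continue
    with Image.open(os.path.join(DATASET, name)) as im:
        # Flag anything that doesn't match the model's 640x640 input.
        if im.size != (640, 640):
            print(f"{name}: {im.size} (not 640x640)")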
python tdl sdk sophgo example tdl_sophgo.py:
python tdl_sophgo.py /root/cv181x/yolov8n_cv181x_int8_sym_cv181x.cvimodel Barbie_5-4_jpg.rf.64feb144416c82dc7c58b335d7143774.jpg

import sys
import os
from tdl import nn, image
import cv2
import numpy as np

if __name__ == "__main__":
    if len(sys.argv) != 3:
        print("Usage: python sample_fd.py <model_path> <image_path>")
        sys.exit(1)
    model_path = sys.argv[1]
    img_path = sys.argv[2]
    face_detector = nn.get_model(nn.ModelType.YOLOV8, model_path)
    img = image.read(img_path)
    # img = cv2.imread(img_path)
    bboxes = face_detector.inference(img)
    print(bboxes)
# https://github.com/sophgo/tdl_sdk/tree/master

python tdl sdk milkv-duo S example sample_img_object_detection.py:
python sample_img_object_detection.py YOLOV8 /root/cv181x/yolov8n_cv181x_int8_sym_cv181x.cvimodel /root/Bauer_9-4_jpg.rf.1ee8c79f82e5c4ed6b2ba3b7d5340d2c.jpg

import sys
import os
from tdl import nn, image
import cv2
import numpy as np

def visualize_objects(img_path, bboxes, save_path="object_detection.jpg"):
    """Visualize the object detection results"""
    img = cv2.imread(img_path)
    print(f"Detected {len(bboxes)} objects")
    for i, obj in enumerate(bboxes):
        x1, y1, x2, y2 = map(int, [obj['x1'], obj['y1'], obj['x2'], obj['y2']])
        class_id = obj['class_id']
        score = obj['score']
        class_name = obj.get('class_name', f'class_{class_id}')
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
        label = f"{class_id}:{score:.2f}"
        center_x = (x1 + x2) // 2
        center_y = (y1 + y2) // 2
        cv2.putText(img, label, (center_x, center_y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 1)
    cv2.imwrite(save_path, img)
    print(f"Saved image to: {save_path}")

if __name__ == "__main__":
    if len(sys.argv) < 4 or len(sys.argv) > 5:
        print("Usage: python3 sample_img_object_detection.py <model_id_name> <model_dir> <image_path> [threshold]")
        # handling for non-detection models has not been added yet
        sys.exit(1)
    model_id_name = sys.argv[1]
    model_dir = sys.argv[2]
    image_path = sys.argv[3]
    threshold = float(sys.argv[4]) if len(sys.argv) == 5 else 0.5
    if not os.path.exists(image_path):
        print(f"Image file does not exist: {image_path}")
        sys.exit(1)
    model_type = getattr(nn.ModelType, model_id_name)
    model = nn.get_model(model_type, model_dir, device_id=0)
    # read the image
    img = image.read(image_path)
    # run inference
    outdatas = model.inference(img)
    expected_keys = {"class_id", "class_name", "score", "x1", "y1", "x2", "y2"}
    is_detection = (
        isinstance(outdatas, list)
        and isinstance(outdatas[0], dict)
        and set(outdatas[0].keys()) == expected_keys
    )
    if not is_detection:
        print("The current model is not an object detection model; output:")
        print(outdatas)
        sys.exit(1)
    print(f"out_datas.size: {len(outdatas)}")
    for i, obj in enumerate(outdatas):
        print(f"obj_meta_index: {i} "
              f"class: {obj['class_id']} "
              f"score: {obj['score']:.2f} "
              f"bbox: {obj['x1']:.2f} {obj['y1']:.2f} {obj['x2']:.2f} {obj['y2']:.2f}")
    visualize_objects(image_path, outdatas)

# input: python3 sample_img_object_detection.py <model_id_name> <model_dir> <image_path> [threshold]
# output: obj_meta_index: <index> class: <class_id> score: <score_value> bbox: <x1> <y1> <x2> <y2>

Using the complete script:
./pt_to_cvimodel_tdl_sdk.sh
python yolov8_export.py
python export_tdl_sdk.py --dataset ../../BUGGIO --test_input ../../BUGGIO/Ades_2-3_jpg.rf.de3d17a6dcc748c6642882198a1c1c76.jpg best.onnx

!!!!!!!!!!!!!!!!!!!!!!!! reCamera solution !!!!!!!!!!!!!!!!!!!!!!!

at https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb#scrollTo=tdSMcABDNKW-yolo11s

!pip install ultralytics==8.2.103 -q
then upgrade: pip install --upgrade --force-reinstall ultralytics
copy the BUGGIO zip: !unzip /context/buggio.v1i.yolov11.zip

train <---> Python :)
from ultralytics import YOLO
model = YOLO("/content/yolo11n.pt")
model.train(data="data.yaml", epochs=200, imgsz=640)

./pt_to_cvimodel_recamera.sh
yolo export model=best.pt format=onnx imgsz=640,640
python export_recamera.py --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" --dataset ../../BUGGIO --test_input ../../BUGGIO/Barbie_7-10_jpg.rf.502fbff248ff3b2336a9e60317de843b.jpg best.onnx --quantize INT8

test the cvimodel on the reCamera:

model_transform \
  --model_name best \
  --model_def ./best.onnx \
  --input_shapes '[[1,3,640,640]]' \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names /model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0 \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_result best_top_outputs.npz \
  --mlir best.mlir

run_calibration best.mlir \
  --dataset ../../BUGGIO \
  --input_num 200 \
  -o best_calib_table

model_deploy \
  --mlir best.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table best_calib_table \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_reference best_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model best_cv181x_int8.cvimodel
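model_deploy already compares the quantized model against --test_reference internally, but it can help to eyeball two .npz dumps yourself (for example the --test_result from model_transform against a model_runner.py reference). A minimal sketch; the file names are placeholders taken from commands in these notes:

import numpy as np

a = np.load("best_top_outputs.npz")             # written by model_transform --test_result
b = np.load("yolo11n-detect_ref_outputs.npz")   # e.g. a model_runner.py reference dump

for key in a.files:
    if key not in b.files:
        print(f"{key}: missing in reference")
        continue
    x, y = a[key].ravel().astype(np.float64), b[key].ravel().astype(np.float64)
    # cosine similarity, the same flavor of metric the --tolerance flags use
    cos = float(np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y) + 1e-9))
    print(f"{key}: cosine={cos:.4f}")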
model_transform \
  --model_name yolo11n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input ../../BUGGIO/Ades_2-3_jpg.rf.de3d17a6dcc748c6642882198a1c1c76.jpg \
  --test_result yolo11n_top_outputs.npz \
  --mlir yolo11n.mlir

run_calibration \
  yolo11n.mlir \
  --dataset ../../BUGGIO \
  --input_num 100 \
  -o yolo11n_calib_table

model_deploy \
  --mlir yolo11n.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo11n_calib_table \
  --test_input ../../BUGGIO/Ades_2-3_jpg.rf.de3d17a6dcc748c6642882198a1c1c76.jpg \
  --test_reference yolo11n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo11n_1684x_int8_sym.cvimodel

======= car counting 11 =======
https://github.com/Seeed-Studio/sscma-example-sg200x/tree/main/solutions/sscma-model/main

yolo export model=best.pt format=onnx opset=14
python export_recamera.py --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" --dataset ../../CARS --test_input ../../CARS/DOH_3-video-converter_com-_mp4-26_jpg.rf.a6a631199f4152b1ab619e3e3cf6e8ee.jpg best.onnx --quantize INT8

model_transform \
  --model_name yolo11n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input ../../CARS/DOH_3-video-converter_com-_mp4-26_jpg.rf.a6a631199f4152b1ab619e3e3cf6e8ee.jpg \
  --test_result yolo11n_top_outputs.npz \
  --mlir yolo11n.mlir

run_calibration \
  yolo11n.mlir \
  --dataset ../../CARS \
  --input_num 100 \
  -o yolo11n_calib_table

model_deploy \
  --mlir yolo11n.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo11n_calib_table \
  --test_input ../../CARS/DOH_3-video-converter_com-_mp4-26_jpg.rf.a6a631199f4152b1ab619e3e3cf6e8ee.jpg \
  --test_reference yolo11n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo11n_1684x_int8_sym.cvimodel

yolo26n:
model_transform \
  --model_name yolo26n \
  --model_def best_26.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_result yolo26n_top_outputs.npz \
  --mlir yolo26n.mlir

run_calibration \
  yolo26n.mlir \
  --dataset ../../BUGGIO \
  --input_num 100 \
  -o yolo26n_calib_table

model_deploy \
  --mlir yolo26n.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo26n_calib_table \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_reference yolo26n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo26n_1684x_int8_sym.cvimodel

https://github.com/ultralytics/ultralytics/blob/ee2ac9e43491e5ca61a158fb3a42e621a6710ee1/docs/en/integrations/seeedstudio-recamera.md?plain=1#L62
https://github.com/Seeed-Studio/SSCMA-Micro
https://docs.ultralytics.com/modes/predict/#key-features-of-predict-mode
https://github.com/Seeed-Studio/reCamera-OS/tree/sg200x-reCamera/external/br2-external

ULTRALYTICS OPEN COLAB
https://colab.research.google.com/github/ultralytics/ultralytics/blob/main/examples/tutorial.ipynb
Install Ultralytics:
!uv pip install ultralytics
import ultralytics
ultralytics.checks()
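The six /model.23/... strings passed to --output_names above are the per-scale box (cv2.x) and class (cv3.x) head convolutions; they change with model version and export flags, so it is safer to list candidates from your own ONNX than to copy them. A minimal sketch, assuming the onnx package is installed; the cv2/cv3 name filter is my heuristic, not an official rule:

import onnx

m = onnx.load("best.onnx")
print("graph outputs:", [o.name for o in m.graph.output])
# Heuristic: the detection head branches end in Conv nodes whose names
# contain /cv2.N/ or /cv3.N/ - print candidates to pick the six from.
for node in m.graph.node:
    if node.op_type == "Conv" and ("/cv2." in node.name or "/cv3." in node.name):
        print(node.name, "->", node.output[0])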
copy the BUGGIO DATASET (obtained with ROBOFLOW): !unzip /cont
yolo check

Train using yolo26n.pt, referencing the Roboflow data.yaml:
!yolo train model=yolo26n.pt data=data.yaml epochs=100 imgsz=640
!yolo classify train data=/content/train model=yolo26n-cls.pt epochs=100 imgsz=640   # train.zip (class)

yolo export model=best.pt format=onnx opset=14 end2end=False

model_transform \
  --model_name yolo26n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_result yolo26n_top_outputs.npz \
  --mlir yolo26n.mlir > transform.txt

run_calibration \
  yolo26n.mlir \
  --dataset ../../BUGGIO \
  --input_num 100 \
  -o yolo26n_calib_table > calib.txt

model_deploy \
  --mlir yolo26n.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo26n_calib_table \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_reference yolo26n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo26n_1684x_int8_sym.cvimodel > cvimodel.txt

yolo export model=best.pt format=onnx opset=14 end2end=False
yolo export model=best.pt format=onnx opset=14
pip install --upgrade pip
pip install git+https://github.com/ultralytics/ultralytics.git@main
yolo export model=best.pt format=onnx opset=14 end2end=False

model_transform \
  --model_name yolo26n \
  --model_def yolo26n.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_result yolo26n_top_outputs.npz \
  --mlir yolo26n.mlir

run_calibration \
  yolo26n.mlir \
  --dataset ../../BUGGIO \
  --input_num 100 \
  -o yolo26n_calib_table

model_deploy \
  --mlir yolo26n.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo26n_calib_table \
  --test_input ../../BUGGIO/Ades_2-4_jpg.rf.4de8403c125c5d16b435a839a3a93780.jpg \
  --test_reference yolo26n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo26n_1684x_int8_sym.cvimodel

yolo export model=yolov26n.pt format=onnx opset=14 end2end=False
cmake 2.8.12

=========== desperation ===========
yolo export model=best.pt format=onnx opset=14 end2end=False

model_transform \
  --model_name yolo11n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input ../image/dog.jpg \
  --test_result yolo11n_top_outputs.npz \
  --mlir yolo11n.mlir

run_calibration \
  yolo11n.mlir \
  --dataset ../COCO2017 \
  --input_num 100 \
  -o yolo11n_calib_table

/workspace/duo-buildroot-sdk-v2/buildroot/utils/brmake -j4 -C /workspace/duo-buildroot-sdk-v2/buildroot
2026-01-29T05:40:25 >>> host-tar 1.35 Configuring
Done in 1min 08s (error code: 2)
make: *** [Makefile:664: br-rootfs-pack] Error 2
Error: Build board milkv-duos-musl-riscv64-emmc failed!
root@f09cd7f3870c:/workspace/duo-buildroot-sdk-v2#

https://github.com/smuraro/duo-buildroot-sdk-v2

sudo apt install -y pkg-config build-essential ninja-build automake autoconf libtool wget curl git gcc libssl-dev bc slib squashfs-tools android-sdk-libsparse-utils jq python3-distutils scons parallel tree python3-dev python3-pip device-tree-compiler ssh cpio fakeroot libncurses5 flex bison libncurses5-dev genext2fs rsync unzip dosfstools mtools tcl openssh-client cmake expect

sudo apt for python:
export FORCE_UNSAFE_CONFIGURE=1
export PATH=$(echo $PATH | tr -d '\n\t ')
sudo apt-get install python-is-python3
sudo apt install nodejs npm
sudo chown -R root:root /workspace/duo-buildroot-sdk-v2
./build.sh lunch
cd buildroot-2021.05/output/milkv-duo_musl_riscv64
make show-targets
make menuconfig

https://wiki.seeedstudio.com/recamera_model_conversion/

ghp_***
pip install git+https://ghp_***@github.com/tcpipchip/ultralytics.git@main
pip install git+https://ghp_***@github.com/ultralytics/ultralytics.git@main

claude Windows arquiteturadecomputadores1@gmail.com
claude windows (ps1): irm https://claude.ai/install.ps1 | iex
C:\Users\Miguel\.local\bin\claude.exe
skills install git+https://github.com/Seeed-Studio/ai-skills

I'll guide you through converting your custom YOLO v26 ONNX model to CVIMODEL format.

Prerequisites
1. TPU-MLIR toolchain (Sophgo's conversion tool)
2. Your custom .onnx model file
3. Calibration dataset (~100-1000 images for INT8 quantization)
4. A test image for validation

Conversion Steps

1. Install TPU-MLIR
# Clone TPU-MLIR v1.7 (or latest)
git clone -b v1.7 --depth 1 https://github.com/sophgo/tpu-mlir.git
cd tpu-mlir
source ./envsetup.sh
./build.sh

2. Convert ONNX to MLIR (FP32)
model_transform \
  --model_name your_model_name \
  --model_def your_model.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input test_image.jpg \
  --test_result output_ref.npz \
  --mlir your_model.mlir

Important: You may need to specify --output_names with your model's specific output layer names. You can find these by inspecting your ONNX model.

3. Run Calibration (for INT8 quantization)
run_calibration your_model.mlir \
  --dataset /path/to/calibration/images \
  --input_num 100 \
  -o calibration_table

4. Convert to CVIMODEL
# For INT8 (recommended for edge devices)
model_deploy \
  --mlir your_model.mlir \
  --quantize INT8 \
  --calibration_table calibration_table \
  --chip cv181x \
  --test_input test_image.jpg \
  --test_reference output_ref.npz \
  --model your_model_int8.cvimodel

# For BF16 (higher precision)
model_deploy \
  --mlir your_model.mlir \
  --quantize BF16 \
  --chip cv181x \
  --test_input test_image.jpg \
  --test_reference output_ref.npz \
  --model your_model_bf16.cvimodel

Key Questions for Your Setup
1. What chip are you targeting? (cv180x, cv181x, cv182x, cv183x)
2. What's your input size? (e.g., 640x640, 416x416)
3. Do you know your model's output layer names?
(needed for --output_names)

Finding Output Layer Names
If you need to inspect your ONNX model:
pip install netron
netron your_model.onnx

Or use Python:
import onnx
model = onnx.load("your_model.onnx")
for output in model.graph.output:
    print(output.name)

Would you like me to help you with any specific step, or do you need help finding your model's output layer names?

Sources:
- https://wiki.seeedstudio.com/model_conversion_guide/
- https://github.com/sophgo/tpu-mlir
- https://wiki.seeedstudio.com/recamera_model_conversion/
- https://doc.sophgo.com/sdk-docs/v23.05.01/docs_latest_release/docs/tpu-mlir/quick_start_en/html/10_cv18xx_guide.html

> cv181x 640x640

● Perfect! Here are the specific commands for your cv181x chip with 640x640 input:

Step-by-Step Conversion for Your Setup

1. Setup TPU-MLIR Environment
git clone -b v1.7 --depth 1 https://github.com/sophgo/tpu-mlir.git
cd tpu-mlir
source ./envsetup.sh
cd ..

2. Inspect Your ONNX Model (to find output layer names)
First, let's check what output layers your YOLO v26 model has:
# Install onnx if needed
pip install onnx
# Create inspection script
python -c "
import onnx
model = onnx.load('your_model.onnx')
print('Input names:')
for inp in model.graph.input:
    print(f' {inp.name}')
print('\nOutput names:')
for out in model.graph.output:
    print(f' {out.name}')
"

3. Convert ONNX to MLIR
model_transform \
  --model_name yolov26_custom \
  --model_def your_model.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input test_image.jpg \
  --test_result yolov26_output_ref.npz \
  --mlir yolov26_custom.mlir

Note: Add --output_names output1,output2,output3 if you have specific output layer names from step 2.

4. Run Calibration
run_calibration yolov26_custom.mlir \
  --dataset ./calibration_images \
  --input_num 100 \
  -o yolov26_calibration_table

5. Generate CVIMODEL (INT8)
model_deploy \
  --mlir yolov26_custom.mlir \
  --quantize INT8 \
  --calibration_table yolov26_calibration_table \
  --chip cv181x \
  --test_input test_image.jpg \
  --test_reference yolov26_output_ref.npz \
  --model yolov26_custom_cv181x_int8.cvimodel

What You Need Ready
1. ✓ Your .onnx file
2. ✓ Target chip: cv181x
3. ✓ Input size: 640x640
4. ⚠️ Calibration dataset (100+ images similar to your training data)
5. ⚠️ Test image (for validation)

Questions
1. Do you have your ONNX file in the current directory? If so, I can help you inspect it to find the output layer names.
2. Do you have calibration images ready? They should be representative of your inference data.
3. Are you running this on Linux/WSL or Windows? (TPU-MLIR requires Linux environment)

Would you like me to help you inspect your ONNX model or prepare the calibration dataset?

> inspect my model.onnx
pip install -U ultralytics
yolo export model=best.pt format=onnx imgsz=640 opset=14 simplify=True dynamic=False

PowerShell, WSL2 Ubuntu 22.04 install:
sudo apt-get update
sudo apt-get upgrade
sudo apt install -y pkg-config build-essential ninja-build automake autoconf libtool wget curl git gcc libssl-dev bc slib squashfs-tools android-sdk-libsparse-utils jq python3-distutils scons parallel tree python3-dev python3-pip device-tree-compiler ssh cpio fakeroot libncurses5 flex bison libncurses5-dev genext2fs rsync unzip dosfstools mtools tcl openssh-client cmake expect
run Docker Desktop on Windows

Installed ultralytics on Ubuntu 22.04 and copied the scripts into Ubuntu 22.04:
git clone https://github.com/Seeed-Studio/ai-skills/tree/main
Into the folder /home/ubuntu/ai-skills/skills/onnx-to-cvimodel/scripts I copied the .pt, and inside /home/ubuntu/ai-skills/skills/onnx-to-cvimodel/scripts I created a dataset folder and copied images into it.
pip install -U ultralytics (to generate the onnx)

.pt to .onnx conversion:
yolo export model=best.pt format=onnx imgsz=640 opset=14 simplify=True dynamic=False

nano ~/.docker/config.json (on Ubuntu 22.04)
{ }

pip install tpu-mlir
git clone https://github.com/sophgo/tpu-mlir.git
./convert_to_cvimodel.sh best.onnx dataset

===================== object detect (can detect many objects at the same time) =====================

model_transform \
  --model_name yolov26_custom \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input teste.jpg \
  --test_result yolov26_output_ref.npz \
  --output_names "/model.23/one2one_cv2.0/one2one_cv2.0.2/Conv_output_0,/model.23/one2one_cv3.0/one2one_cv3.0.2/Conv_output_0,/model.23/one2one_cv2.1/one2one_cv2.1.2/Conv_output_0,/model.23/one2one_cv3.1/one2one_cv3.1.2/Conv_output_0,/model.23/one2one_cv2.2/one2one_cv2.2.2/Conv_output_0,/model.23/one2one_cv3.2/one2one_cv3.2.2/Conv_output_0" \
  --mlir yolov26_custom.mlir

run_calibration yolov26_custom.mlir \
  --dataset ./calibration_images \
  --input_num 200 \
  -o yolov26_calibration_table

model_deploy \
  --mlir yolov26_custom.mlir \
  --quantize INT8 \
  --calibration_table yolov26_calibration_table \
  --chip cv181x \
  --test_input teste.jpg \
  --test_reference yolov26_output_ref.npz \
  --model yolov26_custom_cv181x_int8.cvimodel

name: output0 tensor: float32[1,300,6]

================= best class .pt =================
model_transform \
  --model_name yolov26_custom \
  --model_def best_class.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input teste.jpg \
  --test_result yolov26_output_ref.npz \
  --output_names "output0" \
  --mlir yolov26_custom.mlir

run_calibration yolov26_custom.mlir \
  --dataset ./calibration_images \
  --input_num 200 \
  -o yolov26_calibration_table

model_deploy \
  --mlir yolov26_custom.mlir \
  --quantize INT8 \
  --calibration_table yolov26_calibration_table \
  --chip cv181x \
  --test_input teste.jpg \
  --test_reference yolov26_output_ref.npz \
  --model yolov26_custom_cv181x_int8.cvimodel
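The end2end detect export above reports name: output0 tensor: float32[1,300,6]. For Ultralytics NMS-fused exports each row is usually (x1, y1, x2, y2, score, class), but treat that layout, and the npz key, as assumptions to verify against a known image. A minimal decode sketch:

import numpy as np

# assumed: the reference dump stores the tensor under its ONNX name "output0"
out = np.load("yolov26_output_ref.npz")["output0"]  # shape (1, 300, 6)
for x1, y1, x2, y2, score, cls in out[0]:
    if score < 0.5:   # the 300 rows are padded; low scores are empty slots
        continue
    print(f"class={int(cls)} score={score:.2f} box=({x1:.0f},{y1:.0f},{x2:.0f},{y2:.0f})")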
======================== testing scripts, VirtualBox Ubuntu 22.04 ========================
No update or upgrade; installed docker only, no need for Docker Desktop on Windows...
sudo chown -R ubuntu:ubuntu ai-skills/
sudo ./convert_to_cvimodel.sh best.onnx images
Created a shared folder in VirtualBox Ubuntu 22.04.

The Professional Method: Shared Folders
This is the best option for anyone who needs to move files constantly. It creates a folder on Windows that shows up as a "drive" or directory inside Ubuntu.
In VirtualBox, go to the VM's Settings > Shared Folders. Click the "plus" (+) icon and select the folder on your Windows side. Check Read-only (optional), Auto-mount, and Make Permanent.
In Ubuntu, your user needs permission to read that folder. Run this command in the terminal:
Bash
sudo adduser $USER vboxsf

sudo docker start recamera
sudo docker exec -it recamera /bin/bash

======================== tests to see how calls into docker work ========================
docker exec recamera ls -la /workspace
sudo docker exec -it recamera bash -c "pwd && ls -la"
sudo docker exec -it recamera bash -c \
  "source /workspace/tpu-mlir/envsetup.sh"
sudo docker exec -it sophgo bash -c "
  source /workspace/tpu-mlir/envsetup.sh && \
  pwd && \
  ls -la
"
sudo docker exec -it yolo26_convert bash -c "
  source /workspace/tpu-mlir/envsetup.sh
  pwd
  ls -la
  cp hello.txt /workspace
"
sudo docker run --privileged --rm --name "sophgo" \
  -v "$(pwd)/$WORK_DIR:/work" \
  -w "/work/$model_name" \
  sophgo/tpuc_dev:v3.1 bash -c "
  source /workspace/tpu-mlir/envsetup.sh
  echo '=== Step 1: Model Transform ==='
  mkdir -p workspace && cd workspace
  cp ../$onnx_basename .
  $use_qtable && cp ../$(basename $qtable_path) . || true"

docker run --privileged -it --rm --name yolo26_convert sophgo/tpuc_dev:v3.1 bash
docker run --privileged -it --rm --name yolo26_convert \
  -v "$(pwd)/$WORK_DIR:/work" \
  -w "/work/$model_name" \
  sophgo/tpuc_dev:v3.1 -c "pwd"
ls
pwd
ls
echo "$(pwd)/$WORK_DIR:/work"
echo "/work/$model_name"
ls -R
sudo docker start yolo26_convert
sudo docker exec -it yolo26_convert /bin/bash
docker run --privileged -it --rm --name yolo26_convert sophgo/tpuc_dev:v3.1 -c "ls -R"
docker run --rm sophgo/tpuc_dev:v3.1 pwd
sudo docker run --rm \
  -v "$(realpath ./$WORK_DIR):/work" \
  -w "/work/yolo11n" \
  sophgo/tpuc_dev:v3.1 ls -R /work

============== new script ===============
install tpu_mlir
sudo docker run -it sophgo/tpuc_dev:v3.1 bash
make the changes...
exit
sudo docker ps -a
sudo docker commit 16423cf57338 sophgo/tpuc_dev:v3.1-recamera
sudo docker run -it sophgo/tpuc_dev:v3.1-recamera bash

work_dir: the official folder that gets mirrored on the docker side
NOT OK: --chip was missing in the deploy step
the test input image path was wrong... it was set to /work/tmp
deploy_args was set to "local" - remove "local"
case "$task" in
  detect)
    if [[ "$model_type" == "yolo11" ]]; then   # <---- always yolo11
      echo "/model.23/one2one_cv2.0/one2one_cv2.0.2/Conv_output_0,/model.23/one2one_cv3.0/one2one_cv3.0.2/Conv_output_0,/model.23/one2one_cv2.1/one2one_cv2.1>
    else
addition of -xc
665ad7751bcf
sudo docker kill 665ad7751bcf

LOG OF THE NEW SCRIPT:
model_transform.py --model_name yolo11n-detect --model_def best.onnx --input_shapes '[[1,3,640,640]]' --mean 0.0,0.0,0.0 --scale 0.0039216,0.0039216,0.0039216 --keep_aspect_ratio --pixel_format rgb --output_names /model.23/one2one_cv2.0/one2one_cv2.0.2/Conv_output_0,/model.23/one2one_cv3.0/one2one_cv3.0.2/Conv_output_0,/model.23/one2one_cv2.1/one2one_cv2.1.2/Conv_output_0,/model.23/one2one_cv3.1/one2one_cv3.1.2/Conv_output_0,/model.23/one2one_cv2.2/one2one_cv2.2.2/Conv_output_0,/model.23/one2one_cv3.2/one2one_cv3.2.2/Conv_output_0 --test_input /work/test.jpg --test_result yolo11n-detect_top_outputs.npz --mlir yolo11n-detect.mlir
model_runner.py --input yolo11n-detect_in_f32.npz --model best.onnx --output yolo11n-detect_ref_outputs.npz
run_calibration.py yolo11n-detect.mlir --dataset /work/dataset --input_num 100 -o yolo11n-detect_calib_table
model_deploy.py --mlir yolo11n-detect.mlir --quantize INT8 --quant_input --processor cv181x --calibration_table yolo11n-detect_calib_table --test_input /work/test.jpg --test_reference yolo11n-detect_top_outputs.npz --customization_format RGB_PACKED --fuse_preprocess --aligned_input --model yolo11n-detect_cv181x_int8.cvimodel

1 white, 2 black, 3 blue - perfect
https://www.youtube.com/watch?v=r0RspiLG260
https://www.ejtech.io/learn/train-yolo-models
https://wiki.seeedstudio.com/recamera_model_conversion/

// cars: yolov8 export from sophgo
model_transform \
  --model_name yolo8n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input cars.jpg \
  --test_result yolo8n_top_outputs.npz \
  --mlir yolo8n.mlir

run_calibration yolov8n.mlir \
  --dataset carros \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8n.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --model yolov8n_cv181x_int8_sym.cvimodel

python sample_img_object_detection.py YOLOV8 /root/cv181x/yolov8n_cv181x_int8_sym.cvimodel /root/21.jpg

note: reCamera yolov11 worked with the cars, which exceed 640x640 resolution
note: the tdl sdk seems to require 640x640 and runs yolov8 only
# Manually download the specific version first
!wget https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.pt
# Then run your train command
!yolo task=detect mode=train model=yolov8n.pt data=data.yaml epochs=100 imgsz=640

2. Use the Python API (Recommended)
The Python API is generally less "opinionated" than the CLI and handles versioning more strictly.

Python
from ultralytics import YOLO
# This explicitly loads the V8 architecture
model = YOLO('yolov8n.pt')
# Train the model
model.train(data='data.yaml', epochs=100, imgsz=640)

-- cars yolov8 --
run_calibration.py yolov8n.mlir \
  --dataset ./carros \
  --input_num 10 \
  -o yolov8n_cali_table

yolov8 cars tests, tdl_sdk:
sample_img_object_detection YOLOV8 /root /root/carro.jpg
[root@milkv-duo]~# sample_img_object_detection YOLOV8 /root /root/carro.jpg
[tdl_model_factory.cpp:80] [I] input model config file is empty, load model config from /mnt/system/configs/model/model_factory.json
[tdl_model_factory.cpp:104] [I] load model config from /mnt/system/configs/model/model_factory.json done,model size:66
[tdl_model_factory.cpp:226] [I] setModelDir success,model_dir:/root
[tdl_model_factory_internal.cpp:40] [I] getModelInstance model_type:YOLOV8
[tdl_model_factory_internal.cpp:329] [I] createObjectDetectionModel success,model type:27,category:0
[tdl_model_factory.cpp:412] [I] model_path: /root/cv181x/best_cv181x.cvimodel
out_datas.size: 1
obj_meta_index: 0 class: 2 score: 0.587585 bbox: 213.596 88.3061 414.607 263.387
obj_meta_index: 1 class: 4 score: 0.893209 bbox: 488.964 35.1621 640 174.354
obj_meta_index: 2 class: 4 score: 0.844244 bbox: 447.938 189.45 639.936 366.509
obj_meta_index: 3 class: 9 score: 0.804705 bbox: 240.546 8.94987 408.378 145.323
object_meta->bboxes.size(): 4
save image to object_detection_0.jpg
[ 6911.369174] vb_cancel_block:964(): Not find pool for size(1228800).

python tdl_sophgo.py /root/cv181x/best_int8_cars.cvimodel carro.jpg
python sample_img_object_detection.py YOLOV8 /root/cv181x/best_int8_cars.cvimodel carro.jpg
(the tdl_sophgo.py and sample_img_object_detection.py listings are the same scripts shown above)
=============== plates 11 reCamera 100% ok ===============
model_transform.py \
  --model_name yolo11n-detect \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input test.jpg \
  --test_result yolo11n-detect_top_outputs.npz \
  --mlir yolo11n-detect.mlir

run_calibration.py yolo11n-detect.mlir \
  --dataset plates \
  --input_num 100 \
  -o yolo11n-detect_calib_table

model_deploy.py \
  --mlir yolo11n-detect.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo11n-detect_calib_table \
  --test_input test.jpg \
  --test_reference yolo11n-detect_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess --aligned_input \
  --model yolo11n-detect_cv181x_int8.cvimodel

=========================== plates 8 TDL_SDK 100% ok ===========================
Don't forget the export from the site for yolov8.
model_transform \
  --model_name yolov8n \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --mlir yolov8n.mlir

run_calibration.py yolov8n.mlir \
  --dataset plates \
  --input_num 100 \
  -o yolov8n_cali_table

python sample_img_object_detection.py YOLOV8 /root/cv181x/yolov8n_cv181x_int8_sym.cvimodel plate.jpg
/etc/init.d/S93sscma-supervisor stop
/etc/init.d/S91sscma-node stop
python sample_sergio.py /root /root/s4.jpg

=== vibration ===
model_transform.py \
  --model_name yolo11n-detect \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --output_names "/model.23/cv2.0/cv2.0.2/Conv_output_0,/model.23/cv3.0/cv3.0.2/Conv_output_0,/model.23/cv2.1/cv2.1.2/Conv_output_0,/model.23/cv3.1/cv3.1.2/Conv_output_0,/model.23/cv2.2/cv2.2.2/Conv_output_0,/model.23/cv3.2/cv3.2.2/Conv_output_0" \
  --test_input test_vib.jpg \
  --test_result yolo11n-detect_top_outputs.npz \
  --mlir yolo11n-detect.mlir

run_calibration.py yolo11n-detect.mlir \
  --dataset vibracao \
  --input_num 100 \
  -o yolo11n-detect_calib_table

model_deploy.py \
  --mlir yolo11n-detect.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo11n-detect_calib_table \
  --test_input test_vib.jpg \
  --test_reference yolo11n-detect_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess --aligned_input \
  --model yolo11n-detect_cv181x_int8.cvimodel
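Every model_transform call in these notes uses --mean 0.0,0.0,0.0 and --scale 0.0039216: that scale is just 1/255, i.e. the tool computes x' = (x - mean) * scale, mapping 0..255 pixels to 0..1 the same way Ultralytics normalizes at training time. A one-line check:

# the scale passed to model_transform is 1/255 (0.0039216 rounded)
print(1 / 255)                   # 0.00392156862745098
print((255 - 0.0) * 0.0039216)   # ~1.0: a white pixel maps to roughly 1.0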
https://universe.roboflow.com/rean-ba5ce/vibration-analysis-normal/dataset/2
yolo export model=best.pt format=onnx opset=14

AT+DEVEUI=70B3D57ED006E1A2
AT+APPEUI=0000000000000000
AT+BAND=6
AT+APPKEY=9E8B21A4624337145717626A83DA6F48
AT+MASK=0002
AT+CLASS=A
AT+JOIN=1:0:10:8

https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolov8-obb.ipynb#scrollTo=tdSMcABDNKW-

@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ classification @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
C:.
└───dataset
    ├───train
    │   ├───classe_A
    │   ├───classe_B
    │   └───classe_C
    └───val
        ├───classe_A
        ├───classe_B
        └───classe_C

!pip install ultralytics==8.2.0
!pip install ultralytics==8.3.0

from ultralytics import YOLO
import os

# 1. Disable W&B
os.environ["WANDB_MODE"] = "disabled"

# 2. FORCE THE DOWNLOAD OF THE CLASSIFICATION MODEL VIA URL
# This keeps it from downloading the detection yolo11n.pt by mistake
try:
    model = YOLO("yolo11n-cls.pt")
except:
    # If it insists on the wrong weights, use the direct release link
    model = YOLO("https://github.com/ultralytics/assets/releases/download/v8.2.0/yolo11n-cls.pt")

# 3. Training
# Point to the folder that contains 'train' and 'val'
model.train(
    data="/content/dataset",
    epochs=100,
    imgsz=640,
    task='classify'
)

from google.colab import drive
drive.mount('/content/drive')
and then:
cp /content/drive/MyDrive/buggio/dataset.zip /content/.

or:
!yolo task=classify mode=train \
  model=yolov8n-cls.yaml \
  data=/content/dataset \
  epochs=100 \
  imgsz=640

yolo export model=best.pt format=onnx imgsz=640

model_transform.py \
  --model_name yolov8n \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --mlir yolov8n.mlir

run_calibration.py yolov8n.mlir \
  --dataset motors \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8n.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --model yolov8n_cv181x_int8_sym.cvimodel

[16:23, 12/02/2026] Sergio milk-v Duo S: edit the model_factory.json located in /mnt/system/configs/model
[16:24, 12/02/2026] Sergio milk-v Duo S:
"YOLOV8":{ "_comment":"custom model, specify num_cls or branch string", "file_name":"yolov8n_cv181x_int8_sym_4" },
[16:25, 12/02/2026] Sergio milk-v Duo S: just edit "file_name": with your model's name (remembering it goes without the _cv181x at the end, since that gets appended automatically)

python sample_img_classification.py /root/cv181x/yolov8n_cv181x_int8_sym.cvimodel /root/bom_c0.jpg
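The classify trainer only needs the train/val tree shown above; a minimal sketch to confirm both splits carry the same classes before zipping the dataset for Colab (the folder names are assumptions matching the tree):

import os

root = "dataset"  # folder containing train/ and val/
train = set(os.listdir(os.path.join(root, "train")))
val = set(os.listdir(os.path.join(root, "val")))
print("classes:", sorted(train))
# A class present in only one split will silently skew training/validation.
if train != val:
    print("split mismatch:", train ^ val)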
Vibration capture script (Milk-V Duo + LIS2DH):

import os
import cv2
import time
import subprocess
import numpy as np
from pinpong.libs.dfrobot_lis2dh import LIS2DH
from pinpong.board import Board, I2C

# Helper to configure pins on the Milk-V Duo
def execute_shell_command(command):
    try:
        result = subprocess.run(command, shell=True, capture_output=True, text=True)
        return result.stdout if result.stdout else result.stderr
    except Exception as e:
        return str(e)

# Hardware initialization
Board("milkv-duo").begin()
execute_shell_command('duo-pinmux -w B20/IIC4_SCL')
execute_shell_command('duo-pinmux -w B21/IIC4_SDA')
lis = LIS2DH()
while not lis.init(lis.LIS2DH12_RANGE_8GA):
    print("Waiting for the LIS2DH sensor...")
    time.sleep(1)

# Dataset configuration
DATASET_DIR = "dataset_yolo"
CLASSES = []  # list of captured classes

def preparar_pastas():
    # Creates the structure: dataset_yolo/train/images and dataset_yolo/train/labels
    for folder in ["images", "labels"]:
        path = os.path.join(DATASET_DIR, "train", folder)
        if not os.path.exists(path):
            os.makedirs(path)

def criar_yaml():
    # Generates the configuration file for YOLO11
    yaml_path = os.path.join(DATASET_DIR, "data.yaml")
    with open(yaml_path, "w") as f:
        f.write(f"train: {os.path.abspath(DATASET_DIR)}/train/images\n")
        f.write(f"val: {os.path.abspath(DATASET_DIR)}/train/images\n")
        f.write(f"\nnc: {len(CLASSES)}\n")
        f.write(f"names: {CLASSES}\n")
    print(f"\n[OK] data.yaml file created at: {yaml_path}")

def capturar_ciclo(label_nome, class_id, quantidade=100):
    print(f"\nStarting capture of {quantidade} samples for: {label_nome}")
    img_dir = os.path.join(DATASET_DIR, "train", "images")
    lbl_dir = os.path.join(DATASET_DIR, "train", "labels")
    for i in range(quantidade):
        amostras = []
        for _ in range(256):
            lis.read_XYZ()
            amostras.append([lis.x, lis.y, lis.z])
            time.sleep(0.001)  # fast sampling
        # Data processing (8G scale = +/- 8000 mg)
        dados = np.array(amostras)
        dados = np.clip(dados, -8000, 8000)
        imagem_base = ((dados + 8000) * 255 / 16000).astype(np.uint8)
        # Build the 256x256 RGB image (OpenCV uses BGR)
        # img_final[row, column, channel]
        img_final = np.zeros((256, 256, 3), dtype=np.uint8)
        for linha in range(256):
            # Assign 256 colors based on the axes to the 3 channels
            img_final[linha, :, 0] = imagem_base[linha, 2]  # Blue channel (Z axis)
            img_final[linha, :, 1] = imagem_base[linha, 1]  # Green channel (Y axis)
            img_final[linha, :, 2] = imagem_base[linha, 0]  # Red channel (X axis)
        # File names
        file_base = f"{label_nome}_c{i}"
        # Save the image as JPG at maximum quality (100) to preserve the data
        img_path = os.path.join(img_dir, f"{file_base}.jpg")
        cv2.imwrite(img_path, img_final, [int(cv2.IMWRITE_JPEG_QUALITY), 100])
        # Save the label (YOLO format: class_id x_center y_center width height)
        with open(os.path.join(lbl_dir, f"{file_base}.txt"), "w") as f:
            f.write(f"{class_id} 0.5 0.5 1.0 1.0")
        if i % 20 == 0:
            print(f"Recording: {i}/{quantidade}...")

# --- RUN THE 3 CYCLES ---
preparar_pastas()
for ciclo in range(3):
    print(f"\n--- CYCLE {ciclo + 1} OF 3 ---")
    nome_classe = input("Type the name for this vibration (e.g. normal, error, high): ").strip()
    if nome_classe not in CLASSES:
        CLASSES.append(nome_classe)
    id_atual = CLASSES.index(nome_classe)
    input(f"Prepare the sensor for '{nome_classe}' and press ENTER to capture 100 images...")
    capturar_ciclo(nome_classe, id_atual, 100)
criar_yaml()
print("\nDataset finished successfully!")

yolo export model=best.pt format=onnx opset=14

====== vibration ===============
= good to always use test_input, yolo8n
=============================
yolo export model=best.pt format=onnx opset=14

model_transform \
  --model_name yolo8n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input 2bal640x640.jpg \
  --test_result yolo8n_top_outputs.npz \
  --mlir yolo8n.mlir

run_calibration yolov8n.mlir \
  --dataset vibracao \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8n.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --test_input 2bal640x640.jpg \
  --model yolov8n_cv181x_int8_sym.cvimodel
==== vibration == tdl sdk solution... (this is the one that was used)
python sample_img_object_detection.py YOLOV8 /root/cv181x/yolov8n_cv181x_int8_sym_cv181x.cvimodel /root/4ballfaultserious_ke-48_png.rf.e5badf73f45eeb9d25224c8f7cb40d13.jpg

run_calibration.py yolov8n.mlir \
  --dataset hz \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8n.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --model yolov8n_cv181x_int8_sym.cvimodel

picolm: tinyllama-1.1b-chat-v1.0-q4_k_m.gguf
./picolm /mnt/usb/llm/tinyllama-1.1b-chat-v1.0-q4_k_m.gguf -p "The meaning of life is" -n 100
"Give me 5 name ideas for a coffee shop?"

==== for the blog... and tdl sdk... new colab test - chocolates... ====
https://colab.research.google.com/github/EdjeElectronics/Train-and-Deploy-YOLO-Models/blob/main/Train_YOLO_Models.ipynb#scrollTo=EMEDk5byzxY5
!pip install ultralytics==8.3.0   # the trick so YOLO doesn't pull a newer .pt
have a /test folder ready

from ultralytics import YOLO
model = YOLO('yolov8n.pt')
results = model.train(data="data.yaml", epochs=100, imgsz=640)

!yolo detect predict model=runs/detect/train2/weights/best.pt source=/content/train/test save=True

import glob
from IPython.display import Image, display
for image_path in glob.glob('/content/runs/detect/predict/*.jpg')[:10]:
    display(Image(filename=image_path, height=400))
    print('\n')

https://www.youtube.com/watch?si=frhm3ByszGu_lBmT&v=r0RspiLG260&feature=youtu.be
https://colab.research.google.com/

model_deploy.py \
  --mlir yolo11n-detect.mlir \
  --quantize BF16 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo11n-detect_calib_table \
  --test_input test.jpg \
  --test_reference yolo11n-detect_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess --aligned_input \
  --model yolo11n-detect_cv181x_int8.cvimodel

https://ricardoadulis.sharepoint.com/:u:/s/smartcore/IQC5Hj8OhqzLSahmy_Qu2jFYASIYqhr7HpmA0Wsre_sWRU0?e=RN3dU9
https://github.com/sophgo/tdl_sdk/blob/master/tool/yolo_export/yolov8_export.py
https://milkv.io/docs/duo/getting-started/setup
https://platform.ultralytics.com/home   // Ultralytics web platform

yolo26n:
model_transform \
  --model_name yolo26n \
  --model_def best.onnx \
  --input_shapes "[[1,3,640,640]]" \
  --mean "0.0,0.0,0.0" \
  --scale "0.0039216,0.0039216,0.0039216" \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input test.jpg \
  --test_result yolo26n_top_outputs.npz \
  --mlir yolo26n.mlir

run_calibration \
  yolo26n.mlir \
  --dataset cores \
  --input_num 100 \
  -o yolo26n_calib_table

model_deploy \
  --mlir yolo26n.mlir \
  --quantize INT8 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo26n_calib_table \
  --test_input test.jpg \
  --test_reference yolo26n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo26n_1684x_int8_sym.cvimodel

model_deploy \
  --mlir yolo26n.mlir \
  --quantize BF16 \
  --quant_input \
  --processor cv181x \
  --calibration_table yolo26n_calib_table \
  --test_input test.jpg \
  --test_reference yolo26n_top_outputs.npz \
  --customization_format RGB_PACKED \
  --fuse_preprocess \
  --aligned_input \
  --model yolo26n_1684x_int8_sym.cvimodel

https://universe.roboflow.com/tyler-yonjx/car-colors-1smyc
\\wsl.localhost\docker-desktop\mnt\docker-desktop-disk\data\docker\volumes\6e0475e8fbeba5b744b0d593cbfd6ad76a5f70300caec98595e193b6110b9dd9
++++++++++++++++++ classification, tdl-sdk (yolov8) ++++++++++++++++++
https://colab.research.google.com/github/roboflow-ai/notebooks/blob/main/notebooks/train-yolo11-object-detection-on-custom-dataset.ipynb#scrollTo=tdSMcABDNKW-yolo11s
!pip install ultralytics
create a dataset folder and copy the zip inside it; train

data.yaml:
path: /content/train
train: train
val: valid
test: test

!yolo task=classify mode=train \
  model=yolov8n-cls.yaml \
  data=/content/dataset \
  epochs=100 \
  imgsz=640
or
!yolo task=classify mode=train \
  model=yolov8n-cls \
  data=/content/dataset \
  epochs=100 \
  imgsz=640
or
!yolo classify train data=/content/dataset model=yolov8n-cls.pt epochs=50 imgsz=640

yolo export model=best.pt format=onnx imgsz=640

model_transform \
  --model_name yolov8n-cls \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --mlir yolov8n.mlir

run_calibration.py yolov8n.mlir \
  --dataset cores \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8n.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --model yolov8n_cv181x_int8_sym.cvimodel

sample_img_classification CLS_YOLOV8 /root /root/test.jpg
python sample_img_classification.py yolov8n_cv181x_int8_sym.cvimodel test.jpg

TO TRY...
model_transform \
  --model_name yolov8_cls \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0,0,0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --mlir yolov8_cls.mlir

run_calibration.py yolov8_cls.mlir \
  --dataset cores \
  --input_num 100 \
  -o yolov8n_cali_table

model_deploy.py \
  --mlir yolov8_cls.mlir \
  --quant_input --quant_output \
  --quantize INT8 \
  --calibration_table yolov8n_cali_table \
  --processor cv181x \
  --model yolov8n_cv181x_int8_sym.cvimodel

======== blog: trained on the Ultralytics platform, used the python export_class ========
model_transform.py \
  --model_name yolo_cls \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --pixel_format bgr \
  --keep_aspect_ratio \
  --channel_format nchw \
  --mlir yolo_cls.mlir

run_calibration.py yolo_cls.mlir \
  --dataset cores \
  --input_num 100 \
  -o yolo_cls_cali_table

model_deploy.py \
  --mlir yolo_cls.mlir \
  --quantize INT8 \
  --calibration_table yolo_cls_cali_table \
  --fuse_preprocess \
  --asymmetric \
  --processor cv181x \
  --model yolo_cls_cv181x_int8_sym.cvimodel

python sample_img_classification.py /root/yolo_cls_cv181x_int8_sym.cvimodel /root/test.jpg
sample_img_classification CLS_YOLOV8 /root /root/test.jpg

==== give up? Ultralytics platform, convert_class.py ====
model_transform.py \
  --model_name yolov8n_cls \
  --model_def best.onnx \
  --input_shapes [[1,3,640,640]] \
  --mean 0.0,0.0,0.0 \
  --scale 0.0039216,0.0039216,0.0039216 \
  --keep_aspect_ratio \
  --pixel_format rgb \
  --test_input test.jpg \
  --test_result yolov8n_cls_top_outputs.npz \
  --mlir yolov8n_cls.mlir

run_calibration.py yolov8n_cls.mlir \
  --dataset cores \
  --input_num 100 \
  -o yolo_cls_cali_table

model_deploy.py \
  --mlir yolov8n_cls.mlir \
  --quantize INT8 \
  --calibration_table yolo_cls_cali_table \
  --processor cv181x \
  --test_input test.jpg \
  --test_reference yolov8n_cls_top_outputs.npz \
  --tolerance 0.85,0.45 \
  --fuse_preprocess \
  --customization_format RGB_PLANAR \
  --aligned_input \
  --model yolo_cls_cv181x_int8_sym.cvimodel

??? yolo task=classify export model=best.pt format=onnx ???
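For the classification cvimodels in these sections the raw output is one score per class; whether softmax is already fused into the exported head varies by export path, so verify on a known image. A minimal top-3 decode sketch, assuming the npz key matches the ONNX output name:

import numpy as np

# reference dump written by model_transform --test_result (assumed key "output0")
logits = np.load("yolov8n_cls_top_outputs.npz")["output0"].ravel()
probs = np.exp(logits - logits.max())   # numerically stable softmax
probs /= probs.sum()
for idx in probs.argsort()[::-1][:3]:   # top-3 classes
    print(f"class {idx}: {probs[idx]:.3f}")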
./sample_img_classification /root CLS_YOLOV8 /root/test.jpg
sample_img_classification /root/yolo_cls_cv181x_int8_sym.cvimodel CLS_YOLOV8 /root/test.jpg

https://pt.aliexpress.com/item/1005009136395783.html?ug_edm_item_id=1005009136395783&edm_click_module=alg_product_3_4586567300&creative_img_ind=7&edm_log_data=gmod-edm-item-list-three-columns.track-edm-item-list-three-columns-log-link&tracelog=rowan&rowan_id1=aeug_edm_41180_1_pt_BR_2026-03-03&rowan_msg_id=tcpiCOWS_41180_%24b76fd4f35ee8490f8be62bee8ab63495&ck=in_edm_other&mem_info=ODmQN4iVZeMljqCr%2Fd0gpw%3D%3D-100003-aeug_edm_41180-IaxZ1cvIAFXa2co01j1vpwMJ553K%20YAzAjqcUK4jAsE%3D&gatewayAdapt=glo2bra#nav-specification

Nícolas Zimermann - Vehicle access detection and control with Milk-V
Selmo Werner Júnior - Thermal monitoring system for industrial predictive maintenance using edge computing on a Milk-V architecture
Artur Lian Fernandes Torres - Development of a device for condition monitoring of rotating machines using frequency analysis and a convolutional neural network with Milk-V

./sample_img_classification CLS_HAND_GESTURE /root /root/test.jpg
./sample_img_classification CLS_HAND_GESTURE /root /root/ruido_200Hz_200Hz_c1.jpg
python sample_img_classification.py /root/cv181x/cls_hand_gesture_128_128_INT8_cv181x.cvimodel /root/test.jpg
python sample_img_classification.py /root/cv181x/cls_hand_gesture_128_128_INT8_cv181x.cvimodel /root/ruido_200Hz_200Hz_c1.jpg

================================================= TPU-MLIR 1.27 TEST

python sample_img_classification_.py /root/cv181x/yolo_cls_cv181x_int8_sym.cvimodel /root/ruido_200Hz_200Hz_c1.jpg
./sample_img_classification CLS_YOLOV8 /root /root/ruido_200Hz_200Hz_c1.jpg

Improved... but...

model_transform.py \
--model_name yolov8n_cls \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mlir yolov8n_cls.mlir

run_calibration.py yolov8n_cls.mlir \
--dataset cores \
--input_num 100 \
-o yolo_cls_cali_table

model_deploy.py \
--mlir yolov8n_cls.mlir \
--quantize INT8 \
--quant_input \
--calibration_table yolo_cls_cali_table \
--processor cv181x \
--model yolo_cls_cv181x_int8_sym.cvimodel

==================================================

./sample_img_classification CLS_YOLOV8 /root /root/test.jpg

=*****************************=

model_transform.py \
--model_name yolov8n_cls \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mean "0,0,0" \
--scale "0.00392156862745098,0.00392156862745098,0.00392156862745098" \
--keep_aspect_ratio \
--pixel_format rgb \
--channel_format nchw \
--test_input test.jpg \
--tolerance 0.99,0.99 \
--test_result yolov8n_cls_top_outputs.npz \
--mlir yolov8n_cls.mlir

run_calibration.py yolov8n_cls.mlir \
--dataset cores \
--input_num 100 \
-o yolo_cls_cali_table

model_deploy.py \
--mlir yolov8n_cls.mlir \
--quantize INT8 \
--quant_input \
--calibration_table yolo_cls_cali_table \
--processor cv181x \
--test_input yolov8n_cls_in_f32.npz \
--test_reference yolov8n_cls_top_outputs.npz \
--tolerance 0.9,0.6 \
--model yolo_cls_cv181x_int8_sym.cvimodel
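All of these attempts use mean 0 and scale 0.00392156... = 1/255: model_transform bakes the preprocessing y = (x - mean) * scale into the MLIR, mapping 0..255 pixels into 0..1. A minimal NumPy sketch of that formula (stand-in image), also showing how a mean/std recipe converts, since scale = 1/std:

import numpy as np

# What --mean/--scale encode: y = (x - mean) * scale.
# scale = 1/255 = 0.00392156... maps uint8 pixels into 0..1.
mean = np.array([0.0, 0.0, 0.0], dtype=np.float32)
scale = np.array([1.0 / 255.0] * 3, dtype=np.float32)

img = np.random.randint(0, 256, (640, 640, 3), dtype=np.uint8)  # stand-in for test.jpg
norm = (img.astype(np.float32) - mean) * scale
print(norm.min(), norm.max())  # ~0.0 .. ~1.0

# A recipe given as mean/std (e.g. std 58.395) becomes --scale 1/std:
print(1.0 / 58.395)  # 0.017124... (the "hub maix" scale below)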
\\\\\\\\\\\\\\\\\\\\\\\

model_transform.py \
--model_name yolov8n_cls \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mean "0,0,0" \
--scale "0.00392156862745098,0.00392156862745098,0.00392156862745098" \
--keep_aspect_ratio \
--pixel_format rgb \
--channel_format nchw \
--output_names /model.9/linear/Gemm_output_0 \
--test_input test.jpg \
--test_result yolov8n_cls_top_outputs.npz \
--mlir yolov8n_cls.mlir

run_calibration.py yolov8n_cls.mlir \
--dataset cores \
--input_num 100 \
-o yolo_cls_cali_table

model_deploy.py \
--mlir yolov8n_cls.mlir \
--quantize INT8 \
--quant_input \
--calibration_table yolo_cls_cali_table \
--processor cv181x \
--test_input yolov8n_cls_in_f32.npz \
--test_reference yolov8n_cls_top_outputs.npz \
--tolerance 0.9,0.6 \
--model yolo_cls_cv181x_int8_sym.cvimodel

===== hub maix

model_transform.py \
--model_name yolov8n_cls \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mean "0,0,0" \
--scale "0.017124753831663668,0.017124753831663668,0.017124753831663668" \
--keep_aspect_ratio \
--pixel_format rgb \
--channel_format nchw \
--output_names /model.9/linear/Gemm_output_0 \
--test_input test.jpg \
--test_result yolov8n_cls_top_outputs.npz \
--mlir yolov8n_cls.mlir

model_transform.py \
--model_name yolov8n_cls \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mean "0,0,0" \
--scale "0.00392156862745098,0.00392156862745098,0.00392156862745098" \
--keep_aspect_ratio \
--output_names "output0" \
--pixel_format rgb \
--channel_format nchw \
--test_input green.jpg \
--test_result yolov8n_cls_top_outputs.npz \
--tolerance 0.95,0.60 \
--mlir yolov8n_cls.mlir

run_calibration.py yolov8n_cls.mlir \
--dataset testes \
--input_num 1600 \
-o yolo_cls_cali_table

model_deploy.py \
--mlir yolov8n_cls.mlir \
--quantize INT8 \
--quant_input \
--calibration_table yolo_cls_cali_table \
--processor cv181x \
--test_input yolov8n_cls_in_f32.npz \
--test_reference yolov8n_cls_top_outputs.npz \
--tolerance 0.95,0.60 \
--model yolo_cls_cv181x_int8_sym.cvimodel

Look at this: https://wiki.sipeed.com/maixpy/doc/en/ai_model_converter/maixcam.html

DON'T FORGET THE BATCH SCRIPT THAT REMOVES THE SOFTMAX AND RENAMES THE OUTPUTS TO output0
"mean":[123.5, 123.5, 123.5], "std":[255.0,255.0,255.0]

python sample_img_classification_.py /root/yolo_cls_cv181x_int8_sym.cvimodel /root/ruido_200Hz_200Hz_c1.jpg

---- monkey

model_transform.py \
--model_name yolov8n_cls \
--model_def best.onnx \
--input_shapes [[1,3,640,640]] \
--mean "0,0,0" \
--scale "0.00392156862745098,0.00392156862745098,0.00392156862745098" \
--keep_aspect_ratio \
--output_names "output0" \
--pixel_format rgb \
--channel_format nchw \
--test_input Ades_1-2.jpg \
--test_result yolov8n_cls_top_outputs.npz \
--tolerance 0.95,0.60 \
--mlir yolov8n_cls.mlir

run_calibration.py yolov8n_cls.mlir \
--dataset train \
--input_num 500 \
-o yolo_cls_cali_table

model_deploy.py \
--mlir yolov8n_cls.mlir \
--quantize INT8 \
--quant_input \
--calibration_table yolo_cls_cali_table \
--processor cv181x \
--test_input yolov8n_cls_in_f32.npz \
--test_reference yolov8n_cls_top_outputs.npz \
--tolerance 0.95,0.60 \
--model yolo_cls_cv181x_int8_sym.cvimodel

./sample_img_classification CLS_YOLOV8 /root /root/barbie2.jpg
python sample_img_classification_.py /root/yolo_cls_cv181x_int8_sym.cvimodel /root/barbie2.jpg
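The "remove the Softmax, outputs = output0" note above can also be done directly on the exported ONNX, instead of cutting the graph with --output_names. A hedged sketch with the onnx package; it assumes a single trailing Softmax feeding the graph output, and the node/tensor names vary by export:

import onnx

# Load the exported classifier (file name assumed from the notes above).
model = onnx.load("best.onnx")
graph = model.graph

# Find the trailing Softmax (assumes exactly one, feeding the graph output).
softmax = next(n for n in graph.node if n.op_type == "Softmax")
logits = softmax.input[0]

# Drop the Softmax and rewire: whichever node produced the logits now
# writes straight to a graph output renamed "output0".
graph.node.remove(softmax)
for node in graph.node:
    for i, out in enumerate(node.output):
        if out == logits:
            node.output[i] = "output0"
graph.output[0].name = "output0"
del graph.value_info[:]  # drop stale intermediate shape info

onnx.checker.check_model(model)
onnx.save(model, "best_nosoftmax.onnx")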
"libs": [], "x": 160, "y": 480, "wires": [ [ "615a8a0791045d32" ] ] } ] if (!msg.payload.data.keypoints.length) return null; let kp = msg.payload.data.keypoints[0][1]; let conf = 50; let estado = null; // detectar braço levantado if (kp[9][2] > conf && kp[9][1] < kp[5][1]) estado = "LEFT HAND"; if (kp[10][2] > conf && kp[10][1] < kp[6][1]) estado = "RIGHT HAND"; // estado anterior let ultimo = context.get("ultimo_estado"); // só envia se mudou if (estado !== ultimo) { context.set("ultimo_estado", estado); if (estado !== null) { msg.payload = estado; return msg; } } return null;
