247 lines
8.3 KiB
Python
247 lines
8.3 KiB
Python
|
|
#!/usr/bin/env python3
|
||
|
|
"""
|
||
|
|
Convert trained models for OAK-D deployment.
|
||
|
|
|
||
|
|
Supports conversion to ONNX and OpenVINO formats for edge deployment.
|
||
|
|
|
||
|
|
Usage:
|
||
|
|
python convert_for_deployment.py --model runs/training/weights/best.pt --output oak_d_deployment
|
||
|
|
"""
|
||
|
|
|
||
|
|
import argparse
|
||
|
|
import shutil
|
||
|
|
from pathlib import Path
|
||
|
|
|
||
|
|
|
||
|
|
def convert_rfdetr_for_oak(model_path: Path, output_dir: Path, img_size: int = 640) -> str:
    """Convert an RF-DETR checkpoint for OAK-D deployment.

    Exports the model to ONNX, then runs the OpenVINO Model Optimizer
    (``mo``) to produce FP16 IR files (model.xml / model.bin) in
    ``output_dir``.

    Args:
        model_path: Path to the trained RF-DETR weights (.pt file).
        output_dir: Directory for the exported artifacts (created if needed).
        img_size: Square input resolution baked into the export.

    Returns:
        A human-readable status string; success starts with "✓" and
        failure with "❌". Never raises — errors are reported in the
        returned string (best-effort CLI helper).
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    try:
        from export_rtdetr_oak import export_rfdetr_onnx

        # Step 1: export the PyTorch checkpoint to ONNX.
        onnx_path = output_dir / "model.onnx"
        export_rfdetr_onnx(str(model_path), str(onnx_path), img_size)

        # Step 2: convert ONNX to OpenVINO IR with the Model Optimizer CLI.
        import subprocess

        # Fail fast with a clear message when the OpenVINO CLI is absent;
        # otherwise subprocess.run raises FileNotFoundError whose
        # "[Errno 2] ... 'mo'" text is far less actionable.
        if shutil.which("mo") is None:
            return "❌ OpenVINO conversion failed: 'mo' not found on PATH (install openvino-dev)"

        cmd = [
            "mo", "--input_model", str(onnx_path),
            "--output_dir", str(output_dir),
            "--model_name", "model",
            "--input_shape", f"[1,3,{img_size},{img_size}]",
            "--data_type", "FP16"
        ]

        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            return f"❌ OpenVINO conversion failed: {result.stderr}"

        return f"✓ RF-DETR exported for OAK-D!\n📁 Output: {output_dir}\n🔗 Next: Convert ONNX → RVC using HubAI (online) or ModelConverter (offline)."

    except Exception as e:
        # Deliberate broad catch: this is a CLI tool that reports failures
        # as strings instead of crashing (ImportError, export errors, ...).
        return f"❌ RF-DETR conversion failed: {e}"
||
|
|
def convert_rtdetr_for_oak(model_path: Path, output_dir: Path, img_size: int = 640) -> str:
    """Convert an RT-DETR checkpoint for OAK-D deployment.

    Unlike the other converters, the RT-DETR export helper writes the
    OpenVINO IR (.xml/.bin) directly, so no separate Model Optimizer
    step is required.

    Args:
        model_path: Path to the trained RT-DETR weights (.pt file).
        output_dir: Directory for the exported artifacts (created if needed).
        img_size: Square input resolution baked into the export.

    Returns:
        A human-readable status string; success starts with "✓" and
        failure with "❌". Never raises.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    try:
        from export_rtdetr_oak import export_rtdetr_openvino

        # The helper writes model.xml/model.bin into output_dir itself;
        # its returned paths are not needed here, so they are discarded.
        export_rtdetr_openvino(str(model_path), str(output_dir), img_size)

        return f"✓ RT-DETR exported for OAK-D!\n📁 Output: {output_dir}\n🔗 Next: Convert .xml/.bin → blob using blobconverter.luxonis.com"

    except Exception as e:
        # Deliberate broad catch: report failures as strings (CLI tool).
        return f"❌ RT-DETR conversion failed: {e}"
||
|
|
def convert_yolov6_for_oak(model_path: Path, output_dir: Path, img_size: int = 640) -> str:
    """Convert a YOLOv6 checkpoint for OAK-D deployment.

    Exports the model to ONNX, then runs the OpenVINO Model Optimizer
    (``mo``) to produce FP16 IR files with BGR input
    (``--reverse_input_channels``) as expected by the OAK-D pipeline.

    Args:
        model_path: Path to the trained YOLOv6 weights (.pt file).
        output_dir: Directory for the exported artifacts (created if needed).
        img_size: Square input resolution baked into the export.

    Returns:
        A human-readable status string; success starts with "✓" and
        failure with "❌". Never raises.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    try:
        from export_onnx import export_yolov6_onnx

        # Step 1: export the PyTorch checkpoint to ONNX.
        onnx_path = export_yolov6_onnx(str(model_path), str(output_dir), img_size)

        # Step 2: convert ONNX to OpenVINO IR with the Model Optimizer CLI.
        import subprocess

        # Fail fast with a clear message when the OpenVINO CLI is absent.
        if shutil.which("mo") is None:
            return "❌ OpenVINO conversion failed: 'mo' not found on PATH (install openvino-dev)"

        cmd = [
            "mo", "--input_model", str(onnx_path),
            "--output_dir", str(output_dir),
            "--model_name", "model",
            "--input_shape", f"[1,3,{img_size},{img_size}]",
            "--data_type", "FP16",
            "--reverse_input_channels"
        ]

        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            return f"❌ OpenVINO conversion failed: {result.stderr}"

        return f"✓ YOLOv6 exported for OAK-D!\n📁 Output: {output_dir}\n🔗 Next: Convert .xml/.bin → blob using blobconverter.luxonis.com"

    except Exception as e:
        # Deliberate broad catch: report failures as strings (CLI tool).
        return f"❌ YOLOv6 conversion failed: {e}"
||
|
|
def convert_yolox_for_oak(model_path: Path, output_dir: Path, img_size: int = 640) -> str:
    """Convert a YOLOX checkpoint for OAK-D deployment.

    Exports the model to ONNX, then runs the OpenVINO Model Optimizer
    (``mo``) to produce FP16 IR files with BGR input
    (``--reverse_input_channels``) as expected by the OAK-D pipeline.

    Args:
        model_path: Path to the trained YOLOX weights (.pt file).
        output_dir: Directory for the exported artifacts (created if needed).
        img_size: Square input resolution baked into the export.

    Returns:
        A human-readable status string; success starts with "✓" and
        failure with "❌". Never raises.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    try:
        from export_onnx import export_yolox_onnx

        # Step 1: export the PyTorch checkpoint to ONNX.
        onnx_path = export_yolox_onnx(str(model_path), str(output_dir), img_size)

        # Step 2: convert ONNX to OpenVINO IR with the Model Optimizer CLI.
        import subprocess

        # Fail fast with a clear message when the OpenVINO CLI is absent.
        if shutil.which("mo") is None:
            return "❌ OpenVINO conversion failed: 'mo' not found on PATH (install openvino-dev)"

        cmd = [
            "mo", "--input_model", str(onnx_path),
            "--output_dir", str(output_dir),
            "--model_name", "model",
            "--input_shape", f"[1,3,{img_size},{img_size}]",
            "--data_type", "FP16",
            "--reverse_input_channels"
        ]

        result = subprocess.run(cmd, capture_output=True, text=True)
        if result.returncode != 0:
            return f"❌ OpenVINO conversion failed: {result.stderr}"

        return f"✓ YOLOX exported for OAK-D!\n📁 Output: {output_dir}\n🔗 Next: Convert .xml/.bin → blob using blobconverter.luxonis.com"

    except Exception as e:
        # Deliberate broad catch: report failures as strings (CLI tool).
        return f"❌ YOLOX conversion failed: {e}"
||
|
|
def detect_model_type(model_path: Path) -> str:
    """Infer which framework produced a checkpoint.

    Detection order:
      1. Substring match on the lowercased file path (cheap, no I/O).
      2. Inspection of the checkpoint's ``state_dict`` key names.
      3. Fallback to ``'yolox'`` when nothing matches or loading fails.

    Args:
        model_path: Path to a .pt checkpoint.

    Returns:
        One of ``'rf-detr'``, ``'rt-detr'``, ``'yolov6'``, ``'yolox'``.
    """
    path_str = str(model_path).lower()

    # Path-based detection first. Order matters: 'rf-detr' is tested
    # before 'rt-detr' so the two hyphenated spellings don't collide.
    path_rules = (
        (('rf-detr', 'rfdetr'), 'rf-detr'),
        (('rt-detr', 'rtdetr'), 'rt-detr'),
        (('yolov6',), 'yolov6'),
        (('yolox',), 'yolox'),
    )
    for tokens, framework in path_rules:
        if any(tok in path_str for tok in tokens):
            return framework

    # Fall back to inspecting the checkpoint's parameter names.
    try:
        import torch
        # SECURITY NOTE(review): weights_only=False unpickles arbitrary
        # objects — only run this on checkpoints from trusted sources.
        checkpoint = torch.load(model_path, map_location='cpu', weights_only=False)

        if 'state_dict' in checkpoint:
            # Lowercase once instead of per membership test.
            keys = [k.lower() for k in checkpoint['state_dict']]

            if any('rtdetr' in k for k in keys):
                return 'rt-detr'
            if any('rf_detr' in k for k in keys):
                return 'rf-detr'
            if any('yolo' in k for k in keys):
                # Distinguish YOLOv6 from YOLOX by a version marker in any key.
                return 'yolov6' if any('v6' in k for k in keys) else 'yolox'

    except Exception:
        # Missing/unreadable checkpoint: fall through to the default.
        pass

    # Default fallback when detection is inconclusive.
    return 'yolox'
||
|
|
def main() -> None:
    """CLI entry point: parse arguments and run the matching converter.

    Raises:
        SystemExit: with status 1 when conversion fails, so shell
            pipelines can detect the error (previously the script
            always exited 0 even on failure).
    """
    parser = argparse.ArgumentParser(description="Convert trained models for OAK-D deployment")
    parser.add_argument(
        '--model',
        type=Path,
        required=True,
        help='Path to trained model weights (.pt file)'
    )
    parser.add_argument(
        '--output',
        type=Path,
        default=Path('oak_d_deployment'),
        help='Output directory for converted models'
    )
    parser.add_argument(
        '--img-size',
        type=int,
        default=640,
        choices=[320, 416, 512, 640, 800, 1024],
        help='Input image size for the model'
    )
    parser.add_argument(
        '--framework',
        choices=['auto', 'rf-detr', 'rt-detr', 'yolov6', 'yolox'],
        default='auto',
        help='Model framework (auto-detect if not specified)'
    )

    args = parser.parse_args()

    # Resolve 'auto' by sniffing the checkpoint path/contents.
    if args.framework == 'auto':
        framework = detect_model_type(args.model)
        print(f"Auto-detected framework: {framework}")
    else:
        framework = args.framework

    print(f"Converting {framework.upper()} model for OAK-D deployment...")
    print(f"Model: {args.model}")
    print(f"Output: {args.output}")
    print(f"Image size: {args.img_size}")

    # Dispatch table instead of an if/elif chain; keys mirror --framework.
    converters = {
        'rf-detr': convert_rfdetr_for_oak,
        'rt-detr': convert_rtdetr_for_oak,
        'yolov6': convert_yolov6_for_oak,
        'yolox': convert_yolox_for_oak,
    }
    converter = converters.get(framework)
    if converter is None:
        result = f"❌ Unsupported framework: {framework}"
    else:
        result = converter(args.model, args.output, args.img_size)

    print(result)

    if "✓" in result:
        print("\n📋 Next steps:")
        print("1. Test OpenVINO model (optional):")
        print(" python -c \"from openvino.runtime import Core; core = Core(); model = core.read_model('model.xml'); print('✓ Model loaded')\"")
        print("2. Convert to RVC compiled format:")
        print(" - Online: HubAI conversion (fastest setup)")
        print(" - Offline: ModelConverter (requires Docker)")
        print(" - Docs: https://docs.luxonis.com/software-v3/ai-inference/conversion/")
        print("3. Deploy to OAK-D using DepthAI Python API")
    else:
        # Converters report failures as "❌ ..." strings; propagate to shell.
        raise SystemExit(1)
||
|
|
# Run the CLI only when executed as a script (not on import).
if __name__ == "__main__":
    main()
|