able to convert onnx to blob

This commit is contained in:
2025-12-23 12:53:52 -07:00
parent d3664693a8
commit da5becd5ae
24 changed files with 1242 additions and 10 deletions

View File

@ -874,14 +874,29 @@ class AnnotationApp:
return "✓ Training process terminated"
return "⚠️ No training in progress"
def export_for_oak_d(self, model_path: str, output_dir: str = "oak_d_export", img_size: int = 640):
def get_model_path_from_display(self, model_display: str) -> Path | None:
"""Get the actual model path from a display name."""
if not hasattr(self, 'available_models') or not self.available_models:
return None
for model in self.available_models:
if model['display'] == model_display:
return model['path']
return None
def export_for_oak_d(self, model_display: str, output_dir: str = "oak_d_export", img_size: int = 640):
"""Export trained model for OAK-D camera deployment."""
try:
weights_path = Path(model_path)
# Convert display name to actual path
weights_path = self.get_model_path_from_display(model_display)
if not weights_path:
return f"❌ Model '{model_display}' not found. Try clicking '🔍 Scan for Models' first."
output_path = Path(output_dir)
if not weights_path.exists():
return "❌ Model weights not found"
return f"❌ Model weights not found at: {weights_path}"
output_path.mkdir(parents=True, exist_ok=True)
@ -951,7 +966,19 @@ class AnnotationApp:
except Exception as e:
# OpenVINO not available, just return ONNX
return f"{model_type.upper()} exported to ONNX!\n📁 Output: {output_path}\n🔗 Next: Convert ONNX to blob using blobconverter.luxonis.com\n⚠️ OpenVINO not available: {str(e)}"
import shutil
docker_hint = ""
if shutil.which("docker") is None:
docker_hint = "\n⚠️ Docker not found (needed for offline conversion via ModelConverter)."
return (
f"{model_type.upper()} exported to ONNX!\n"
f"📁 Output: {output_path}\n"
f"🔗 Next: Convert ONNX → RVC using HubAI (online) or ModelConverter (offline).\n"
f"Docs: https://docs.luxonis.com/software-v3/ai-inference/conversion/\n"
f"💡 Offline conversion: Use Luxonis ModelConverter with Docker\n"
f"⚠️ OpenVINO export not available: {str(e)}"
f"{docker_hint}"
)
except Exception as e:
return f"❌ Export failed: {str(e)}"
@ -1178,19 +1205,18 @@ def create_ui(app: AnnotationApp) -> gr.Blocks:
python -c "from openvino.runtime import Core; core = Core(); model = core.read_model('model.xml'); print('✓ Model loaded')"
```
2. **Convert to Blob**:
- Go to: https://blobconverter.luxonis.com/
- Upload your `.xml` and `.bin` files
- Select OAK-D device
- Download the `.blob` file
2. **Convert to RVC compiled format** (recommended by Luxonis):
- Online: HubAI conversion (fastest setup)
- Offline: ModelConverter (requires Docker)
- Docs: https://docs.luxonis.com/software-v3/ai-inference/conversion/
3. **Deploy to OAK-D**:
- Use DepthAI Python API
- Or use OAK-D examples with your blob
### 💡 Tips
- Use **FP32** for best accuracy (default)
- **Nano models** work best on edge devices
- If you quantize, use real calibration images for best accuracy
- Test inference speed vs accuracy trade-off
""")

9
inspect_model.py Normal file
View File

@ -0,0 +1,9 @@
"""Quick inspection utility: print the I/O tensors of an exported ONNX model.

Usage:
    python inspect_model.py [path/to/model.onnx]

With no argument it inspects the OAK-D deployment export, matching the
original hard-coded behavior.
"""
import sys

import onnx

# Default matches the original hard-coded path so existing usage is unchanged.
DEFAULT_MODEL = 'oak_d_deployment/yolox_model.onnx'


def print_model_io(model_path: str) -> None:
    """Load the ONNX graph at *model_path* and print its inputs and outputs."""
    model = onnx.load(model_path)
    print('Inputs:')
    for inp in model.graph.input:
        print(f'  {inp.name}: {inp.type}')
    print('Outputs:')
    for out in model.graph.output:
        print(f'  {out.name}: {out.type}')


if __name__ == '__main__':
    # Optional CLI argument generalizes the script beyond the single
    # hard-coded export; guard prevents side effects on import.
    print_model_io(sys.argv[1] if len(sys.argv) > 1 else DEFAULT_MODEL)

View File

@ -0,0 +1,15 @@
# Luxonis ModelConverter configuration for the exported YOLOX ONNX model.
# NOTE(review): indentation reconstructed with standard YAML nesting — the
# pasted diff had flattened it; verify against the file on disk.
model:
  path: yolox_model.onnx
  inputs:
    - name: images
      shape: [1, 3, 640, 640]  # matches the app's 640 px export size
      encoding:
        from: RGB
        to: BGR  # presumably to match BGR camera frames — confirm
  outputs:
    - name: output0
  calibration:
    path: null # No calibration needed for this example

Binary file not shown.

View File

# Luxonis ModelConverter multi-stage configuration (single default stage)
# for converting the exported YOLOX ONNX model.
# NOTE(review): indentation reconstructed with standard YAML nesting — the
# pasted diff had flattened it; verify against the file on disk.
stages:
  default_stage:
    input_model: models/yolox_model.onnx
    inputs:
      - name: images
        shape: [1, 3, 640, 640]  # matches the app's 640 px export size
        encoding:
          from: RGB
          to: BGR  # RGB→BGR channel swap applied during conversion
    outputs:
      - name: output0

View File

@ -0,0 +1,57 @@
{
"cmd_info": {
"model_optimizer": [
"mo",
"--output_dir",
"shared_with_container/outputs/yolox_model_to_rvc2_2025_12_23_19_33_11/intermediate_outputs",
"--output",
"output0",
"--compress_to_fp16",
"--input",
"images[1 3 640 640]{f32}",
"--reverse_input_channels",
"--input_model",
"shared_with_container/outputs/yolox_model_to_rvc2_2025_12_23_19_33_11/intermediate_outputs/yolox_model-simplified.onnx"
],
"compile_tool": [
"compile_tool",
"-d",
"MYRIAD",
"-ip",
"U8",
"-m",
"shared_with_container/outputs/yolox_model_to_rvc2_2025_12_23_19_33_11/intermediate_outputs/yolox_model-simplified.xml",
"-o",
"shared_with_container/outputs/yolox_model_to_rvc2_2025_12_23_19_33_11/intermediate_outputs/blobs/yolox_model_8shave.blob",
"-c",
"/tmp/tmpjjlw5b7b.conf"
]
},
"modelconverter_version": "0.5.1",
"model_optimizer_version": "2022.3.0-9052-9752fafe8eb-releases/2022/3",
"compile_tool_version": "2022.3.0",
"compile_tool_build": "2022.3.0-9213-bdadcd7583c-releases/2022/3",
"target_devices": [
"MYRIAD"
],
"is_superblob": true,
"number_of_shaves": [
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
11,
12,
13,
14,
15,
16
],
"number_of_cmx_slices": 8
}

View File

@ -0,0 +1,91 @@
{
"input_model": "shared_with_container/models/yolox_model.onnx",
"input_bin": null,
"input_file_type": "ONNX",
"inputs": [
{
"name": "images",
"shape": [
1,
3,
640,
640
],
"layout": "NCHW",
"data_type": "float32",
"calibration": {
"max_images": 20,
"min_value": 0.0,
"max_value": 255.0,
"mean": 127.5,
"std": 35.0,
"data_type": "float32"
},
"scale_values": null,
"mean_values": null,
"frozen_value": null,
"encoding": {
"from_": "RGB",
"to": "BGR"
}
}
],
"outputs": [
{
"name": "output0",
"shape": [
1,
14,
8400
],
"layout": "NCD",
"data_type": "float32"
}
],
"keep_intermediate_outputs": true,
"disable_onnx_simplification": false,
"disable_onnx_optimization": false,
"output_remote_url": null,
"intermediate_outputs_remote_url": null,
"put_file_plugin": null,
"hailo": {
"disable_calibration": false,
"force_onnx_names": true,
"optimization_level": 2,
"compression_level": 2,
"batch_size": 8,
"disable_compilation": false,
"alls": [],
"hw_arch": "hailo8"
},
"rvc2": {
"disable_calibration": false,
"mo_args": [],
"compile_tool_args": [],
"compress_to_fp16": true,
"number_of_shaves": 8,
"superblob": true,
"n_workers": null
},
"rvc3": {
"disable_calibration": false,
"mo_args": [],
"compile_tool_args": [],
"compress_to_fp16": true,
"pot_target_device": "VPU"
},
"rvc4": {
"disable_calibration": false,
"snpe_onnx_to_dlc_args": [],
"snpe_dlc_quant_args": [],
"snpe_dlc_graph_prepare_args": [],
"keep_raw_images": false,
"use_per_channel_quantization": true,
"use_per_row_quantization": false,
"optimization_level": 2,
"quantization_mode": "INT8_STANDARD",
"htp_socs": [
"sm8550"
]
}
}