Click here to Skip to main content
15,946,320 members
Please Sign up or sign in to vote.
0.00/5 (No votes)
I have put the ONNX model in my wwwroot/js folder, but it is not being detected, and I am getting this error.



error = Error: failed to load external data file: js/sevensegment.onnx at gn (https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js:1806:20708) at async Co.fetchModelAndCopyToWasmMemory (https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js:18...

This happens when trying to access the model in a Razor file:

//const modelPromise = ort.InferenceSession.create("~/js/sevensegment.onnx");
//const modelPromise = ort.InferenceSession.create("/js/sevensegment.onnx");
//const modelPromise = ort.InferenceSession.create("js/sevensegment.onnx");  tried different paths...

// Self-invoking async bootstrap: try to load the ONNX model once and
// report success or failure on the console.
(async function loadModel() {
    const modelPath = "js/sevensegment.onnx"; // must resolve under wwwroot
    try {
        const session = await ort.InferenceSession.create(modelPath);
        console.log("Model loaded successfully");

        // Inference code would go here, e.g.:
        // const inputTensor = new ort.Tensor(...);
        // const outputTensor = await session.run({ input: inputTensor });
        // console.log(outputTensor);
    } catch (error) {
        console.error("Failed to load model:", error);
    }
})();


What I have tried:

I have tried different approaches; I am new to DevExpress Blazor.

The following works in the simple HTML + JavaScript file that I used:

<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>YOLOv8 Object Detection</title>
    <script src="https://cdn.jsdelivr.net/npm/onnxruntime-web/dist/ort.min.js"></script>
    <style>
        canvas {
            display:block;
            border: 1px solid black;
            margin-top:10px;
        }
        video {
            display:block;
            border: 1px solid black;
            margin-top:10px;
        }
    </style>
</head>
<body>
    <video id="videoElement" width="640" height="480" autoplay></video>
    <button id="detectButton">Detect Objects</button>
    <canvas></canvas>
    <script>
        const videoElement = document.getElementById('videoElement');
        const detectButton = document.getElementById('detectButton');

        // Get webcam feed on page load
        navigator.mediaDevices.getUserMedia({ video: true })
            .then(stream => {
                videoElement.srcObject = stream;
            });

        /**
         * Prepare an input image for YOLOv8 object detection.
         * Draws the image onto a 640x640 canvas and converts its pixels into
         * a planar [R..., G..., B...] float array normalized to [0, 1].
         * @param buf Image blob (e.g. from canvas.toBlob or an uploaded file)
         * @returns Promise resolving to [pixels, original_width, original_height]
         */
        async function prepare_input(buf) {
            return new Promise((resolve, reject) => {
                const img = new Image();
                const objectUrl = URL.createObjectURL(buf);
                img.src = objectUrl;
                // BUG FIX: without an onerror handler the promise would hang
                // forever if the blob cannot be decoded as an image.
                img.onerror = () => {
                    URL.revokeObjectURL(objectUrl);
                    reject(new Error("Failed to decode input image"));
                };
                img.onload = () => {
                    URL.revokeObjectURL(objectUrl); // avoid leaking object URLs
                    const [img_width, img_height] = [img.width, img.height];
                    const canvas = document.createElement("canvas");
                    canvas.width = 640;
                    canvas.height = 640;
                    const context = canvas.getContext("2d");
                    context.drawImage(img, 0, 0, 640, 640);
                    const pixels = context.getImageData(0, 0, 640, 640).data;

                    // Convert interleaved RGBA to planar RGB, normalized to [0,1].
                    const red = [], green = [], blue = [];
                    for (let index = 0; index < pixels.length; index += 4) {
                        red.push(pixels[index] / 255.0);
                        green.push(pixels[index + 1] / 255.0);
                        blue.push(pixels[index + 2] / 255.0);
                    }
                    resolve([[...red, ...green, ...blue], img_width, img_height]);
                };
            });
        }

        // Capture the current video frame and run object detection on it.
        detectButton.addEventListener("click", async () => {
            try {
                // Guard: video dimensions are 0 until the webcam stream has started;
                // a 0x0 canvas would make toBlob/drawImage fail.
                if (videoElement.videoWidth === 0 || videoElement.videoHeight === 0) {
                    console.warn("Video stream not ready yet");
                    return;
                }
                const canvas = document.createElement("canvas");
                canvas.width = videoElement.videoWidth;
                canvas.height = videoElement.videoHeight;
                const ctx = canvas.getContext("2d");

                // Draw current frame from video onto the canvas
                ctx.drawImage(videoElement, 0, 0, canvas.width, canvas.height);
                const imageBlob = await new Promise(resolve => canvas.toBlob(resolve, 'image/jpeg'));
                const boxes = await detect_objects_on_image(imageBlob);
                draw_image_and_boxes(imageBlob, boxes);
            } catch (err) {
                // BUG FIX: errors thrown inside an async event handler are
                // otherwise silently lost as unhandled promise rejections.
                console.error("Detection failed:", err);
            }
        });

        /**
         * Render the captured frame onto the page canvas and overlay the
         * detected bounding boxes with their labels.
         * @param file Uploaded file object
         * @param boxes Array of bounding boxes in format [[x1,y1,x2,y2,object_type,probability],...]
         */
        function draw_image_and_boxes(file, boxes) {
            const image = new Image();
            image.src = URL.createObjectURL(file);
            image.onload = () => {
                const target = document.querySelector("canvas");
                target.width = image.width;
                target.height = image.height;
                const ctx = target.getContext("2d");
                ctx.drawImage(image, 0, 0);
                ctx.strokeStyle = "#00FF00";
                ctx.lineWidth = 3;
                ctx.font = "18px serif";
                for (const [x1, y1, x2, y2, label] of boxes) {
                    // Box outline, then a filled label background sized to the text.
                    ctx.strokeRect(x1, y1, x2 - x1, y2 - y1);
                    ctx.fillStyle = "#00ff00";
                    const labelWidth = ctx.measureText(label).width;
                    ctx.fillRect(x1, y1, labelWidth + 10, 25);
                    ctx.fillStyle = "#000000";
                    ctx.fillText(label, x1, y1 + 18);
                }
            };
        }

        /**
         * Run the full detection pipeline: preprocess the image, execute the
         * YOLOv8 network, and post-process the raw output into boxes.
         * @param buf Input image body
         * @returns Array of bounding boxes in format [[x1,y1,x2,y2,object_type,probability],..]
         */
        async function detect_objects_on_image(buf) {
            const [input, img_width, img_height] = await prepare_input(buf);
            const rawOutput = await run_model(input);
            return process_output(rawOutput, img_width, img_height);
        }

        // Start loading the model once at startup so every detection reuses
        // the same session instead of reloading the model on each click.
        const modelPromise = ort.InferenceSession.create("sevensegment.onnx");

        /**
         * Pass the prepared pixel array through the YOLOv8 network.
         * @param input Input pixels array (planar RGB, 3*640*640 floats)
         * @returns Raw output of neural network as a flat array of numbers
         */
        async function run_model(input) {
            const session = await modelPromise; // reuse the pre-loaded session
            const tensor = new ort.Tensor(Float32Array.from(input), [1, 3, 640, 640]);
            const results = await session.run({images: tensor});
            return results["output0"].data;
        }

        /**
         * Convert the raw YOLOv8 output tensor into an array of detected objects.
         * The output is laid out channel-major: for each of the 8400 candidate
         * anchors there are 4 box values (xc, yc, w, h) followed by one score
         * per class.
         * @param output Raw output of YOLOv8 network
         * @param img_width Width of original image
         * @param img_height Height of original image
         * @returns Array of detected objects in a format [[x1,y1,x2,y2,object_type,probability],..]
         */
        function process_output(output, img_width, img_height) {
            const NUM_ANCHORS = 8400;
            // BUG FIX: the class count must match this model's label set
            // (seven-segment characters), not the hard-coded 80 COCO classes
            // the original scanned — which read past the model's real channels.
            const NUM_CLASSES = yolo_classes.length;
            let boxes = [];
            for (let index = 0; index < NUM_ANCHORS; index++) {
                // Find the best-scoring class for this anchor.
                let class_id = 0;
                let prob = 0;
                for (let col = 0; col < NUM_CLASSES; col++) {
                    const score = output[NUM_ANCHORS * (col + 4) + index];
                    if (score > prob) {
                        prob = score;
                        class_id = col;
                    }
                }
                if (prob < 0.5) {
                    continue;
                }
                const label = yolo_classes[class_id];
                // Box center/size are in 640x640 model space; scale to image space.
                const xc = output[index];
                const yc = output[NUM_ANCHORS + index];
                const w = output[2 * NUM_ANCHORS + index];
                const h = output[3 * NUM_ANCHORS + index];
                const x1 = (xc - w / 2) / 640 * img_width;
                const y1 = (yc - h / 2) / 640 * img_height;
                const x2 = (xc + w / 2) / 640 * img_width;
                const y2 = (yc + h / 2) / 640 * img_height;
                boxes.push([x1, y1, x2, y2, label, prob]);
            }

            // Non-maximum suppression: keep the highest-probability box and
            // drop any remaining box that overlaps it by IoU >= 0.7.
            boxes = boxes.sort((box1, box2) => box2[5] - box1[5]);
            const result = [];
            while (boxes.length > 0) {
                result.push(boxes[0]);
                boxes = boxes.filter(box => iou(boxes[0], box) < 0.7);
            }
            return result;
        }

        /**
         * Compute the "Intersection-over-Union" coefficient for two boxes.
         * https://pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/.
         * @param box1 First box in format: [x1,y1,x2,y2,object_class,probability]
         * @param box2 Second box in format: [x1,y1,x2,y2,object_class,probability]
         * @returns Intersection over union ratio as a float number
         */
        function iou(box1, box2) {
            const overlap = intersection(box1, box2);
            return overlap / union(box1, box2);
        }

        /**
         * Compute the union area of two boxes
         * (area of box1 + area of box2 - their overlap).
         * @param box1 First box in format [x1,y1,x2,y2,object_class,probability]
         * @param box2 Second box in format [x1,y1,x2,y2,object_class,probability]
         * @returns Area of the boxes union as a float number
         */
        function union(box1, box2) {
            const [ax1, ay1, ax2, ay2] = box1;
            const [bx1, by1, bx2, by2] = box2;
            const area1 = (ax2 - ax1) * (ay2 - ay1);
            const area2 = (bx2 - bx1) * (by2 - by1);
            return area1 + area2 - intersection(box1, box2);
        }

        /**
         * Compute the intersection area of two boxes.
         * BUG FIX: the overlap extent is clamped at zero — the original
         * returned a spurious POSITIVE area for disjoint boxes (negative
         * width times negative height), which corrupts the IoU value used
         * during non-maximum suppression.
         * @param box1 First box in format [x1,y1,x2,y2,object_class,probability]
         * @param box2 Second box in format [x1,y1,x2,y2,object_class,probability]
         * @returns Area of intersection of the boxes (0 when they do not overlap)
         */
        function intersection(box1, box2) {
            const [box1_x1, box1_y1, box1_x2, box1_y2] = box1;
            const [box2_x1, box2_y1, box2_x2, box2_y2] = box2;
            const x1 = Math.max(box1_x1, box2_x1);
            const y1 = Math.max(box1_y1, box2_y1);
            const x2 = Math.min(box1_x2, box2_x2);
            const y2 = Math.min(box1_y2, box2_y2);
            // A negative extent on either axis means the boxes do not overlap.
            return Math.max(0, x2 - x1) * Math.max(0, y2 - y1);
        }

        /**
         * Array of class labels for this custom seven-segment-display model.
         * NOTE: this is NOT the standard 80-class COCO label set; the model
         * emits one score per entry in this array (13 classes).
         */
        const yolo_classes = [
            '.','0', '1', '2', '3', '4', '5', '6', '7', '8', '9','-','kwh'
        ];
    </script>
</body>
</html>


This works when I open it with a local server. But when I moved it to the Blazor project, it does not detect the ONNX model — or am I missing something?
Posted

1 solution

The best place to ask for DevExpress help is the DevExpress Support[^] forum.
 
Share this answer
 

This content, along with any associated source code and files, is licensed under The Code Project Open License (CPOL)



CodeProject, 20 Bay Street, 11th Floor Toronto, Ontario, Canada M5J 2N8 +1 (416) 849-8900