From 9fdd0beb3ae0854fe07972aace3f19bf8b1fea48 Mon Sep 17 00:00:00 2001 From: Benjamin Goel Date: Tue, 21 Jan 2025 12:40:12 -0500 Subject: [PATCH] Add FPS display to webcam picker frontend (#142) * Initial commit - add object detection route * Add package-lock.json * Add two-column object detection component * Add new layout and component structure * Use Aceternity UI file picker * adds tabs to control menu * modifies to move webcam to main component * adds webcam component * add the react package for webcam util * add shadcn tabs ui component * modifies file upload to show last uploaded file + color change + always show icon to upload * Fix containing element scroll and z-stack * Add overflow scroll to main component * Allow images to assume full width of ObjectDetectionComponent * Add YoloV4 model config to backend API * Create new object-detection endpoint & expand DeviceConfigurations enum to support WH_ARCH_YAML setting * Add ModelType enumeration in frontend to facilitate conditional navigation & use modelID in endpoint invocation * WIP add components to support: - adds a component to open live webcam - hit endpoint - draws bounding boxes WIP: has errors * draw box on image * remove * Optimize real-time object detection to prevent frame backlog * Ensure webcam stops completely when stop button is clicked + layout changes * ts fixes * Fix aspect ratio of video container to 4:3 * Fix navigation and add to SourcePicker component - TODO - wire up API call and detection handling * Refactor inference API call and UI * Fix UI bugs * Add API authentication to YOLOv4 backend * Address PR comments * Add FPS measurement and display * Remove prefix / character in navigation toast --------- Co-authored-by: Anirudh Ramchandran --- app/frontend/src/api/modelsDeployedApis.ts | 2 +- .../components/object_detection/ObjectDetectionComponent.tsx | 2 +- .../src/components/object_detection/utlis/runInference.ts | 5 ++++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git 
a/app/frontend/src/api/modelsDeployedApis.ts b/app/frontend/src/api/modelsDeployedApis.ts index 49e5771f..b1314967 100644 --- a/app/frontend/src/api/modelsDeployedApis.ts +++ b/app/frontend/src/api/modelsDeployedApis.ts @@ -168,7 +168,7 @@ export const handleModelNavigationClick = ( const destination = getDestinationFromModelType(modelType); console.log(`${modelType} button clicked for model: ${modelID}`); console.log(`Opening ${modelType} for model: ${modelName}`); - customToast.success(`${destination} page opened!`); + customToast.success(`${destination.slice(1)} page opened!`); navigate(destination, { state: { containerID: modelID, modelName: modelName }, diff --git a/app/frontend/src/components/object_detection/ObjectDetectionComponent.tsx b/app/frontend/src/components/object_detection/ObjectDetectionComponent.tsx index d5c9b23b..d8a79598 100644 --- a/app/frontend/src/components/object_detection/ObjectDetectionComponent.tsx +++ b/app/frontend/src/components/object_detection/ObjectDetectionComponent.tsx @@ -132,7 +132,7 @@ export const ObjectDetectionComponent: React.FC = () => {
Input image width and height: {metadata.width} x {metadata.height}
- Time to inference: {metadata.inferenceTime} sec + Frame Rate: {metadata.inferenceTime} FPS
)} diff --git a/app/frontend/src/components/object_detection/utlis/runInference.ts b/app/frontend/src/components/object_detection/utlis/runInference.ts index d2bcc409..bb5d3a09 100644 --- a/app/frontend/src/components/object_detection/utlis/runInference.ts +++ b/app/frontend/src/components/object_detection/utlis/runInference.ts @@ -23,6 +23,7 @@ export const runInference = async ( } try { + const startTime = performance.now(); const response = await axios.post( `/models-api/object-detection/`, formData, @@ -30,6 +31,8 @@ export const runInference = async ( headers: { "Content-Type": "multipart/form-data" }, }, ); + const endTime = performance.now(); + const requestLatency = endTime - startTime; // handle imageSourceElement types let width, height; if (imageSourceElement instanceof HTMLCanvasElement) { @@ -42,7 +45,7 @@ export const runInference = async ( const detectionMetadata: DetectionMetadata = { width: width, height: height, - inferenceTime: response.data.inference_time || 33.333, + inferenceTime: response.data.inference_time || (1/(requestLatency/1000)).toFixed(2), }; const detections: Detection[] = response.data.map( // eslint-disable-next-line @typescript-eslint/no-explicit-any