diff --git a/docs/source/_static/images/examples/feature_motion_estimation.gif b/docs/source/_static/images/examples/feature_motion_estimation.gif
new file mode 100644
index 000000000..97819cce3
Binary files /dev/null and b/docs/source/_static/images/examples/feature_motion_estimation.gif differ
diff --git a/docs/source/components/device.rst b/docs/source/components/device.rst
index 517b24de5..40423633f 100644
--- a/docs/source/components/device.rst
+++ b/docs/source/components/device.rst
@@ -55,6 +55,117 @@ subnet, you can specify the device (either with MxID, IP, or USB port name) you
     with depthai.Device(pipeline, device_info) as device:
         # ...
 
+
+Watchdog
+########
+
+Understanding the Watchdog Mechanism in POE Devices
+----------------------------------------------------
+
+The watchdog is a crucial component in the operation of POE (Power over Ethernet) devices with DepthAI. When DepthAI disconnects from a POE device, the watchdog mechanism is the first to respond, initiating a reset of the camera. This reset is followed by a complete system reboot, which includes the loading of the DepthAI bootloader and the initialization of the entire networking stack.
+
+.. note::
+    This process is necessary to make the camera available for reconnection and typically takes about 10 seconds, which means the fastest possible reconnection time is 10 seconds.
+
+
+Customizing the Watchdog Timeout
+--------------------------------
+
+.. tabs::
+
+    .. tab:: **Linux/MacOS**
+
+        Set the environment variables `DEPTHAI_WATCHDOG_INITIAL_DELAY` and `DEPTHAI_BOOTUP_TIMEOUT` to your desired timeout values (in milliseconds) as follows:
+
+        .. code-block:: bash
+
+            DEPTHAI_WATCHDOG_INITIAL_DELAY=<value> DEPTHAI_BOOTUP_TIMEOUT=<value> python3 script.py
+
+    .. tab:: **Windows PowerShell**
+
+        For Windows PowerShell, set the environment variables like this:
+
+        .. code-block:: powershell
+
+            $env:DEPTHAI_WATCHDOG_INITIAL_DELAY=<value>
+            $env:DEPTHAI_BOOTUP_TIMEOUT=<value>
+            python3 script.py
+
+    .. tab:: **Windows CMD**
+
+        In Windows CMD, you can set the environment variables as follows:
+
+        .. code-block:: bat
+
+            set DEPTHAI_WATCHDOG_INITIAL_DELAY=<value>
+            set DEPTHAI_BOOTUP_TIMEOUT=<value>
+            python3 script.py
+
+Code-Based Configuration
+------------------------
+
+Alternatively, you can set the timeout directly in your code:
+
+.. code-block:: python
+
+    pipeline = depthai.Pipeline()
+
+    # Create a BoardConfig object
+    config = depthai.BoardConfig()
+
+    # Set the parameters (values are in milliseconds)
+    config.watchdogInitialDelayMs = <value>
+    config.watchdogTimeoutMs = <value>
+
+    pipeline.setBoardConfig(config)
+
+By adjusting these settings, you can tailor the watchdog functionality to better suit your specific requirements.
+
+
+Environment Variables
+#####################
+
+The following table lists various environment variables used in the system, along with their descriptions:
+
+.. list-table::
+   :widths: 50 50
+   :header-rows: 1
+
+   * - Environment Variable
+     - Description
+   * - `DEPTHAI_LEVEL`
+     - Sets logging verbosity, options: 'trace', 'debug', 'warn', 'error', 'off'
+   * - `XLINK_LEVEL`
+     - Sets logging verbosity of XLink library, options: 'debug', 'info', 'warn', 'error', 'fatal', 'off'
+   * - `DEPTHAI_INSTALL_SIGNAL_HANDLER`
+     - Set to 0 to disable installing Backward signal handler for stack trace printing
+   * - `DEPTHAI_WATCHDOG`
+     - Sets device watchdog timeout. Useful for debugging (DEPTHAI_WATCHDOG=0), to prevent device reset while the process is paused.
+   * - `DEPTHAI_WATCHDOG_INITIAL_DELAY`
+     - Specifies delay after which the device watchdog starts.
+   * - `DEPTHAI_SEARCH_TIMEOUT`
+     - Specifies timeout in milliseconds for device searching in blocking functions.
+   * - `DEPTHAI_CONNECT_TIMEOUT`
+     - Specifies timeout in milliseconds for establishing a connection to a given device.
+   * - `DEPTHAI_BOOTUP_TIMEOUT`
+     - Specifies timeout in milliseconds for waiting for the device to boot after sending the binary.
+   * - `DEPTHAI_PROTOCOL`
+     - Restricts default search to the specified protocol. Options: any, usb, tcpip.
+   * - `DEPTHAI_DEVICE_MXID_LIST`
+     - Restricts default search to the specified MXIDs. Accepts comma separated list of MXIDs. Lists filter results in an "AND" manner and not "OR"
+   * - `DEPTHAI_DEVICE_ID_LIST`
+     - Alias to MXID list. Lists filter results in an "AND" manner and not "OR"
+   * - `DEPTHAI_DEVICE_NAME_LIST`
+     - Restricts default search to the specified NAMEs. Accepts comma separated list of NAMEs. Lists filter results in an "AND" manner and not "OR"
+   * - `DEPTHAI_DEVICE_BINARY`
+     - Overrides device Firmware binary. Mostly for internal debugging purposes.
+   * - `DEPTHAI_BOOTLOADER_BINARY_USB`
+     - Overrides device USB Bootloader binary. Mostly for internal debugging purposes.
+   * - `DEPTHAI_BOOTLOADER_BINARY_ETH`
+     - Overrides device Network Bootloader binary. Mostly for internal debugging purposes.
+
+
+
 Multiple devices
 ################
@@ -120,7 +231,7 @@ Two examples would be:
 - encoding (most prominently H264/H265 as frame drops can lead to artifacts).
 
 Blocking behaviour
-******************
+------------------
 
 By default, queues are **blocking** and their size is **30**, so when the device fills up a queue and
 when the limit is reached, any additional messages from the device will be blocked and the library will wait
 until it can add new messages to the queue.
@@ -133,7 +244,8 @@ It will wait for the host to consume (eg. :code:`queue.get()`) a message before
 to be empty again.
 
 Non-Blocking behaviour
-**********************
+----------------------
+
 Making the queue non-blocking will change the behavior in the situation described above - instead of waiting, the library will discard
 the oldest message and add the new one to the queue, and then continue its processing loop (so it won't get blocked). :code:`maxSize` determines the
 size of the queue and it also helps to control memory usage.
@@ -142,7 +254,7 @@ For example, if a message has 5MB of data, and the queue size is 30, this queue
 up to 150MB of data in the memory on the host (the messages can also get really big, for instance, a single 4K NV12 encoded frame takes about ~12MB).
 
 Some additional information
-***************************
+---------------------------
 
 - Decreasing the queue size to 1 and setting non-blocking behavior will effectively mean
 "I only want the latest packet from the queue".
 - Queues are thread-safe - they can be accessed from any thread.
diff --git a/docs/source/components/nodes/uvc.rst b/docs/source/components/nodes/uvc.rst
new file mode 100644
index 000000000..96569afe8
--- /dev/null
+++ b/docs/source/components/nodes/uvc.rst
@@ -0,0 +1,62 @@
+UVC
+===
+
+The DepthAI UVC (`USB Video Class <https://en.wikipedia.org/wiki/USB_video_device_class>`__) node allows OAK devices to function as standard webcams. This feature is particularly useful for integrating OAK devices into applications that require video input, such as video conferencing tools or custom video processing applications.
+
+What is UVC?
+############
+
+UVC refers to the USB Video Class standard, which is a USB device class that describes devices capable of streaming video.
This standard allows video devices to interface with computers and other devices without needing specific drivers, making them immediately compatible with a wide range of systems and software. + +How Does the UVC Node Work? +########################### + +The UVC node in DepthAI leverages this standard to stream video from OAK devices. When the UVC node is enabled, the OAK device is recognized as a standard webcam by the host system. This allows the device to be used in any application that supports webcam input, such as Zoom, Skype, or custom video processing software. + +The UVC node streams video data over a USB connection. It is important to use a USB3 cable for this purpose, as USB2 may not provide the necessary bandwidth for stable video streaming. + +.. note:: + + The UVC node can currently handle NV12 video streams from OAK devices. For streams in other formats, conversion to NV12 is necessary, which can be achieved using the :ref:`ImageManip` node. It's important to note that streams incompatible with NV12 conversion, like depth streams, are not supported by the UVC node. + +Examples of UVC Node Usage +########################## + +1. **DepthAI Demo Script**: The DepthAI demo script includes a UVC application that can be run to enable the UVC node on an OAK device. + + .. code-block:: bash + + python3 depthai_demo.py --app uvc + +2. **Custom Python Script**: A custom Python script can be written to enable the UVC node and configure the video stream parameters. Here are some pre-written examples: + + - :ref:`UVC & Color Camera` + - :ref:`UVC & Mono Camera` + - :ref:`UVC & Disparity` + + +3. **OBS Forwarding**: For applications where direct UVC node usage is not possible, OBS Studio can be used to forward the UVC stream. + + +Reference +######### + +.. tabs:: + + .. tab:: Python + + .. autoclass:: depthai.node.UVC + :members: + :inherited-members: + :noindex: + + .. tab:: C++ + + .. doxygenclass:: dai::node::UVC + :project: depthai-core + :members: + :private-members: + :undoc-members: + + +.. include:: ../../includes/footer-short.rst diff --git a/docs/source/samples/FeatureTracker/feature_motion_estimation.rst b/docs/source/samples/FeatureTracker/feature_motion_estimation.rst new file mode 100644 index 000000000..d3e4ec230 --- /dev/null +++ b/docs/source/samples/FeatureTracker/feature_motion_estimation.rst @@ -0,0 +1,37 @@ +Feature Tracker with Motion Estimation +====================================== + +This example demonstrates the capabilities of the :ref:`FeatureTracker` combined with motion estimation. It detects and tracks features between consecutive frames using optical flow. +Each feature is assigned a unique ID. The motion of the camera is estimated based on the tracked features, and the estimated motion (e.g., Up, Down, Left, Right, Rotating) is displayed on screen. + +The :ref:`Feature Detector` example only detects features without estimating motion. + +Demo +#### + +.. image:: ../../../../docs/source/_static/images/examples/feature_motion_estimation.gif + :alt: Feature Tracker with Motion Estimation Demo + :width: 100% + :align: center + + +Setup +##### + +.. include:: /includes/install_from_pypi.rst + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/FeatureTracker/feature_motion_estimation.py + :language: python + :linenos: + + +.. 
include:: /includes/footer-short.rst
\ No newline at end of file
diff --git a/docs/source/samples/UVC/uvc_disparity.rst b/docs/source/samples/UVC/uvc_disparity.rst
new file mode 100644
index 000000000..9136fcafc
--- /dev/null
+++ b/docs/source/samples/UVC/uvc_disparity.rst
@@ -0,0 +1,70 @@
+UVC & Disparity
+===============
+
+This example demonstrates how to use your OAK device as a UVC webcam. The UVC feature allows you to use your OAK device as a regular webcam in applications like OpenCV's :code:`cv2.VideoCapture()`, native camera apps, and more.
+
+.. rubric:: How It Works:
+
+The :ref:`StereoDepth` node outputs the disparity map as UINT8 (GRAY8) image data. However, the :ref:`UVC` node expects the data in NV12 format. To bridge this gap, an intermediary :ref:`ImageManip` node is used to convert the GRAY8 disparity output from the StereoDepth node to NV12 format, which is then passed to the UVC node for streaming.
+This doesn't work with the stereo depth output, since depth is UINT16, which cannot be converted to NV12.
+
+This example also won't work if the subpixel disparity feature is enabled, since that outputs UINT16 as well.
+
+.. rubric:: Similar samples:
+
+- :ref:`UVC & Color Camera`
+- :ref:`UVC & Mono Camera`
+
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+Code used for testing
+#####################
+
+.. code-block:: python
+
+    import cv2
+
+    # Initialize the VideoCapture object; index 1 typically selects the OAK UVC stream when a built-in webcam occupies index 0
+    cap = cv2.VideoCapture(1)
+
+    # Check if the camera opened successfully
+    if not cap.isOpened():
+        print("Error: Could not open camera.")
+        exit()
+
+    # Loop to continuously get frames from the camera
+    while True:
+        ret, frame = cap.read()
+
+        if not ret:
+            print("Error: Could not read frame.")
+            break
+
+        cv2.imshow('Video Feed', frame)
+
+        if cv2.waitKey(1) & 0xFF == ord('q'):
+            break
+
+    cap.release()
+    cv2.destroyAllWindows()
+
+
+Source code
+###########
+
+.. tabs::
+
+   .. tab:: Python
+
+      Also `available on GitHub `__
+
+      .. literalinclude:: ../../../../examples/UVC/uvc_disparity.py
+         :language: python
+         :linenos:
+
+
+.. include:: /includes/footer-short.rst
\ No newline at end of file
diff --git a/docs/source/samples/UVC/uvc_mono.rst b/docs/source/samples/UVC/uvc_mono.rst
new file mode 100644
index 000000000..75801958b
--- /dev/null
+++ b/docs/source/samples/UVC/uvc_mono.rst
@@ -0,0 +1,69 @@
+UVC & Mono Camera
+=================
+
+This example demonstrates how to use a mono camera on your OAK device as a webcam. The UVC feature allows you to use your OAK device as a regular webcam in applications like OpenCV's :code:`cv2.VideoCapture()`, native camera apps, and more.
+
+.. rubric:: How It Works:
+
+The :ref:`MonoCamera` node outputs image data in the GRAY8 format. However, the :ref:`UVC` node expects the data in NV12 format. To bridge this gap, an intermediary :ref:`ImageManip` node is used to convert the GRAY8 output from the MonoCamera node to NV12 format, which is then passed to the UVC node for streaming.
+
+
+.. rubric:: Similar samples:
+
+- :ref:`UVC & Color Camera`
+- :ref:`UVC & Disparity`
+
+
+Setup
+#####
+
+.. include:: /includes/install_from_pypi.rst
+
+Code used for testing
+#####################
+
+..
code-block:: python + + import cv2 + + # Initialize the VideoCapture object to use the default camera (camera index 0 is webcam) + cap = cv2.VideoCapture(1) + + # Check if the camera opened successfully + if not cap.isOpened(): + print("Error: Could not open camera.") + exit() + + # Loop to continuously get frames from the camera + while True: + ret, frame = cap.read() + + if not ret: + print("Error: Could not read frame.") + break + + cv2.imshow('Video Feed', frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + + cap.release() + cv2.destroyAllWindows() + + + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/UVC/uvc_mono.py + :language: python + :linenos: + + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/docs/source/samples/UVC/uvc_rgb.rst b/docs/source/samples/UVC/uvc_rgb.rst new file mode 100644 index 000000000..e3c379cec --- /dev/null +++ b/docs/source/samples/UVC/uvc_rgb.rst @@ -0,0 +1,66 @@ +UVC & Color Camera +================== + +This example demonstrates how to use the RGB camera on your OAK device as a UVC webcam. The :ref:`UVC` node allows you to use your OAK device as a regular webcam in applications like OpenCV's :code:`cv2.VideoCapture()`, native camera apps, and more. + + +.. rubric:: Similar samples: + +- :ref:`UVC & Disparity` +- :ref:`UVC & Mono Camera` + + +Setup +##### + + +.. include:: /includes/install_from_pypi.rst + + +Code used for testing +##################### + +.. code-block:: python + + import cv2 + + # Initialize the VideoCapture object to use the default camera (camera index 0 is webcam) + cap = cv2.VideoCapture(1) + + # Check if the camera opened successfully + if not cap.isOpened(): + print("Error: Could not open camera.") + exit() + + # Loop to continuously get frames from the camera + while True: + ret, frame = cap.read() + + if not ret: + print("Error: Could not read frame.") + break + + cv2.imshow('Video Feed', frame) + + if cv2.waitKey(1) & 0xFF == ord('q'): + break + + cap.release() + cv2.destroyAllWindows() + + + +Source code +########### + +.. tabs:: + + .. tab:: Python + + Also `available on GitHub `__ + + .. literalinclude:: ../../../../examples/UVC/uvc_rgb.py + :language: python + :linenos: + +.. include:: /includes/footer-short.rst \ No newline at end of file diff --git a/docs/source/tutorials/code_samples.rst b/docs/source/tutorials/code_samples.rst index 5decffb24..eed30756a 100644 --- a/docs/source/tutorials/code_samples.rst +++ b/docs/source/tutorials/code_samples.rst @@ -27,6 +27,7 @@ Code Samples ../samples/ToF/* ../samples/VideoEncoder/* ../samples/Warp/* + ../samples/UVC/* ../samples/Yolo/* Code samples are used for automated testing. They are also a great starting point for the DepthAI API, as different node functionalities @@ -72,6 +73,7 @@ are presented with code. - :ref:`Feature Detector` - Feature detection on input frame - :ref:`Feature Tracker` - Feature detection and tracking on input frame +- :ref:`Feature Tracker with Motion Estimation` - Camera movement estimation based on feature tracking .. rubric:: Host side @@ -178,6 +180,12 @@ are presented with code. - :ref:`Warp Mesh` - Displays an image warped with 2 different meshes - :ref:`Interactive Warp Mesh` - Interactively change the warp mesh +.. 
rubric:: UVC + +- :ref:`UVC & Color Camera` - Displays RGB frames from a UVC camera +- :ref:`UVC & Mono Camera` - Displays mono frames from a UVC camera +- :ref:`UVC & Disparity` - Displays disparity frames from a UVC camera + .. rubric:: Yolo - :ref:`RGB & Tiny YOLO` - Runs Tiny YOLO on RGB frames and displays detections on the frame diff --git a/examples/FeatureTracker/feature_motion_estimation.py b/examples/FeatureTracker/feature_motion_estimation.py new file mode 100644 index 000000000..ae0162f9c --- /dev/null +++ b/examples/FeatureTracker/feature_motion_estimation.py @@ -0,0 +1,222 @@ +import numpy as np +import cv2 +from collections import deque +import depthai as dai + + +class CameraMotionEstimator: + def __init__(self, filter_weight=0.5, motion_threshold=0.01, rotation_threshold=0.05): + self.last_avg_flow = np.array([0.0, 0.0]) + self.filter_weight = filter_weight + self.motion_threshold = motion_threshold + self.rotation_threshold = rotation_threshold + + def estimate_motion(self, feature_paths): + most_prominent_motion = "Camera Staying Still" + max_magnitude = 0.0 + avg_flow = np.array([0.0, 0.0]) + total_rotation = 0.0 + vanishing_point = np.array([0.0, 0.0]) + num_features = len(feature_paths) + + print(f"Number of features: {num_features}") + + if num_features == 0: + return most_prominent_motion, vanishing_point + + for path in feature_paths.values(): + if len(path) >= 2: + src = np.array([path[-2].x, path[-2].y]) + dst = np.array([path[-1].x, path[-1].y]) + avg_flow += dst - src + motion_vector = dst + (dst - src) + vanishing_point += motion_vector + rotation = np.arctan2(dst[1] - src[1], dst[0] - src[0]) + total_rotation += rotation + + avg_flow /= num_features + avg_rotation = total_rotation / num_features + vanishing_point /= num_features + + print(f"Average Flow: {avg_flow}") + print(f"Average Rotation: {avg_rotation}") + + avg_flow = (self.filter_weight * self.last_avg_flow + + (1 - self.filter_weight) * avg_flow) + self.last_avg_flow = avg_flow + + flow_magnitude = np.linalg.norm(avg_flow) + rotation_magnitude = abs(avg_rotation) + + if flow_magnitude > max_magnitude and flow_magnitude > self.motion_threshold: + if abs(avg_flow[0]) > abs(avg_flow[1]): + most_prominent_motion = 'Right' if avg_flow[0] < 0 else 'Left' + else: + most_prominent_motion = 'Down' if avg_flow[1] < 0 else 'Up' + max_magnitude = flow_magnitude + + if rotation_magnitude > max_magnitude and rotation_magnitude > self.rotation_threshold: + most_prominent_motion = 'Rotating' + + return most_prominent_motion, vanishing_point + + +class FeatureTrackerDrawer: + + lineColor = (200, 0, 200) + pointColor = (0, 0, 255) + vanishingPointColor = (255, 0, 255) # Violet color for vanishing point + circleRadius = 2 + maxTrackedFeaturesPathLength = 30 + trackedFeaturesPathLength = 10 + + trackedIDs = None + trackedFeaturesPath = None + + def trackFeaturePath(self, features): + + newTrackedIDs = set() + for currentFeature in features: + currentID = currentFeature.id + newTrackedIDs.add(currentID) + + if currentID not in self.trackedFeaturesPath: + self.trackedFeaturesPath[currentID] = deque() + + path = self.trackedFeaturesPath[currentID] + path.append(currentFeature.position) + + while(len(path) > max(1, FeatureTrackerDrawer.trackedFeaturesPathLength)): + path.popleft() + + self.trackedFeaturesPath[currentID] = path + + featuresToRemove = set() + for oldId in self.trackedIDs: + if oldId not in newTrackedIDs: + featuresToRemove.add(oldId) + + for id in featuresToRemove: + self.trackedFeaturesPath.pop(id) + + 
self.trackedIDs = newTrackedIDs + + def drawVanishingPoint(self, img, vanishing_point): + cv2.circle(img, (int(vanishing_point[0]), int(vanishing_point[1])), self.circleRadius, self.vanishingPointColor, -1, cv2.LINE_AA, 0) + + # Define color mapping for directions + direction_colors = { + "Up": (0, 255, 255), # Yellow + "Down": (0, 255, 0), # Green + "Left": (255, 0, 0), # Blue + "Right": (0, 0, 255), # Red + } + + def drawFeatures(self, img, vanishing_point=None, prominent_motion=None): + + # Get the appropriate point color based on the prominent motion + if prominent_motion in self.direction_colors: + point_color = self.direction_colors[prominent_motion] + else: + point_color = self.pointColor + + for featurePath in self.trackedFeaturesPath.values(): + path = featurePath + + for j in range(len(path) - 1): + src = (int(path[j].x), int(path[j].y)) + dst = (int(path[j + 1].x), int(path[j + 1].y)) + cv2.line(img, src, dst, point_color, 1, cv2.LINE_AA, 0) + + j = len(path) - 1 + cv2.circle(img, (int(path[j].x), int(path[j].y)), self.circleRadius, point_color, -1, cv2.LINE_AA, 0) + + # Draw the direction text on the image + if prominent_motion: + font = cv2.FONT_HERSHEY_SIMPLEX + font_scale = 1 + font_thickness = 2 + text_size = cv2.getTextSize(prominent_motion, font, font_scale, font_thickness)[0] + text_x = (img.shape[1] - text_size[0]) // 2 + text_y = text_size[1] + 20 # 20 pixels from the top + + # Get the appropriate color based on the prominent motion + text_color = self.direction_colors.get(prominent_motion, (255, 255, 255)) # Default to white + + # Draw the text + cv2.putText(img, prominent_motion, (text_x, text_y), font, font_scale, text_color, font_thickness, cv2.LINE_AA) + + + # Draw vanishing point if provided + if vanishing_point is not None: + self.drawVanishingPoint(img, vanishing_point) + + def __init__(self, windowName): + self.windowName = windowName + cv2.namedWindow(windowName) + self.trackedIDs = set() + self.trackedFeaturesPath = dict() + +def create_pipeline(): + pipeline = dai.Pipeline() + + # Create a MonoCamera node and set its properties + mono_left = pipeline.create(dai.node.MonoCamera) + mono_left.setCamera("left") + mono_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) + mono_left.setFps(15) + + # Create a FeatureTracker node + feature_tracker_left = pipeline.create(dai.node.FeatureTracker) + + # Create XLinkOut nodes for output streams + xout_tracked_features_left = pipeline.create(dai.node.XLinkOut) + xout_passthrough_left = pipeline.create(dai.node.XLinkOut) + + # Set stream names + xout_tracked_features_left.setStreamName("trackedFeaturesLeft") + xout_passthrough_left.setStreamName("passthroughLeft") + + # Allocate resources for improved performance + num_shaves = 2 + num_memory_slices = 2 + feature_tracker_left.setHardwareResources(num_shaves, num_memory_slices) + + # Link the nodes + mono_left.out.link(feature_tracker_left.inputImage) + feature_tracker_left.passthroughInputImage.link(xout_passthrough_left.input) + feature_tracker_left.outputFeatures.link(xout_tracked_features_left.input) + + return pipeline + + +if __name__ == '__main__': + pipeline = create_pipeline() + with dai.Device(pipeline) as device: + output_features_left_queue = device.getOutputQueue( + "trackedFeaturesLeft", maxSize=4, blocking=False) + passthrough_image_left_queue = device.getOutputQueue( + "passthroughLeft", maxSize=4, blocking=False) + + left_window_name = "Left" + left_feature_drawer = FeatureTrackerDrawer(left_window_name) + camera_estimator_left = 
CameraMotionEstimator( + filter_weight=0.5, motion_threshold=0.3, rotation_threshold=0.5) + + while True: + in_passthrough_frame_left = passthrough_image_left_queue.get() + passthrough_frame_left = in_passthrough_frame_left.getFrame() + left_frame = cv2.cvtColor(passthrough_frame_left, cv2.COLOR_GRAY2BGR) + + tracked_features_left = output_features_left_queue.get().trackedFeatures + motions_left, vanishing_pt_left = camera_estimator_left.estimate_motion( + left_feature_drawer.trackedFeaturesPath) + + left_feature_drawer.trackFeaturePath(tracked_features_left) + left_feature_drawer.drawFeatures(left_frame, vanishing_pt_left, motions_left) + + print("Motions:", motions_left) + cv2.imshow(left_window_name, left_frame) + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/UVC/uvc_disparity.py b/examples/UVC/uvc_disparity.py new file mode 100644 index 000000000..ff5c1949e --- /dev/null +++ b/examples/UVC/uvc_disparity.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python3 + +import time + +import depthai as dai + +pipeline = dai.Pipeline() + +# Define a source - two mono (grayscale) cameras +mono_left = pipeline.createMonoCamera() +mono_right = pipeline.createMonoCamera() + +mono_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) +mono_left.setBoardSocket(dai.CameraBoardSocket.CAM_B) + +mono_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) +mono_right.setBoardSocket(dai.CameraBoardSocket.CAM_C) + +# Create stereo depth +stereo = pipeline.createStereoDepth() +stereo.initialConfig.setConfidenceThreshold(255) +stereo.setLeftRightCheck(True) +# 0.190 values, better for visualization +stereo.setExtendedDisparity(True) + +# Create an UVC (USB Video Class) output node +uvc = pipeline.createUVC() + +# Manip for frame type conversion +manip = pipeline.createImageManip() +manip.initialConfig.setResize(1280, 720) +manip.initialConfig.setFrameType(dai.RawImgFrame.Type.NV12) +manip.initialConfig.setColormap(dai.Colormap.STEREO_TURBO, stereo.initialConfig.getMaxDisparity()) +manip.setMaxOutputFrameSize(int(1280*720*1.5)) + +# Linking +mono_left.out.link(stereo.left) +mono_right.out.link(stereo.right) +stereo.disparity.link(manip.inputImage) +manip.out.link(uvc.input) + +# Note: if the pipeline is sent later to device (using startPipeline()), +# it is important to pass the device config separately when creating the device +config = dai.Device.Config() +config.board.uvc = dai.BoardConfig.UVC(1280, 720) +config.board.uvc.frameType = dai.ImgFrame.Type.NV12 +# config.board.uvc.cameraName = "My Custom Cam" +pipeline.setBoardConfig(config.board) + + +# Standard UVC load with depthai +with dai.Device(pipeline) as device: + # Dot projector + device.setIrLaserDotProjectorBrightness(765) + print("\nDevice started, please keep this process running") + print("and open an UVC viewer to check the camera stream.") + print("\nTo close: Ctrl+C") + + # Doing nothing here, just keeping the host feeding the watchdog + while True: + try: + time.sleep(0.1) + except KeyboardInterrupt: + break \ No newline at end of file diff --git a/examples/UVC/uvc_mono.py b/examples/UVC/uvc_mono.py new file mode 100644 index 000000000..62929ed96 --- /dev/null +++ b/examples/UVC/uvc_mono.py @@ -0,0 +1,50 @@ +#!/usr/bin/env python3 + +import time + +import depthai as dai + +pipeline = dai.Pipeline() + +# Define a source - left mono (grayscale) camera +mono_left = pipeline.createMonoCamera() + +mono_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) 
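+# 720p gives 1280x720 frames, matching the 1280x720 NV12 stream configured for the
+# UVC node below (ImageManip then only needs to convert GRAY8 to NV12).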
+mono_left.setBoardSocket(dai.CameraBoardSocket.CAM_B) + +# Create an UVC (USB Video Class) output node +uvc = pipeline.createUVC() + +# Manip for frame type conversion +manip = pipeline.createImageManip() +manip.initialConfig.setResize(1280, 720) +manip.initialConfig.setFrameType(dai.RawImgFrame.Type.NV12) +manip.setMaxOutputFrameSize(int(1280*720*1.5)) + +# Linking +manip.out.link(uvc.input) +mono_left.out.link(manip.inputImage) + +# Note: if the pipeline is sent later to device (using startPipeline()), +# it is important to pass the device config separately when creating the device +config = dai.Device.Config() +config.board.uvc = dai.BoardConfig.UVC(1280, 720) +config.board.uvc.frameType = dai.ImgFrame.Type.NV12 +# config.board.uvc.cameraName = "My Custom Cam" +pipeline.setBoardConfig(config.board) + + +# Standard UVC load with depthai +with dai.Device(pipeline) as device: + # Dot projector + device.setIrLaserDotProjectorBrightness(765) + print("\nDevice started, please keep this process running") + print("and open an UVC viewer to check the camera stream.") + print("\nTo close: Ctrl+C") + + # Doing nothing here, just keeping the host feeding the watchdog + while True: + try: + time.sleep(0.1) + except KeyboardInterrupt: + break \ No newline at end of file diff --git a/examples/ColorCamera/rgb_uvc.py b/examples/UVC/uvc_rgb.py similarity index 98% rename from examples/ColorCamera/rgb_uvc.py rename to examples/UVC/uvc_rgb.py index 78a27129b..ba8d8da9a 100755 --- a/examples/ColorCamera/rgb_uvc.py +++ b/examples/UVC/uvc_rgb.py @@ -18,7 +18,7 @@ import depthai as dai def getPipeline(): - enable_4k = True # Will downscale 4K -> 1080p + enable_4k = False # Will downscale 4K -> 1080p pipeline = dai.Pipeline()