Updated depth colorization logic #809

Merged · 3 commits · Apr 24, 2023
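The diff applies the same set of changes to every example it touches (plus a docs image and a code-samples index entry): the cv2.normalize + cv2.equalizeHist depth colorization is replaced with a percentile-based mapping, subpixel mode is enabled on the StereoDepth nodes, and cv2.rectangle calls that passed a font constant as the line thickness are fixed. Below is a minimal, self-contained sketch of the new colorization logic; the colorize_depth helper name and the all-zero guard are illustrative additions, not part of the diff.

import cv2
import numpy as np

def colorize_depth(depthFrame):
    # Subsample rows so the percentile computation stays cheap.
    depth_downscaled = depthFrame[::4]
    if np.all(depth_downscaled == 0):
        # Extra guard (not in the PR): avoid taking percentiles of an empty selection.
        min_depth, max_depth = 0, 1
    else:
        # Ignore invalid (zero) depth when picking the lower bound, and clip the
        # top 1% so a few distant readings don't wash out the color map.
        min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
        max_depth = np.percentile(depth_downscaled, 99)
    depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
    return cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

Compared with equalizing the histogram of the whole frame, clamping to the 1st and 99th percentiles (and skipping zero, i.e. invalid, pixels) keeps a handful of depth holes or far outliers from compressing the usable range of the color map.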
@@ -11,7 +11,7 @@ scanning camera for mobile robots.
Demo
####

.. image:: https://user-images.githubusercontent.com/18037362/190861621-b57fd1e3-5a3d-4d79-b1a7-d17a0b78c63e.gif
.. image:: https://user-images.githubusercontent.com/18037362/231822498-6e3699a0-039e-424b-acb2-b246575e91ee.png

Setup
#####
1 change: 1 addition & 0 deletions docs/source/tutorials/code_samples.rst
@@ -126,6 +126,7 @@ are presented with code.

.. rubric:: SpatialDetection

- :ref:`Spatial Calculator Multi-ROI` - Selects multiple ROIs and calculates spatial coordinates for each of them
- :ref:`Spatial location calculator` - Demonstrates how to use the spatial location calculator
- :ref:`RGB & MobilenetSSD with spatial data` - Displays RGB frames with MobileNet detections and spatial coordinates on them
- :ref:`Mono & MobilenetSSD with spatial data` - Displays mono frames with MobileNet detections and spatial coordinates on them
9 changes: 6 additions & 3 deletions examples/SpatialDetection/spatial_calculator_multi_roi.py
@@ -3,6 +3,7 @@
import cv2
import depthai as dai
import math
import numpy as np

# Create pipeline
pipeline = dai.Pipeline()
@@ -29,7 +30,7 @@

stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setLeftRightCheck(True)
stereo.setExtendedDisparity(True)
stereo.setSubpixel(True)
spatialLocationCalculator.inputConfig.setWaitForMessage(False)

# Create 10 ROIs
@@ -65,8 +66,10 @@

depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

spatialData = spatialCalcQueue.get().getSpatialLocations()
23 changes: 11 additions & 12 deletions examples/SpatialDetection/spatial_location_calculator.py
@@ -2,7 +2,7 @@

import cv2
import depthai as dai

import numpy as np
stepSize = 0.05

newConfig = False
@@ -30,12 +30,9 @@
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT)

lrcheck = False
subpixel = False

stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setLeftRightCheck(lrcheck)
stereo.setSubpixel(subpixel)
stereo.setLeftRightCheck(True)
stereo.setSubpixel(True)

# Config
topLeft = dai.Point2f(0.4, 0.4)
@@ -77,8 +74,10 @@

depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

spatialData = spatialCalcQueue.get().getSpatialLocations()
@@ -94,10 +93,10 @@
depthMax = depthData.depthMax

fontType = cv2.FONT_HERSHEY_TRIPLEX
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, 255)
cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, 255)
cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, 255)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)
cv2.putText(depthFrameColor, f"X: {int(depthData.spatialCoordinates.x)} mm", (xmin + 10, ymin + 20), fontType, 0.5, color)
cv2.putText(depthFrameColor, f"Y: {int(depthData.spatialCoordinates.y)} mm", (xmin + 10, ymin + 35), fontType, 0.5, color)
cv2.putText(depthFrameColor, f"Z: {int(depthData.spatialCoordinates.z)} mm", (xmin + 10, ymin + 50), fontType, 0.5, color)
# Show the frame
cv2.imshow("depth", depthFrameColor)

9 changes: 6 additions & 3 deletions examples/SpatialDetection/spatial_mobilenet.py
@@ -60,6 +60,7 @@
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Align depth map to the perspective of RGB camera, on which inference is done
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
stereo.setSubpixel(True)
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())

spatialDetectionNetwork.setBlobPath(nnBlobPath)
@@ -113,8 +114,10 @@

depthFrame = depth.getFrame() # depthFrame values are in millimeters

depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

detections = inDet.detections
@@ -132,7 +135,7 @@
ymin = int(topLeft.y)
xmax = int(bottomRight.x)
ymax = int(bottomRight.y)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)

# Denormalize bounding box
x1 = int(detection.xmin * width)
7 changes: 5 additions & 2 deletions examples/SpatialDetection/spatial_mobilenet_mono.py
@@ -59,6 +59,7 @@

# StereoDepth
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setSubpixel(True)

# Define a neural network that will make predictions based on the source frames
spatialDetectionNetwork.setConfidenceThreshold(0.5)
@@ -116,8 +117,10 @@

depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

detections = inDet.detections
9 changes: 6 additions & 3 deletions examples/SpatialDetection/spatial_tiny_yolo.py
@@ -84,6 +84,7 @@
# Align depth map to the perspective of RGB camera, on which inference is done
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())
stereo.setSubpixel(True)

spatialDetectionNetwork.setBlobPath(nnBlobPath)
spatialDetectionNetwork.setConfidenceThreshold(0.5)
@@ -146,8 +147,10 @@
frame = inPreview.getCvFrame()
depthFrame = depth.getFrame() # depthFrame values are in millimeters

depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

counter+=1
@@ -172,7 +175,7 @@
ymin = int(topLeft.y)
xmax = int(bottomRight.x)
ymax = int(bottomRight.y)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, cv2.FONT_HERSHEY_SCRIPT_SIMPLEX)
cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1)

# Denormalize bounding box
x1 = int(detection.xmin * width)
7 changes: 5 additions & 2 deletions examples/StereoDepth/depth_crop_control.py
@@ -40,6 +40,7 @@
manip.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y)
manip.setMaxOutputFrameSize(monoRight.getResolutionHeight()*monoRight.getResolutionWidth()*3)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
stereo.setSubpixel(True)

# Linking
configIn.out.link(manip.inputConfig)
@@ -62,8 +63,10 @@
depthFrame = inDepth.getFrame() # depthFrame values are in millimeters

# Frame is transformed, the color map will be applied to highlight the depth info
depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

# Frame is ready to be shown
9 changes: 6 additions & 3 deletions examples/mixed/rotated_spatial_detections.py
@@ -4,7 +4,7 @@
import sys
import cv2
import depthai as dai

import numpy as np
'''
Spatial object detections demo for 180° rotated OAK camera.
'''
@@ -58,6 +58,7 @@
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)
# Align depth map to the perspective of RGB camera, on which inference is done
stereo.setDepthAlign(dai.CameraBoardSocket.RGB)
stereo.setSubpixel(True)
stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight())

rotate_stereo_manip = pipeline.createImageManip()
@@ -104,8 +105,10 @@
frame = inPreview.getCvFrame()
depthFrame = depth.getFrame() # depthFrame values are in millimeters

depthFrameColor = cv2.normalize(depthFrame, None, 255, 0, cv2.NORM_INF, cv2.CV_8UC1)
depthFrameColor = cv2.equalizeHist(depthFrameColor)
depth_downscaled = depthFrame[::4]
min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1)
max_depth = np.percentile(depth_downscaled, 99)
depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8)
depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT)

detections = inDet.detections