Fix cfg handling
narsenox committed Aug 1, 2024
1 parent bdcda1b commit 83b8347
Showing 3 changed files with 155 additions and 60 deletions.
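
The change moves the examples off the RVC4-specific dai.FeatureTrackerConfigRvc4 type and its top-level setters (setHarrisCornerDetectorThreshold, setHarrisCornerDetectorRobustness, setNumMaxFeatures) onto the generic dai.FeatureTrackerConfig, where the corner detector, its thresholds, and the motion estimator are configured through sub-objects. Below is a minimal sketch of the new pattern, assembled only from calls that appear in the diff; the literal values mirror the defaults used in the examples and are otherwise illustrative.

import depthai as dai

# Build a feature tracker config the way the updated examples do.
cfg = dai.FeatureTrackerConfig()

# Corner detector settings now live on a CornerDetector sub-object
# instead of top-level setters on the config.
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = 256

# The Harris score threshold moves into a Thresholds sub-object.
thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = 2000
cornerDetector.thresholds = thresholds

# The motion estimator is passed as an object rather than a bool.
motionEstimator = dai.FeatureTrackerConfig.MotionEstimator()
motionEstimator.enable = True

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

# The assembled config is then sent over the tracker's input config queue,
# e.g. inputConfigQueue.send(cfg) in the examples below.

The same sub-objects are also applied to featureTrackerLeft.initialConfig before the pipeline starts, replacing the removed initialConfig setter calls.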
File 1 of 3
@@ -12,24 +12,29 @@ def drawFeatures(frame, features):
for feature in features:
# descriptor = feature.descriptor
# print(descriptor)
if feature.age < 2:
cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, greenColor, -1, cv2.LINE_AA, 0)
else:
if feature.age > 2:
cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, pointColor, -1, cv2.LINE_AA, 0)
# elif feature.age < 255:
# cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, (0, 0, feature.age), -1, cv2.LINE_AA, 0)
else:
cv2.circle(frame, (int(feature.position.x), int(feature.position.y)), circleRadius, greenColor, -1, cv2.LINE_AA, 0)

class HostCamera(dai.node.ThreadedHostNode):
def __init__(self):
dai.node.ThreadedHostNode.__init__(self)
self.output = dai.Node.Output(self)
def run(self):
# Create a VideoCapture object
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("/home/nebojsa/Documents/depthai-device-kb/external/depthai-core/examples/python/models/construction_vest.mp4")
# cap = cv2.VideoCapture(0)
if not cap.isOpened():
pipeline.stop()
raise RuntimeError("Error: Couldn't open host camera")
while self.isRunning():
# frame = depthImg
ret, frame = cap.read()
# frame = cv2.imread("/home/nebojsa/Downloads/image.jpg")
frame = cv2.resize(frame, (640, 640), interpolation = cv2.INTER_LINEAR)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
imgFrame = dai.ImgFrame()
imgFrame.setData(frame)
@@ -52,12 +57,15 @@ def run(self):
initialRobustness = 100

def on_trackbar(val):
cfg = dai.FeatureTrackerConfigRvc4()
cfg = dai.FeatureTrackerConfig()
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')

cfg.setHarrisCornerDetectorThreshold(cv2.getTrackbarPos('harris_score','Features'))
cfg.setHarrisCornerDetectorRobustness(cv2.getTrackbarPos('robustness','Features'))
cfg.setNumMaxFeatures(cv2.getTrackbarPos('numMaxFeatures', 'Features'))
thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')
cornerDetector.thresholds = thresholds

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

inputConfigQueue.send(cfg)
@@ -67,7 +75,7 @@ def on_trackbar(val):

# create trackbars threshold and robustness change
cv2.createTrackbar('harris_score','Features',2000,25000, on_trackbar)
cv2.createTrackbar('robustness','Features',100,127, on_trackbar)
# cv2.createTrackbar('robustness','Features',100,127, on_trackbar)
cv2.createTrackbar('numMaxFeatures','Features',256,1024, on_trackbar)


@@ -77,12 +85,24 @@ def on_trackbar(val):
hostCamera = pipeline.create(HostCamera)
camQueue = hostCamera.output.createOutputQueue()

featureTrackerLeft = pipeline.create(dai.node.FeatureTrackerRvc4)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)

featureTrackerLeft.initialConfig.setHarrisCornerDetectorThreshold(initialThreshold)
featureTrackerLeft.initialConfig.setHarrisCornerDetectorRobustness(initialRobustness)
featureTrackerLeft.initialConfig.setCornerDetector(dai.FeatureTrackerConfigRvc4.CornerDetector.Type.HARRIS)
featureTrackerLeft.initialConfig.setNumMaxFeatures(256)
# featureTrackerLeft.initialConfig.setHarrisCornerDetectorThreshold(initialThreshold)
# featureTrackerLeft.initialConfig.setHarrisCornerDetectorRobustness(initialRobustness)
featureTrackerLeft.initialConfig.setCornerDetector(dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS)
featureTrackerLeft.initialConfig.setMotionEstimator(False)

motionEstimator = dai.FeatureTrackerConfig.MotionEstimator()
motionEstimator.enable = True
featureTrackerLeft.initialConfig.setMotionEstimator(motionEstimator)

cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = 256




# featureTrackerLeft.initialConfig.setNumMaxFeatures(256)


outputFeaturePassthroughQueue = featureTrackerLeft.passthroughInputImage.createOutputQueue()
@@ -91,10 +111,12 @@ def on_trackbar(val):
hostCamera.output.link(featureTrackerLeft.inputImage)

inputConfigQueue = featureTrackerLeft.inputConfig.createInputQueue()
motionEstimator = dai.FeatureTrackerConfigRvc4.MotionEstimator()
motionEstimator.enable = False

thresholds = dai.FeatureTrackerConfigRvc4.CornerDetector.Thresholds()
thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')

cornerDetector.thresholds = thresholds
featureTrackerLeft.initialConfig.setCornerDetector(cornerDetector)

pipeline.start()
while pipeline.isRunning():
@@ -118,10 +140,17 @@ def on_trackbar(val):
if key == ord('q'):
break
elif key == ord('m'):
cfg = dai.FeatureTrackerConfigRvc4()
cfg.setHarrisCornerDetectorThreshold(cv2.getTrackbarPos('harris_score','Features'))
cfg.setHarrisCornerDetectorRobustness(cv2.getTrackbarPos('robustness','Features'))
cfg.setNumMaxFeatures(cv2.getTrackbarPos('numMaxFeatures', 'Features'))
cfg = dai.FeatureTrackerConfig()
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')

thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')
cornerDetector.thresholds = thresholds

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

if motionEstimator.enable == False:
motionEstimator.enable = True
cfg.setMotionEstimator(motionEstimator)
File 2 of 3
@@ -11,13 +11,16 @@ def __init__(self):
self.output = dai.Node.Output(self)
def run(self):
# Create a VideoCapture object
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("/home/nebojsa/Documents/depthai-device-kb/external/depthai-core/examples/python/models/construction_vest.mp4")
# cap = cv2.VideoCapture(0)
if not cap.isOpened():
pipeline.stop()
raise RuntimeError("Error: Couldn't open host camera")
while self.isRunning():
# frame = depthImg
ret, frame = cap.read()
# frame = cv2.imread("/home/nebojsa/Downloads/image.jpg")
frame = cv2.resize(frame, (640, 640), interpolation = cv2.INTER_LINEAR)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
imgFrame = dai.ImgFrame()
imgFrame.setData(frame)
@@ -109,21 +112,23 @@ def __init__(self, trackbarName, windowName):
initialRobustness = 100

def on_trackbar(val):
cfg = dai.FeatureTrackerConfigRvc4()
cfg = dai.FeatureTrackerConfig()
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')

cfg.setHarrisCornerDetectorThreshold(cv2.getTrackbarPos('harris_score','Features'))
cfg.setHarrisCornerDetectorRobustness(cv2.getTrackbarPos('robustness','Features'))
cfg.setNumMaxFeatures(cv2.getTrackbarPos('numMaxFeatures', 'Features'))
thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')
cornerDetector.thresholds = thresholds

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

inputConfigQueue.send(cfg)

cv2.namedWindow('Features')

# create trackbars threshold and robustness change
cv2.createTrackbar('harris_score','Features',2000,25000, on_trackbar)
cv2.createTrackbar('robustness','Features',100,127, on_trackbar)
cv2.createTrackbar('harris_score','Features',20000,25000, on_trackbar)
cv2.createTrackbar('numMaxFeatures','Features',256,1024, on_trackbar)

# Connect to device and start pipeline
@@ -132,19 +137,22 @@ def on_trackbar(val):
hostCamera = pipeline.create(HostCamera)
camQueue = hostCamera.output.createOutputQueue()

featureTrackerLeft = pipeline.create(dai.node.FeatureTrackerRvc4)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)

featureTrackerLeft.initialConfig.setHarrisCornerDetectorThreshold(initialThreshold)
featureTrackerLeft.initialConfig.setHarrisCornerDetectorRobustness(initialRobustness)
featureTrackerLeft.initialConfig.setCornerDetector(dai.FeatureTrackerConfigRvc4.CornerDetector.Type.HARRIS)
featureTrackerLeft.initialConfig.setNumMaxFeatures(256)

# Disable optical flow
featureTrackerLeft.initialConfig.setMotionEstimator(True)

# variable to keep track of state
motionEstimator = dai.FeatureTrackerConfigRvc4.MotionEstimator()
motionEstimator.enable = False

# featureTrackerLeft.initialConfig.setHarrisCornerDetectorThreshold(initialThreshold)
# featureTrackerLeft.initialConfig.setHarrisCornerDetectorRobustness(initialRobustness)
featureTrackerLeft.initialConfig.setCornerDetector(dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS)
featureTrackerLeft.initialConfig.setMotionEstimator(False)

motionEstimator = dai.FeatureTrackerConfig.MotionEstimator()
motionEstimator.enable = True
featureTrackerLeft.initialConfig.setMotionEstimator(motionEstimator)

cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = 256

# featureTrackerLeft.initialConfig.setNumMaxFeatures(256)

outputFeaturePassthroughQueue = featureTrackerLeft.passthroughInputImage.createOutputQueue()
outputFeatureQueue = featureTrackerLeft.outputFeatures.createOutputQueue()
@@ -154,6 +162,12 @@ def on_trackbar(val):
# config = featureTrackerLeft.initialConfig
inputConfigQueue = featureTrackerLeft.inputConfig.createInputQueue()

thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')

cornerDetector.thresholds = thresholds
featureTrackerLeft.initialConfig.setCornerDetector(cornerDetector)

leftWindowName = "Features"
leftFeatureDrawer = FeatureTrackerDrawer("Feature tracking duration (frames)", leftWindowName)

@@ -181,10 +195,17 @@ def on_trackbar(val):
if key == ord('q'):
break
elif key == ord('m'):
cfg = dai.FeatureTrackerConfigRvc4()
cfg.setHarrisCornerDetectorThreshold(cv2.getTrackbarPos('harris_score','Features'))
cfg.setHarrisCornerDetectorRobustness(cv2.getTrackbarPos('robustness','Features'))
cfg.setNumMaxFeatures(cv2.getTrackbarPos('numMaxFeatures', 'Features'))
cfg = dai.FeatureTrackerConfig()
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')

thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')
cornerDetector.thresholds = thresholds

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

if motionEstimator.enable == False:
motionEstimator.enable = True
cfg.setMotionEstimator(motionEstimator)
File 3 of 3
@@ -11,13 +11,16 @@ def __init__(self):
self.output = dai.Node.Output(self)
def run(self):
# Create a VideoCapture object
cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("/home/nebojsa/Documents/depthai-device-kb/external/depthai-core/examples/python/models/construction_vest.mp4")
# cap = cv2.VideoCapture(0)
if not cap.isOpened():
pipeline.stop()
raise RuntimeError("Error: Couldn't open host camera")
while self.isRunning():
# frame = depthImg
ret, frame = cap.read()
# frame = cv2.imread("/home/nebojsa/Downloads/image.jpg")
frame = cv2.resize(frame, (640, 640), interpolation = cv2.INTER_LINEAR)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
imgFrame = dai.ImgFrame()
imgFrame.setData(frame)
@@ -194,22 +197,25 @@ def __init__(self, windowName):
initialRobustness = 100

def on_trackbar(val):
cfg = dai.FeatureTrackerConfigRvc4()
cfg = dai.FeatureTrackerConfig()
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')

cfg.setHarrisCornerDetectorThreshold(cv2.getTrackbarPos('harris_score','Features'))
cfg.setHarrisCornerDetectorRobustness(cv2.getTrackbarPos('robustness','Features'))
cfg.setNumMaxFeatures(cv2.getTrackbarPos('numMaxFeatures', 'Features'))
cfg.setMotionEstimator(True)
thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')
cornerDetector.thresholds = thresholds

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

inputConfigQueue.send(cfg)


cv2.namedWindow('HostCamera')
cv2.namedWindow('Features')

# create trackbars threshold and robustness change
cv2.createTrackbar('harris_score','Features',2000,25000, on_trackbar)
cv2.createTrackbar('robustness','Features',100,127, on_trackbar)
# create trackbars threshold and numMaxFeatures change
cv2.createTrackbar('harris_score','Features',20000,25000, on_trackbar)
cv2.createTrackbar('numMaxFeatures','Features',256,1024, on_trackbar)

# Connect to device and start pipeline
@@ -218,12 +224,21 @@ def on_trackbar(val):
hostCamera = pipeline.create(HostCamera)
camQueue = hostCamera.output.createOutputQueue()

featureTrackerLeft = pipeline.create(dai.node.FeatureTrackerRvc4)
featureTrackerLeft = pipeline.create(dai.node.FeatureTracker)

# featureTrackerLeft.initialConfig.setHarrisCornerDetectorThreshold(initialThreshold)
# featureTrackerLeft.initialConfig.setHarrisCornerDetectorRobustness(initialRobustness)
featureTrackerLeft.initialConfig.setCornerDetector(dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS)
featureTrackerLeft.initialConfig.setMotionEstimator(False)

motionEstimator = dai.FeatureTrackerConfig.MotionEstimator()
motionEstimator.enable = True
featureTrackerLeft.initialConfig.setMotionEstimator(motionEstimator)

featureTrackerLeft.initialConfig.setHarrisCornerDetectorThreshold(initialThreshold)
featureTrackerLeft.initialConfig.setHarrisCornerDetectorRobustness(initialRobustness)
featureTrackerLeft.initialConfig.setCornerDetector(dai.FeatureTrackerConfigRvc4.CornerDetector.Type.HARRIS)
featureTrackerLeft.initialConfig.setNumMaxFeatures(256)
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = 256

# featureTrackerLeft.initialConfig.setNumMaxFeatures(256)

# Enable optical flow
featureTrackerLeft.initialConfig.setMotionEstimator(True)
@@ -234,6 +249,13 @@ def on_trackbar(val):
hostCamera.output.link(featureTrackerLeft.inputImage)

inputConfigQueue = featureTrackerLeft.inputConfig.createInputQueue()

thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')

cornerDetector.thresholds = thresholds
featureTrackerLeft.initialConfig.setCornerDetector(cornerDetector)

cfg = dai.FeatureTrackerConfig()

left_window_name = "Features"
@@ -262,5 +284,28 @@ def on_trackbar(val):
print("Motions:", motions_left)
cv2.imshow(left_window_name, leftFrame)

if cv2.waitKey(1) == ord('q'):
break
key = cv2.waitKey(1)
if key == ord('q'):
break
elif key == ord('m'):
cfg = dai.FeatureTrackerConfig()
cornerDetector = dai.FeatureTrackerConfig.CornerDetector()
cornerDetector.numMaxFeatures = cv2.getTrackbarPos('numMaxFeatures', 'Features')

thresholds = dai.FeatureTrackerConfig.CornerDetector.Thresholds()
thresholds.initialValue = cv2.getTrackbarPos('harris_score','Features')
cornerDetector.thresholds = thresholds

cfg.setCornerDetector(cornerDetector)
cfg.setMotionEstimator(motionEstimator)

if motionEstimator.enable == False:
motionEstimator.enable = True
cfg.setMotionEstimator(motionEstimator)
print("Enabling motionEstimator")
else:
motionEstimator.enable = False
cfg.setMotionEstimator(motionEstimator)
print("Disabling motionEstimator")

inputConfigQueue.send(cfg)
