Merge pull request #981 from roboflow/feature/detectctions_filter_on_steroids

Add new UQL extension - picking up bounding boxes that are inside parent detections of a specific class
PawelPeczek-Roboflow authored Jan 30, 2025
2 parents 8f8dabf + fd8f63f commit 7bb1a67
Showing 13 changed files with 386 additions and 159 deletions.
@@ -536,6 +536,28 @@ class DetectionsRename(OperationDefinition):
    )


class PickDetectionsByParentClass(OperationDefinition):
    model_config = ConfigDict(
        json_schema_extra={
            "description": "Picks only those detections which are located inside "
            "parent detections of a specific class",
            "compound": False,
            "input_kind": [
                OBJECT_DETECTION_PREDICTION_KIND,
                INSTANCE_SEGMENTATION_PREDICTION_KIND,
                KEYPOINT_DETECTION_PREDICTION_KIND,
            ],
            "output_kind": [
                OBJECT_DETECTION_PREDICTION_KIND,
                INSTANCE_SEGMENTATION_PREDICTION_KIND,
                KEYPOINT_DETECTION_PREDICTION_KIND,
            ],
        },
    )
    type: Literal["PickDetectionsByParentClass"]
    parent_class: str = Field(description="Class of parent detections")


AllOperationsType = Annotated[
    Union[
        StringToLowerCase,
@@ -569,6 +591,7 @@ class DetectionsRename(OperationDefinition):
        ConvertImageToBase64,
        DetectionsToDictionary,
        ConvertDictionaryToJSON,
        PickDetectionsByParentClass,
    ],
    Field(discriminator="type"),
]
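
For illustration, a sketch of how the new operation could be referenced from a workflow definition. The surrounding step type and field names below are assumptions (they do not appear in this diff); only the operation entry itself, with its "type" discriminator and "parent_class" field, comes from this change:

step = {
    "type": "roboflow_core/detections_transformation@v1",  # assumed host block
    "name": "keep_detections_inside_cars",
    "predictions": "$steps.model.predictions",  # assumed upstream reference
    "operations": [
        # the discriminator value matches the Literal declared above
        {"type": "PickDetectionsByParentClass", "parent_class": "car"},
    ],
}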
@@ -29,6 +29,7 @@
    extract_detections_property,
    filter_detections,
    offset_detections,
    pick_detections_by_parent_class,
    rename_detections,
    select_detections,
    shift_detections,
@@ -199,6 +200,7 @@ def build_detections_filter_operation(
    "ConvertImageToBase64": encode_image_to_base64,
    "DetectionsToDictionary": detections_to_dictionary,
    "ConvertDictionaryToJSON": dictionary_to_json,
    "PickDetectionsByParentClass": pick_detections_by_parent_class,
}

REGISTERED_COMPOUND_OPERATIONS_BUILDERS = {
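
Dispatch over this registry is a plain dictionary lookup on the operation's "type" discriminator. A schematic sketch only - the registry variable name is an assumption, since the diff does not show the name of the dictionary being extended:

operation = {"type": "PickDetectionsByParentClass", "parent_class": "car"}
handler = REGISTERED_SIMPLE_OPERATIONS[operation["type"]]  # assumed registry name
filtered = handler(
    detections=predictions,  # an sv.Detections instance
    parent_class=operation["parent_class"],
    execution_context="workflow_step",
)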
@@ -351,3 +351,62 @@ def detections_to_dictionary(
            context=f"step_execution | roboflow_query_language_evaluation | {execution_context}",
            inner_error=error,
        )


def pick_detections_by_parent_class(
    detections: Any,
    parent_class: str,
    execution_context: str,
    **kwargs,
) -> sv.Detections:
    if not isinstance(detections, sv.Detections):
        value_as_str = safe_stringify(value=detections)
        raise InvalidInputTypeError(
            public_message=f"Executing pick_detections_by_parent_class(...) in context {execution_context}, "
            f"expected sv.Detections object as value, got {value_as_str} of type {type(detections)}",
            context=f"step_execution | roboflow_query_language_evaluation | {execution_context}",
        )
    try:
        return _pick_detections_by_parent_class(
            detections=detections, parent_class=parent_class
        )
    except Exception as error:
        raise OperationError(
            public_message=f"While using operation pick_detections_by_parent_class(...) in context {execution_context} "
            f"encountered error: {error}",
            context=f"step_execution | roboflow_query_language_evaluation | {execution_context}",
            inner_error=error,
        )


def _pick_detections_by_parent_class(
    detections: sv.Detections,
    parent_class: str,
) -> sv.Detections:
    class_names = detections.data.get("class_name")
    if class_names is None or len(class_names) == 0:
        return sv.Detections.empty()
    if not isinstance(class_names, np.ndarray):
        class_names = np.array(class_names)
    parent_mask = class_names == parent_class
    parent_detections = detections[parent_mask]
    if len(parent_detections) == 0:
        return sv.Detections.empty()
    dependent_detections = detections[~parent_mask]
    dependent_detections_anchors = dependent_detections.get_anchors_coordinates(
        anchor=Position.CENTER
    )
    dependent_detections_to_keep = set()
    for detection_idx, anchor in enumerate(dependent_detections_anchors):
        for parent_detection_box in parent_detections.xyxy:
            if _is_point_within_box(point=anchor, box=parent_detection_box):
                dependent_detections_to_keep.add(detection_idx)
                # one containing parent is enough - no need to check the rest
                break
    detections_to_keep_list = sorted(dependent_detections_to_keep)
    return dependent_detections[detections_to_keep_list]


def _is_point_within_box(point: np.ndarray, box: np.ndarray) -> bool:
    px, py = point
    x1, y1, x2, y2 = box
    return x1 <= px <= x2 and y1 <= py <= y2
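
A minimal demonstration of the helper's semantics on toy data, assuming pick_detections_by_parent_class is importable from this module (values are invented for illustration):

import numpy as np
import supervision as sv

# One "car" parent and two "wheel" detections; only the wheel whose center
# anchor falls inside the car box should survive the filtering.
detections = sv.Detections(
    xyxy=np.array(
        [
            [0.0, 0.0, 100.0, 100.0],  # car (parent)
            [10.0, 10.0, 30.0, 30.0],  # wheel, center (20, 20) inside the car
            [150.0, 150.0, 170.0, 170.0],  # wheel, center outside the car
        ]
    ),
    class_id=np.array([0, 1, 1]),
    data={"class_name": np.array(["car", "wheel", "wheel"])},
)
kept = pick_detections_by_parent_class(
    detections=detections,
    parent_class="car",
    execution_context="docs-example",
)
print(kept.xyxy)  # [[10. 10. 30. 30.]] - parent boxes themselves are dropped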
18 changes: 9 additions & 9 deletions inference_cli/lib/container_adapter.py
@@ -138,7 +138,7 @@ def start_inference_container(
    docker_run_kwargs = {}
    is_gpu = "gpu" in image and "jetson" not in image
    is_jetson = "jetson" in image

    if is_gpu:
        device_requests = [
            docker.types.DeviceRequest(device_ids=["all"], capabilities=[["gpu"]])
@@ -170,7 +170,8 @@
        labels=labels,
        ports=ports,
        device_requests=device_requests,
        environment=environment
        + [
            "MODEL_CACHE_DIR=/tmp/model-cache",
            "TRANSFORMERS_CACHE=/tmp/huggingface",
            "YOLO_CONFIG_DIR=/tmp/yolo",
@@ -182,14 +183,13 @@
        cpu_shares=1024,
        security_opt=["no-new-privileges"] if not is_jetson else None,
        cap_drop=["ALL"] if not is_jetson else None,
        cap_add=(
            (["NET_BIND_SERVICE"] + (["SYS_ADMIN"] if is_gpu else []))
            if not is_jetson
            else None
        ),
        read_only=not is_jetson,
        volumes={"/tmp": {"bind": "/tmp", "mode": "rw"}},
        network_mode="bridge",
        ipc_mode="private" if not is_jetson else None,
        **docker_run_kwargs,
60 changes: 8 additions & 52 deletions tests/inference/unit_tests/core/models/utils/test_keypoints.py
@@ -123,34 +123,12 @@ def test_model_keypoints_to_response() -> None:
    # List of keypoints
    assert result == (
        [
            Keypoint(x=100, y=100, confidence=0.5, class_id=0, **{"class": "nose"}),
            Keypoint(x=200, y=200, confidence=0.5, class_id=1, **{"class": "left_eye"}),
            Keypoint(
                x=300, y=300, confidence=0.5, class_id=2, **{"class": "right_eye"}
            ),
            Keypoint(x=400, y=400, confidence=0.5, class_id=3, **{"class": "left_ear"}),
        ],
    )

@@ -200,33 +178,11 @@ def test_model_keypoints_to_response_padded_points() -> None:
    # List of keypoints
    assert result == (
        [
            Keypoint(x=100, y=100, confidence=0.5, class_id=0, **{"class": "nose"}),
            Keypoint(x=200, y=200, confidence=0.5, class_id=1, **{"class": "left_eye"}),
            Keypoint(
                x=300, y=300, confidence=0.5, class_id=2, **{"class": "right_eye"}
            ),
            Keypoint(x=400, y=400, confidence=0.5, class_id=3, **{"class": "left_ear"}),
        ],
    )
1 change: 0 additions & 1 deletion tests/workflows/integration_tests/execution/conftest.py
@@ -8,7 +8,6 @@
import numpy as np
import pytest


ASSETS_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets"))
ROCK_PAPER_SCISSORS_ASSETS = os.path.join(ASSETS_DIR, "rock_paper_scissors")

@@ -3,12 +3,14 @@
from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS
from inference.core.managers.base import ModelManager
from inference.core.workflows.core_steps.common.entities import StepExecutionMode
from inference.core.workflows.core_steps.transformations.dynamic_zones.v1 import (
    OUTPUT_KEY as DYNAMIC_ZONES_OUTPUT_KEY,
)
from inference.core.workflows.core_steps.transformations.perspective_correction.v1 import (
    OUTPUT_DETECTIONS_KEY as PERSPECTIVE_CORRECTION_OUTPUT_DETECTIONS_KEY,
    OUTPUT_IMAGE_KEY as PERSPECTIVE_CORRECTION_OUTPUT_IMAGE_KEY,
)
from inference.core.workflows.execution_engine.core import ExecutionEngine
from tests.workflows.integration_tests.execution.workflows_gallery_collector.decorators import (
@@ -1,13 +1,18 @@
import threading

import pytest

from inference.enterprise.workflows.enterprise_blocks.sinks.mqtt_writer.v1 import (
    MQTTWriterSinkBlockV1,
)


@pytest.mark.timeout(5)
def test_successful_connection_and_publishing(fake_mqtt_broker):
    # given
    block = MQTTWriterSinkBlockV1()
    published_message = "Test message"
    expected_message = "Message published successfully"

    fake_mqtt_broker.messages_count_to_wait_for = 1
    broker_thread = threading.Thread(target=fake_mqtt_broker.start)
@@ -18,7 +23,7 @@ def test_successful_connection_and_publishing(fake_mqtt_broker):
        host=fake_mqtt_broker.host,
        port=fake_mqtt_broker.port,
        topic="RoboflowTopic",
        message=published_message,
    )

    broker_thread.join(timeout=2)
@@ -27,4 +32,4 @@ def test_successful_connection_and_publishing(fake_mqtt_broker):
    assert result["error_status"] is False, "No error expected"
    assert result["message"] == expected_message

    assert published_message.encode() in fake_mqtt_broker.messages[-1]
@@ -5,7 +5,10 @@
import supervision as sv

from inference.core.workflows.core_steps.analytics.velocity.v1 import VelocityBlockV1
from inference.core.workflows.execution_engine.entities.base import (
    VideoMetadata,
    WorkflowImageData,
)


def test_velocity_block_basic_calculation() -> None: