From 6326f1dc19a65dbf04180caa3dd84227374a5d96 Mon Sep 17 00:00:00 2001 From: javiber Date: Wed, 26 Oct 2022 15:03:49 -0300 Subject: [PATCH] First version of new drawing --- norfair/drawing.py | 1005 ------------------------------ norfair/drawing/__init__.py | 6 + norfair/drawing/absolute_grid.py | 104 ++++ norfair/drawing/color.py | 329 ++++++++++ norfair/drawing/draw_boxes.py | 108 ++++ norfair/drawing/draw_points.py | 224 +++++++ norfair/drawing/drawer.py | 335 ++++++++++ norfair/drawing/fixed_camera.py | 141 +++++ norfair/drawing/path.py | 232 +++++++ norfair/drawing/utils.py | 24 + 10 files changed, 1503 insertions(+), 1005 deletions(-) delete mode 100644 norfair/drawing.py create mode 100644 norfair/drawing/__init__.py create mode 100644 norfair/drawing/absolute_grid.py create mode 100644 norfair/drawing/color.py create mode 100644 norfair/drawing/draw_boxes.py create mode 100644 norfair/drawing/draw_points.py create mode 100644 norfair/drawing/drawer.py create mode 100644 norfair/drawing/fixed_camera.py create mode 100644 norfair/drawing/path.py create mode 100644 norfair/drawing/utils.py diff --git a/norfair/drawing.py b/norfair/drawing.py deleted file mode 100644 index 9cb2565f..00000000 --- a/norfair/drawing.py +++ /dev/null @@ -1,1005 +0,0 @@ -"""Drawing utils""" -import random -from collections import defaultdict -from functools import lru_cache -from typing import TYPE_CHECKING, Callable, Optional, Sequence, Tuple - -import numpy as np - -from .camera_motion import CoordinatesTransformation, TranslationTransformation -from .utils import validate_points, warn_once - -try: - import cv2 -except ImportError: - from .utils import DummyOpenCVImport - - cv2 = DummyOpenCVImport() - - -if TYPE_CHECKING: - from norfair.tracker import Detection, TrackedObject - - -def draw_points( - frame: np.ndarray, - detections: Sequence["Detection"], - radius: Optional[int] = None, - thickness: Optional[int] = None, - color: Optional[Tuple[int, int, int]] = None, - 
color_by_label: bool = False, - draw_labels: bool = False, - label_size: Optional[int] = None, -): - """ - Draw a list of detections on a frame. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame to draw on. Modified in place. - detections : Sequence[Detection] - List of [`Detection`][norfair.tracker.Detection] to be drawn. - radius : Optional[int], optional - Radius of the circles representing the detected points. - thickness : Optional[int], optional - Thickness of the circles representing the detected points. - color : Optional[Tuple[int, int, int]], optional - [Color][norfair.drawing.Color] of the circles representing the detected points. - color_by_label : bool, optional - If `True` detections will be colored by label. - draw_labels : bool, optional - If `True` the detection's label will be drawn along with the detected points. - label_size : Optional[int], optional - Size of the label being drawn along with the detected points. - """ - if detections is None: - return - frame_scale = frame.shape[0] / 100 - if radius is None: - radius = int(max(frame_scale * 0.7, 1)) - if thickness is None: - thickness = int(max(frame_scale / 7, 1)) - if label_size is None: - label_size = int(max(frame_scale / 100, 1)) - if color is None: - color = Color.red - for d in detections: - if color_by_label: - color = Color.random(abs(hash(d.label))) - points = d.points - points = validate_points(points) - for point in points: - cv2.circle( - frame, - tuple(point.astype(int)), - radius=radius, - color=color, - thickness=thickness, - ) - - if draw_labels: - label_draw_position = np.array([min(points[:, 0]), min(points[:, 1])]) - label_draw_position -= radius - cv2.putText( - frame, - f"L: {d.label}", - tuple(label_draw_position.astype(int)), - cv2.FONT_HERSHEY_SIMPLEX, - label_size, - color, - thickness, - cv2.LINE_AA, - ) - - -def draw_tracked_objects( - frame: np.ndarray, - objects: Sequence["TrackedObject"], - radius: Optional[int] = None, - color: 
Optional[Tuple[int, int, int]] = None, - id_size: Optional[float] = None, - id_thickness: Optional[int] = None, - draw_points: bool = True, - color_by_label: bool = False, - draw_labels: bool = False, - label_size: Optional[int] = None, -): - """ - Draw a list of tracked objects on a frame. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame to draw on. Modified in place. - objects : Sequence[TrackedObject] - List of [`TrackedObject`][norfair.tracker.TrackedObject] to be drawn. - radius : Optional[int], optional - Radius of the circles representing the points estimated by the tracked objects. - color : Optional[Tuple[int, int, int]], optional - [Color][norfair.drawing.Color] of the circles representing the points estimated by the tracked objects. - id_size : Optional[float], optional - Size of the id number being drawn on each tracked object. The id wont get drawn if `id_size` is set to 0. - id_thickness : Optional[int], optional - Thickness of the id number being drawn on each tracked object. - draw_points : bool, optional - Boolean determining if the function should draw the points estimated by the tracked objects. - If set to `True` the points get drawn, if set to `False` only the id numbers get drawn. - color_by_label : bool, optional - If `True` objects will be colored by label. - draw_labels : bool, optional - If `True` the objects's label will be drawn along with the tracked points. - label_size : Optional[int], optional - Size of the label being drawn along with the tracked points. 
- """ - frame_scale = frame.shape[0] / 100 - if radius is None: - radius = int(frame_scale * 0.5) - if id_size is None: - id_size = frame_scale / 10 - if id_thickness is None: - id_thickness = int(frame_scale / 5) - if label_size is None: - label_size = int(max(frame_scale / 100, 1)) - - for obj in objects: - if not obj.live_points.any(): - continue - if color_by_label: - point_color = Color.random(abs(hash(obj.label))) - id_color = point_color - elif color is None: - object_id = obj.id if obj.id is not None else random.randint(0, 999) - point_color = Color.random(object_id) - id_color = point_color - else: - point_color = color - id_color = color - - if draw_points: - for point, live in zip(obj.estimate, obj.live_points): - if live: - cv2.circle( - frame, - tuple(point.astype(int)), - radius=radius, - color=point_color, - thickness=-1, - ) - - if draw_labels: - points = obj.estimate[obj.live_points] - points = points.astype(int) - label_draw_position = np.array([min(points[:, 0]), min(points[:, 1])]) - label_draw_position -= radius - cv2.putText( - frame, - f"L: {obj.label}", - tuple(label_draw_position), - cv2.FONT_HERSHEY_SIMPLEX, - label_size, - point_color, - id_thickness, - cv2.LINE_AA, - ) - - if id_size > 0: - id_draw_position = _centroid(obj.estimate[obj.live_points]) - cv2.putText( - frame, - str(obj.id), - id_draw_position, - cv2.FONT_HERSHEY_SIMPLEX, - id_size, - id_color, - id_thickness, - cv2.LINE_AA, - ) - - -def draw_debug_metrics( - frame: np.ndarray, - objects: Sequence["TrackedObject"], - text_size: Optional[float] = None, - text_thickness: Optional[int] = None, - color: Optional[Tuple[int, int, int]] = None, - only_ids=None, - only_initializing_ids=None, - draw_score_threshold: float = 0, - color_by_label: bool = False, - draw_labels: bool = False, -): - """Draw objects with their debug information - - It is recommended to set the input variable `objects` to `your_tracker_object.objects` - so you can also debug objects wich haven't finished 
initializing, and you get a more - complete view of what your tracker is doing on each step. - """ - frame_scale = frame.shape[0] / 100 - if text_size is None: - text_size = frame_scale / 10 - if text_thickness is None: - text_thickness = int(frame_scale / 5) - radius = int(frame_scale * 0.5) - - for obj in objects: - if ( - not (obj.last_detection.scores is None) - and not (obj.last_detection.scores > draw_score_threshold).any() - ): - continue - if only_ids is not None: - if obj.id not in only_ids: - continue - if only_initializing_ids is not None: - if obj.initializing_id not in only_initializing_ids: - continue - if color_by_label: - text_color = Color.random(abs(hash(obj.label))) - elif color is None: - text_color = Color.random(obj.initializing_id) - else: - text_color = color - draw_position = _centroid( - obj.estimate[obj.last_detection.scores > draw_score_threshold] - if obj.last_detection.scores is not None - else obj.estimate - ) - - for point in obj.estimate: - cv2.circle( - frame, - tuple(point.astype(int)), - radius=radius, - color=text_color, - thickness=-1, - ) - - # Distance to last matched detection - if obj.last_distance is None: - last_dist = "-" - elif obj.last_distance > 999: - last_dist = ">" - else: - last_dist = "{:.2f}".format(obj.last_distance) - - # Distance to currently closest detection - if obj.current_min_distance is None: - current_min_dist = "-" - else: - current_min_dist = "{:.2f}".format(obj.current_min_distance) - - # No support for multiline text in opencv :facepalm: - lines_to_draw = [ - "{}|{}".format(obj.id, obj.initializing_id), - "a:{}".format(obj.age), - "h:{}".format(obj.hit_counter), - "ld:{}".format(last_dist), - "cd:{}".format(current_min_dist), - ] - if draw_labels: - lines_to_draw.append("l:{}".format(obj.label)) - - for i, line in enumerate(lines_to_draw): - draw_position = ( - int(draw_position[0]), - int(draw_position[1] + i * text_size * 7 + 15), - ) - cv2.putText( - frame, - line, - draw_position, - 
cv2.FONT_HERSHEY_SIMPLEX, - text_size, - text_color, - text_thickness, - cv2.LINE_AA, - ) - - -def _centroid(tracked_points: np.ndarray) -> Tuple[int, int]: - num_points = tracked_points.shape[0] - sum_x = np.sum(tracked_points[:, 0]) - sum_y = np.sum(tracked_points[:, 1]) - return int(sum_x / num_points), int(sum_y / num_points) - - -def draw_boxes( - frame: np.ndarray, - detections: Sequence["Detection"], - line_color: Optional[Tuple[int, int, int]] = None, - line_width: Optional[int] = None, - random_color: bool = False, - color_by_label: bool = False, - draw_labels: bool = False, - label_size: Optional[int] = None, -): - """ - Draw draws a list of detections as boxes on a frame. - - This function uses the first 2 points of your [`Detection`][norfair.tracker.Detection] - instances to draw a box with those points as its corners. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame to draw on. - detections : Sequence[Detection] - List of [`Detection`](#detection)s to be drawn. - line_color : Optional[Tuple[int, int, int]], optional - [Color][norfair.drawing.Color] of the boxes representing the detections. - line_width : Optional[int], optional - Width of the lines constituting the sides of the boxes representing the detections. - random_color : bool, optional - If `True` each detection will be colored with a random color. - color_by_label : bool, optional - If `True` detections will be colored by label. - draw_labels : bool, optional - If `True` the detection's label will be drawn along with the detected boxes. - label_size : Optional[int], optional - Size of the label being drawn along with the detected boxes. - - Returns - ------- - np.array - The frame. 
- """ - frame_scale = frame.shape[0] / 100 - if detections is None: - return frame - if line_width is None: - line_width = int(max(frame_scale / 7, 1)) - if line_color is None: - line_color = Color.red - if label_size is None: - label_size = int(max(frame_scale / 100, 1)) - for d in detections: - if color_by_label: - line_color = Color.random(abs(hash(d.label))) - elif random_color: - line_color = Color.random(random.randint(0, 20)) - points = d.points - points = validate_points(points) - points = points.astype(int) - cv2.rectangle( - frame, - tuple(points[0, :]), - tuple(points[1, :]), - color=line_color, - thickness=line_width, - ) - - if draw_labels: - label_draw_position = np.array(points[0, :]) - cv2.putText( - frame, - f"L: {d.label}", - tuple(label_draw_position), - cv2.FONT_HERSHEY_SIMPLEX, - label_size, - line_color, - line_width, - cv2.LINE_AA, - ) - - return frame - - -def draw_tracked_boxes( - frame: np.ndarray, - objects: Sequence["TrackedObject"], - border_colors: Optional[Tuple[int, int, int]] = None, - border_width: Optional[int] = None, - id_size: Optional[int] = None, - id_thickness: Optional[int] = None, - draw_box: bool = True, - color_by_label: bool = False, - draw_labels: bool = False, - label_size: Optional[int] = None, - label_width: Optional[int] = None, -) -> np.array: - """ - Draw draws a list of tracked objects on a frame. - - This function uses the first 2 points of your [`TrackedObject`][norfair.tracker.TrackedObject] - instances to draw a box with those points as its corners. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame to draw on. - objects : Sequence[TrackedObject] - List of [`TrackedObject`][norfair.tracker.TrackedObject] to be drawn. - border_colors : Optional[Tuple[int, int, int]], optional - [Color][norfair.drawing.Color] of the boxes representing the tracked objects. - border_width : Optional[int], optional - Width of the lines constituting the sides of the boxes representing the tracked objects. 
- id_size : Optional[int], optional - Size of the id number being drawn on each tracked object. The id wont get drawn if `id_size` is set to 0. - id_thickness : Optional[int], optional - Thickness of the id number being drawn on each tracked object. - draw_box : bool, optional - Boolean determining if the function should draw the boxes estimated by the tracked objects. - - If set to `True` the boxes get drawn, if set to `False` only the id numbers get drawn. - color_by_label : bool, optional - If `True` objects will be colored by label. - draw_labels : bool, optional - If `True` the objects's label will be drawn along with the tracked boxes. - label_size : Optional[int], optional - Size of the label being drawn along with the tracked boxes. - label_width : Optional[int], optional - Thickness of the label being drawn along with the tracked boxes. - - Returns - ------- - np.array - The frame - """ - - frame_scale = frame.shape[0] / 100 - if border_width is None: - border_width = int(frame_scale * 0.5) - if label_width is None: - label_width = int(max(frame_scale / 7, 2)) - if label_size is None: - label_size = int(max(frame_scale / 100, 1)) - if id_size is None: - id_size = frame_scale / 10 - if id_thickness is None: - id_thickness = int(frame_scale / 5) - if isinstance(border_colors, tuple): - border_colors = [border_colors] - - for n, obj in enumerate(objects): - if not obj.live_points.any(): - continue - if color_by_label: - color = Color.random(abs(hash(obj.label))) - elif border_colors is None: - color = Color.random(obj.id) - else: - color = border_colors[n % len(border_colors)] - - points = obj.estimate - if draw_box: - points = points.astype(int) - cv2.rectangle( - frame, - tuple(points[0, :]), - tuple(points[1, :]), - color=color, - thickness=border_width, - ) - - if draw_labels: - label_draw_position = np.array(points[0, :]) - cv2.putText( - frame, - f"L: {obj.label}", - tuple(label_draw_position), - cv2.FONT_HERSHEY_SIMPLEX, - label_size, - color, - 
label_width, - cv2.LINE_AA, - ) - - if id_size > 0: - id_draw_position = np.mean(points, axis=0) - id_draw_position = id_draw_position.astype(int) - cv2.putText( - frame, - str(obj.id), - tuple(id_draw_position), - cv2.FONT_HERSHEY_SIMPLEX, - id_size, - color, - id_thickness, - cv2.LINE_AA, - ) - return frame - - -class Paths: - """ - Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation. - - Parameters - ---------- - get_points_to_draw : Optional[Callable[[np.array], np.array]], optional - Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject]) - and returns a list of points for which we want to draw their paths. - - By default it is the mean point of all the points in the tracker. - thickness : Optional[int], optional - Thickness of the circles representing the paths of interest. - color : Optional[Tuple[int, int, int]], optional - [Color][norfair.drawing.Color] of the circles representing the paths of interest. - radius : Optional[int], optional - Radius of the circles representing the paths of interest. - attenuation : float, optional - A float number in [0, 1] that dictates the speed at which the path is erased. - if it is `0` then the path is never erased. - - Examples - -------- - >>> from norfair import Tracker, Video, Path - >>> video = Video("video.mp4") - >>> tracker = Tracker(...) 
- >>> path_drawer = Path() - >>> for frame in video: - >>> detections = get_detections(frame) # runs detector and returns Detections - >>> tracked_objects = tracker.update(detections) - >>> frame = path_drawer.draw(frame, tracked_objects) - >>> video.write(frame) - """ - - def __init__( - self, - get_points_to_draw: Optional[Callable[[np.array], np.array]] = None, - thickness: Optional[int] = None, - color: Optional[Tuple[int, int, int]] = None, - radius: Optional[int] = None, - attenuation: float = 0.01, - ): - if get_points_to_draw is None: - - def get_points_to_draw(points): - return [np.mean(np.array(points), axis=0)] - - self.get_points_to_draw = get_points_to_draw - - self.radius = radius - self.thickness = thickness - self.color = color - self.mask = None - self.attenuation_factor = 1 - attenuation - - def draw( - self, frame: np.ndarray, tracked_objects: Sequence["TrackedObject"] - ) -> np.array: - """ - Draw the paths of the points interest on a frame. - - !!! warning - This method does **not** draw frames in place as other drawers do, the resulting frame is returned. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame to draw on. - tracked_objects : Sequence[TrackedObject] - List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths. - - Returns - ------- - np.array - The resulting frame. - """ - if self.mask is None: - frame_scale = frame.shape[0] / 100 - - if self.radius is None: - self.radius = int(max(frame_scale * 0.7, 1)) - if self.thickness is None: - self.thickness = int(max(frame_scale / 7, 1)) - - self.mask = np.zeros(frame.shape, np.uint8) - - self.mask = (self.mask * self.attenuation_factor).astype("uint8") - - for obj in tracked_objects: - if obj.abs_to_rel is not None: - warn_once( - "It seems that your using the Path drawer together with MotionEstimator. 
This is not fully supported and the results will not be what's expected" - ) - - if self.color is None: - color = Color.random(obj.id) - else: - color = self.color - - points_to_draw = self.get_points_to_draw(obj.estimate) - - for point in points_to_draw: - cv2.circle( - self.mask, - tuple(point.astype(int)), - radius=self.radius, - color=color, - thickness=self.thickness, - ) - - return cv2.addWeighted(self.mask, 1, frame, 1, 0, frame) - - -def _draw_cross(frame, center, radius, color, thickness): - middle_x, middle_y = center - left, top = center - radius - right, bottom = center + radius - cv2.line( - frame, (middle_x, top), (middle_x, bottom), color=color, thickness=thickness - ) - cv2.line( - frame, (left, middle_y), (right, middle_y), color=color, thickness=thickness - ) - - -class Color: - """ - Object which represents an OpenCV color. - - Its properties are the colors which it can represent. - For example, set `Color.blue` to get the OpenCV tuple representing the color blue. - """ - - green = (0, 128, 0) - white = (255, 255, 255) - olive = (0, 128, 128) - black = (0, 0, 0) - navy = (128, 0, 0) - red = (0, 0, 255) - maroon = (0, 0, 128) - grey = (128, 128, 128) - purple = (128, 0, 128) - yellow = (0, 255, 255) - lime = (0, 255, 0) - fuchsia = (255, 0, 255) - aqua = (255, 255, 0) - blue = (255, 0, 0) - teal = (128, 128, 0) - silver = (192, 192, 192) - - @staticmethod - def random(obj_id: int) -> Tuple[int, int, int]: - color_list = [ - c - for c in Color.__dict__.keys() - if c[:2] != "__" - and c not in ("random", "red", "white", "grey", "black", "silver") - ] - return getattr(Color, color_list[obj_id % len(color_list)]) - - -@lru_cache(maxsize=4) -def _get_grid(size, w, h, polar=False): - """ - Construct the grid of points. - - Points are choosen - Results are cached since the grid in absolute coordinates doesn't change. 
- """ - # We need to get points on a semi-sphere of radious 1 centered around (0, 0) - - # First step is to get a grid of angles, theta and phi ∈ (-pi/2, pi/2) - step = np.pi / size - start = -np.pi / 2 + step / 2 - end = np.pi / 2 - theta, fi = np.mgrid[start:end:step, start:end:step] - - if polar: - # if polar=True the first frame will show points as if - # you are on the center of the earth looking at one of the poles. - # Points on the sphere are defined as [sin(theta) * cos(fi), sin(theta) * sin(fi), cos(theta)] - # Then we need to intersect the line defined by the point above with the - # plane z=1 which is the "absolute plane", we do so by dividing by cos(theta), the result becomes - # [tan(theta) * cos(fi), tan(theta) * sin(phi), 1] - # note that the z=1 is implied by the coord_transformation so there is no need to add it. - tan_theta = np.tan(theta) - - X = tan_theta * np.cos(fi) - Y = tan_theta * np.sin(fi) - else: - # otherwhise will show as if you were looking at the equator - X = np.tan(fi) - Y = np.divide(np.tan(theta), np.cos(fi)) - # construct the points as x, y coordinates - points = np.vstack((X.flatten(), Y.flatten())).T - # scale and center the points - return points * max(h, w) + np.array([w // 2, h // 2]) - - -def draw_absolute_grid( - frame: np.ndarray, - coord_transformations: CoordinatesTransformation, - grid_size: int = 20, - radius: int = 2, - thickness: int = 1, - color: Optional[Tuple[int, int, int]] = Color.black, - polar: bool = False, -): - """ - Draw a grid of points in absolute coordinates. - - Useful for debugging camera motion. - - The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection - of latitude and longitude lines over the surface of the sphere. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame to draw on. 
- coord_transformations : CoordinatesTransformation - The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator] - grid_size : int, optional - How many points to draw. - radius : int, optional - Size of each point. - thickness : int, optional - Thickness of each point - color : Optional[Tuple[int, int, int]], optional - Color of the points. - polar : Bool, optional - If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). - By default, False is used which means the points are drawn as if the camera were pointing to the Equator. - """ - h, w, _ = frame.shape - - # get absolute points grid - points = _get_grid(grid_size, w, h, polar=polar) - - # transform the points to relative coordinates - if coord_transformations is None: - points_transformed = points - else: - points_transformed = coord_transformations.abs_to_rel(points) - - # filter points that are not visible - visible_points = points_transformed[ - (points_transformed <= np.array([w, h])).all(axis=1) - & (points_transformed >= 0).all(axis=1) - ] - for point in visible_points: - _draw_cross( - frame, point.astype(int), radius=radius, thickness=thickness, color=color - ) - - -class FixedCamera: - """ - Class used to stabilize video based on the camera motion. - - Starts with a larger frame, where the original frame is drawn on top of a black background. - As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it. - - Useful for debugging or demoing the camera motion. - ![Example GIF](../../videos/camera_stabilization.gif) - - !!! Warning - This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation], - using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in - unexpected behaviour. - - !!! Warning - If using other drawers, always apply this one last. 
Using other drawers on the scaled up frame will not work as expected. - - !!! Note - Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. - In this case, a warning will be logged and the frames will be cropped to avoid errors. - - Parameters - ---------- - scale : float, optional - The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video. - Use a bigger scale if the camera is moving too much. - attenuation : float, optional - Controls how fast the older frames fade to black. - - Examples - -------- - >>> # setup - >>> tracker = Tracker("frobenious", 100) - >>> motion_estimator = MotionEstimator() - >>> video = Video(input_path="video.mp4") - >>> fixed_camera = FixedCamera() - >>> # process video - >>> for frame in video: - >>> coord_transformations = motion_estimator.update(frame) - >>> detections = get_detections(frame) - >>> tracked_objects = tracker.update(detections, coord_transformations) - >>> draw_tracked_objects(frame, tracked_objects) # fixed_camera should always be the last drawer - >>> bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations) - >>> video.write(bigger_frame) - """ - - def __init__(self, scale: float = 2, attenuation: float = 0.05): - self.scale = scale - self._background = None - self._attenuation_factor = 1 - attenuation - - def adjust_frame( - self, frame: np.ndarray, coord_transformation: TranslationTransformation - ) -> np.ndarray: - """ - Render scaled up frame. - - Parameters - ---------- - frame : np.ndarray - The OpenCV frame. - coord_transformation : TranslationTransformation - The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator] - - Returns - ------- - np.ndarray - The new bigger frame with the original frame drawn on it. 
- """ - - # initialize background if necessary - if self._background is None: - original_size = ( - frame.shape[1], - frame.shape[0], - ) # OpenCV format is (width, height) - - scaled_size = tuple( - (np.array(original_size) * np.array(self.scale)).round().astype(int) - ) - self._background = np.zeros( - [scaled_size[1], scaled_size[0], frame.shape[-1]], - frame.dtype, - ) - else: - self._background = (self._background * self._attenuation_factor).astype( - frame.dtype - ) - - # top_left is the anchor coordinate from where we start drawing the fame on top of the background - # aim to draw it in the center of the background but transformations will move this point - top_left = ( - np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2 - ) - top_left = ( - coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1] - ) - # box of the background that will be updated and the limits of it - background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0]) - background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1]) - background_size_y, background_size_x = self._background.shape[:2] - - # define box of the frame that will be used - # if the scale is not enough to support the movement, warn the user but keep drawing - # cropping the frame so that the operation doesn't fail - frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1]) - if ( - background_y0 < 0 - or background_x0 < 0 - or background_y1 > background_size_y - or background_x1 > background_size_x - ): - warn_once( - "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped" - ) - # crop left or top of the frame if necessary - frame_y0 = max(-background_y0, 0) - frame_x0 = max(-background_x0, 0) - # crop right or bottom of the frame if necessary - frame_y1 = max( - min(background_size_y - background_y0, background_y1 - background_y0), 0 - ) - frame_x1 = max( - min(background_size_x - 
background_x0, background_x1 - background_x0), 0 - ) - # handle cases where the limits of the background become negative which numpy will interpret incorrectly - background_y0 = max(background_y0, 0) - background_x0 = max(background_x0, 0) - background_y1 = max(background_y1, 0) - background_x1 = max(background_x1, 0) - self._background[ - background_y0:background_y1, background_x0:background_x1, : - ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :] - return self._background - - -class AbsolutePaths: - """ - Class that draws the absolute paths taken by a set of points. - - Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion. - - !!! warning - This drawer is not optimized so it can be stremely slow. Performance degrades linearly with - `max_history * number_of_tracked_objects`. - - Parameters - ---------- - get_points_to_draw : Optional[Callable[[np.array], np.array]], optional - Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject]) - and returns a list of points for which we want to draw their paths. - - By default it is the mean point of all the points in the tracker. - thickness : Optional[int], optional - Thickness of the circles representing the paths of interest. - color : Optional[Tuple[int, int, int]], optional - [Color][norfair.drawing.Color] of the circles representing the paths of interest. - radius : Optional[int], optional - Radius of the circles representing the paths of interest. - max_history : int, optional - Number of past points to include in the path. High values make the drawing slower - - Examples - -------- - >>> from norfair import Tracker, Video, Path - >>> video = Video("video.mp4") - >>> tracker = Tracker(...) 
- >>> path_drawer = Path() - >>> for frame in video: - >>> detections = get_detections(frame) # runs detector and returns Detections - >>> tracked_objects = tracker.update(detections) - >>> frame = path_drawer.draw(frame, tracked_objects) - >>> video.write(frame) - """ - - def __init__( - self, - get_points_to_draw: Optional[Callable[[np.array], np.array]] = None, - thickness: Optional[int] = None, - color: Optional[Tuple[int, int, int]] = None, - radius: Optional[int] = None, - max_history=20, - ): - - if get_points_to_draw is None: - - def get_points_to_draw(points): - return [np.mean(np.array(points), axis=0)] - - self.get_points_to_draw = get_points_to_draw - - self.radius = radius - self.thickness = thickness - self.color = color - self.past_points = defaultdict(lambda: []) - self.max_history = max_history - self.alphas = np.linspace(0.99, 0.01, max_history) - - def draw(self, frame, tracked_objects, coord_transform=None): - frame_scale = frame.shape[0] / 100 - - if self.radius is None: - self.radius = int(max(frame_scale * 0.7, 1)) - if self.thickness is None: - self.thickness = int(max(frame_scale / 7, 1)) - for obj in tracked_objects: - if not obj.live_points.any(): - continue - - if self.color is None: - color = Color.random(obj.id) - else: - color = self.color - - points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True)) - - for point in coord_transform.abs_to_rel(points_to_draw): - cv2.circle( - frame, - tuple(point.astype(int)), - radius=self.radius, - color=color, - thickness=self.thickness, - ) - - last = points_to_draw - for i, past_points in enumerate(self.past_points[obj.id]): - overlay = frame.copy() - last = coord_transform.abs_to_rel(last) - for j, point in enumerate(coord_transform.abs_to_rel(past_points)): - cv2.line( - overlay, - tuple(last[j].astype(int)), - tuple(point.astype(int)), - # radius=self.radius, - color=color, - thickness=self.thickness, - ) - last = past_points - - alpha = self.alphas[i] - frame = 
cv2.addWeighted(overlay, alpha, frame, 1 - alpha, 0) - self.past_points[obj.id].insert(0, points_to_draw) - self.past_points[obj.id] = self.past_points[obj.id][: self.max_history] - return frame diff --git a/norfair/drawing/__init__.py b/norfair/drawing/__init__.py new file mode 100644 index 00000000..bce4640f --- /dev/null +++ b/norfair/drawing/__init__.py @@ -0,0 +1,6 @@ +from .absolute_grid import draw_absolute_grid +from .color import Colors, Palette +from .draw_boxes import draw_boxes +from .draw_points import draw_points +from .fixed_camera import FixedCamera +from .path import AbsolutePaths, Paths diff --git a/norfair/drawing/absolute_grid.py b/norfair/drawing/absolute_grid.py new file mode 100644 index 00000000..a4e4cf29 --- /dev/null +++ b/norfair/drawing/absolute_grid.py @@ -0,0 +1,104 @@ +from functools import lru_cache +from typing import Optional, Tuple + +import numpy as np + +from norfair.camera_motion import CoordinatesTransformation + +from .color import Colors +from .drawer import Drawer + + +@lru_cache(maxsize=4) +def _get_grid(size, w, h, polar=False): + """ + Construct the grid of points. + + Points are choosen + Results are cached since the grid in absolute coordinates doesn't change. + """ + # We need to get points on a semi-sphere of radious 1 centered around (0, 0) + + # First step is to get a grid of angles, theta and phi ∈ (-pi/2, pi/2) + step = np.pi / size + start = -np.pi / 2 + step / 2 + end = np.pi / 2 + theta, fi = np.mgrid[start:end:step, start:end:step] + + if polar: + # if polar=True the first frame will show points as if + # you are on the center of the earth looking at one of the poles. 
+ # Points on the sphere are defined as [sin(theta) * cos(fi), sin(theta) * sin(fi), cos(theta)] + # Then we need to intersect the line defined by the point above with the + # plane z=1 which is the "absolute plane", we do so by dividing by cos(theta), the result becomes + # [tan(theta) * cos(fi), tan(theta) * sin(phi), 1] + # note that the z=1 is implied by the coord_transformation so there is no need to add it. + tan_theta = np.tan(theta) + + X = tan_theta * np.cos(fi) + Y = tan_theta * np.sin(fi) + else: + # otherwhise will show as if you were looking at the equator + X = np.tan(fi) + Y = np.divide(np.tan(theta), np.cos(fi)) + # construct the points as x, y coordinates + points = np.vstack((X.flatten(), Y.flatten())).T + # scale and center the points + return points * max(h, w) + np.array([w // 2, h // 2]) + + +def draw_absolute_grid( + frame: np.ndarray, + coord_transformations: CoordinatesTransformation, + grid_size: int = 20, + radius: int = 2, + thickness: int = 1, + color: Optional[Tuple[int, int, int]] = Colors.black, + polar: bool = False, +): + """ + Draw a grid of points in absolute coordinates. + + Useful for debugging camera motion. + + The points are drawn as if the camera were in the center of a sphere and points are drawn in the intersection + of latitude and longitude lines over the surface of the sphere. + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. + coord_transformations : CoordinatesTransformation + The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator] + grid_size : int, optional + How many points to draw. + radius : int, optional + Size of each point. + thickness : int, optional + Thickness of each point + color : Optional[Tuple[int, int, int]], optional + Color of the points. + polar : Bool, optional + If True, the points on the first frame are drawn as if the camera were pointing to a pole (viewed from the center of the earth). 
+ By default, False is used which means the points are drawn as if the camera were pointing to the Equator. + """ + h, w, _ = frame.shape + + # get absolute points grid + points = _get_grid(grid_size, w, h, polar=polar) + + # transform the points to relative coordinates + if coord_transformations is None: + points_transformed = points + else: + points_transformed = coord_transformations.abs_to_rel(points) + + # filter points that are not visible + visible_points = points_transformed[ + (points_transformed <= np.array([w, h])).all(axis=1) + & (points_transformed >= 0).all(axis=1) + ] + for point in visible_points: + Drawer.cross( + frame, point.astype(int), radius=radius, thickness=thickness, color=color + ) diff --git a/norfair/drawing/color.py b/norfair/drawing/color.py new file mode 100644 index 00000000..79348214 --- /dev/null +++ b/norfair/drawing/color.py @@ -0,0 +1,329 @@ +import re +from typing import Any, Iterable, Tuple, Union + +# types + +Color = Tuple[int, int, int] +ColorLike = Union[Color, str] + + +def hex_to_bgr(string: str) -> Color: + """Converts convencional 6 digits hex colors to BGR tuples + + Parameters + ---------- + string : str + hex value with leading `#` for instance `"#ff0000"` + + Returns + ------- + Tuple[int, int, int] + BGR values + + Raises + ------ + ValueError + if the string is invalid + """ + # TODO: support 3-digits hex + if re.match("#[a-f0-9]{6}$", string): + return int(string[3:5], 16), int(string[5:7], 16), int(string[1:3], 16) + raise ValueError(f"'{string}' is not a valid color") + + +class Colors: + """ + Contains predefined colors. + + Colors are defined as a Tuple of integers between 0 and 255 expressing the values in BGR + This is the format opencv uses. 
+ """ + + # from PIL.ImageColors.colormap + aliceblue = hex_to_bgr("#f0f8ff") + antiquewhite = hex_to_bgr("#faebd7") + aqua = hex_to_bgr("#00ffff") + aquamarine = hex_to_bgr("#7fffd4") + azure = hex_to_bgr("#f0ffff") + beige = hex_to_bgr("#f5f5dc") + bisque = hex_to_bgr("#ffe4c4") + black = hex_to_bgr("#000000") + blanchedalmond = hex_to_bgr("#ffebcd") + blue = hex_to_bgr("#0000ff") + blueviolet = hex_to_bgr("#8a2be2") + brown = hex_to_bgr("#a52a2a") + burlywood = hex_to_bgr("#deb887") + cadetblue = hex_to_bgr("#5f9ea0") + chartreuse = hex_to_bgr("#7fff00") + chocolate = hex_to_bgr("#d2691e") + coral = hex_to_bgr("#ff7f50") + cornflowerblue = hex_to_bgr("#6495ed") + cornsilk = hex_to_bgr("#fff8dc") + crimson = hex_to_bgr("#dc143c") + cyan = hex_to_bgr("#00ffff") + darkblue = hex_to_bgr("#00008b") + darkcyan = hex_to_bgr("#008b8b") + darkgoldenrod = hex_to_bgr("#b8860b") + darkgray = hex_to_bgr("#a9a9a9") + darkgrey = hex_to_bgr("#a9a9a9") + darkgreen = hex_to_bgr("#006400") + darkkhaki = hex_to_bgr("#bdb76b") + darkmagenta = hex_to_bgr("#8b008b") + darkolivegreen = hex_to_bgr("#556b2f") + darkorange = hex_to_bgr("#ff8c00") + darkorchid = hex_to_bgr("#9932cc") + darkred = hex_to_bgr("#8b0000") + darksalmon = hex_to_bgr("#e9967a") + darkseagreen = hex_to_bgr("#8fbc8f") + darkslateblue = hex_to_bgr("#483d8b") + darkslategray = hex_to_bgr("#2f4f4f") + darkslategrey = hex_to_bgr("#2f4f4f") + darkturquoise = hex_to_bgr("#00ced1") + darkviolet = hex_to_bgr("#9400d3") + deeppink = hex_to_bgr("#ff1493") + deepskyblue = hex_to_bgr("#00bfff") + dimgray = hex_to_bgr("#696969") + dimgrey = hex_to_bgr("#696969") + dodgerblue = hex_to_bgr("#1e90ff") + firebrick = hex_to_bgr("#b22222") + floralwhite = hex_to_bgr("#fffaf0") + forestgreen = hex_to_bgr("#228b22") + fuchsia = hex_to_bgr("#ff00ff") + gainsboro = hex_to_bgr("#dcdcdc") + ghostwhite = hex_to_bgr("#f8f8ff") + gold = hex_to_bgr("#ffd700") + goldenrod = hex_to_bgr("#daa520") + gray = hex_to_bgr("#808080") + grey = 
hex_to_bgr("#808080") + green = (0, 128, 0) + greenyellow = hex_to_bgr("#adff2f") + honeydew = hex_to_bgr("#f0fff0") + hotpink = hex_to_bgr("#ff69b4") + indianred = hex_to_bgr("#cd5c5c") + indigo = hex_to_bgr("#4b0082") + ivory = hex_to_bgr("#fffff0") + khaki = hex_to_bgr("#f0e68c") + lavender = hex_to_bgr("#e6e6fa") + lavenderblush = hex_to_bgr("#fff0f5") + lawngreen = hex_to_bgr("#7cfc00") + lemonchiffon = hex_to_bgr("#fffacd") + lightblue = hex_to_bgr("#add8e6") + lightcoral = hex_to_bgr("#f08080") + lightcyan = hex_to_bgr("#e0ffff") + lightgoldenrodyellow = hex_to_bgr("#fafad2") + lightgreen = hex_to_bgr("#90ee90") + lightgray = hex_to_bgr("#d3d3d3") + lightgrey = hex_to_bgr("#d3d3d3") + lightpink = hex_to_bgr("#ffb6c1") + lightsalmon = hex_to_bgr("#ffa07a") + lightseagreen = hex_to_bgr("#20b2aa") + lightskyblue = hex_to_bgr("#87cefa") + lightslategray = hex_to_bgr("#778899") + lightslategrey = hex_to_bgr("#778899") + lightsteelblue = hex_to_bgr("#b0c4de") + lightyellow = hex_to_bgr("#ffffe0") + lime = hex_to_bgr("#00ff00") + limegreen = hex_to_bgr("#32cd32") + linen = hex_to_bgr("#faf0e6") + magenta = hex_to_bgr("#ff00ff") + maroon = hex_to_bgr("#800000") + mediumaquamarine = hex_to_bgr("#66cdaa") + mediumblue = hex_to_bgr("#0000cd") + mediumorchid = hex_to_bgr("#ba55d3") + mediumpurple = hex_to_bgr("#9370db") + mediumseagreen = hex_to_bgr("#3cb371") + mediumslateblue = hex_to_bgr("#7b68ee") + mediumspringgreen = hex_to_bgr("#00fa9a") + mediumturquoise = hex_to_bgr("#48d1cc") + mediumvioletred = hex_to_bgr("#c71585") + midnightblue = hex_to_bgr("#191970") + mintcream = hex_to_bgr("#f5fffa") + mistyrose = hex_to_bgr("#ffe4e1") + moccasin = hex_to_bgr("#ffe4b5") + navajowhite = hex_to_bgr("#ffdead") + navy = hex_to_bgr("#000080") + oldlace = hex_to_bgr("#fdf5e6") + olive = hex_to_bgr("#808000") + olivedrab = hex_to_bgr("#6b8e23") + orange = hex_to_bgr("#ffa500") + orangered = hex_to_bgr("#ff4500") + orchid = hex_to_bgr("#da70d6") + palegoldenrod = 
hex_to_bgr("#eee8aa") + palegreen = hex_to_bgr("#98fb98") + paleturquoise = hex_to_bgr("#afeeee") + palevioletred = hex_to_bgr("#db7093") + papayawhip = hex_to_bgr("#ffefd5") + peachpuff = hex_to_bgr("#ffdab9") + peru = hex_to_bgr("#cd853f") + pink = hex_to_bgr("#ffc0cb") + plum = hex_to_bgr("#dda0dd") + powderblue = hex_to_bgr("#b0e0e6") + purple = hex_to_bgr("#800080") + rebeccapurple = hex_to_bgr("#663399") + red = hex_to_bgr("#ff0000") + rosybrown = hex_to_bgr("#bc8f8f") + royalblue = hex_to_bgr("#4169e1") + saddlebrown = hex_to_bgr("#8b4513") + salmon = hex_to_bgr("#fa8072") + sandybrown = hex_to_bgr("#f4a460") + seagreen = hex_to_bgr("#2e8b57") + seashell = hex_to_bgr("#fff5ee") + sienna = hex_to_bgr("#a0522d") + silver = hex_to_bgr("#c0c0c0") + skyblue = hex_to_bgr("#87ceeb") + slateblue = hex_to_bgr("#6a5acd") + slategray = hex_to_bgr("#708090") + slategrey = hex_to_bgr("#708090") + snow = hex_to_bgr("#fffafa") + springgreen = hex_to_bgr("#00ff7f") + steelblue = hex_to_bgr("#4682b4") + tan = hex_to_bgr("#d2b48c") + teal = hex_to_bgr("#008080") + thistle = hex_to_bgr("#d8bfd8") + tomato = hex_to_bgr("#ff6347") + turquoise = hex_to_bgr("#40e0d0") + violet = hex_to_bgr("#ee82ee") + wheat = hex_to_bgr("#f5deb3") + white = hex_to_bgr("#ffffff") + whitesmoke = hex_to_bgr("#f5f5f5") + yellow = hex_to_bgr("#ffff00") + yellowgreen = hex_to_bgr("#9acd32") + + # seaborn tab20 colors + tab1 = hex_to_bgr("#1f77b4") + tab2 = hex_to_bgr("#aec7e8") + tab3 = hex_to_bgr("#ff7f0e") + tab4 = hex_to_bgr("#ffbb78") + tab5 = hex_to_bgr("#2ca02c") + tab6 = hex_to_bgr("#98df8a") + tab7 = hex_to_bgr("#d62728") + tab8 = hex_to_bgr("#ff9896") + tab9 = hex_to_bgr("#9467bd") + tab10 = hex_to_bgr("#c5b0d5") + tab11 = hex_to_bgr("#8c564b") + tab12 = hex_to_bgr("#c49c94") + tab13 = hex_to_bgr("#e377c2") + tab14 = hex_to_bgr("#f7b6d2") + tab15 = hex_to_bgr("#7f7f7f") + tab16 = hex_to_bgr("#c7c7c7") + tab17 = hex_to_bgr("#bcbd22") + tab18 = hex_to_bgr("#dbdb8d") + tab19 = 
hex_to_bgr("#17becf") + tab20 = hex_to_bgr("#9edae5") + # seaborn colorblind + cb1 = hex_to_bgr("#0173b2") + cb2 = hex_to_bgr("#de8f05") + cb3 = hex_to_bgr("#029e73") + cb4 = hex_to_bgr("#d55e00") + cb5 = hex_to_bgr("#cc78bc") + cb6 = hex_to_bgr("#ca9161") + cb7 = hex_to_bgr("#fbafe4") + cb8 = hex_to_bgr("#949494") + cb9 = hex_to_bgr("#ece133") + cb10 = hex_to_bgr("#56b4e9") + + +def parse_color(value: ColorLike) -> Color: + """Makes best effort to parse the given value to a Color + + Parameters + ---------- + value : ColorLike + Can be one of: + 1. a string with the 6 digits hex value (`"#ff0000"`) + 2. a string with one of the names defined in Colors (`"red"`) + 3. a BGR tuple (`(0, 0, 255)`) + + Returns + ------- + Color + The BGR tuple. + """ + if isinstance(value, str): + if value.startswith("#"): + return hex_to_bgr(value) + else: + return getattr(Colors, value) + # TODO: validate value? + return tuple([int(v) for v in value]) + + +PALETTES = { + "tab10": [ + Colors.tab1, + Colors.tab3, + Colors.tab5, + Colors.tab7, + Colors.tab9, + Colors.tab11, + Colors.tab13, + Colors.tab15, + Colors.tab17, + Colors.tab19, + ], + "tab20": [ + Colors.tab1, + Colors.tab2, + Colors.tab3, + Colors.tab4, + Colors.tab5, + Colors.tab6, + Colors.tab7, + Colors.tab8, + Colors.tab9, + Colors.tab10, + Colors.tab11, + Colors.tab12, + Colors.tab13, + Colors.tab14, + Colors.tab15, + Colors.tab16, + Colors.tab17, + Colors.tab18, + Colors.tab19, + Colors.tab20, + ], + "colorblind": [ + Colors.cb1, + Colors.cb2, + Colors.cb3, + Colors.cb4, + Colors.cb5, + Colors.cb6, + Colors.cb7, + Colors.cb8, + Colors.cb9, + Colors.cb10, + ], +} + + +class Palette: + _colors = PALETTES["tab10"] + _default_color = Colors.black + + @classmethod + def set(cls, palette: Union[str, Iterable[ColorLike]]): + if isinstance(palette, str): + try: + cls._colors = PALETTES[palette] + except KeyError as e: + raise ValueError( + f"Invalid palette name '{palette}', valid values are {PALETTES.keys()}" + ) from e + else: 
+ colors = [] + for c in palette: + colors.append(parse_color(c)) + + cls._colors = colors + + @classmethod + def set_default_color(cls, color: ColorLike): + cls._default_color = parse_color(color) + + @classmethod + def choose_color(cls, hashable): + if hashable is None: + return cls._default_color + return cls._colors[abs(hash(hashable)) % len(cls._colors)] diff --git a/norfair/drawing/draw_boxes.py b/norfair/drawing/draw_boxes.py new file mode 100644 index 00000000..2d9404b2 --- /dev/null +++ b/norfair/drawing/draw_boxes.py @@ -0,0 +1,108 @@ +from typing import Optional, Sequence, Tuple, Union + +import numpy as np + +from norfair.tracker import Detection, TrackedObject + +from .color import ColorLike, Palette, parse_color +from .drawer import Drawable, Drawer +from .utils import _build_text + + +def draw_boxes( + frame: np.ndarray, + drawables: Sequence[Union[Detection, TrackedObject]], + color: ColorLike = "by_id", + thickness: Optional[int] = 2, + draw_labels: bool = False, + draw_ids: bool = False, + text_size: Optional[float] = None, + text_color: Optional[ColorLike] = None, + text_thickness: Optional[int] = None, +) -> np.ndarray: + """ + Draw bounding boxes corresponding to Detections or TrackedObjects. + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. Modified in place. + drawables : Sequence[Union[Detection, TrackedObject]] + List of objects to draw, Detections and TrackedObjects are accepted. + This objects are assumed to contain 2 bi-dimensional points defining + the bounding box as `[[x0, y0], [x1, y1]]`. + color : ColorLike, optional + This parameter can take: + 1. A color as a tuple of ints describing the BGR `(0, 0, 255)` + 2. A 6-digit hex string `"#FF0000"` + 3. One of the defined color names `"red"` + 4. A string defining the strategy to choose colors from the Palette: + 1. based on the id of the objects `"by_id"` + 2. 
based on the label of the objects `"by_label"` + Note that if your objects don't have labels or ids (Detections never have ids) + the selected color will be the same for all objects. + thickness : Optional[int], optional + Thickness or width of the line. + draw_labels : bool, optional + If set to True, the label is added to a title that is drawn on top of the box. + If an object doesn't have a label this parameter is ignored. + draw_ids : bool, optional + If set to True, the id is added to a title that is drawn on top of the box. + If an object doesn't have an id this parameter is ignored. + text_size : Optional[float], optional + Size of the title, the value is used as a multiplier of the base size of the font. + By default the size is scaled automatically based on the frame size. + text_color : Optional[int], optional + Color of the text. By default the same color as the box is used. + text_thickness : Optional[int], optional + Thickness of the font. By default it's scaled with the `text_size`. + + Returns + ------- + np.ndarray + The resulting frame. 
+ """ + if drawables is None: + return frame + + for obj in drawables: + d = Drawable(obj) + + if color == "by_id": + obj_color = Palette.choose_color(d.id) + elif color == "by_label": + obj_color = Palette.choose_color(d.label) + else: + obj_color = parse_color(color) + + points = d.points.astype(int) + + Drawer.rectangle( + frame, + tuple(points), + color=obj_color, + thickness=thickness, + ) + + text = _build_text(d, draw_labels=draw_labels, draw_ids=draw_ids) + if text: + if text_color is None: + obj_text_color = obj_color + else: + obj_text_color = color + # the anchor will become the bottom-left of the text, + # we select-top left of the bbox compensating for the thickness of the box + text_anchor = ( + points[0, 0] - thickness // 2, + points[0, 1] - thickness // 2 - 1, + ) + frame = Drawer.text( + frame, + text, + position=text_anchor, + size=text_size, + color=obj_text_color, + thickness=text_thickness, + ) + + return frame diff --git a/norfair/drawing/draw_points.py b/norfair/drawing/draw_points.py new file mode 100644 index 00000000..0f393b17 --- /dev/null +++ b/norfair/drawing/draw_points.py @@ -0,0 +1,224 @@ +from typing import Optional, Sequence, Union + +import numpy as np + +from norfair.tracker import Detection, TrackedObject + +from .color import ColorLike, Palette, parse_color +from .drawer import Drawable, Drawer +from .utils import _build_text + + +def draw_points( + frame: np.ndarray, + drawables: Sequence[Union[Detection, TrackedObject]], + color: ColorLike = "by_id", + radius: Optional[int] = None, + thickness: Optional[int] = None, + draw_labels: bool = True, + draw_ids: bool = True, + draw_points: bool = True, + text_size: Optional[int] = None, + text_color: Optional[ColorLike] = None, + hide_dead_points: bool = True, +): + """ + Draw the points included in a list of Detections or TrackedObjects. + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. Modified in place. 
+ drawables : Sequence[Union[Detection, TrackedObject]] + List of objects to draw, Detections and TrackedObjects are accepted. + color : ColorLike, optional + This parameter can take: + 1. A color as a tuple of ints describing the BGR `(0, 0, 255)` + 2. A 6-digit hex string `"#FF0000"` + 3. One of the defined color names `"red"` + 4. A string defining the strategy to choose colors from the Palette: + 1. based on the id of the objects `"by_id"` + 2. based on the label of the objects `"by_label"` + Note that if your objects don't have labels or ids (Detections never have ids) + the selected color will be the same for all objects. + radius : Optional[int], optional + Radius of the circles representing each point. + By default a sensible value is picked considering the frame size. + thickness : Optional[int], optional + Thickness or width of the line. + draw_labels : bool, optional + If set to True, the label is added to a title that is drawn on top of the box. + If an object doesn't have a label this parameter is ignored. + draw_ids : bool, optional + If set to True, the id is added to a title that is drawn on top of the box. + If an object doesn't have an id this parameter is ignored. + text_size : Optional[float], optional + Size of the title, the value is used as a multiplier of the base size of the font. + By default the size is scaled automatically based on the frame size. + text_color : Optional[int], optional + Color of the text. By default the same color as the box is used. + text_thickness : Optional[int], optional + Thickness of the font. By default it's scaled with the `text_size`. + hide_dead_points : bool, optional + By default the dead points of the TrackedObject are hidden. + A point is considered dead if the corresponding value of `TrackedObject.live_points` is set to False. + If all objects are dead the object is not drawn. + All points of a detection are considered to be alive. + Set this param to False to always draw all points. 
+ + Returns + ------- + np.ndarray + The resulting frame. + """ + if drawables is None: + return + + if radius is None: + radius = int(round(max(max(frame.shape) * 0.002, 1))) + for o in drawables: + d = Drawable(o) + + if hide_dead_points and not d.live_points.any(): + continue + + if color == "by_id": + obj_color = Palette.choose_color(d.id) + elif color == "by_label": + obj_color = Palette.choose_color(d.label) + else: + obj_color = parse_color(color) + + if text_color is None: + obj_text_color = obj_color + else: + obj_text_color = color + + if draw_points: + for point, live in zip(d.points, d.live_points): + if live or not hide_dead_points: + Drawer.circle( + frame, + tuple(point.astype(int)), + radius=radius, + color=obj_color, + thickness=thickness, + ) + + if draw_labels or draw_ids: + position = np.array([min(d.points[:, 0]), min(d.points[:, 1])]) + position -= radius * 2 + text = _build_text(d, draw_labels=draw_labels, draw_ids=draw_ids) + + Drawer.text( + frame, + text, + tuple(position.astype(int)), + size=text_size, + color=obj_text_color, + ) + return frame + + +# TODO: We used to have this function to debug +# migrate it to use Drawer and clean it up +# if possible maybe merge this functionality to the function above + +# def draw_debug_metrics( +# frame: np.ndarray, +# objects: Sequence["TrackedObject"], +# text_size: Optional[float] = None, +# text_thickness: Optional[int] = None, +# color: Optional[Tuple[int, int, int]] = None, +# only_ids=None, +# only_initializing_ids=None, +# draw_score_threshold: float = 0, +# color_by_label: bool = False, +# draw_labels: bool = False, +# ): +# """Draw objects with their debug information + +# It is recommended to set the input variable `objects` to `your_tracker_object.objects` +# so you can also debug objects wich haven't finished initializing, and you get a more +# complete view of what your tracker is doing on each step. 
+# """ +# frame_scale = frame.shape[0] / 100 +# if text_size is None: +# text_size = frame_scale / 10 +# if text_thickness is None: +# text_thickness = int(frame_scale / 5) +# radius = int(frame_scale * 0.5) + +# for obj in objects: +# if ( +# not (obj.last_detection.scores is None) +# and not (obj.last_detection.scores > draw_score_threshold).any() +# ): +# continue +# if only_ids is not None: +# if obj.id not in only_ids: +# continue +# if only_initializing_ids is not None: +# if obj.initializing_id not in only_initializing_ids: +# continue +# if color_by_label: +# text_color = Color.random(abs(hash(obj.label))) +# elif color is None: +# text_color = Color.random(obj.initializing_id) +# else: +# text_color = color +# draw_position = _centroid( +# obj.estimate[obj.last_detection.scores > draw_score_threshold] +# if obj.last_detection.scores is not None +# else obj.estimate +# ) + +# for point in obj.estimate: +# cv2.circle( +# frame, +# tuple(point.astype(int)), +# radius=radius, +# color=text_color, +# thickness=-1, +# ) + +# # Distance to last matched detection +# if obj.last_distance is None: +# last_dist = "-" +# elif obj.last_distance > 999: +# last_dist = ">" +# else: +# last_dist = "{:.2f}".format(obj.last_distance) + +# # Distance to currently closest detection +# if obj.current_min_distance is None: +# current_min_dist = "-" +# else: +# current_min_dist = "{:.2f}".format(obj.current_min_distance) + +# # No support for multiline text in opencv :facepalm: +# lines_to_draw = [ +# "{}|{}".format(obj.id, obj.initializing_id), +# "a:{}".format(obj.age), +# "h:{}".format(obj.hit_counter), +# "ld:{}".format(last_dist), +# "cd:{}".format(current_min_dist), +# ] +# if draw_labels: +# lines_to_draw.append("l:{}".format(obj.label)) + +# for i, line in enumerate(lines_to_draw): +# draw_position = ( +# int(draw_position[0]), +# int(draw_position[1] + i * text_size * 7 + 15), +# ) +# cv2.putText( +# frame, +# line, +# draw_position, +# cv2.FONT_HERSHEY_SIMPLEX, +# 
text_size, +# text_color, +# text_thickness, +# cv2.LINE_AA, +# ) diff --git a/norfair/drawing/drawer.py b/norfair/drawing/drawer.py new file mode 100644 index 00000000..b036b7cb --- /dev/null +++ b/norfair/drawing/drawer.py @@ -0,0 +1,335 @@ +from typing import Optional, Sequence, Tuple, Union + +import numpy as np + +from norfair.drawing.color import Color, Colors +from norfair.tracker import Detection, TrackedObject + +try: + import cv2 +except ImportError: + from norfair.utils import DummyOpenCVImport + + cv2 = DummyOpenCVImport() + + +class Drawer: + """ + Basic drawing functionality. + + This class encapsulates opencv drawing functions allowing for + different backends to be implemented following the same interface. + """ + + @classmethod + def circle( + cls, + frame: np.ndarray, + position: Tuple[int, int], + radius: Optional[int] = None, + thickness: Optional[int] = None, + color: Color = None, + ) -> np.ndarray: + """ + Draw a circle. + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. Modified in place. + position : Tuple[int, int] + Position of the point. This will become the center of the circle. + radius : Optional[int], optional + Radius of the circle. + thickness : Optional[int], optional + Thickness or width of the line. + color : Color, optional + A tuple of ints describing the BGR color `(0, 0, 255)`. + + Returns + ------- + np.ndarray + The resulting frame. 
+ """ + if radius is None: + radius = int(max(max(frame.shape) * 0.005, 1)) + if thickness is None: + thickness = radius - 1 + + return cv2.circle( + frame, + position, + radius=radius, + color=color, + thickness=thickness, + ) + + @classmethod + def text( + cls, + frame: np.ndarray, + text: str, + position: Tuple[int, int], + size: Optional[float] = None, + color: Optional[Color] = None, + thickness: Optional[int] = None, + shadow: bool = True, + shadow_color: Color = Colors.black, + shadow_offset: int = 1, + ) -> np.ndarray: + """ + Draw text + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. Modified in place. + text : str + The text to be written. + position : Tuple[int, int] + Position of the bottom-left corner of the text. + This value is adjusted considering the thickness automatically. + size : Optional[float], optional + Scale of the font, by default chooses a sensible value is picked based on the size of the frame. + color : Optional[Color], optional + Color of the text, by default is black. + thickness : Optional[int], optional + Thickness of the lines, by default a sensible value is picked based on the size. + shadow : bool, optional + If True, a shadow of the text is added which improves legibility. + shadow_color : Color, optional + Color of the shadow. + shadow_offset : int, optional + Offset of the shadow. + + Returns + ------- + np.ndarray + The resulting frame. 
+ """ + if size is None: + size = min(max(max(frame.shape) / 4000, 0.5), 1.5) + if thickness is None: + thickness = int(round(size) + 1) + + if thickness is None and size is not None: + thickness = int(round(size) + 1) + # adjust position based on the thickness + anchor = (position[0] + thickness // 2, position[1] - thickness // 2) + if shadow: + frame = cv2.putText( + frame, + text, + (anchor[0] + shadow_offset, anchor[1] + shadow_offset), + cv2.FONT_HERSHEY_SIMPLEX, + size, + shadow_color, + thickness, + cv2.LINE_AA, + ) + return cv2.putText( + frame, + text, + anchor, + cv2.FONT_HERSHEY_SIMPLEX, + size, + color, + thickness, + cv2.LINE_AA, + ) + + @classmethod + def rectangle( + cls, + frame: np.ndarray, + points: Sequence[Tuple[int, int]], + color: Optional[Color] = None, + thickness: Optional[int] = None, + ) -> np.ndarray: + """ + Draw a rectangle + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. Modified in place. + points : Sequence[Tuple[int, int]] + Points describing the rectangle in the format `[[x0, y0], [x1, y1]]`. + color : Optional[Color], optional + Color of the lines, by default Black. + thickness : Optional[int], optional + Thickness of the lines, by default 1. + + Returns + ------- + np.ndarray + The resulting frame. + """ + frame = cv2.rectangle( + frame, + points[0], + points[1], + color=color, + thickness=thickness, + ) + return frame + + @classmethod + def cross( + cls, + frame: np.ndarray, + center: Tuple[int, int], + radius: int, + color: Color, + thickness: int, + ) -> np.ndarray: + """ + Draw a cross + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. Modified in place. + center : Tuple[int, int] + Center of the cross. + radius : int + Size or radius of the cross. + color : Color + Color of the lines. + thickness : int + Thickness of the lines. + + Returns + ------- + np.ndarray + The resulting frame. 
+ """ + middle_x, middle_y = center + left, top = center - radius + right, bottom = center + radius + frame = cls.line( + frame, + start=(middle_x, top), + end=(middle_x, bottom), + color=color, + thickness=thickness, + ) + frame = cls.line( + frame, + start=(left, middle_y), + end=(right, middle_y), + color=color, + thickness=thickness, + ) + return frame + + @classmethod + def line( + cls, + frame: np.ndarray, + start: Tuple[int, int], + end: Tuple[int, int], + color: Color = Colors.black, + thickness: int = 1, + ): + """ + Draw a line. + + Parameters + ---------- + frame : np.ndarray + _description_ + start : Tuple[int, int] + _description_ + end : Tuple[int, int] + _description_ + color : Color, optional + _description_, by default Colors.black + thickness : int, optional + _description_, by default 1 + + Returns + ------- + _type_ + _description_ + """ + return cv2.line( + frame, + pt1=start, + pt2=end, + color=color, + thickness=thickness, + ) + + @classmethod + def alpha_blend( + cls, + frame1: np.ndarray, + frame2: np.ndarray, + alpha: float = 0.5, + beta: Optional[float] = None, + gamma: float = 0, + ) -> np.ndarray: + """ + Blend 2 frame as a wheigthted sum. + + Parameters + ---------- + frame1 : np.ndarray + An OpenCV frame. + frame2 : np.ndarray + An OpenCV frame. + alpha : float, optional + Weight of frame1. + beta : Optional[float], optional + Weight of frame2, by default `1 - alpha` + gamma : float, optional + Scalar to add to the sum. + + Returns + ------- + np.ndarray + The resulting frame. 
+ """ + if beta is None: + beta = 1 - alpha + return cv2.addWeighted(src1=frame1, src2=frame2, beta=beta, gamma=gamma) + + +class Drawable: + """ + Class to standardize Drawable objects like Detections and TrackedObjects + + Parameters + ---------- + obj : Union[Detection, TrackedObject] + A [Detection][norfair.tracker.Detection] or a [TrackedObject][norfair.tracker.TrackedObject] + + Raises + ------ + ValueError + If obj is not an instance of the supported classes. + """ + + def __init__(self, obj: Union[Detection, TrackedObject]) -> None: + if isinstance(obj, Detection): + self.points = obj.points + self.id = None + self.label = obj.label + self.scores = obj.scores + # TODO: alive points for detections could be the ones over the threshold + # but that info is not available here + self.live_points = np.ones(obj.points.shape[0]).astype(bool) + + elif isinstance(obj, TrackedObject): + self.points = obj.estimate + self.id = obj.id + self.label = obj.label + # TODO: TrackedObject.scores could be an interesting thing to have + # it could be the scores of the last detection or some kind of moving average + self.scores = None + self.live_points = obj.live_points + else: + raise ValueError( + f"Extecting a Detection or a TrackedObject but received {type(obj)}" + ) diff --git a/norfair/drawing/fixed_camera.py b/norfair/drawing/fixed_camera.py new file mode 100644 index 00000000..6c049063 --- /dev/null +++ b/norfair/drawing/fixed_camera.py @@ -0,0 +1,141 @@ +import numpy as np + +from norfair.camera_motion import TranslationTransformation +from norfair.utils import warn_once + + +class FixedCamera: + """ + Class used to stabilize video based on the camera motion. + + Starts with a larger frame, where the original frame is drawn on top of a black background. + As the camera moves, the smaller frame moves in the opposite direction, stabilizing the objects in it. + + Useful for debugging or demoing the camera motion. 
+ ![Example GIF](../../videos/camera_stabilization.gif) + + !!! Warning + This only works with [`TranslationTransformation`][norfair.camera_motion.TranslationTransformation], + using [`HomographyTransformation`][norfair.camera_motion.HomographyTransformation] will result in + unexpected behaviour. + + !!! Warning + If using other drawers, always apply this one last. Using other drawers on the scaled up frame will not work as expected. + + !!! Note + Sometimes the camera moves so far from the original point that the result won't fit in the scaled-up frame. + In this case, a warning will be logged and the frames will be cropped to avoid errors. + + Parameters + ---------- + scale : float, optional + The resulting video will have a resolution of `scale * (H, W)` where HxW is the resolution of the original video. + Use a bigger scale if the camera is moving too much. + attenuation : float, optional + Controls how fast the older frames fade to black. + + Examples + -------- + >>> # setup + >>> tracker = Tracker("frobenious", 100) + >>> motion_estimator = MotionEstimator() + >>> video = Video(input_path="video.mp4") + >>> fixed_camera = FixedCamera() + >>> # process video + >>> for frame in video: + >>> coord_transformations = motion_estimator.update(frame) + >>> detections = get_detections(frame) + >>> tracked_objects = tracker.update(detections, coord_transformations) + >>> draw_tracked_objects(frame, tracked_objects) # fixed_camera should always be the last drawer + >>> bigger_frame = fixed_camera.adjust_frame(frame, coord_transformations) + >>> video.write(bigger_frame) + """ + + def __init__(self, scale: float = 2, attenuation: float = 0.05): + self.scale = scale + self._background = None + self._attenuation_factor = 1 - attenuation + + def adjust_frame( + self, frame: np.ndarray, coord_transformation: TranslationTransformation + ) -> np.ndarray: + """ + Render scaled up frame. + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame. 
+        coord_transformation : TranslationTransformation
+            The coordinate transformation as returned by the [`MotionEstimator`][norfair.camera_motion.MotionEstimator]
+
+        Returns
+        -------
+        np.ndarray
+            The new bigger frame with the original frame drawn on it.
+        """
+
+        # initialize background if necessary
+        if self._background is None:
+            original_size = (
+                frame.shape[1],
+                frame.shape[0],
+            )  # OpenCV format is (width, height)
+
+            scaled_size = tuple(
+                (np.array(original_size) * np.array(self.scale)).round().astype(int)
+            )
+            self._background = np.zeros(
+                [scaled_size[1], scaled_size[0], frame.shape[-1]],
+                frame.dtype,
+            )
+        else:
+            self._background = (self._background * self._attenuation_factor).astype(
+                frame.dtype
+            )
+
+        # top_left is the anchor coordinate from where we start drawing the frame on top of the background
+        # aim to draw it in the center of the background but transformations will move this point
+        top_left = (
+            np.array(self._background.shape[:2]) // 2 - np.array(frame.shape[:2]) // 2
+        )
+        top_left = (
+            coord_transformation.rel_to_abs(top_left[::-1]).round().astype(int)[::-1]
+        )
+        # box of the background that will be updated and the limits of it
+        background_y0, background_y1 = (top_left[0], top_left[0] + frame.shape[0])
+        background_x0, background_x1 = (top_left[1], top_left[1] + frame.shape[1])
+        background_size_y, background_size_x = self._background.shape[:2]
+
+        # define box of the frame that will be used
+        # if the scale is not enough to support the movement, warn the user but keep drawing
+        # cropping the frame so that the operation doesn't fail
+        frame_y0, frame_y1, frame_x0, frame_x1 = (0, frame.shape[0], 0, frame.shape[1])
+        if (
+            background_y0 < 0
+            or background_x0 < 0
+            or background_y1 > background_size_y
+            or background_x1 > background_size_x
+        ):
+            warn_once(
+                "moving_camera_scale is not enough to cover the range of camera movement, frame will be cropped"
+            )
+            # crop left or top of the frame if necessary
+            frame_y0 = max(-background_y0, 0)
+            frame_x0 = max(-background_x0, 0)
+            # crop right or bottom of the frame if necessary
+            frame_y1 = max(
+                min(background_size_y - background_y0, background_y1 - background_y0), 0
+            )
+            frame_x1 = max(
+                min(background_size_x - background_x0, background_x1 - background_x0), 0
+            )
+        # handle cases where the limits of the background become negative which numpy will interpret incorrectly
+        background_y0 = max(background_y0, 0)
+        background_x0 = max(background_x0, 0)
+        background_y1 = max(background_y1, 0)
+        background_x1 = max(background_x1, 0)
+        self._background[
+            background_y0:background_y1, background_x0:background_x1, :
+        ] = frame[frame_y0:frame_y1, frame_x0:frame_x1, :]
+        return self._background
diff --git a/norfair/drawing/path.py b/norfair/drawing/path.py
new file mode 100644
index 00000000..7f97f3dd
--- /dev/null
+++ b/norfair/drawing/path.py
@@ -0,0 +1,232 @@
+from collections import defaultdict
+from typing import Callable, Optional, Sequence, Tuple
+
+import numpy as np
+
+from norfair.drawing.color import Palette
+from norfair.drawing.drawer import Drawer
+from norfair.tracker import TrackedObject
+from norfair.utils import warn_once
+
+
+class Paths:
+    """
+    Class that draws the paths taken by a set of points of interest defined from the coordinates of each tracker estimation.
+
+    Parameters
+    ----------
+    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
+        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
+        and returns a list of points for which we want to draw their paths.
+
+        By default it is the mean point of all the points in the tracker.
+    thickness : Optional[int], optional
+        Thickness of the circles representing the paths of interest.
+    color : Optional[Tuple[int, int, int]], optional
+        [Color][norfair.drawing.Color] of the circles representing the paths of interest.
+ radius : Optional[int], optional + Radius of the circles representing the paths of interest. + attenuation : float, optional + A float number in [0, 1] that dictates the speed at which the path is erased. + if it is `0` then the path is never erased. + + Examples + -------- + >>> from norfair import Tracker, Video, Path + >>> video = Video("video.mp4") + >>> tracker = Tracker(...) + >>> path_drawer = Path() + >>> for frame in video: + >>> detections = get_detections(frame) # runs detector and returns Detections + >>> tracked_objects = tracker.update(detections) + >>> frame = path_drawer.draw(frame, tracked_objects) + >>> video.write(frame) + """ + + def __init__( + self, + get_points_to_draw: Optional[Callable[[np.array], np.array]] = None, + thickness: Optional[int] = None, + color: Optional[Tuple[int, int, int]] = None, + radius: Optional[int] = None, + attenuation: float = 0.01, + ): + if get_points_to_draw is None: + + def get_points_to_draw(points): + return [np.mean(np.array(points), axis=0)] + + self.get_points_to_draw = get_points_to_draw + + self.radius = radius + self.thickness = thickness + self.color = color + self.mask = None + self.attenuation_factor = 1 - attenuation + + def draw( + self, frame: np.ndarray, tracked_objects: Sequence[TrackedObject] + ) -> np.array: + """ + Draw the paths of the points interest on a frame. + + !!! warning + This method does **not** draw frames in place as other drawers do, the resulting frame is returned. + + Parameters + ---------- + frame : np.ndarray + The OpenCV frame to draw on. + tracked_objects : Sequence[TrackedObject] + List of [`TrackedObject`][norfair.tracker.TrackedObject] to get the points of interest in order to update the paths. + + Returns + ------- + np.array + The resulting frame. 
+        """
+        if self.mask is None:
+            frame_scale = frame.shape[0] / 100
+
+            if self.radius is None:
+                self.radius = int(max(frame_scale * 0.7, 1))
+            if self.thickness is None:
+                self.thickness = int(max(frame_scale / 7, 1))
+
+            self.mask = np.zeros(frame.shape, np.uint8)
+
+        self.mask = (self.mask * self.attenuation_factor).astype("uint8")
+
+        for obj in tracked_objects:
+            if obj.abs_to_rel is not None:
+                warn_once(
+                    "It seems that you're using the Path drawer together with MotionEstimator. This is not fully supported and the results will not be what's expected"
+                )
+
+            if self.color is None:
+                color = Palette.choose_color(obj.id)
+            else:
+                color = self.color
+
+            points_to_draw = self.get_points_to_draw(obj.estimate)
+
+            for point in points_to_draw:
+                frame = Drawer.circle(
+                    self.mask,
+                    position=tuple(point.astype(int)),
+                    radius=self.radius,
+                    color=color,
+                    thickness=self.thickness,
+                )
+
+        return Drawer.alpha_blend(self.mask, frame, alpha=1, beta=1)
+
+
+class AbsolutePaths:
+    """
+    Class that draws the absolute paths taken by a set of points.
+
+    Works just like [`Paths`][norfair.drawing.Paths] but supports camera motion.
+
+    !!! warning
+        This drawer is not optimized so it can be extremely slow. Performance degrades linearly with
+        `max_history * number_of_tracked_objects`.
+
+    Parameters
+    ----------
+    get_points_to_draw : Optional[Callable[[np.array], np.array]], optional
+        Function that takes a list of points (the `.estimate` attribute of a [`TrackedObject`][norfair.tracker.TrackedObject])
+        and returns a list of points for which we want to draw their paths.
+
+        By default it is the mean point of all the points in the tracker.
+    thickness : Optional[int], optional
+        Thickness of the circles representing the paths of interest.
+    color : Optional[Tuple[int, int, int]], optional
+        [Color][norfair.drawing.Color] of the circles representing the paths of interest.
+    radius : Optional[int], optional
+        Radius of the circles representing the paths of interest.
+ max_history : int, optional + Number of past points to include in the path. High values make the drawing slower + + Examples + -------- + >>> from norfair import Tracker, Video, Path + >>> video = Video("video.mp4") + >>> tracker = Tracker(...) + >>> path_drawer = Path() + >>> for frame in video: + >>> detections = get_detections(frame) # runs detector and returns Detections + >>> tracked_objects = tracker.update(detections) + >>> frame = path_drawer.draw(frame, tracked_objects) + >>> video.write(frame) + """ + + def __init__( + self, + get_points_to_draw: Optional[Callable[[np.array], np.array]] = None, + thickness: Optional[int] = None, + color: Optional[Tuple[int, int, int]] = None, + radius: Optional[int] = None, + max_history=20, + ): + + if get_points_to_draw is None: + + def get_points_to_draw(points): + return [np.mean(np.array(points), axis=0)] + + self.get_points_to_draw = get_points_to_draw + + self.radius = radius + self.thickness = thickness + self.color = color + self.past_points = defaultdict(lambda: []) + self.max_history = max_history + self.alphas = np.linspace(0.99, 0.01, max_history) + + def draw(self, frame, tracked_objects, coord_transform=None): + frame_scale = frame.shape[0] / 100 + + if self.radius is None: + self.radius = int(max(frame_scale * 0.7, 1)) + if self.thickness is None: + self.thickness = int(max(frame_scale / 7, 1)) + for obj in tracked_objects: + if not obj.live_points.any(): + continue + + if self.color is None: + color = Palette.choose_color(obj.id) + else: + color = self.color + + points_to_draw = self.get_points_to_draw(obj.get_estimate(absolute=True)) + + for point in coord_transform.abs_to_rel(points_to_draw): + Drawer.circle( + frame, + position=tuple(point.astype(int)), + radius=self.radius, + color=color, + thickness=self.thickness, + ) + + last = points_to_draw + for i, past_points in enumerate(self.past_points[obj.id]): + overlay = frame.copy() + last = coord_transform.abs_to_rel(last) + for j, point in 
enumerate(coord_transform.abs_to_rel(past_points)): + Drawer.line( + overlay, + tuple(last[j].astype(int)), + tuple(point.astype(int)), + color=color, + thickness=self.thickness, + ) + last = past_points + + alpha = self.alphas[i] + frame = Drawer.alpha_blend(overlay, frame, alpha=alpha) + self.past_points[obj.id].insert(0, points_to_draw) + self.past_points[obj.id] = self.past_points[obj.id][: self.max_history] + return frame diff --git a/norfair/drawing/utils.py b/norfair/drawing/utils.py new file mode 100644 index 00000000..19f825bd --- /dev/null +++ b/norfair/drawing/utils.py @@ -0,0 +1,24 @@ +from typing import TYPE_CHECKING, Optional, Sequence, Tuple + +import numpy as np + +if TYPE_CHECKING: + from .drawer import Drawable + + +def _centroid(tracked_points: np.ndarray) -> Tuple[int, int]: + num_points = tracked_points.shape[0] + sum_x = np.sum(tracked_points[:, 0]) + sum_y = np.sum(tracked_points[:, 1]) + return int(sum_x / num_points), int(sum_y / num_points) + + +def _build_text(drawable: "Drawable", draw_labels, draw_ids): + text = "" + if draw_labels and drawable.label is not None: + text = str(drawable.label) + if draw_ids and drawable.id is not None: + if len(text) > 0: + text += "-" + text += str(drawable.id) + return text