diff --git a/.gitmodules b/.gitmodules
index e69de29b..6d86e368 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -0,0 +1,8 @@
+[submodule "depthai-core"]
+    path = depthai-core
+    url = https://github.com/luxonis/depthai-core.git
+    branch = v3_develop
+[submodule "gen3-gaze-estimation-cpp/depthai-core"]
+    path = gen3-gaze-estimation-cpp/depthai-core
+    url = https://github.com/luxonis/depthai-core.git
+    branch = v3_develop
\ No newline at end of file
diff --git a/depthai-core b/depthai-core
new file mode 160000
index 00000000..777c2c8d
--- /dev/null
+++ b/depthai-core
@@ -0,0 +1 @@
+Subproject commit 777c2c8d853de7c35815d31cf7a512bffa3aac74
diff --git a/gen2-gaze-estimation/MultiMsgSync.py b/gen2-gaze-estimation/MultiMsgSync.py
index b96a0ee8..b4e0c1f3 100644
--- a/gen2-gaze-estimation/MultiMsgSync.py
+++ b/gen2-gaze-estimation/MultiMsgSync.py
@@ -39,8 +39,8 @@ def get_msgs(self):
 
         seq_remove = [] # Arr of sequence numbers to get deleted
 
         for seq, msgs in self.msgs.items():
+            print(seq)
             seq_remove.append(seq) # Will get removed from dict if we find synced msgs pair
-
             # Check if we have both detections and color frame with this sequence number
             if "color" in msgs and "len" in msgs:
diff --git a/gen2-gaze-estimation/face-detection-retail-0004.blob b/gen2-gaze-estimation/face-detection-retail-0004.blob
new file mode 100644
index 00000000..6b375880
Binary files /dev/null and b/gen2-gaze-estimation/face-detection-retail-0004.blob differ
diff --git a/gen2-gaze-estimation/main.py b/gen2-gaze-estimation/main.py
index c1dee4dc..da37812c 100644
--- a/gen2-gaze-estimation/main.py
+++ b/gen2-gaze-estimation/main.py
@@ -40,11 +40,13 @@ def create_output(name: str, output: dai.Node.Output):
 
 print("Creating Face Detection Neural Network...")
 face_det_nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
 face_det_nn.setConfidenceThreshold(0.5)
-face_det_nn.setBlobPath(blobconverter.from_zoo(
-    name="face-detection-retail-0004",
-    shaves=6,
-    version=openvino_version
-))
+#face_det_nn.setBlobPath(blobconverter.from_zoo(
+#    name="face-detection-retail-0004",
+#    shaves=6,
+#    version=openvino_version
+#))
+face_det_nn.setBlobPath("face-detection-retail-0004.blob")
+
 # Link Face ImageManip -> Face detection NN node
 face_det_manip.out.link(face_det_nn.input)
@@ -148,7 +150,6 @@ def create_output(name: str, output: dai.Node.Output):
 script.inputs['none'].setQueueSize(1)
 
 create_output('gaze', gaze_nn.out)
-
 #==================================================
 
 with dai.Device(pipeline) as device:
@@ -170,6 +171,7 @@ def create_output(name: str, output: dai.Node.Output):
 
         msgs = sync.get_msgs()
         if msgs is not None:
+            print("adasd")
             frame = msgs["color"].getCvFrame()
             dets = msgs["detection"].detections
             for i, detection in enumerate(dets):
@@ -195,4 +197,4 @@ def create_output(name: str, output: dai.Node.Output):
         cv2.imshow("Lasers", frame)
 
         if cv2.waitKey(1) == ord('q'):
-            break
+            break
\ No newline at end of file
diff --git a/gen3-gaze-estimation-cpp/.gitignore b/gen3-gaze-estimation-cpp/.gitignore
new file mode 100644
index 00000000..608b1d44
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/.gitignore
@@ -0,0 +1,2 @@
+.vscode/
+build/
\ No newline at end of file
diff --git a/gen3-gaze-estimation-cpp/.gitmodules b/gen3-gaze-estimation-cpp/.gitmodules
new file mode 100644
index 00000000..fcee8dad
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/.gitmodules
@@ -0,0 +1,4 @@
+[submodule "depthai-core"]
+    path = depthai-core
+    url = https://github.com/luxonis/depthai-core.git
+    branch = v3_develop
\ No newline at end of file
diff --git a/gen3-gaze-estimation-cpp/CMakeLists.txt b/gen3-gaze-estimation-cpp/CMakeLists.txt
new file mode 100644
index 00000000..4e6219cf
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/CMakeLists.txt
@@ -0,0 +1,55 @@
+cmake_minimum_required(VERSION 3.4)
+
+# Add depthai-core dependency
+add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/depthai-core EXCLUDE_FROM_ALL)
+
+# Create a project with name 'gen3'
+set(TARGET_NAME gen3)
+project(${TARGET_NAME})
+
+# Dependencies (optional, only used for example)
+find_package(OpenCV REQUIRED)
+
+# Add source files
+add_executable("${TARGET_NAME}"
+    src/main.cpp
+)
+
+# Link with libraries
+target_link_libraries(${TARGET_NAME}
+    PUBLIC
+        depthai::core
+        ${OpenCV_LIBS} # optional, used for example
+)
+
+# Copy files to /build
+file(COPY script.py DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+file(COPY face-detection-retail-0004.blob DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+file(COPY gaze-estimation-adas-0002.blob DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+file(COPY head-pose-estimation-adas-0001.blob DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+file(COPY landmarks-regression-retail-0009.blob DESTINATION "${CMAKE_CURRENT_BINARY_DIR}")
+
+
+# Treat missing return statements as errors
+if(NOT MSVC)
+    target_compile_options(${TARGET_NAME} PRIVATE $<$<COMPILE_LANGUAGE:CXX>:-Werror=return-type>)
+endif()
+
+# Set compiler features (c++17)
+set_property(TARGET ${TARGET_NAME} PROPERTY CXX_STANDARD 17)
+
+
+# Windows - Add runtime dependencies
+if(WIN32)
+    if(CMAKE_VERSION VERSION_LESS "3.21")
+        message(WARNING "CMake version less than 3.21 - automatic DLL handling not available. Make sure to copy required DLLs to the same folder as .exe")
+    else()
+        # TARGET_RUNTIME_DLLS generator expression available since CMake 3.21
+        set(depthai_dll_libraries "$<TARGET_RUNTIME_DLLS:${TARGET_NAME}>")
+        # Copy the required dlls
+        add_custom_command(TARGET ${TARGET_NAME} POST_BUILD COMMAND
+            ${CMAKE_COMMAND} -E copy_if_different ${depthai_dll_libraries} $<TARGET_FILE_DIR:${TARGET_NAME}>
+            COMMAND_EXPAND_LISTS
+        )
+    endif()
+endif()
diff --git a/gen3-gaze-estimation-cpp/depthai-core b/gen3-gaze-estimation-cpp/depthai-core
new file mode 160000
index 00000000..a851d042
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/depthai-core
@@ -0,0 +1 @@
+Subproject commit a851d04224aab3fa8b2fa41a9d4d7631a4c642ab
diff --git a/gen3-gaze-estimation-cpp/face-detection-retail-0004.blob b/gen3-gaze-estimation-cpp/face-detection-retail-0004.blob
new file mode 100644
index 00000000..3962e9b1
Binary files /dev/null and b/gen3-gaze-estimation-cpp/face-detection-retail-0004.blob differ
diff --git a/gen3-gaze-estimation-cpp/gaze-estimation-adas-0002.blob b/gen3-gaze-estimation-cpp/gaze-estimation-adas-0002.blob
new file mode 100644
index 00000000..cbecd66d
Binary files /dev/null and b/gen3-gaze-estimation-cpp/gaze-estimation-adas-0002.blob differ
diff --git a/gen3-gaze-estimation-cpp/head-pose-estimation-adas-0001.blob b/gen3-gaze-estimation-cpp/head-pose-estimation-adas-0001.blob
new file mode 100644
index 00000000..d8324a77
Binary files /dev/null and b/gen3-gaze-estimation-cpp/head-pose-estimation-adas-0001.blob differ
diff --git a/gen3-gaze-estimation-cpp/landmarks-regression-retail-0009.blob b/gen3-gaze-estimation-cpp/landmarks-regression-retail-0009.blob
new file mode 100644
index 00000000..194f9471
Binary files /dev/null and b/gen3-gaze-estimation-cpp/landmarks-regression-retail-0009.blob differ
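The CMakeLists above copies script.py and the four .blob files next to the binary, and main.cpp later opens them by bare filename, so the program only works when run from the build directory. A minimal sketch (not part of this PR; checkResources is a hypothetical helper, assuming std::filesystem from the C++17 standard set above) of failing fast with a clear message when the copied resources are missing:

// Sketch only: verify the files CMake copies into the build directory are present
// in the current working directory before constructing the pipeline.
#include <cstdlib>
#include <filesystem>
#include <iostream>

static void checkResources() {
    const char* required[] = {
        "script.py",
        "face-detection-retail-0004.blob",
        "gaze-estimation-adas-0002.blob",
        "head-pose-estimation-adas-0001.blob",
        "landmarks-regression-retail-0009.blob",
    };
    for(const auto* path : required) {
        if(!std::filesystem::exists(path)) {
            // Tell the user exactly which file is missing and why it is expected here
            std::cerr << "Missing resource: " << path
                      << " (run the binary from the build directory)\n";
            std::exit(EXIT_FAILURE);
        }
    }
}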
diff --git a/gen3-gaze-estimation-cpp/script.py b/gen3-gaze-estimation-cpp/script.py
new file mode 100644
index 00000000..8e31bc96
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/script.py
@@ -0,0 +1,152 @@
+import time
+sync = {} # Dict of messages
+
+def find_in_dict(target_seq, name):
+    if str(target_seq) in sync:
+        return sync[str(target_seq)][name]
+
+def add_to_dict(det, seq, name):
+    sync[str(seq)][name] = det
+
+def correct_bb(bb):
+    if bb.xmin < 0: bb.xmin = 0.001
+    if bb.ymin < 0: bb.ymin = 0.001
+    if bb.xmax > 1: bb.xmax = 0.999
+    if bb.ymax > 1: bb.ymax = 0.999
+
+def check_gaze_est(seq):
+    dict = sync[str(seq)]
+
+    if "left" in dict and "right" in dict and "angles" in dict:
+        # node.warn("GOT ALL 3")
+        # Send to gaze estimation NN
+        node.io['to_gaze_left'].send(dict['left'])
+        node.io['to_gaze_right'].send(dict['right'])
+        head_pose = NNData(6)
+        head_pose.setLayer("head_pose_angles", dict['angles'])
+        node.io['to_gaze_head'].send(head_pose)
+
+        # Clear previous results, up to and including this sequence number
+        for sq in list(sync):
+            del sync[str(sq)]
+            if str(seq) == str(sq):
+                return
+
+PAD = 0.15
+PAD2x = PAD * 2
+def get_eye_coords(x, y, det):
+    xdelta = det.xmax - det.xmin
+    ydelta = det.ymax - det.ymin
+
+    xmin = x - PAD
+    xmax = xmin + PAD2x
+    ymin = y - PAD
+    ymax = ymin + PAD2x
+
+    xmin2 = det.xmin + xdelta * xmin
+    xmax2 = det.xmin + xdelta * xmax
+    ymin2 = det.ymin + ydelta * ymin
+    ymax2 = det.ymin + ydelta * ymax
+    ret = (xmin2, ymin2, xmax2, ymax2)
+    # node.warn(f"Eye: {x}/{y}, Crop eyes: {ret}, det {det.xmin}, {det.ymin}, {det.xmax}, {det.ymax}")
+    return ret
+
+while True:
+    time.sleep(0.001)
+
+    preview = node.io['preview'].tryGet()
+    if preview is not None:
+        sync[str(preview.getSequenceNum())] = {
+            "frame": preview
+        }
+        # node.warn(f"New frame, {len(sync)}")
+
+    face_dets = node.io['face_det_in'].tryGet()
+    if face_dets is not None:
+        passthrough = node.io['face_pass'].get()
+        seq = passthrough.getSequenceNum()
+
+        # No detections, carry on
+        if len(face_dets.detections) == 0:
+            del sync[str(seq)]
+            continue
+
+        # node.warn(f"New detection {seq}")
+        if len(sync) == 0: continue
+        img = find_in_dict(seq, "frame")
+        if img is None: continue
+
+        add_to_dict(face_dets.detections[0], seq, "detections")
+
+        for det in face_dets.detections:
+            correct_bb(det)
+
+            # To head pose estimation model
+            cfg1 = ImageManipConfig()
+            cfg1.setCropRect(det.xmin, det.ymin, det.xmax, det.ymax)
+            cfg1.setResize(60, 60)
+            cfg1.setKeepAspectRatio(False)
+            node.io['headpose_cfg'].send(cfg1)
+            node.io['headpose_img'].send(img)
+
+            # To face landmark detection model
+            cfg2 = ImageManipConfig()
+            cfg2.setCropRect(det.xmin, det.ymin, det.xmax, det.ymax)
+            cfg2.setResize(48, 48)
+            cfg2.setKeepAspectRatio(False)
+            node.io['landmark_cfg'].send(cfg2)
+            node.io['landmark_img'].send(img)
+            break # Only 1 face at a time is currently supported
+
+    headpose = node.io['headpose_in'].tryGet()
+    if headpose is not None:
+        passthrough = node.io['headpose_pass'].get()
+        seq = passthrough.getSequenceNum()
+        # Face rotation in degrees
+        y = headpose.getLayerFp16('angle_y_fc')[0]
+        p = headpose.getLayerFp16('angle_p_fc')[0]
+        r = headpose.getLayerFp16('angle_r_fc')[0]
+        angles = [y, p, r]
+        # node.warn(f"angles {angles}")
+        add_to_dict(angles, seq, "angles")
+        check_gaze_est(seq)
+
+    landmark_in = node.io['landmark_in'].tryGet()
+    if landmark_in is not None:
+        passthrough = node.io['landmark_pass'].get()
+        seq = passthrough.getSequenceNum()
+
+        img = find_in_dict(seq, "frame")
+        det = find_in_dict(seq, "detections")
+        if img is None or det is None: continue
+
+        landmarks = landmark_in.getFirstLayerFp16()
+
+        # We need to crop left and right eye out of the face frame
+        left_cfg = ImageManipConfig()
+        left_cfg.setCropRect(*get_eye_coords(landmarks[0], landmarks[1], det))
+        left_cfg.setResize(60, 60)
+        left_cfg.setKeepAspectRatio(False)
+        node.io['left_manip_cfg'].send(left_cfg)
+        node.io['left_manip_img'].send(img)
+
+        right_cfg = ImageManipConfig()
+        right_cfg.setCropRect(*get_eye_coords(landmarks[2], landmarks[3], det))
+        right_cfg.setResize(60, 60)
+        right_cfg.setKeepAspectRatio(False)
+        node.io['right_manip_cfg'].send(right_cfg)
+        node.io['right_manip_img'].send(img)
+
+    left_eye = node.io['left_eye_in'].tryGet()
+    if left_eye is not None:
+        # node.warn("LEFT EYE GOT")
+        seq = left_eye.getSequenceNum()
+        add_to_dict(left_eye, seq, "left")
+        check_gaze_est(seq)
+
+    right_eye = node.io['right_eye_in'].tryGet()
+    if right_eye is not None:
+        # node.warn("RIGHT EYE GOT")
+        seq = right_eye.getSequenceNum()
+        add_to_dict(right_eye, seq, "right")
+        check_gaze_est(seq)
\ No newline at end of file
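get_eye_coords() in the script above pads each eye landmark by 15 % of the face box on every side and maps the padded square from face-crop coordinates back into full-frame normalized coordinates. For reference, a rough host-side C++ equivalent (eyeCrop is a hypothetical helper, not part of this PR):

// Hypothetical helper mirroring script.py's get_eye_coords(): 'lx'/'ly' are the eye
// landmark in face-crop coordinates (0..1), 'det' is the face detection box in
// full-frame coordinates (0..1). Returns {xmin, ymin, xmax, ymax} in frame coordinates.
#include <array>
#include "depthai/depthai.hpp"

static std::array<float, 4> eyeCrop(float lx, float ly, const dai::ImgDetection& det) {
    constexpr float PAD = 0.15f;                // same padding as script.py
    const float xdelta = det.xmax - det.xmin;   // face box width
    const float ydelta = det.ymax - det.ymin;   // face box height
    const float xmin = lx - PAD, xmax = lx + PAD;
    const float ymin = ly - PAD, ymax = ly + PAD;
    return {det.xmin + xdelta * xmin, det.ymin + ydelta * ymin,
            det.xmin + xdelta * xmax, det.ymin + ydelta * ymax};
}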
diff --git a/gen3-gaze-estimation-cpp/src/MultiMsgSync.cpp b/gen3-gaze-estimation-cpp/src/MultiMsgSync.cpp
new file mode 100644
index 00000000..e1d5a1a3
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/src/MultiMsgSync.cpp
@@ -0,0 +1,57 @@
+// Color frame (ImgFrame), object detection (ImgDetections) and gaze estimation (NNData)
+// messages all arrive to the host with some additional delay.
+// For each ImgFrame there's one ImgDetections msg, which has multiple detections, and for each
+// detection there's a NNData msg which contains the gaze estimation results.
+//
+// How it works:
+// Every ImgFrame, ImgDetections and NNData message has its own sequence number, by which we can sync messages.
+
+#include <iostream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+#include "depthai/depthai.hpp"
+
+class TwoStageHostSeqSync{
+  public:
+    TwoStageHostSeqSync(){
+        msgs.clear();
+    }
+    // name: "color", "detection", "landmarks" or "gaze"
+    void add_msg(std::shared_ptr<dai::MessageQueue> msg, std::string name){
+        int64_t f = -1;
+        if(name == "gaze" || name == "landmarks")
+            f = msg->get<dai::NNData>()->getSequenceNum();
+        else if(name == "color")
+            f = msg->get<dai::ImgFrame>()->getSequenceNum();
+        else f = msg->get<dai::ImgDetections>()->getSequenceNum();
+        auto seq = std::to_string(f);
+        msgs[seq][name].push_back(msg);
+    }
+
+    std::pair<std::map<std::string, std::vector<std::shared_ptr<dai::MessageQueue>>>, int> get_msgs(){
+        //std::cout << "msgs size: " << msgs.size() << "\n";
+        std::vector<std::string> seq_remove;
+
+        for(auto it = msgs.begin(); it != msgs.end(); it++){
+            auto seq = it->first;
+            auto r_msgs = it->second;
+
+            seq_remove.push_back(seq); // Will get removed from dict if we find synced msgs pairs
+            // Check if we have both detections and color frame with this sequence number
+            if(r_msgs.count("color") > 0 && r_msgs.count("detection") > 0){
+                // Check if all detected objects (faces) have finished gaze inference
+                if(0 < r_msgs["gaze"].size()){
+                    // We have synced msgs, remove previous msgs (memory cleaning)
+                    for(auto rm : seq_remove){
+                        msgs[rm].clear();
+                    }
+                    return {r_msgs, 0}; // Return synced msgs
+                }
+            }
+        }
+        return {msgs["-1"], -1}; // No synced msgs
+    }
+
+  private:
+    std::map<std::string, std::map<std::string, std::vector<std::shared_ptr<dai::MessageQueue>>>> msgs;
+};
\ No newline at end of file
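TwoStageHostSeqSync is not wired up in main.cpp below (the device-side dai::node::Sync is used instead), but a host-side loop built on it would look roughly like the sketch here, mirroring the commented-out code in main.cpp and assuming each stream was exposed with createOutputQueue(). Note that add_msg() as written consumes a message from the queue only to read its sequence number, so this path would need more work before it is equivalent to the Sync-node path.

// Sketch only: host-side syncing with TwoStageHostSeqSync instead of the Sync node.
TwoStageHostSeqSync hostSync;
std::map<std::string, std::shared_ptr<dai::MessageQueue>> queues;
// queues["color"], queues["detection"], queues["landmarks"], queues["gaze"] would be
// created with e.g. cam->video.createOutputQueue(), face_det_nn->out.createOutputQueue(), ...

while(true) {
    for(const auto& [name, queue] : queues) {
        if(queue->has()) hostSync.add_msg(queue, name);  // group by sequence number
    }
    auto [synced, ok] = hostSync.get_msgs();
    if(ok != 0) continue;  // nothing synced yet
    auto frame = synced["color"][0]->get<dai::ImgFrame>()->getCvFrame();
    auto dets  = synced["detection"][0]->get<dai::ImgDetections>()->detections;
    // ... draw gaze vectors as in the main loop of main.cpp ...
}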
diff --git a/gen3-gaze-estimation-cpp/src/bbox.cpp b/gen3-gaze-estimation-cpp/src/bbox.cpp
new file mode 100644
index 00000000..dba5958e
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/src/bbox.cpp
@@ -0,0 +1,53 @@
+#include <array>
+#include <vector>
+#include "depthai/depthai.hpp"
+
+class Point{
+    // Used within the BoundingBox class when dealing with points.
+  public:
+    Point(float x, float y) : x(x), y(y){}
+    // Denormalize the point to pixel coordinates (0..frame width, 0..frame height)
+    std::array<int, 2> denormalize(std::vector<int> frame_shape){
+        return {(int)(x * (float)frame_shape[1]), (int)(y * (float)frame_shape[0])};
+    }
+
+  private:
+    float x, y;
+};
+
+
+class BoundingBox{
+    // This class helps with bounding box calculations. It can be used to calculate relative bounding boxes,
+    // map points from relative to absolute coordinates and vice versa, crop frames, etc.
+  public:
+    BoundingBox(dai::ImgDetection bbox){
+        xmin = bbox.xmin, ymin = bbox.ymin, xmax = bbox.xmax, ymax = bbox.ymax;
+        width = xmax - xmin, height = ymax - ymin;
+    }
+
+    std::array<int, 4> denormalize(std::vector<int> frame_shape){
+        /*
+        Denormalize the bounding box to pixel coordinates (0..frame width, 0..frame height).
+        Useful when you want to draw the bounding box on the frame.
+        */
+        return {
+            (int)(frame_shape[1] * xmin), (int)(frame_shape[0] * ymin),
+            (int)(frame_shape[1] * xmax), (int)(frame_shape[0] * ymax)
+        };
+    }
+
+    Point map_point(float x, float y){
+        /*
+        Useful when you have a point inside the bounding box, and you want to map it to the frame.
+        Example: You run face detection, create a BoundingBox from the result, and also run
+        facial landmarks detection on the cropped face frame. The landmarks are relative
+        to the face bounding box, but you want to draw them on the original frame.
+        */
+        float mapped_x = xmin + width * x, mapped_y = ymin + height * y;
+        return Point(mapped_x, mapped_y);
+    }
+  private:
+    float xmin, ymin, xmax, ymax, width, height;
+};
\ No newline at end of file
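As a concrete example of the two helpers above (illustrative numbers, not from this PR): with a face box of (xmin, ymin, xmax, ymax) = (0.25, 0.25, 0.75, 0.75) on a 1072x1072 frame, a landmark at (0.3, 0.4) relative to the face crop maps back to (0.25 + 0.5*0.3, 0.25 + 0.5*0.4) = (0.40, 0.45), i.e. pixel (428, 482):

// Illustrative only: map a face-crop landmark back to full-frame pixels.
dai::ImgDetection det;
det.xmin = 0.25f; det.ymin = 0.25f; det.xmax = 0.75f; det.ymax = 0.75f;
BoundingBox face(det);
auto px = face.map_point(0.3f, 0.4f).denormalize({1072, 1072});  // frame_shape = {rows, cols}
// px == {428, 482}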
diff --git a/gen3-gaze-estimation-cpp/src/main.cpp b/gen3-gaze-estimation-cpp/src/main.cpp
new file mode 100644
index 00000000..f79aeb75
--- /dev/null
+++ b/gen3-gaze-estimation-cpp/src/main.cpp
@@ -0,0 +1,226 @@
+#include <fstream>
+#include <iostream>
+#include <vector>
+#include <opencv2/opencv.hpp>
+#include "depthai/depthai.hpp"
+#include "MultiMsgSync.cpp"
+#include "bbox.cpp"
+
+int main(){
+    dai::Pipeline pipeline(true);
+    pipeline.setOpenVINOVersion(dai::OpenVINO::VERSION_2021_4);
+    std::tuple<int, int> VIDEO_SIZE = {1072, 1072};
+
+    auto cam = pipeline.create<dai::node::ColorCamera>();
+    // For ImageManip rotate you need input frame of multiple of 16
+    cam->setPreviewSize(1072, 1072);
+    cam->setVideoSize(VIDEO_SIZE);
+    cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P);
+    cam->setInterleaved(false);
+    cam->setPreviewNumFramesPool(20);
+    cam->setFps(20);
+    cam->setBoardSocket(dai::CameraBoardSocket::CAM_A);
+
+    auto sync = pipeline.create<dai::node::Sync>();
+    sync->setRunOnHost(true);
+    //sync->setSyncThreshold(std::chrono::nanoseconds(500'000'000)); // 0.5 sec
+    auto msgs_queue = sync->out.createOutputQueue();
+
+    //std::map<std::string, std::shared_ptr<dai::MessageQueue>> queues;
+    //queues["color"] = cam->video.createOutputQueue();
+    //auto videoOut = cam->video.createOutputQueue();
+    cam->video.link(sync->inputs["color"]);
+
+    // ImageManip that will crop the frame before sending it to the Face detection NN node
+    auto face_det_manip = pipeline.create<dai::node::ImageManip>();
+    face_det_manip->initialConfig.setResize(300, 300);
+    face_det_manip->setMaxOutputFrameSize(300*300*3);
+    cam->preview.link(face_det_manip->inputImage);
+
+
+    //=================[ FACE DETECTION ]=================
+    std::cout << "Creating Face Detection Neural Network..." << std::endl;
+    auto face_det_nn = pipeline.create<dai::node::MobileNetDetectionNetwork>()->build();
+    face_det_nn->setConfidenceThreshold(0.5);
+    face_det_nn->setBlobPath("face-detection-retail-0004.blob");
+
+    // Link Face ImageManip -> Face detection NN node
+    face_det_manip->out.link(face_det_nn->input);
+
+    //queues["detection"] = face_det_nn->out.createOutputQueue();
+    face_det_nn->out.link(sync->inputs["detection"]);
+
+    //=================[ SCRIPT NODE ]=================
+    // Script node will take the output from the face detection NN as an input and set ImageManipConfig
+    // to the head pose / landmarks ImageManips to crop the initial frame
+    auto script = pipeline.create<dai::node::Script>();
+    script->setProcessor(dai::ProcessorType::LEON_CSS);
+
+    face_det_nn->out.link(script->inputs["face_det_in"]);
+    face_det_nn->passthrough.link(script->inputs["face_pass"]);
+
+    cam->preview.link(script->inputs["preview"]);
+
+    std::ifstream f("script.py", std::ios::binary);
+    std::vector<std::uint8_t> buffer(std::istreambuf_iterator<char>(f), {});
+    script->setScript(buffer);
+
+    //=================[ HEAD POSE ESTIMATION ]=================
+    auto headpose_manip = pipeline.create<dai::node::ImageManip>();
+    headpose_manip->initialConfig.setResize(60, 60);
+    script->outputs["headpose_cfg"].link(headpose_manip->inputConfig);
+    script->outputs["headpose_img"].link(headpose_manip->inputImage);
+
+    auto headpose_nn = pipeline.create<dai::node::NeuralNetwork>()->build();
+    headpose_nn->setBlobPath("head-pose-estimation-adas-0001.blob");
+    headpose_manip->out.link(headpose_nn->input);
+
+    headpose_nn->out.link(script->inputs["headpose_in"]);
+    headpose_nn->passthrough.link(script->inputs["headpose_pass"]);
+
+    //=================[ LANDMARKS DETECTION ]=================
+    auto landmark_manip = pipeline.create<dai::node::ImageManip>();
+    landmark_manip->initialConfig.setResize(48, 48);
+    script->outputs["landmark_cfg"].link(landmark_manip->inputConfig);
+    script->outputs["landmark_img"].link(landmark_manip->inputImage);
+
+    auto landmark_nn = pipeline.create<dai::node::NeuralNetwork>()->build();
+    landmark_nn->setBlobPath("landmarks-regression-retail-0009.blob");
+    landmark_manip->out.link(landmark_nn->input);
+
+
+    landmark_nn->out.link(script->inputs["landmark_in"]);
+    landmark_nn->passthrough.link(script->inputs["landmark_pass"]);
+
+    //queues["landmarks"] = landmark_nn->out.createOutputQueue();
+    landmark_nn->out.link(sync->inputs["landmarks"]);
+
+    //=================[ LEFT EYE CROP ]=================
+    auto left_manip = pipeline.create<dai::node::ImageManip>();
+    left_manip->initialConfig.setResize(60, 60);
+    left_manip->inputConfig.setWaitForMessage(true);
+    script->outputs["left_manip_img"].link(left_manip->inputImage);
+    script->outputs["left_manip_cfg"].link(left_manip->inputConfig);
+    left_manip->out.link(script->inputs["left_eye_in"]);
+
+    //=================[ RIGHT EYE CROP ]=================
+    auto right_manip = pipeline.create<dai::node::ImageManip>();
+    right_manip->initialConfig.setResize(60, 60);
+    right_manip->inputConfig.setWaitForMessage(true);
+    script->outputs["right_manip_img"].link(right_manip->inputImage);
+    script->outputs["right_manip_cfg"].link(right_manip->inputConfig);
+    right_manip->out.link(script->inputs["right_eye_in"]);
+
+    //=================[ GAZE ESTIMATION ]=================
+    auto gaze_nn = pipeline.create<dai::node::NeuralNetwork>()->build();
+    gaze_nn->setBlobPath("gaze-estimation-adas-0002.blob");
+
+    std::vector<std::string> SCRIPT_OUTPUT_NAMES = {"to_gaze_head", "to_gaze_left", "to_gaze_right"},
+                             NN_NAMES = {"head_pose_angles", "left_eye_image", "right_eye_image"};
+    for(size_t i = 0; i < SCRIPT_OUTPUT_NAMES.size(); i++){
+        auto script_name = SCRIPT_OUTPUT_NAMES[i];
+        auto nn_name = NN_NAMES[i];
+        script->outputs[script_name].link(gaze_nn->inputs[nn_name]);
+        // Set NN input to blocking and to not reuse previous msgs
+        gaze_nn->inputs[nn_name].setBlocking(true);
+        gaze_nn->inputs[nn_name].setReusePreviousMessage(false);
+    }
+
+    //queues["gaze"] = gaze_nn->out.createOutputQueue();
+    gaze_nn->out.link(sync->inputs["gaze"]);
+
+
+    // Workaround, so NNData (output of gaze_nn) will take seq_num from this message (FW bug)
+    // Will be fixed in depthai 2.24
+    gaze_nn->passthroughs["left_eye_image"].link(script->inputs["none"]);
+    script->inputs["none"].setBlocking(false);
+    script->inputs["none"].setMaxSize(1);
+
+    //==================================================
+    //TwoStageHostSeqSync sync;
+
+
+    // landmarks, gaze
+    std::vector<std::string> names = {"color", "detection", "landmarks", "gaze"};
+    pipeline.start();
+
+    while(pipeline.isRunning()) {
+        // ???
+        std::cout << "main loop\n";
+        //if(videoOut->has())
+        //    cv::imshow("video", videoOut->get<dai::ImgFrame>()->getCvFrame());
+        /*
+        for(auto name : names){
+            if(queues[name]->has()){
+                auto msg = queues[name];
+                sync->inputs[name].send(msg->get());
+
+                //sync.add_msg(msg, name);
+                if(name == "color"){
+                    cv::imshow("video", msg->get<dai::ImgFrame>()->getCvFrame());
+                }
+            }
+        }
+        */
+
+        int key = cv::waitKey(1);
+        if(key == 'q' || key == 'Q') {
+            pipeline.stop();
+            break;
+        }
+        //continue;
+        //auto msgs = sync.get_msgs();
+        auto msgs = msgs_queue->get<dai::MessageGroup>();
+        std::cout << "ASD\n";
+        if(msgs == nullptr) continue;
+        std::cout << "here\n";
+        auto frame = msgs->get<dai::ImgFrame>("color")->getCvFrame();
+        auto dets = msgs->get<dai::ImgDetections>("detection")->detections;
+
+        //auto frame = msgs.first["color"][0]->get<dai::ImgFrame>()->getCvFrame();
+        //auto dets = msgs.first["detection"][0]->get<dai::ImgDetections>()->detections;
+
+        for(size_t i = 0; i < dets.size(); i++){
+            auto detection = dets[i];
+            BoundingBox det(detection);
+            // replaced top-left and bottom-right with one array (easier impl)
+            auto pts = det.denormalize({frame.rows, frame.cols});
+
+            cv::rectangle(frame, cv::Point(pts[0], pts[1]), cv::Point(pts[2], pts[3]),
+                          cv::Scalar(10, 245, 10), 1);
+
+            //auto gaze_ptr = msgs.first["gaze"][i]->get<dai::NNData>();
+            auto gaze_ptr = msgs->get<dai::NNData>("gaze");
+            auto gaze = gaze_ptr->getTensor<float>(gaze_ptr->getAllLayerNames()[0], 0);
+
+            auto gaze_x = (int)(gaze[0]*100.f), gaze_y = (int)(gaze[1]*100.f);
+
+            //auto landmarks_ptr = msgs.first["landmarks"][i]->get<dai::NNData>();
+            auto landmarks_ptr = msgs->get<dai::NNData>("landmarks");
+            auto xlandmarks = landmarks_ptr->getTensor<float>(landmarks_ptr->getAllLayerNames()[0], 0);
+            std::vector<float> landmarks(xlandmarks.begin(), xlandmarks.end());
+
+            int colors[5][3] = {
+                {0, 127, 255},
+                {0, 127, 255},
+                {255, 0, 127},
+                {127, 255, 0},
+                {127, 255, 0},
+            };
+            for(size_t lm_i = 0; lm_i < landmarks.size()/2; lm_i++){
+                // 0,1 - left eye, 2,3 - right eye, 4,5 - nose tip, 6,7 - left mouth, 8,9 - right mouth
+                auto x = landmarks[lm_i*2], y = landmarks[lm_i*2+1];
+                auto point = det.map_point(x, y).denormalize({frame.rows, frame.cols});
+                if(lm_i <= 1){ // Draw arrows from left eye & right eye
+                    cv::arrowedLine(frame, cv::Point(point[0], point[1]),
+                                    cv::Point((point[0] + gaze_x*5), (point[1] - gaze_y*5)),
+                                    cv::Scalar(colors[lm_i][0], colors[lm_i][1], colors[lm_i][2]), 3);
+                }
+                else cv::circle(frame, cv::Point(point[0], point[1]), 2,
+                                cv::Scalar(colors[lm_i][0], colors[lm_i][1], colors[lm_i][2]), 2);
+            }
+
+        }
+
+        cv::imshow("Lasers", frame);
+    }
+    return 0;
+}
\ No newline at end of file
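One note on the main loop above: msgs_queue->get() blocks until the Sync node emits a full group, so the cv::waitKey(1) handling only runs once per synced set of messages. A non-blocking variant could look roughly like this sketch (assuming this depthai-core revision exposes MessageQueue::tryGet, as the v2 examples do for output queues):

// Sketch only: poll the queue instead of blocking, so the preview window stays responsive.
auto msgs = msgs_queue->tryGet<dai::MessageGroup>();
if(msgs == nullptr) {
    if(cv::waitKey(1) == 'q') { pipeline.stop(); break; }
    continue;  // nothing synced yet, keep the window alive
}
// ... draw detections and gaze vectors exactly as in the loop above ...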