diff --git a/.github/workflows/build_ros1.yml b/.github/workflows/build_ros1.yml
new file mode 100644
index 0000000..24bb384
--- /dev/null
+++ b/.github/workflows/build_ros1.yml
@@ -0,0 +1,36 @@
+name: ROS 1 Workflow
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+
+jobs:
+ build_2004:
+ name: "ROS1 Ubuntu 20.04"
+ runs-on: ubuntu-latest
+ steps:
+ - name: Code Checkout
+ uses: actions/checkout@v2
+ - name: Reconfigure git to use HTTP authentication
+ run: |
+ git config --global url."https://github.com/".insteadOf git@github.com: &&
+ git config --global url."https://".insteadOf git://
+ - name: Create Workspace and Docker Image
+ run: |
+ export REPO=$(basename $GITHUB_REPOSITORY) &&
+ cd $GITHUB_WORKSPACE/.. && mkdir src/ && cd src/ &&
+ git clone https://github.com/rpng/open_vins.git &&
+ git clone https://github.com/ethz-asl/maplab.git --recursive &&
+ cd .. && mv $REPO/ src/ && mkdir $REPO/ && mv src/ $REPO/ && cd $REPO/ &&
+ docker build -t ov_maplab -f $GITHUB_WORKSPACE/src/$REPO/Dockerfile_ros1_20_04 .
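+ # Note: after the moves above, $GITHUB_WORKSPACE is a catkin-style workspace whose src/
+ # folder holds this repo alongside open_vins and maplab, which is the layout the
+ # in-container catkin build below expects.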
+ - name: Run Build in Docker
+ run: >
+ docker run -t --mount type=bind,source=$GITHUB_WORKSPACE,target=/catkin_ws ov_maplab /bin/bash -c
+ "cd /catkin_ws
+ && catkin init
+ && catkin config --merge-devel
+ && catkin config --extend /opt/ros/noetic
+ && catkin build ov_maplab"
+
+
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6399100..fb86b4a 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -7,11 +7,16 @@ project(ov_maplab)
find_package(catkin REQUIRED COMPONENTS roscpp rosbag tf std_msgs geometry_msgs sensor_msgs nav_msgs visualization_msgs cv_bridge ov_core ov_msckf vi_map vio_common feature_tracking online_map_builders console_common)
add_definitions(-DROS_AVAILABLE=1)
-# Include libraries
+# Include libraries (if we don't have OpenCV 3, then fall back to OpenCV 4)
+# The OpenCV version needs to match the one used by cv_bridge otherwise you will get a segmentation fault!
find_package(Eigen3 REQUIRED)
-find_package(OpenCV 3 REQUIRED)
+find_package(OpenCV 3 QUIET)
+if (NOT OpenCV_FOUND)
+ find_package(OpenCV 4 REQUIRED)
+endif ()
find_package(Boost REQUIRED COMPONENTS system filesystem thread date_time)
-message(STATUS "OPENCV: " ${OpenCV_VERSION} " | BOOST: " ${Boost_VERSION})
+find_package(Ceres REQUIRED)
+message(STATUS "OPENCV: " ${OpenCV_VERSION} " | BOOST: " ${Boost_VERSION} " | CERES: " ${Ceres_VERSION})
# Describe catkin project
catkin_package(
@@ -36,6 +41,7 @@ include_directories(
src
${EIGEN3_INCLUDE_DIR}
${Boost_INCLUDE_DIRS}
+ ${CERES_INCLUDE_DIRS}
${catkin_INCLUDE_DIRS}
)
@@ -43,6 +49,7 @@ include_directories(
list(APPEND thirdparty_libraries
${Boost_LIBRARIES}
${OpenCV_LIBRARIES}
+ ${CERES_LIBRARIES}
${catkin_LIBRARIES}
)
diff --git a/Dockerfile b/Dockerfile_ros1_20_04
similarity index 64%
rename from Dockerfile
rename to Dockerfile_ros1_20_04
index bd8961c..7725b7e 100644
--- a/Dockerfile
+++ b/Dockerfile_ros1_20_04
@@ -1,4 +1,4 @@
-FROM osrf/ros:melodic-desktop-full
+FROM osrf/ros:noetic-desktop-full
# =========================================================
# =========================================================
@@ -14,14 +14,25 @@ FROM osrf/ros:melodic-desktop-full
# Dependencies we use, catkin tools is very good build system
# Also some helper utilities for fast in terminal edits (nano etc)
RUN apt-get update && apt-get install -y libeigen3-dev nano git
-RUN sudo apt-get install -y python-catkin-tools
-
-# Seems this has Python 3.6 installed on it...
-RUN sudo apt-get install -y python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk
+RUN sudo apt-get install -y python3-catkin-tools python3-osrf-pycommon
+
+# Ceres solver install and setup
+RUN sudo apt-get install -y cmake libgoogle-glog-dev libgflags-dev libatlas-base-dev libeigen3-dev libsuitesparse-dev libceres-dev
+# ENV CERES_VERSION="2.0.0"
+# RUN git clone https://ceres-solver.googlesource.com/ceres-solver && \
+# cd ceres-solver && \
+# git checkout tags/${CERES_VERSION} && \
+# mkdir build && cd build && \
+# cmake .. && \
+# make -j$(nproc) install && \
+# rm -rf ../../ceres-solver
+
+# Seems this has Python 3.8 installed on it...
+RUN apt-get update && apt-get install -y python3-dev python3-matplotlib python3-numpy python3-psutil python3-tk
# Maplab dependencies
# https://github.com/ethz-asl/maplab/wiki/Installation-Ubuntu
-RUN sudo apt install -y autotools-dev ccache doxygen dh-autoreconf git liblapack-dev libblas-dev libgtest-dev libreadline-dev libssh2-1-dev pylint clang-format python-autopep8 python-catkin-tools python-pip python-git python-setuptools python-termcolor python-wstool libatlas3-base
+RUN sudo apt install -y autotools-dev ccache doxygen dh-autoreconf git liblapack-dev libblas-dev libgtest-dev libreadline-dev libssh2-1-dev pylint clang-format-6.0 python3-autopep8 python3-catkin-tools python3-pip python3-git python3-setuptools python3-termcolor python3-wstool libatlas3-base libv4l-dev
RUN sudo pip install requests
# Install CMake 3.13.5
@@ -33,7 +44,7 @@ RUN cmake --version
# Install deps needed for clion remote debugging
# https://blog.jetbrains.com/clion/2020/01/using-docker-with-clion/
-RUN sed -i '6i\source "/catkin_ws/devel/setup.bash"\' /ros_entrypoint.sh
+# RUN sed -i '6i\source "/catkin_ws/devel/setup.bash"\' /ros_entrypoint.sh
RUN apt-get update && apt-get install -y ssh build-essential gcc g++ \
gdb clang cmake rsync tar python && apt-get clean
RUN ( \
diff --git a/ReadMe.md b/ReadMe.md
index b25a028..dd0cc42 100644
--- a/ReadMe.md
+++ b/ReadMe.md
@@ -1,11 +1,13 @@
# OpenVINS Maplab Interface
-Here we have our interface wrapper for exporting visual-inertial runs from [OpenVINS](https://github.com/rpng/open_vins) into the ViMap structure taken by [maplab](https://github.com/ethz-asl/maplab). The state estimates and raw images are appended to the ViMap as OpenVINS runs through a dataset. After completion of the dataset, we re-extract features and triangulate them due to the incompatibilities of the two frontends. Maplab requires BRISK or FREAK descriptors, while OpenVINS works with KLT or ORB feature tracking. In the future we will try to only extract descriptors on tracked features from OpenVINS, but for now we just re-detect for simplicity. We have tested this on the [EurocMav](https://docs.openvins.com/gs-datasets.html#gs-data-euroc) and [TUM-VI](https://docs.openvins.com/gs-datasets.html#gs-data-tumvi) datasets and have had good success with merging the different runs and optimizing the resulting graph. To ensure that we are able to compile maplab, we provide a [docker image](Dockerfile) for Ubuntu 18.04 which has all dependencies required.
+[![ROS 1 Workflow](https://github.com/rpng/ov_maplab/actions/workflows/build_ros1.yml/badge.svg)](https://github.com/rpng/ov_maplab/actions/workflows/build_ros1.yml)
+
+Here we have our interface wrapper for exporting visual-inertial runs from [OpenVINS](https://github.com/rpng/open_vins) into the ViMap structure used by [maplab](https://github.com/ethz-asl/maplab). The state estimates and raw images are appended to the ViMap as OpenVINS runs through a dataset. After the dataset completes, we re-extract and triangulate features, since the two frontends are incompatible: maplab requires BRISK or FREAK descriptors, while OpenVINS uses KLT or ORB feature tracking. In the future we will try to extract descriptors only on features already tracked by OpenVINS, but for now we simply re-detect for simplicity. We have tested this on the [EurocMav](https://docs.openvins.com/gs-datasets.html#gs-data-euroc) and [TUM-VI](https://docs.openvins.com/gs-datasets.html#gs-data-tumvi) datasets and have had good success merging the different runs and optimizing the resulting graph. To ensure that maplab can be compiled, we provide a [docker image](Dockerfile_ros1_20_04) for Ubuntu 20.04 with all required dependencies.
## Dependencies
* OpenVINS (v2.7 release) - https://docs.openvins.com/gs-installing.html
-* maplab (develop v1, 18.04 release) - https://github.com/ethz-asl/maplab/wiki/Installation-Ubuntu
+* maplab (v2.0, 20.04 release) - https://github.com/ethz-asl/maplab/wiki/Installation-Ubuntu
* Docker - https://docs.docker.com/get-docker/
## Installation Guide
@@ -20,11 +22,11 @@ git clone https://github.com/rpng/open_vins.git
git clone https://github.com/ethz-asl/maplab.git --recursive
# switch open_vins to last tested commit (might build with newer)
cd open_vins/
-git checkout 2b506eeedd0b158c014641b9240a62ae80f6d7a0
+git checkout 4534a2f32d4763bdc8c95121b3292c7423e12aca
cd ..
# switch maplab to last tested commit (might build with newer)
cd maplab/
-git checkout 483daf4988a76c15be362fd017ec78581c4f88d9
+git checkout 0b4868efeb292851d71f98d31a1e6bb40ebb244b
git submodule update --init --recursive
cd ..
```
@@ -52,8 +54,9 @@ ov_docker ov_maplab bash
cd catkin_ws/
catkin init
catkin config --merge-devel
-catkin config --extend /opt/ros/melodic
-catkin build maplab ov_maplab ov_data -j4
+catkin config --extend /opt/ros/noetic
+catkin config --cmake-args -DCMAKE_BUILD_TYPE=Release
+catkin build maplab ov_maplab -j4
catkin build ov_maplab -j4 --no-deps # after first build
source devel/setup.bash
```
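+
+After sourcing the workspace, a single dataset can be processed with the serial launch file. This is a usage sketch based on the provided processing scripts (see `scripts/process_eurocmav.sh`); the config and dataset names are examples and should match your own setup:
+
+```bash
+roslaunch ov_maplab serial.launch config:=euroc_mav dataset:=V1_01_easy max_cameras:=2 use_stereo:=true
+```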
diff --git a/docs/Screenshot from 2023-08-13 13-42-36.png b/docs/Screenshot from 2023-08-13 13-42-36.png
new file mode 100644
index 0000000..920c776
Binary files /dev/null and b/docs/Screenshot from 2023-08-13 13-42-36.png differ
diff --git a/entrypoint.sh b/entrypoint.sh
new file mode 100755
index 0000000..290c89c
--- /dev/null
+++ b/entrypoint.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+set -e
+
+# setup ros environment
+# ensure you have already built the workspace!
+source "/catkin_ws/devel/setup.bash" --
+exec "$@"
diff --git a/launch/serial.launch b/launch/serial.launch
index 2fb6047..b2be782 100644
--- a/launch/serial.launch
+++ b/launch/serial.launch
@@ -21,7 +21,7 @@
-
+
diff --git a/package.xml b/package.xml
index d8b1890..b31513f 100644
--- a/package.xml
+++ b/package.xml
@@ -29,6 +29,7 @@
visualization_msgs
cv_bridge
ov_core
+ ov_data
ov_msckf
vi_map
vio_common
@@ -48,6 +49,8 @@
visualization_msgs
cv_bridge
ov_core
+ ov_data
+ ov_eval
ov_msckf
vi_map
vio_common
diff --git a/scripts/commands_eurocmav.yaml b/scripts/commands_eurocmav.yaml
new file mode 100644
index 0000000..9336002
--- /dev/null
+++ b/scripts/commands_eurocmav.yaml
@@ -0,0 +1,30 @@
+
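+# Batch control file for maplab's batch_runner (invoked from scripts/process_eurocmav.sh).
+# Assumed behavior: the runner loads each map listed under vi_map_folder_paths and executes
+# the commands below on it, presumably completing the empty --map_folder= / --export_path=
+# values relative to that map's folder.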
+vi_map_folder_paths:
+ - /datasets/euroc_mav/maplab/V1_01_easy
+ - /datasets/euroc_mav/maplab/V1_02_medium
+ - /datasets/euroc_mav/maplab/V1_03_difficult
+ - /datasets/euroc_mav/maplab/V2_01_easy
+ - /datasets/euroc_mav/maplab/V2_02_medium
+ - /datasets/euroc_mav/maplab/V2_03_difficult
+commands:
+ - load --map_folder=
+ - retriangulate_landmarks
+ - v --vis_color_by_mission
+ - optvi --ba_num_iterations=5 -ba_visualize_every_n_iterations=1
+ - retriangulate_landmarks
+ - loopclosure_all_missions
+ - optvi --ba_num_iterations=5 -ba_visualize_every_n_iterations=1
+ - retriangulate_landmarks
+ - loopclosure_all_missions
+ - optvi --ba_num_iterations=30 -ba_visualize_every_n_iterations=5
+ - evaluate_landmark_quality
+ - remove_bad_landmarks
+ - save --map_folder=_result
+ - export_to_openvins --overwrite --export_path=_result
+ - export_trajectory_to_csv --overwrite --export_path=_result
+ - keyframe_heuristic
+ - v --vis_color_by_mission
+ - save --map_folder=_result_keyframed
+ - export_to_openvins --overwrite --export_path=_result_keyframed
+ - export_trajectory_to_csv --overwrite --export_path=_result_keyframed
+
diff --git a/scripts/commands_rpngplane.yaml b/scripts/commands_rpngplane.yaml
new file mode 100644
index 0000000..f8c43fd
--- /dev/null
+++ b/scripts/commands_rpngplane.yaml
@@ -0,0 +1,32 @@
+
+vi_map_folder_paths:
+ - /datasets/rpng_plane/maplab/table_01
+ - /datasets/rpng_plane/maplab/table_02
+ - /datasets/rpng_plane/maplab/table_03
+ - /datasets/rpng_plane/maplab/table_04
+ - /datasets/rpng_plane/maplab/table_05
+ - /datasets/rpng_plane/maplab/table_06
+ - /datasets/rpng_plane/maplab/table_07
+ - /datasets/rpng_plane/maplab/table_08
+commands:
+ - load --map_folder=
+ - retriangulate_landmarks
+ - v --vis_color_by_mission
+ - optvi --ba_num_iterations=5 -ba_visualize_every_n_iterations=1
+ - retriangulate_landmarks
+ - loopclosure_all_missions
+ - optvi --ba_num_iterations=5 -ba_visualize_every_n_iterations=1
+ - retriangulate_landmarks
+ - loopclosure_all_missions
+ - optvi --ba_num_iterations=30 -ba_visualize_every_n_iterations=5
+ - evaluate_landmark_quality
+ - remove_bad_landmarks
+ - save --map_folder=_result
+ - export_to_openvins --overwrite --export_path=_result
+ - export_trajectory_to_csv --overwrite --export_path=_result
+ - keyframe_heuristic
+ - v --vis_color_by_mission
+ - save --map_folder=_result_keyframed
+ - export_to_openvins --overwrite --export_path=_result_keyframed
+ - export_trajectory_to_csv --overwrite --export_path=_result_keyframed
+
diff --git a/scripts/process_eurocmav.sh b/scripts/process_eurocmav.sh
new file mode 100755
index 0000000..8b6a27c
--- /dev/null
+++ b/scripts/process_eurocmav.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+
+# Source our workspace directory to load ENV variables
+SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source ${SCRIPT_DIR}/../../../devel/setup.bash
+
+#=============================================================
+#=============================================================
+#=============================================================
+
+# dataset locations
+bagnames=(
+ "V1_01_easy"
+ "V1_02_medium"
+ "V1_03_difficult"
+ "V2_01_easy"
+ "V2_02_medium"
+ "V2_03_difficult"
+)
+
+#=============================================================
+#=============================================================
+#=============================================================
+
+big_start_time="$(date -u +%s)"
+
+# Loop through all datasets
+for i in "${!bagnames[@]}"; do
+
+# start timing
+start_time="$(date -u +%s)"
+
+# run our ROS launch file (note we discard console output by redirecting it to /dev/null)
+roslaunch ov_maplab serial.launch \
+ max_cameras:="2" \
+ use_stereo:="true" \
+ config:="euroc_mav" \
+ dataset:="${bagnames[i]}" \
+ dolivetraj:="true" &> /dev/null
+
+# print out the time elapsed
+end_time="$(date -u +%s)"
+elapsed="$(($end_time-$start_time))"
+echo "BASH: ${bagnames[i]} - vio took $elapsed seconds";
+
+done
+
+# start timing
+start_time="$(date -u +%s)"
+
+# run our maplab batch processing command
+rosrun maplab_console batch_runner --batch_control_file=src/ov_maplab/scripts/commands_eurocmav.yaml
+
+# print out the time elapsed
+end_time="$(date -u +%s)"
+elapsed="$(($end_time-$start_time))"
+echo "BASH: full maplab opt took $elapsed seconds";
+
+# fix our permissions...
+sudo chmod -R 777 /datasets/euroc_mav/maplab/
+sudo chown -R patrick /datasets/euroc_mav/maplab/
+
+# print out the time elapsed
+big_end_time="$(date -u +%s)"
+big_elapsed="$(($big_end_time-$big_start_time))"
+echo "BASH: script took $big_elapsed seconds in total!!";
\ No newline at end of file
diff --git a/scripts/process_rpngplane.sh b/scripts/process_rpngplane.sh
new file mode 100755
index 0000000..59cfe3d
--- /dev/null
+++ b/scripts/process_rpngplane.sh
@@ -0,0 +1,68 @@
+#!/usr/bin/env bash
+
+# Source our workspace directory to load ENV variables
+SCRIPT_DIR="$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+source ${SCRIPT_DIR}/../../../devel/setup.bash
+
+#=============================================================
+#=============================================================
+#=============================================================
+
+# dataset locations
+bagnames=(
+ "table_01"
+ "table_02"
+ "table_03"
+ "table_04"
+ "table_05"
+ "table_06"
+ "table_07"
+ "table_08"
+)
+
+#=============================================================
+#=============================================================
+#=============================================================
+
+big_start_time="$(date -u +%s)"
+
+# Loop through all datasets
+for i in "${!bagnames[@]}"; do
+
+# start timing
+start_time="$(date -u +%s)"
+
+# run our ROS launch file (note we discard console output by redirecting it to /dev/null)
+roslaunch ov_maplab serial.launch \
+ max_cameras:="1" \
+ use_stereo:="true" \
+ config:="rpng_plane" \
+ dataset:="${bagnames[i]}" \
+ dolivetraj:="true" &> /dev/null
+
+# print out the time elapsed
+end_time="$(date -u +%s)"
+elapsed="$(($end_time-$start_time))"
+echo "BASH: ${bagnames[i]} - vio took $elapsed seconds";
+
+done
+
+# start timing
+start_time="$(date -u +%s)"
+
+# run our maplab batch processing command
+rosrun maplab_console batch_runner --batch_control_file=src/ov_maplab/scripts/commands_rpngplane.yaml
+
+# print out the time elapsed
+end_time="$(date -u +%s)"
+elapsed="$(($end_time-$start_time))"
+echo "BASH: full maplab opt took $elapsed seconds";
+
+# fix our permissions...
+sudo chmod -R 777 /datasets/rpng_plane/maplab/
+sudo chown -R patrick /datasets/rpng_plane/maplab/
+
+# print out the time elapsed
+big_end_time="$(date -u +%s)"
+big_elapsed="$(($big_end_time-$big_start_time))"
+echo "BASH: script took $big_elapsed seconds in total!!";
\ No newline at end of file
diff --git a/src/mapper/MapBuilder.cpp b/src/mapper/MapBuilder.cpp
index 8de32fc..c60b0cb 100644
--- a/src/mapper/MapBuilder.cpp
+++ b/src/mapper/MapBuilder.cpp
@@ -37,7 +37,7 @@ MapBuilder::MapBuilder(std::shared_ptr nh, std::shared_ptr> cameras;
- // Loop through through, and load each of the cameras
+ // Loop through, and load each of the cameras
std::stringstream ss;
for (int i = 0; i < _params.state_options.num_cameras; i++) {
@@ -94,7 +94,7 @@ MapBuilder::MapBuilder(std::shared_ptr nh, std::shared_ptraddSensorAsBase(std::move(imu_sensor));
- //sensor_manager->addSensor(std::move(ncamera), imu_sensor_id, T_CitoI.at(0));
+ // sensor_manager->addSensor(std::move(ncamera), imu_sensor_id, T_CitoI.at(0));
sensor_manager->addSensor(std::move(ncamera), imu_sensor_id, aslam::Transformation());
// Create the master map object
@@ -111,7 +111,8 @@ MapBuilder::MapBuilder(std::shared_ptr nh, std::shared_ptrget_state()->_timestamp);
- camera_data.erase(_app->get_state()->_timestamp);
+
+ // Erase all older camera timestamps from our history
+ auto it0 = camera_data.begin();
+ while (it0 != camera_data.end()) {
+ if (it0->first <= _app->get_state()->_timestamp) {
+ it0 = camera_data.erase(it0);
+ } else {
+ it0++;
+ }
+ }
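+ // (camera_data is assumed to be an ordered map keyed by timestamp, so erase() returns the
+ // iterator to the next entry and nothing newer than the current state time is removed)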
// Get the current timestamp in the imu clock frame of reference
double t_ItoC = _app->get_state()->_calib_dt_CAMtoIMU->value()(0);
@@ -254,7 +264,10 @@ void MapBuilder::feed_measurement_camera(const ov_core::CameraData &message_tmp)
// Get IMU measurements between last and current vertex nodes
// Also track features from the last frame to this one!
- if (last_timestamp != -1) {
+ // The first ever frame doesn't need its pose, just the feature tracks
+ if (last_timestamp == -1) {
+ trackpipe->initializeFirstNFrame(nframe.get());
+ } else {
// First lets construct an IMU vector of measurements we need
// TODO: delete old IMU measurements from here
diff --git a/src/ros1_serial_msckf.cpp b/src/ros1_serial_msckf.cpp
index 85305e8..2b30423 100644
--- a/src/ros1_serial_msckf.cpp
+++ b/src/ros1_serial_msckf.cpp
@@ -66,6 +66,9 @@ int main(int argc, char **argv) {
// Create our VIO system
VioManagerOptions params;
params.print_and_load(parser);
+ // params.num_opencv_threads = 0; // uncomment if you want repeatability
+ // params.use_multi_threading_pubs = 0; // uncomment if you want repeatability
+ params.use_multi_threading_subs = false;
sys = std::make_shared<VioManager>(params);
viz = std::make_shared<ROS1Visualizer>(nh, sys);
builder = std::make_shared<MapBuilder>(nh, sys, params);
@@ -84,29 +87,16 @@ int main(int argc, char **argv) {
std::string topic_imu;
nh->param("topic_imu", topic_imu, "/imu0");
parser->parse_external("relative_config_imu", "imu0", "rostopic", topic_imu);
+ PRINT_DEBUG("[SERIAL]: imu: %s\n", topic_imu.c_str());
- // Our camera topics (stereo pairs and non-stereo mono)
- std::vector<std::pair<int, std::string>> topic_cameras;
- if (params.use_stereo && params.state_options.num_cameras == 2) {
- // Read in the topics
- std::string cam_topic0, cam_topic1;
- nh->param("topic_camera" + std::to_string(0), cam_topic0, "/cam" + std::to_string(0) + "/image_raw");
- nh->param("topic_camera" + std::to_string(1), cam_topic1, "/cam" + std::to_string(1) + "/image_raw");
- parser->parse_external("relative_config_imucam", "cam" + std::to_string(0), "rostopic", cam_topic0);
- parser->parse_external("relative_config_imucam", "cam" + std::to_string(1), "rostopic", cam_topic1);
- topic_cameras.emplace_back(0, cam_topic0);
- topic_cameras.emplace_back(1, cam_topic1);
- PRINT_DEBUG("serial cam (stereo): %s\n", cam_topic0.c_str());
- PRINT_DEBUG("serial cam (stereo): %s\n", cam_topic1.c_str());
- } else {
- for (int i = 0; i < params.state_options.num_cameras; i++) {
- // read in the topic
- std::string cam_topic;
- nh->param("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
- parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);
- topic_cameras.emplace_back(i, cam_topic);
- PRINT_DEBUG("serial cam (mono): %s\n", cam_topic.c_str());
- }
+ // Our camera topics
+ std::vector<std::string> topic_cameras;
+ for (int i = 0; i < params.state_options.num_cameras; i++) {
+ std::string cam_topic;
+ nh->param("topic_camera" + std::to_string(i), cam_topic, "/cam" + std::to_string(i) + "/image_raw");
+ parser->parse_external("relative_config_imucam", "cam" + std::to_string(i), "rostopic", cam_topic);
+ topic_cameras.emplace_back(cam_topic);
+ PRINT_DEBUG("[SERIAL]: cam: %s\n", cam_topic.c_str());
}
// Location of the ROS bag we want to read in
@@ -115,13 +105,14 @@ int main(int argc, char **argv) {
PRINT_DEBUG("ros bag path is: %s\n", path_to_bag.c_str());
// Load groundtruth if we have it
+ // NOTE: needs to be a csv ASL format file
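+ // (ASL / EuRoC groundtruth rows contain timestamp, position, orientation quaternion, velocity,
+ // and gyro/accel biases, which fill the 17x1 imustate used for initialization further below)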
std::map<double, Eigen::Matrix<double, 17, 1>> gt_states;
if (nh->hasParam("path_gt")) {
std::string path_to_gt;
nh->param("path_gt", path_to_gt, "");
if (!path_to_gt.empty()) {
ov_core::DatasetReader::load_gt_file(path_to_gt, gt_states);
- PRINT_DEBUG("gt file path is: %s\n", path_to_gt.c_str());
+ PRINT_DEBUG("[SERIAL]: gt file path is: %s\n", path_to_gt.c_str());
}
}
@@ -163,182 +154,136 @@ int main(int argc, char **argv) {
return EXIT_FAILURE;
}
- // Open our iterators
- auto view_imu = std::make_shared<rosbag::View>(bag, rosbag::TopicQuery(topic_imu), time_init, time_finish);
- auto view_imu_iter = view_imu->begin();
- std::vector<std::shared_ptr<rosbag::View>> view_cameras;
- std::vector<rosbag::View::iterator> view_cameras_iterators;
- for (const auto &topic : topic_cameras) {
- auto view_tmp = std::make_shared<rosbag::View>(bag, rosbag::TopicQuery(topic.second), time_init, time_finish);
- view_cameras.push_back(view_tmp);
- view_cameras_iterators.push_back(view_tmp->begin());
- }
-
- // Record the current measurement timestamps
- sensor_msgs::Imu::ConstPtr msg_imu_current;
- sensor_msgs::Imu::ConstPtr msg_imu_next;
- std::vector<sensor_msgs::Image::ConstPtr> msg_images_current;
- std::vector<sensor_msgs::Image::ConstPtr> msg_images_next;
- msg_imu_current = view_imu_iter->instantiate<sensor_msgs::Imu>();
- view_imu_iter++;
- msg_imu_next = view_imu_iter->instantiate<sensor_msgs::Imu>();
- for (int i = 0; i < params.state_options.num_cameras; i++) {
- msg_images_current.emplace_back(view_cameras_iterators.at(i)->instantiate<sensor_msgs::Image>());
- view_cameras_iterators.at(i)++;
- msg_images_next.emplace_back(view_cameras_iterators.at(i)->instantiate<sensor_msgs::Image>());
+ // We are going to loop through and collect a list of all messages
+ // This is done so we can access arbitrary points in the bag
+ // NOTE: if we instantiate messages here, this requires the whole bag to be read
+ // NOTE: thus we just check the topic which allows us to quickly loop through the index
+ // NOTE: see this PR https://github.com/ros/ros_comm/issues/117
+ double max_camera_time = -1;
+ std::vector<rosbag::MessageInstance> msgs;
+ for (const rosbag::MessageInstance &msg : view) {
+ if (!ros::ok())
+ break;
+ if (msg.getTopic() == topic_imu) {
+ // if (msg.instantiate<sensor_msgs::Imu>() == nullptr) {
+ // PRINT_ERROR(RED "[SERIAL]: IMU topic has unmatched message types!!\n" RESET);
+ // PRINT_ERROR(RED "[SERIAL]: Supports: sensor_msgs::Imu\n" RESET);
+ // return EXIT_FAILURE;
+ // }
+ msgs.push_back(msg);
+ }
+ for (int i = 0; i < params.state_options.num_cameras; i++) {
+ if (msg.getTopic() == topic_cameras.at(i)) {
+ // sensor_msgs::CompressedImage::ConstPtr img_c = msg.instantiate<sensor_msgs::CompressedImage>();
+ // sensor_msgs::Image::ConstPtr img_i = msg.instantiate<sensor_msgs::Image>();
+ // if (img_c == nullptr && img_i == nullptr) {
+ // PRINT_ERROR(RED "[SERIAL]: Image topic has unmatched message types!!\n" RESET);
+ // PRINT_ERROR(RED "[SERIAL]: Supports: sensor_msgs::Image and sensor_msgs::CompressedImage\n" RESET);
+ // return EXIT_FAILURE;
+ // }
+ msgs.push_back(msg);
+ max_camera_time = std::max(max_camera_time, msg.getTime().toSec());
+ }
+ }
}
+ PRINT_DEBUG("[SERIAL]: total of %zu messages!\n", msgs.size());
//===================================================================================
//===================================================================================
//===================================================================================
+
+ // Loop through our message array, and lets process them
+ std::set<int> used_index;
+ for (int m = 0; m < (int)msgs.size(); m++) {
- while (ros::ok()) {
-
- // Check if we should end since we have run out of measurements
- bool should_stop = false;
- if (view_imu_iter == view_imu->end()) {
- should_stop = true;
- }
- for (int i = 0; i < params.state_options.num_cameras; i++) {
- if (view_cameras_iterators.at(i) == view_cameras.at(i)->end()) {
- should_stop = true;
- }
- }
- if (should_stop) {
+ // End once we reach the last time, or skip if before beginning time (shouldn't happen)
+ if (!ros::ok() || msgs.at(m).getTime() > time_finish || msgs.at(m).getTime().toSec() > max_camera_time)
break;
- }
+ if (msgs.at(m).getTime() < time_init)
+ continue;
- // We should process the IMU if all the current cameras are greater then its time
- bool should_process_imu = false;
- for (int i = 0; i < params.state_options.num_cameras; i++) {
- double time_imu = msg_imu_current->header.stamp.toSec();
- double time_cam = msg_images_current.at(i)->header.stamp.toSec();
- if (time_imu <= time_cam) {
- should_process_imu = true;
- }
- }
- if (should_process_imu) {
- viz->callback_inertial(msg_imu_current);
- builder->callback_inertial(msg_imu_current);
- msg_imu_current = msg_imu_next;
- view_imu_iter++;
- if (view_imu_iter == view_imu->end())
- break;
- msg_imu_next = view_imu_iter->instantiate();
+ // Skip messages that we have already used
+ if (used_index.find(m) != used_index.end()) {
+ used_index.erase(m);
continue;
}
- // If we are stereo, then we should collect both the left and right
- if (params.state_options.num_cameras == 2) {
-
- // Now lets do some logic to find two images which are next to each other
- // We want to ensure that our stereo pair are very close to occurring at the same time
- bool have_found_pair = false;
- while (!have_found_pair && view_cameras_iterators.at(0) != view_cameras.at(0)->end() &&
- view_cameras_iterators.at(1) != view_cameras.at(1)->end()) {
-
- // Get left and right cameras
- auto msg_cam0 = msg_images_current.at(0);
- auto msg_cam1 = msg_images_current.at(1);
- auto msg_cam0_next = msg_images_next.at(0);
- auto msg_cam1_next = msg_images_next.at(1);
-
- // timestamps
- double time0 = msg_cam0->header.stamp.toSec();
- double time1 = msg_cam1->header.stamp.toSec();
- double time0_next = msg_cam0_next->header.stamp.toSec();
- double time1_next = msg_cam1_next->header.stamp.toSec();
-
- // We will have a match if the current left and right images are closer then the next one
- // Consider the case that we drop an image:
- // (L1) L2 (R2) R3 <- current pointers are at L1 and R2
- // In this case, we dropped the R1 image, thus we should skip the L1 image
- // We can check to see that L1 is further away compared to L2 from R2
- // Thus we should skip the L1 frame (advance the bag forward) and check this logic again!
- if (std::abs(time1 - time0) < std::abs(time1_next - time0) && std::abs(time0 - time1) < std::abs(time0_next - time1)) {
- have_found_pair = true;
- } else if (std::abs(time1 - time0) >= std::abs(time1_next - time0)) {
- // PRINT_WARNING("skipping cam1 (%.4f >= %.4f)",std::abs(time1-time0), std::abs(time1_next-time0));
- msg_images_current.at(1) = msg_images_next.at(1);
- view_cameras_iterators.at(1)++;
- if(view_cameras_iterators.at(1) != view_cameras.at(1)->end()) {
- msg_images_next.at(1) = view_cameras_iterators.at(1)->instantiate();
- }
- } else {
- // PRINT_WARNING("skipping cam0 (%.4f >= %.4f)",std::abs(time0-time1), std::abs(time0_next-time1));
- msg_images_current.at(0) = msg_images_next.at(0);
- view_cameras_iterators.at(0)++;
- if(view_cameras_iterators.at(0) != view_cameras.at(0)->end()) {
- msg_images_next.at(0) = view_cameras_iterators.at(0)->instantiate();
- }
- }
- }
- // Break out if we have ended
- if (view_cameras_iterators.at(0) == view_cameras.at(0)->end() || view_cameras_iterators.at(1) == view_cameras.at(1)->end()) {
- break;
- }
-
- // Check if we should initialize using the groundtruth (always use left)
- Eigen::Matrix<double, 17, 1> imustate;
- if (!gt_states.empty() && !sys->initialized() &&
- ov_core::DatasetReader::get_gt_state(msg_images_current.at(0)->header.stamp.toSec(), imustate, gt_states)) {
- // biases are pretty bad normally, so zero them
- // imustate.block(11,0,6,1).setZero();
- sys->initialize_with_gt(imustate);
- }
+ // IMU processing
+ if (msgs.at(m).getTopic() == topic_imu) {
+ // PRINT_DEBUG("processing imu = %.3f sec\n", msgs.at(m).getTime().toSec() - time_init.toSec());
+ viz->callback_inertial(msgs.at(m).instantiate<sensor_msgs::Imu>());
+ builder->callback_inertial(msgs.at(m).instantiate<sensor_msgs::Imu>());
+ }
- // Feed it into our system
- viz->callback_stereo(msg_images_current.at(0), msg_images_current.at(1), 0, 1);
- builder->callback_stereo(msg_images_current.at(0), msg_images_current.at(1), 0, 1);
- // Move forward in time
- msg_images_current.at(0) = msg_images_next.at(0);
- view_cameras_iterators.at(0)++;
- if(view_cameras_iterators.at(0) != view_cameras.at(0)->end()) {
- msg_images_next.at(0) = view_cameras_iterators.at(0)->instantiate();
- }
- msg_images_current.at(1) = msg_images_next.at(1);
- view_cameras_iterators.at(1)++;
- if(view_cameras_iterators.at(1) != view_cameras.at(1)->end()) {
- msg_images_next.at(1) = view_cameras_iterators.at(1)->instantiate();
- }
+ // Camera processing
+ for (int cam_id = 0; cam_id < params.state_options.num_cameras; cam_id++) {
- } else {
+ // Skip if this message is not a camera topic
+ if (msgs.at(m).getTopic() != topic_cameras.at(cam_id))
+ continue;
- // Find the camera which should be processed (smallest time)
- int smallest_cam = 0;
- for (int i = 0; i < params.state_options.num_cameras; i++) {
- double time_cam0 = msg_images_current.at(smallest_cam)->header.stamp.toSec();
- double time_cam1 = msg_images_current.at(i)->header.stamp.toSec();
- if (time_cam1 < time_cam0) {
- smallest_cam = i;
+ // We have a matching camera topic here, now find the other cameras for this time
+ // For each other camera, find the nearest message (within 0.02 sec) at or after the current one
+ // If we are unable to, then this message should just be skipped since it isn't a synced pair!
+ std::map<int, int> camid_to_msg_index;
+ double meas_time = msgs.at(m).getTime().toSec();
+ for (int cam_idt = 0; cam_idt < params.state_options.num_cameras; cam_idt++) {
+ if (cam_idt == cam_id) {
+ camid_to_msg_index.insert({cam_id, m});
+ continue;
+ }
+ int cam_idt_idx = -1;
+ for (int mt = m; mt < (int)msgs.size(); mt++) {
+ if (msgs.at(mt).getTopic() != topic_cameras.at(cam_idt))
+ continue;
+ if (std::abs(msgs.at(mt).getTime().toSec() - meas_time) < 0.02)
+ cam_idt_idx = mt;
+ break;
}
+ if (cam_idt_idx != -1) {
+ camid_to_msg_index.insert({cam_idt, cam_idt_idx});
+ }
+ }
+
+ // Skip processing if we were unable to find any messages
+ if ((int)camid_to_msg_index.size() != params.state_options.num_cameras) {
+ PRINT_DEBUG(YELLOW "[SERIAL]: Unable to find stereo pair for message %d at %.2f into bag (will skip!)\n" RESET, m,
+ meas_time - time_init.toSec());
+ continue;
}
// Check if we should initialize using the groundtruth
- auto msg_camera = msg_images_current.at(smallest_cam);
Eigen::Matrix<double, 17, 1> imustate;
- if (!gt_states.empty() && !sys->initialized() &&
- ov_core::DatasetReader::get_gt_state(msg_camera->header.stamp.toSec(), imustate, gt_states)) {
+ if (!gt_states.empty() && !sys->initialized() && ov_core::DatasetReader::get_gt_state(meas_time, imustate, gt_states)) {
// biases are pretty bad normally, so zero them
// imustate.block(11,0,6,1).setZero();
sys->initialize_with_gt(imustate);
}
- // Feed it into our system
- viz->callback_monocular(msg_camera, smallest_cam);
- builder->callback_monocular(msg_camera, smallest_cam);
-
- // move forward
- msg_images_current.at(smallest_cam) = msg_images_next.at(smallest_cam);
- view_cameras_iterators.at(smallest_cam)++;
- if(view_cameras_iterators.at(smallest_cam) != view_cameras.at(smallest_cam)->end()) {
- msg_images_next.at(smallest_cam) = view_cameras_iterators.at(smallest_cam)->instantiate();
+ // Pass our data into our visualizer callbacks!
+ // PRINT_DEBUG("processing cam = %.3f sec\n", msgs.at(m).getTime().toSec() - time_init.toSec());
+ if (params.state_options.num_cameras == 1) {
+ viz->callback_monocular(msgs.at(camid_to_msg_index.at(0)).instantiate<sensor_msgs::Image>(), 0);
+ builder->callback_monocular(msgs.at(camid_to_msg_index.at(0)).instantiate<sensor_msgs::Image>(), 0);
+ } else if (params.state_options.num_cameras == 2) {
+ auto msg0 = msgs.at(camid_to_msg_index.at(0));
+ auto msg1 = msgs.at(camid_to_msg_index.at(1));
+ used_index.insert(camid_to_msg_index.at(0)); // skip this message
+ used_index.insert(camid_to_msg_index.at(1)); // skip this message
+ viz->callback_stereo(msg0.instantiate<sensor_msgs::Image>(), msg1.instantiate<sensor_msgs::Image>(), 0, 1);
+ builder->callback_stereo(msg0.instantiate<sensor_msgs::Image>(), msg1.instantiate<sensor_msgs::Image>(), 0, 1);
+ } else {
+ PRINT_ERROR(RED "[SERIAL]: We currently only support 1 or 2 camera serial input....\n" RESET);
+ return EXIT_FAILURE;
}
+
+ break;
}
}
+
// Final visualization
viz->visualize_final();
builder->save_to_disk();