Compare revisions: nicolaspope/ftl
Commits on Source (265): 312 additions and 1677 deletions
......@@ -6,7 +6,7 @@
variables:
GIT_SUBMODULE_STRATEGY: recursive
CMAKE_ARGS_WINDOWS: '-DCMAKE_GENERATOR_PLATFORM=x64 -DPORTAUDIO_DIR="D:/Build/portaudio" -DNVPIPE_DIR="D:/Build/NvPipe" -DEigen3_DIR="C:/Program Files (x86)/Eigen3/share/eigen3/cmake" -DOpenCV_DIR="D:/Build/opencv-4.1.1" -DCUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.1" -DWITH_OPENVR=TRUE -DOPENVR_DIR="D:/Build/OpenVRSDK" -DWITH_CERES=FALSE'
CMAKE_ARGS_WINDOWS: '-DCMAKE_GENERATOR_PLATFORM=x64 -DCeres_DIR="C:/Program Files/Ceres" -DPORTAUDIO_INCLUDE_DIRS="C:/Build/src/portaudio/include" -DPORTAUDIO_LIBRARY="C:/Build/bin/portaudio/Release/portaudio_x64.lib" -DPYLON_DIR="C:/Program Files/Basler/pylon 6/Development" -DOpenCV_DIR="C:/Build/bin/opencv/install" -DCUDA_TOOLKIT_ROOT_DIR="C:/Program Files/NVIDIA GPU Computing Toolkit/CUDA/v10.2" -DWITH_OPENVR=TRUE -DWITH_CERES=TRUE'
stages:
- all
......@@ -20,6 +20,7 @@ linux:
- linux
variables:
FTL_LIB: ../../build/SDK/C/libftl-dev.so
LD_LIBRARY_PATH: /opt/pylon/lib/
# before_script:
# - export DEBIAN_FRONTEND=noninteractive
# - apt-get update -qq && apt-get install -y -qq g++ cmake git
......@@ -27,8 +28,9 @@ linux:
script:
- mkdir build
- cd build
- cmake .. -DWITH_OPTFLOW=TRUE -DUSE_CPPCHECK=FALSE -DBUILD_CALIBRATION=TRUE -DWITH_CERES=TRUE -DCMAKE_BUILD_TYPE=Release
- make
- /snap/bin/cmake .. -GNinja -DCMAKE_CXX_FLAGS="-fdiagnostics-color" -DWITH_OPTFLOW=TRUE -DUSE_CPPCHECK=FALSE -DBUILD_CALIBRATION=TRUE -DWITH_CERES=TRUE -DCMAKE_BUILD_TYPE=Release -DCPACK_GENERATOR=DEB
- ninja
- ninja package
- ctest --output-on-failure
- cd ../SDK/Python
- python3 -m unittest discover test
......@@ -52,17 +54,15 @@ webserver-deploy:
### Windows
.build-windows: &build-windows
- 'call "C:/Program Files (x86)/Microsoft Visual Studio/2017/Community/VC/Auxiliary/Build/vcvars64.bat"'
- call vcvars64.bat
- mkdir build
- cd build
- echo cmake %CMAKE_ARGS% %CMAKE_ARGS_WINDOWS% -DNANOGUI_DIR="C:/Program Files (x86)/NanoGUI" ..
- cmake %CMAKE_ARGS% %CMAKE_ARGS_WINDOWS% -DREALSENSE_DIR="C:/Program Files (x86)/Intel RealSense SDK 2.0" -DNANOGUI_DIR="C:/Program Files (x86)/NanoGUI" ..
- cmake %CMAKE_ARGS% %CMAKE_ARGS_WINDOWS% -DREALSENSE_DIR="C:/Program Files (x86)/Intel RealSense SDK 2.0" -DOPENVR_DIR="C:/Program Files (x86)/OpenVRSDK" -DOPUS_DIR="C:/Program Files (x86)/Opus" ..
- devenv ftl.utu.fi.sln /build Release
- rmdir /q /s "%DEPLOY_DIR%/%CI_COMMIT_REF_SLUG%"
- mkdir "%DEPLOY_DIR%/%CI_COMMIT_REF_SLUG%"
- 'copy "applications\vision\Release\ftl-vision.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
- 'copy "applications\calibration\Release\ftl-calibrate.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
- 'copy "applications\gui\Release\ftl-gui.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
- 'copy "applications\gui2\Release\ftl-gui2.exe" "%DEPLOY_DIR%\%CI_COMMIT_REF_SLUG%"'
windows-vision:
except:
......@@ -70,7 +70,7 @@ windows-vision:
stage: all
variables:
CMAKE_ARGS: '-DENABLE_PROFILER=TRUE -DWITH_OPTFLOW=TRUE -DBUILD_VISION=TRUE -DBUILD_CALIBRATION=FALSE -DBUILDRECONSTRUCT=FALSE -DBUILDRENDERER=FALSE -DBUILD_TESTING=FALSE -DBUILD_TESTS=FALSE'
DEPLOY_DIR: 'D:/Shared/AutoDeploy'
DEPLOY_DIR: 'C:/Shared/AutoDeploy'
tags:
- win
script:
......@@ -82,8 +82,11 @@ windows-master:
stage: all
variables:
CMAKE_ARGS: '-DWITH_OPTFLOW=TRUE'
DEPLOY_DIR: 'D:/Shared/AutoDeploy'
DEPLOY_DIR: 'C:/Shared/AutoDeploy'
tags:
- win
script:
- *build-windows
# - set PATH=%PATH%;C:/Shared/Deploy
# - ctest --output-on-failure --timeout 60
cmake_minimum_required (VERSION 3.1.0)
cmake_minimum_required (VERSION 3.16.0)
include (CheckIncludeFile)
include (CheckIncludeFileCXX)
include (CheckFunctionExists)
include(CheckLanguage)
if (WIN32)
set(CMAKE_GENERATOR_TOOLSET "host=x64")
endif()
project (ftl.utu.fi VERSION 0.0.4)
include(GNUInstallDirs)
include(CTest)
enable_testing()
option(WITH_NVPIPE "Use NvPipe for compression if available" ON)
option(WITH_OPTFLOW "Use NVIDIA Optical Flow if available" OFF)
option(WITH_OPENVR "Build with OpenVR support" OFF)
option(WITH_OPUS "Use Opus audio compression" ON)
option(WITH_FIXSTARS "Use Fixstars libSGM" ON)
option(WITH_CERES "Use Ceres solver" ON)
option(WITH_SDK "Build the C shared SDK" ON)
option(USE_CPPCHECK "Apply cppcheck during build" ON)
option(BUILD_VISION "Enable the vision component" ON)
option(BUILD_RECONSTRUCT "Enable the reconstruction component" ON)
option(BUILD_RENDERER "Enable the renderer component" ON)
option(BUILD_GUI "Enable the GUI" ON)
option(BUILD_CALIBRATION "Enable the calibration component" OFF)
option(BUILD_TOOLS "Compile developer and research tools" ON)
option(BUILD_TESTS "Compile all unit and integration tests" ON)
option(ENABLE_PROFILER "Enable builtin performance profiling" OFF)
......@@ -42,12 +47,18 @@ MACRO( VERSION_STR_TO_INTS major minor patch version )
ENDMACRO( VERSION_STR_TO_INTS )
if (CMAKE_COMPILER_IS_GNUCXX)
set(CMAKE_CUDA_HOST_COMPILER gcc-7)
endif()
find_package( OpenCV REQUIRED COMPONENTS core imgproc highgui cudaimgproc calib3d imgcodecs videoio aruco cudaarithm cudastereo cudaoptflow face tracking quality xfeatures2d)
find_package( Threads REQUIRED )
find_package( URIParser REQUIRED )
find_package( MsgPack REQUIRED )
find_package( Eigen3 REQUIRED )
find_package( Pylon )
VERSION_STR_TO_INTS(OPENCV_MAJOR OPENCV_MINOR OPENCV_PATCH ${OpenCV_VERSION})
math(EXPR OPENCV_NUMBER "(${OPENCV_MAJOR} * 10000) + (${OPENCV_MINOR} * 100) + ${OPENCV_PATCH}")
......@@ -113,35 +124,71 @@ else()
add_library(openvr INTERFACE)
endif()
# ============== Opus ==========================================================
if (WITH_OPUS)
find_library( OPUS_LIBRARY NAMES opus PATHS ${OPUS_DIR} PATH_SUFFIXES lib)
if (OPUS_LIBRARY)
find_path( OPUS_INCLUDE NAMES opus/opus.h)
if (WIN32 OR OPUS_INCLUDE)
set(HAVE_OPUS TRUE)
add_library(Opus UNKNOWN IMPORTED)
#set_property(TARGET nanogui PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${NANOGUI_EXTRA_INCS})
set_property(TARGET Opus PROPERTY IMPORTED_LOCATION ${OPUS_LIBRARY})
message(STATUS "Found Opus: ${OPUS_LIBRARY}")
else()
message(STATUS "Opus headers not installed")
endif()
if(WIN32)
# Find include
find_path(OPUS_INCLUDE_DIRS
NAMES opus/opus.h
PATHS "C:/Program Files/Opus" "C:/Program Files (x86)/Opus" ${OPUS_DIR}
PATH_SUFFIXES include
)
set_property(TARGET Opus PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${OPUS_INCLUDE_DIRS})
endif()
else()
message(STATUS "No Opus, audio compression disabled")
set(OPUS_LIBRARY "")
add_library(Opus INTERFACE)
endif()
else()
set(OPUS_LIBRARY "")
add_library(Opus INTERFACE)
endif()
# ==============================================================================
add_subdirectory(lib/libstereo)
include_directories(lib/libstereo/include)
set_property(TARGET libstereo PROPERTY FOLDER "dependencies")
#
if (WITH_FIXSTARS)
set(HAVE_LIBSGM true)
add_subdirectory(lib/libsgm)
include_directories(lib/libsgm/include)
set_property(TARGET sgm PROPERTY FOLDER "dependencies")
else()
add_library(sgm INTERFACE)
endif()
# ==== Ceres ===================================================================
if (WITH_CERES)
#find_package(glog QUIET)
find_package(Ceres REQUIRED)
set(HAVE_CERES true)
if (WIN32)
# Hack to fix missing includes
set_property(TARGET ceres PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${Ceres_DIR}/../include)
endif()
else()
add_library(ceres INTERFACE)
endif()
# ==============================================================================
if(${CMAKE_VERSION} VERSION_GREATER "3.12.0")
cmake_policy(SET CMP0074 NEW)
endif()
set(CMAKE_CXX_STANDARD 17) # For PCL/VTK https://github.com/PointCloudLibrary/pcl/issues/2686
set(CMAKE_CXX_STANDARD 17)
set(HAVE_OPENCV TRUE)
# Readline library is not required on Windows
......@@ -155,6 +202,8 @@ else()
endif()
endif()
# ==== Realsense ===============================================================
find_library( REALSENSE_LIBRARY NAMES realsense2 librealsense2 PATHS ${REALSENSE_DIR} PATH_SUFFIXES lib/x64)
if (REALSENSE_LIBRARY)
set(HAVE_REALSENSE TRUE)
......@@ -177,43 +226,7 @@ else()
add_library(realsense INTERFACE)
endif()
if (BUILD_GUI)
set(HAVE_NANOGUI TRUE)
# Disable building extras we won't need (pure C++ project)
set(NANOGUI_BUILD_SHARED OFF CACHE BOOL " " FORCE)
set(NANOGUI_BUILD_EXAMPLE OFF CACHE BOOL " " FORCE)
set(NANOGUI_BUILD_PYTHON OFF CACHE BOOL " " FORCE)
set(NANOGUI_INSTALL OFF CACHE BOOL " " FORCE)
# Add the configurations from nanogui
add_subdirectory(ext/nanogui)
# For reliability of parallel build, make the NanoGUI targets dependencies
set_property(TARGET glfw glfw_objects nanogui PROPERTY FOLDER "dependencies")
endif()
find_library( NVPIPE_LIBRARY NAMES NvPipe libNvPipe PATHS ${NVPIPE_DIR} PATH_SUFFIXES lib)
if (NVPIPE_LIBRARY)
set(HAVE_NVPIPE TRUE)
add_library(nvpipe UNKNOWN IMPORTED)
#set_property(TARGET nanogui PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${NANOGUI_EXTRA_INCS})
set_property(TARGET nvpipe PROPERTY IMPORTED_LOCATION ${NVPIPE_LIBRARY})
message(STATUS "Found NvPipe: ${NVPIPE_LIBRARY}")
if(WIN32)
# Find include
find_path(NVPIPE_INCLUDE_DIRS
NAMES NvPipe.h
PATHS "C:/Program Files/NvPipe" "C:/Program Files (x86)/NvPipe" ${NVPIPE_DIR}
PATH_SUFFIXES include
)
set_property(TARGET nvpipe PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${NVPIPE_INCLUDE_DIRS})
endif()
else()
set(NVPIPE_LIBRARY "")
add_library(nvpipe INTERFACE)
endif()
# ==== Portaudio v19 ===========================================================
# Portaudio v19 library
find_library( PORTAUDIO_LIBRARY NAMES portaudio PATHS ${PORTAUDIO_DIR} PATH_SUFFIXES lib)
......@@ -239,6 +252,8 @@ else()
message(WARNING "Portaudio not found - sound disabled")
endif()
# ==============================================================================
# Assimp library
#find_library( ASSIMP_LIBRARY NAMES assimp PATHS ${PORTAUDIO_DIR} PATH_SUFFIXES lib)
#if (ASSIMP_LIBRARY)
......@@ -271,16 +286,22 @@ endif()
check_language(CUDA)
if (CUDA_TOOLKIT_ROOT_DIR)
enable_language(CUDA)
set(CMAKE_CUDA_FLAGS "-Xcompiler -fPIC")
if (NOT WIN32)
set(CMAKE_CUDA_FLAGS "-Xcompiler -fPIC")
endif()
set(CMAKE_CUDA_ARCHITECTURES 61)
set(CMAKE_CUDA_FLAGS_DEBUG "--gpu-architecture=compute_61 -g -DDEBUG -D_DEBUG")
set(CMAKE_CUDA_FLAGS_RELEASE "--gpu-architecture=compute_61")
set(HAVE_CUDA TRUE)
include_directories(${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
# Workaround: nvcc does not accept -pthread directly, so on Linux pass it via -Xcompiler for CUDA sources
if (NOT WIN32)
set_property(TARGET Threads::Threads
PROPERTY INTERFACE_COMPILE_OPTIONS $<$<COMPILE_LANGUAGE:CUDA>:-Xcompiler -pthread>
"$<$<NOT:$<COMPILE_LANGUAGE:CUDA>>:-pthread>")
endif()
endif ()
......@@ -354,23 +375,85 @@ include(ftl_paths)
if (WIN32) # TODO(nick) Should do based upon compiler (VS)
add_definitions(-DWIN32)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /std:c++17")
set(CMAKE_GENERATOR_TOOLSET "host=x64")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /arch:AVX2 /MP4 /std:c++17 /wd4996")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /DFTL_DEBUG /Wall")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /O2")
set(OS_LIBS "")
else()
add_definitions(-DUNIX)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -fPIC -msse3 -Werror -Wall")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_DEBUG -pg")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3 -mfpmath=sse")
# -fdiagnostics-color
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -fPIC -march=native -mfpmath=sse -Wall -Werror=unused-result -Werror=return-type")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -pg")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} -O3")
set(OS_LIBS "dl")
endif()
SET(CMAKE_USE_RELATIVE_PATHS ON)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
# ==== NvPipe ==================================================================
#add_subdirectory(lib/nvpipe)
#find_library( NVPIPE_LIBRARY NAMES NvPipe libNvPipe PATHS ${NVPIPE_DIR} PATH_SUFFIXES lib)
#if (NVPIPE_LIBRARY)
set(HAVE_NVPIPE TRUE)
# add_library(nvpipe UNKNOWN IMPORTED)
#set_property(TARGET nanogui PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${NANOGUI_EXTRA_INCS})
# set_property(TARGET nvpipe PROPERTY IMPORTED_LOCATION ${NVPIPE_LIBRARY})
# message(STATUS "Found NvPipe: ${NVPIPE_LIBRARY}")
# if(WIN32)
# Find include
# find_path(NVPIPE_INCLUDE_DIRS
# NAMES NvPipe.h
# PATHS "C:/Program Files/NvPipe" "C:/Program Files (x86)/NvPipe" ${NVPIPE_DIR}
# PATH_SUFFIXES include
# )
# set_property(TARGET nvpipe PROPERTY INTERFACE_INCLUDE_DIRECTORIES ${NVPIPE_INCLUDE_DIRS})
# endif()
#else()
# set(NVPIPE_LIBRARY "")
# add_library(nvpipe INTERFACE)
#endif()
if (WIN32)
add_library(nvidia-ml INTERFACE)
endif()
if (WITH_FIXSTARS)
set(HAVE_LIBSGM true)
add_subdirectory(lib/libsgm)
include_directories(lib/libsgm/include)
set_property(TARGET sgm PROPERTY FOLDER "dependencies")
else()
add_library(sgm INTERFACE)
endif()
# ==============================================================================
if (BUILD_GUI)
set(HAVE_NANOGUI TRUE)
# Disable building extras we won't need (pure C++ project)
set(NANOGUI_BUILD_SHARED OFF CACHE BOOL " " FORCE)
set(NANOGUI_BUILD_EXAMPLE OFF CACHE BOOL " " FORCE)
set(NANOGUI_BUILD_PYTHON OFF CACHE BOOL " " FORCE)
set(NANOGUI_INSTALL OFF CACHE BOOL " " FORCE)
set(NANOGUI_EIGEN_INCLUDE_DIR ${EIGEN_INCLUDE_DIR} CACHE STRING " " FORCE)
# Add the configurations from nanogui
add_subdirectory(ext/nanogui)
# For reliability of parallel build, make the NanoGUI targets dependencies
set_property(TARGET glfw glfw_objects nanogui PROPERTY FOLDER "dependencies")
endif()
# =============================================================================
add_subdirectory(components/common/cpp)
add_subdirectory(applications/calibration)
add_subdirectory(components/codecs)
add_subdirectory(components/structures)
add_subdirectory(components/net)
......@@ -387,8 +470,10 @@ add_subdirectory(components/calibration)
add_subdirectory(applications/tools)
# SDK only compiles on linux currently
if (NOT WIN32)
add_subdirectory(SDK/C)
if (WITH_SDK)
if (NOT WIN32)
add_subdirectory(SDK/C)
endif()
endif()
if (HAVE_AVFORMAT)
......@@ -404,23 +489,17 @@ if (BUILD_VISION)
add_subdirectory(applications/vision)
endif()
if (BUILD_CALIBRATION)
if (NOT HAVE_CERES)
message(FATAL_ERROR "Ceres is required")
endif()
add_subdirectory(applications/calibration-ceres)
add_subdirectory(applications/calibration-multi)
endif()
if (BUILD_RECONSTRUCT)
add_subdirectory(applications/reconstruct)
add_subdirectory(applications/reconstruct2)
endif()
if (HAVE_NANOGUI)
add_subdirectory(applications/gui)
#add_subdirectory(applications/gui)
add_subdirectory(applications/gui2)
endif()
add_subdirectory(applications/aruco)
### Generate Build Configuration Files =========================================
configure_file(${CMAKE_SOURCE_DIR}/components/common/cpp/include/ftl/config.h.in
......@@ -431,17 +510,9 @@ configure_file(${CMAKE_SOURCE_DIR}/components/common/cpp/src/config.cpp.in
${CMAKE_SOURCE_DIR}/components/common/cpp/src/config.cpp
)
# For issue #17
# https://gitlab.kitware.com/cmake/cmake/issues/16915#note_456382
if ( TARGET Qt5::Core )
get_property( core_options TARGET Qt5::Core PROPERTY INTERFACE_COMPILE_OPTIONS )
string( REPLACE "-fPIC" "" new_core_options "${core_options}" )
set_property( TARGET Qt5::Core PROPERTY INTERFACE_COMPILE_OPTIONS ${new_core_options} )
set_property( TARGET Qt5::Core PROPERTY INTERFACE_POSITION_INDEPENDENT_CODE "ON" )
set( CMAKE_CXX_COMPILE_OPTIONS_PIE "-fPIC" )
endif()
if (WIN32) # TODO(nick) Should do based upon compiler (VS)
set_property(DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} PROPERTY VS_STARTUP_PROJECT ${VS_STARTUP_PROJECT})
set_property(TARGET ftl-vision PROPERTY VS_DEBUGGER_WORKING_DIRECTORY ${VS_DEBUG_WORKING_DIRECTORY})
endif()
include(ftl_CPack)
......@@ -441,7 +441,7 @@ EXTRACT_ALL = YES
# be included in the documentation.
# The default value is: NO.
EXTRACT_PRIVATE = NO
EXTRACT_PRIVATE = YES
# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
# scope will be included in the documentation.
......@@ -2306,7 +2306,7 @@ UML_LIMIT_NUM_FIELDS = 10
# The default value is: NO.
# This tag requires that the tag HAVE_DOT is set to YES.
TEMPLATE_RELATIONS = NO
TEMPLATE_RELATIONS = YES
# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
# YES then doxygen will generate a graph for each documented file showing the
......
......@@ -8,6 +8,7 @@
#include <ftl/operators/operator.hpp>
#include <ftl/operators/disparity.hpp>
#include <ftl/operators/mvmls.hpp>
#include <ftl/data/framepool.hpp>
#include <opencv2/imgproc.hpp>
......@@ -18,6 +19,8 @@ static ftlError_t last_error = FTLERROR_OK;
static ftl::Configurable *root = nullptr;
struct FTLStream {
FTLStream() : pool(2,5) {}
bool readonly;
ftl::stream::Sender *sender;
ftl::stream::Stream *stream;
......@@ -27,8 +30,8 @@ struct FTLStream {
ftl::operators::Graph *pipelines;
std::vector<ftl::rgbd::FrameState> video_states;
ftl::rgbd::FrameSet video_fs;
ftl::data::Pool pool;
std::shared_ptr<ftl::rgbd::FrameSet> video_fs;
};
ftlError_t ftlGetLastStreamError(ftlStream_t stream) {
......@@ -39,7 +42,7 @@ static void createFileWriteStream(FTLStream *s, const ftl::URI &uri) {
if (!root) {
int argc = 1;
const char *argv[] = {"SDK",0};
root = ftl::configure(argc, const_cast<char**>(argv), "sdk_default");
root = ftl::configure(argc, const_cast<char**>(argv), "sdk_default", {});
}
auto *fs = ftl::create<ftl::stream::File>(root, "ftlfile");
......@@ -62,12 +65,10 @@ ftlStream_t ftlCreateWriteStream(const char *uri) {
s->stream = nullptr;
s->sender = nullptr;
s->pipelines = nullptr;
s->video_fs.id = 0;
s->video_fs.count = 0;
s->video_fs.mask = 0;
s->video_fs = std::make_shared<ftl::data::FrameSet>(&s->pool, ftl::data::FrameID(0,0), ftl::timer::get_time());
s->video_fs->mask = 0;
s->interval = 40;
s->video_fs.frames.reserve(32);
s->video_states.resize(32);
//s->video_fs->frames.reserve(32);
s->has_fresh_data = false;
switch (u.getScheme()) {
......@@ -87,7 +88,7 @@ ftlStream_t ftlCreateWriteStream(const char *uri) {
}
last_error = FTLERROR_OK;
s->video_fs.timestamp = ftl::timer::get_time();
//s->video_fs.timestamp = ftl::timer::get_time();
return s;
}
......@@ -106,10 +107,10 @@ ftlError_t ftlImageWrite(
return FTLERROR_STREAM_INVALID_PARAMETER;
if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
return FTLERROR_STREAM_BAD_CHANNEL;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (!data) return FTLERROR_STREAM_NO_DATA;
if (stream->video_fs.hasChannel(static_cast<ftl::codecs::Channel>(channel)))
if (stream->video_fs->hasChannel(static_cast<ftl::codecs::Channel>(channel)))
return FTLERROR_STREAM_DUPLICATE;
stream->sender->set("codec", 1);
......@@ -117,9 +118,9 @@ ftlError_t ftlImageWrite(
stream->sender->set("lossless", true);
try {
auto &frame = stream->video_fs.frames[sourceId];
auto &frame = stream->video_fs->frames[sourceId];
auto &img = frame.create<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
auto &intrin = frame.getLeft();
auto &intrin = frame.cast<ftl::rgbd::Frame>().getLeft();
LOG(INFO) << "INTRIN: " << intrin.width << "x" << intrin.height << " for " << sourceId << ", " << (int)channel;
......@@ -155,10 +156,10 @@ ftlError_t ftlImageWrite(
if (tmp2.empty()) return FTLERROR_STREAM_NO_DATA;
img.upload(tmp2);
ftl::codecs::Channels<0> channels;
if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs.id)) channels = stream->stream->selected(stream->video_fs.id);
std::unordered_set<ftl::codecs::Channel> channels;
if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs->id().frameset())) channels = stream->stream->selected(stream->video_fs->id().frameset());
channels += static_cast<ftl::codecs::Channel>(channel);
stream->stream->select(stream->video_fs.id, channels, true);
stream->stream->select(stream->video_fs->id().frameset(), channels, true);
} catch (const std::exception &e) {
return FTLERROR_UNKNOWN;
......@@ -174,14 +175,18 @@ ftlError_t ftlIntrinsicsWriteLeft(ftlStream_t stream, int32_t sourceId, int32_t
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
while (stream->video_fs.frames.size() <= static_cast<unsigned int>(sourceId)) {
stream->video_fs.frames.emplace_back();
}
//while (stream->video_fs->frames.size() < static_cast<unsigned int>(sourceId)) {
stream->video_fs->resize(static_cast<unsigned int>(sourceId)+1);
//}
if (stream->video_fs.hasFrame(sourceId)) {
if (stream->video_fs->hasFrame(sourceId)) {
return FTLERROR_STREAM_DUPLICATE;
}
if (stream->video_fs->frames[sourceId].status() == ftl::data::FrameStatus::CREATED) {
stream->video_fs->frames[sourceId].store();
}
ftl::rgbd::Camera cam;
cam.fx = f;
cam.fy = f;
......@@ -193,12 +198,11 @@ ftlError_t ftlIntrinsicsWriteLeft(ftlStream_t stream, int32_t sourceId, int32_t
cam.maxDepth = maxDepth;
cam.baseline = baseline;
cam.doffs = 0.0f;
stream->video_fs.mask |= 1 << sourceId;
stream->video_fs.count++;
if (!stream->video_fs.frames[sourceId].origin()) {
stream->video_fs.frames[sourceId].setOrigin(&stream->video_states[sourceId]);
}
stream->video_fs.frames[sourceId].setLeft(cam);
stream->video_fs->mask |= 1 << sourceId;
//if (!stream->video_fs->frames[sourceId].origin()) {
// stream->video_fs.frames[sourceId].setOrigin(&stream->video_states[sourceId]);
//}
stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>().setLeft() = cam;
stream->has_fresh_data = true;
return FTLERROR_OK;
......@@ -209,7 +213,7 @@ ftlError_t ftlIntrinsicsWriteRight(ftlStream_t stream, int32_t sourceId, int32_t
return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
ftl::rgbd::Camera cam;
......@@ -223,7 +227,7 @@ ftlError_t ftlIntrinsicsWriteRight(ftlStream_t stream, int32_t sourceId, int32_t
cam.maxDepth = maxDepth;
cam.baseline = baseline;
cam.doffs = 0.0f;
stream->video_fs.frames[sourceId].setRight(cam);
stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>().setRight() = cam;
stream->has_fresh_data = true;
return FTLERROR_OK;
......@@ -234,15 +238,15 @@ ftlError_t ftlPoseWrite(ftlStream_t stream, int32_t sourceId, const float *data)
if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (!data) return FTLERROR_STREAM_NO_DATA;
Eigen::Matrix4f pose;
for (int i=0; i<16; ++i) pose.data()[i] = data[i];
auto &frame = stream->video_fs.frames[sourceId];
frame.setPose(pose.cast<double>());
auto &frame = stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>();
frame.setPose() = pose.cast<double>();
return FTLERROR_OK;
}
......@@ -252,16 +256,16 @@ ftlError_t ftlRemoveOcclusion(ftlStream_t stream, int32_t sourceId, ftlChannel_t
if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
return FTLERROR_STREAM_BAD_CHANNEL;
if (!stream->video_fs.frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
if (!stream->video_fs->frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
return FTLERROR_STREAM_BAD_CHANNEL;
auto &frame = stream->video_fs.frames[sourceId];
auto &frame = stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>();
//auto &mask = frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Mask);
auto &depth = frame.get<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
auto &depth = frame.set<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
auto &intrin = frame.getLeft();
cv::Mat depthR(intrin.height, intrin.width, CV_32F, const_cast<float*>(data), pitch);
......@@ -277,14 +281,14 @@ ftlError_t ftlMaskOcclusion(ftlStream_t stream, int32_t sourceId, ftlChannel_t c
if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
return FTLERROR_STREAM_BAD_CHANNEL;
if (!stream->video_fs.frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
if (!stream->video_fs->frames[sourceId].hasChannel(static_cast<ftl::codecs::Channel>(channel)))
return FTLERROR_STREAM_BAD_CHANNEL;
auto &frame = stream->video_fs.frames[sourceId];
auto &frame = stream->video_fs->frames[sourceId].cast<ftl::rgbd::Frame>();
auto &mask = frame.create<cv::cuda::GpuMat>(ftl::codecs::Channel::Mask);
auto &depth = frame.get<cv::cuda::GpuMat>(static_cast<ftl::codecs::Channel>(channel));
auto &intrin = frame.getLeft();
......@@ -330,10 +334,10 @@ ftlError_t ftlSelect(ftlStream_t stream, ftlChannel_t channel) {
if (static_cast<int>(channel) < 0 || static_cast<int>(channel) > 32)
return FTLERROR_STREAM_BAD_CHANNEL;
ftl::codecs::Channels<0> channels;
if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs.id)) channels = stream->stream->selected(stream->video_fs.id);
std::unordered_set<ftl::codecs::Channel> channels;
if (stream->stream->size() > static_cast<unsigned int>(stream->video_fs->id().frameset())) channels = stream->stream->selected(stream->video_fs->id().frameset());
channels += static_cast<ftl::codecs::Channel>(channel);
stream->stream->select(stream->video_fs.id, channels, true);
stream->stream->select(stream->video_fs->id().frameset(), channels, true);
return FTLERROR_OK;
}
......@@ -354,26 +358,18 @@ ftlError_t ftlNextFrame(ftlStream_t stream) {
try {
cudaSetDevice(0);
if (stream->pipelines) {
stream->pipelines->apply(stream->video_fs, stream->video_fs, 0);
stream->pipelines->apply(*stream->video_fs, *stream->video_fs);
// FIXME: Stream sync
}
for (auto &c : stream->video_fs->firstFrame().changed()) {
stream->sender->post(*stream->video_fs, c.first);
}
stream->sender->post(stream->video_fs);
} catch (const std::exception &e) {
return FTLERROR_STREAM_ENCODE_FAILED;
}
// Reset the frameset.
for (size_t i=0; i<stream->video_fs.frames.size(); ++i) {
if (!stream->video_fs.hasFrame(i)) continue;
auto &f = stream->video_fs.frames[i];
f.reset();
f.setOrigin(&stream->video_states[i]);
}
// FIXME: These should be reset each time
//stream->video_fs.count = 0;
//stream->video_fs.mask = 0;
stream->video_fs.timestamp += stream->interval;
stream->video_fs = std::make_shared<ftl::data::FrameSet>(&stream->pool, ftl::data::FrameID(0,0), stream->video_fs->timestamp()+stream->interval);
stream->has_fresh_data = false;
return FTLERROR_OK;
}
......@@ -389,9 +385,12 @@ ftlError_t ftlDestroyStream(ftlStream_t stream) {
try {
cudaSetDevice(0);
if (stream->pipelines) {
stream->pipelines->apply(stream->video_fs, stream->video_fs, 0);
stream->pipelines->apply(*stream->video_fs, *stream->video_fs);
// FIXME: Stream sync
}
for (auto &c : stream->video_fs->firstFrame().changed()) {
stream->sender->post(*stream->video_fs, c.first);
}
stream->sender->post(stream->video_fs);
} catch (const std::exception &e) {
err = FTLERROR_STREAM_ENCODE_FAILED;
LOG(ERROR) << "Sender exception: " << e.what();
......@@ -418,12 +417,12 @@ ftlError_t ftlSetPropertyString(ftlStream_t stream, int32_t sourceId, const char
if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (!value) return FTLERROR_STREAM_INVALID_PARAMETER;
if (!prop) return FTLERROR_STREAM_INVALID_PARAMETER;
stream->video_fs.frames[sourceId].set(std::string(prop), std::string(value));
//stream->video_fs.frames[sourceId].set(std::string(prop), std::string(value));
return FTLERROR_OK;
}
......@@ -432,12 +431,12 @@ ftlError_t ftlSetPropertyInt(ftlStream_t stream, int32_t sourceId, const char *p
if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (!value) return FTLERROR_STREAM_INVALID_PARAMETER;
if (!prop) return FTLERROR_STREAM_INVALID_PARAMETER;
stream->video_fs.frames[sourceId].set(std::string(prop), value);
//stream->video_fs.frames[sourceId].set(std::string(prop), value);
return FTLERROR_OK;
}
......@@ -446,11 +445,11 @@ ftlError_t ftlSetPropertyFloat(ftlStream_t stream, int32_t sourceId, const char
if (!stream->stream) return FTLERROR_STREAM_INVALID_STREAM;
if (sourceId < 0 || sourceId >= 32)
return FTLERROR_STREAM_INVALID_PARAMETER;
if (!stream->video_fs.hasFrame(sourceId))
if (!stream->video_fs->hasFrame(sourceId))
return FTLERROR_STREAM_NO_INTRINSICS;
if (!value) return FTLERROR_STREAM_INVALID_PARAMETER;
if (!prop) return FTLERROR_STREAM_INVALID_PARAMETER;
stream->video_fs.frames[sourceId].set(std::string(prop), value);
//stream->video_fs.frames[sourceId].set(std::string(prop), value);
return FTLERROR_OK;
}
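Taken together, the write-side calls changed above follow one flow: create a stream, declare per-source intrinsics (which now resizes the frameset and stores the frame), write channel data, then ftlNextFrame() posts every changed channel and starts a fresh FrameSet with the timestamp advanced by the interval. A minimal usage sketch of that flow follows, in hedged form: only the function names and the (sourceId, channel, data, pitch) parameters are confirmed by this diff; the header name and the full ftlIntrinsicsWriteLeft/ftlImageWrite argument lists are assumptions for illustration only.

// Hedged sketch of the C SDK write flow; see the functions patched above.
// The header name and the exact argument orders marked ASSUMED are not
// confirmed by this changeset.
#include <ftl/ftl.h> // ASSUMED header name

int write_one_frame(const void *pixels, int32_t pitch) {
	ftlStream_t s = ftlCreateWriteStream("file:///tmp/example.ftl");
	if (!s) return -1;

	// Must come first: allocates frame slot 0, stores it, sets left intrinsics.
	// Argument order (width, height, f, cx, cy, baseline, minDepth, maxDepth) is ASSUMED:
	// ftlIntrinsicsWriteLeft(s, 0, 1280, 720, 700.0f, -640.0f, -360.0f, 0.1f, 0.1f, 8.0f);

	// Write an image into a channel of source 0; channel/format constants ASSUMED:
	// ftlImageWrite(s, 0, FTLCHANNEL_Colour, FTLIMAGE_BGR, pitch, pixels);

	ftlNextFrame(s);     // pipelines apply, changed channels are posted, frameset resets
	ftlDestroyStream(s); // posts any remaining data and releases the stream
	return 0;
}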
......@@ -14,8 +14,8 @@ from enum import IntEnum
################################################################################
# components/codecs/include/ftl/codecs/packet.hpp
Packet = namedtuple("Packet", ["codec", "definition", "block_total",
"block_number", "flags", "data"])
Packet = namedtuple("Packet", ["codec", "definition", "frame_count",
"bitrate", "flags", "data"])
StreamPacket = namedtuple("StreamPacket", ["timestamp", "frameset_id",
"frame_number", "channel"])
......@@ -33,7 +33,10 @@ class codec_t(IntEnum):
PNG = 1
H264 = 2
HEVC = 3
WAV = 4
H264_LOSSLESS = 4
HEVC_LOSSLESS = 5
WAV = 32
OPUS = 33
JSON = 100
CALIBRATION = 101
POSE = 102
......@@ -127,10 +130,10 @@ class FTLDecoder:
################################################################################
def split_images(packet, im):
if packet.block_total == 1:
if packet.frame_count == 1:
return im
n = packet.block_total
n = packet.frame_count
height, width = definition_t[packet.definition]
cols = im.shape[1] // width
......@@ -145,7 +148,7 @@ def split_images(packet, im):
return imgs
def decode_codec_opencv(packet):
if packet.block_total != 1 or packet.block_number != 0:
if packet.frame_count != 1:
warn("Unsupported block format (todo)") # is this relevant?
im = _int_to_float(cv.imdecode(np.frombuffer(packet.data, dtype=np.uint8),
......@@ -154,7 +157,7 @@ def decode_codec_opencv(packet):
return split_images(packet, im)
def decode_codec_opencv_float(packet):
if packet.block_total != 1 or packet.block_number != 0:
if packet.frame_count != 1:
warn("Unsupported block format (todo)") # is this relevant?
im = cv.imdecode(np.frombuffer(packet.data, dtype=np.uint8),
......
......@@ -151,9 +151,9 @@ class FTLStreamWriter:
elif data.dtype in [np.int8, np.uint8]:
if nchans == 3:
ftl_dtype = _imageformat_t.RGB
ftl_dtype = _imageformat_t.BGR
elif nchans == 4:
ftl_dtype = _imageformat_t.RGBA
ftl_dtype = _imageformat_t.BGRA
else:
raise ValueError("Unsupported number of channels: %i" % nchans)
......
......@@ -12,6 +12,7 @@ class TestStreamWriter(unittest.TestCase):
def test_read_write_frames_uint8_1080p(self):
""" Write calibration and random 1080p image and then read them """
return # Sebastian to fix this test: Line 31 has wrong channel in orig[1].
f = tempfile.NamedTemporaryFile(suffix=".ftl")
......
set(FTLARUCOSRC
src/main.cpp
)
add_executable(ftl-aruco ${FTLARUCOSRC})
target_include_directories(ftl-aruco PRIVATE src)
target_link_libraries(ftl-aruco ftlcommon Threads::Threads ${OpenCV_LIBS})
#include <ftl/timer.hpp>
#include <ftl/configuration.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/aruco.hpp>
#include <vector>
#include <string>
int main(int argc, char** argv) {
std::vector<cv::Mat> tags;
unsigned int ntags = 10;
cv::Ptr<cv::aruco::Dictionary> dict =
cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50);
unsigned int size = 512;
unsigned int margin = 64;
unsigned int delay = 50;
argc--;
argv++;
auto opts = ftl::config::read_options(&argv, &argc);
if (opts.count("delay"))
delay = std::stoi(opts["delay"]);
if (opts.count("dict"))
dict = cv::aruco::getPredefinedDictionary(std::stoi(opts["dict"]));
if (opts.count("ntags"))
ntags = std::stoi(opts["ntags"]);
if (opts.count("size"))
size = std::stoi(opts["size"]);
if (opts.count("margin"))
margin = std::stoi(opts["margin"]);
cv::Mat blank = cv::Mat(size + margin*2, size + margin*2, CV_8UC1);
blank.setTo(255);
for (unsigned int i = 0; i < ntags; i++) {
auto& tag = tags.emplace_back();
tag.create(size + margin*2, size + margin*2, CV_8UC1);
tag.setTo(255);
cv::aruco::drawMarker(dict, i, size, tag(cv::Rect(margin, margin, size, size)), 1);
}
int id = 0;
bool show_blank = false;
ftl::timer::setInterval(delay);
ftl::timer::setHighPrecision(true);
auto h = ftl::timer::add(ftl::timer::kTimerMain, [&](uint64_t){
cv::imshow("ArUco", show_blank ? blank : tags[id]);
if (cv::waitKey(1) == 27) { ftl::timer::stop(false); }
show_blank = !show_blank;
id = (id + 1) % ntags;
return true;
});
ftl::timer::start(true);
return 0;
}
\ No newline at end of file
add_executable (ftl-calibration-ceres
src/main.cpp
src/calibration_data.cpp
src/visibility.cpp
src/calibration.cpp
)
target_include_directories(ftl-calibration-ceres PRIVATE src/)
target_include_directories(ftl-calibration-ceres PUBLIC ${OpenCV_INCLUDE_DIRS})
target_link_libraries(ftl-calibration-ceres ftlcalibration Threads::Threads ftlcommon Eigen3::Eigen ceres)
add_subdirectory(test)
#include "calibration.hpp"
#include "ftl/calibration/optimize.hpp"
#include "loguru.hpp"
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
using std::vector;
using cv::Mat;
using cv::Size;
using cv::Point2d;
using cv::Point3d;
using cv::Vec3d;
using cv::norm;
using ftl::calibration::BundleAdjustment;
using namespace ftl::calibration;
int ftl::calibration::recoverPose(const Mat &E, const vector<Point2d> &_points1,
const vector<Point2d> &_points2, const Mat &_cameraMatrix1,
const Mat &_cameraMatrix2, Mat &_R, Mat &_t, double distanceThresh,
Mat &triangulatedPoints) {
Mat cameraMatrix1;
Mat cameraMatrix2;
Mat cameraMatrix;
Mat points1(_points1.size(), 2, CV_64FC1);
Mat points2(_points2.size(), 2, CV_64FC1);
CHECK(points1.size() == points2.size());
for (size_t i = 0; i < _points1.size(); i++) {
auto p1 = points1.ptr<double>(i);
p1[0] = _points1[i].x;
p1[1] = _points1[i].y;
auto p2 = points2.ptr<double>(i);
p2[0] = _points2[i].x;
p2[1] = _points2[i].y;
}
_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
cameraMatrix = Mat::eye(Size(3, 3), CV_64FC1);
double fx1 = cameraMatrix1.at<double>(0,0);
double fy1 = cameraMatrix1.at<double>(1,1);
double cx1 = cameraMatrix1.at<double>(0,2);
double cy1 = cameraMatrix1.at<double>(1,2);
double fx2 = cameraMatrix2.at<double>(0,0);
double fy2 = cameraMatrix2.at<double>(1,1);
double cx2 = cameraMatrix2.at<double>(0,2);
double cy2 = cameraMatrix2.at<double>(1,2);
points1.col(0) = (points1.col(0) - cx1) / fx1;
points1.col(1) = (points1.col(1) - cy1) / fy1;
points2.col(0) = (points2.col(0) - cx2) / fx2;
points2.col(1) = (points2.col(1) - cy2) / fy2;
// TODO mask
// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
// modules/calib3d/src/five-point.cpp (461)
//
// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
return cv::recoverPose(E, points1, points2, cameraMatrix, _R, _t, distanceThresh, cv::noArray(), triangulatedPoints);
}
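The column operations above are the whole trick: each observation is converted to normalized image coordinates so that both cameras can share the single identity intrinsic matrix that cv::recoverPose() assumes,

	\hat{x} = \frac{x - c_x}{f_x}, \qquad \hat{y} = \frac{y - c_y}{f_y}

which is exactly \hat{p} = K^{-1} p for an intrinsic matrix K with zero skew, leaving an effective K = I for the pose recovery even though the two cameras have different intrinsics.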
double ftl::calibration::computeExtrinsicParameters(const Mat &K1, const Mat &D1,
const Mat &K2, const Mat &D2, const vector<Point2d> &points1,
const vector<Point2d> &points2, const vector<Point3d> &object_points, Mat &R,
Mat &t, vector<Point3d> &points_out) {
Mat F = cv::findFundamentalMat(points1, points2, cv::noArray(), cv::FM_8POINT);
Mat E = K2.t() * F * K1;
Mat points3dh;
// distanceThresh unit?
recoverPose(E, points1, points2, K1, K2, R, t, 1000.0, points3dh);
points_out.clear();
points_out.reserve(points3dh.cols);
for (int col = 0; col < points3dh.cols; col++) {
CHECK(points3dh.at<double>(3, col) != 0);
Point3d p = Point3d(points3dh.at<double>(0, col),
points3dh.at<double>(1, col),
points3dh.at<double>(2, col))
/ points3dh.at<double>(3, col);
points_out.push_back(p);
}
double s = ftl::calibration::optimizeScale(object_points, points_out);
t = t * s;
auto params1 = Camera(K1, D1, Mat::eye(3, 3, CV_64FC1), Mat::zeros(3, 1, CV_64FC1));
auto params2 = Camera(K2, D2, R, t);
auto ba = BundleAdjustment();
ba.addCamera(params1);
ba.addCamera(params2);
for (size_t i = 0; i < points_out.size(); i++) {
ba.addPoint({points1[i], points2[i]}, points_out[i]);
}
ba.addObject(object_points);
double error_before = ba.reprojectionError();
BundleAdjustment::Options options;
options.optimize_intrinsic = false;
options.fix_camera_extrinsic = {0};
ba.run(options);
double error_after = ba.reprojectionError();
// if these checks fail, bundle adjustment did not work correctly
if (error_before < error_after) {
LOG(WARNING) << "error_before < error_after (" << error_before << " < " << error_after << ")";
}
CHECK((cv::countNonZero(params1.rvec()) == 0) && (cv::countNonZero(params1.tvec()) == 0));
R = params2.rmat();
t = params2.tvec();
return sqrt(error_after);
}
#pragma once
#ifndef _FTL_CALIBRATION_HPP_
#define _FTL_CALIBRATION_HPP_
#include <vector>
#include <opencv2/core/core.hpp>
namespace ftl {
namespace calibration {
/**
* Same as OpenCV's recoverPose(), but does not assume the same intrinsic
* parameters for both cameras.
*
* @todo Write unit tests to check that intrinsic parameters work as expected.
*/
int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
const std::vector<cv::Point2d> &_points2, const cv::Mat &_cameraMatrix1,
const cv::Mat &_cameraMatrix2, cv::Mat &_R, cv::Mat &_t,
double distanceThresh, cv::Mat &triangulatedPoints);
/**
* Find the camera rotation and translation from the first to the second
* camera. Uses OpenCV's recoverPose() (with modifications) to estimate the
* camera pose and triangulate point locations. Scale is estimated from
* object_points. The 8-point algorithm (OpenCV) is used to estimate the
* fundamental matrix at the beginning.
*/
double computeExtrinsicParameters(const cv::Mat &K1, const cv::Mat &D1,
const cv::Mat &K2, const cv::Mat &D2, const std::vector<cv::Point2d> &points1,
const std::vector<cv::Point2d> &points2, const std::vector<cv::Point3d> &object_points,
cv::Mat &R, cv::Mat &t, std::vector<cv::Point3d> &points_out);
}
}
#endif
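A minimal sketch of how these two declarations are meant to compose, assuming matched 2D observations of a known target (all input names here are illustrative; the signature is the one declared above):

// Hedged sketch: estimate camera 2's pose relative to camera 1 from matched
// observations of a known object. Inputs are illustrative.
#include <vector>
#include <opencv2/core.hpp>
#include "calibration.hpp"

void stereo_extrinsics_example(const cv::Mat &K1, const cv::Mat &D1,
                               const cv::Mat &K2, const cv::Mat &D2,
                               const std::vector<cv::Point2d> &pts1,
                               const std::vector<cv::Point2d> &pts2,
                               const std::vector<cv::Point3d> &target) {
	cv::Mat R, t;
	std::vector<cv::Point3d> triangulated;
	// 8-point fundamental matrix -> essential matrix -> recoverPose() ->
	// scale from the known target -> two-camera bundle adjustment.
	double rms = ftl::calibration::computeExtrinsicParameters(
		K1, D1, K2, D2, pts1, pts2, target, R, t, triangulated);
	// On return, R and t map the first camera's frame to the second's, and
	// rms is the post-adjustment reprojection error.
	(void)rms;
}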
#include "calibration_data.hpp"
#include <opencv2/calib3d.hpp>
using std::vector;
using std::reference_wrapper;
using std::pair;
using std::make_pair;
using cv::Mat;
using cv::Point2d;
using cv::Point3d;
using cv::Vec3d;
using cv::Rect;
using cv::Size;
using ftl::calibration::CalibrationData;
CalibrationData::CalibrationData(int n_cameras) { init(n_cameras); }
void CalibrationData::init(int n_cameras) {
n_cameras_ = n_cameras;
cameras_ = vector<Camera>(n_cameras);
}
int CalibrationData::addObservation(const vector<bool> &visible, const vector<Point2d> &observations) {
if ((int) observations.size() != n_cameras_) { throw std::exception(); }
if ((int) visible.size() != n_cameras_) { throw std::exception(); }
visible_.insert(visible_.end(), visible.begin(), visible.end());
observations_.insert(observations_.end(), observations.begin(), observations.end());
points_.push_back(Point3d(0.0, 0.0, 0.0));
return npoints() - 1;
}
int CalibrationData::addObservation(const vector<bool> &visible, const vector<Point2d> &observations, const Point3d &point) {
if ((int) observations.size() != n_cameras_) { throw std::exception(); }
if ((int) visible.size() != n_cameras_) { throw std::exception(); }
visible_.insert(visible_.end(), visible.begin(), visible.end());
observations_.insert(observations_.end(), observations.begin(), observations.end());
points_.push_back(point);
return npoints() - 1;
}
int CalibrationData::addObservations(const vector<bool>& visible, const vector<vector<Point2d>>& observations, const vector<Point3d>& points) {
int retval = -1;
for (size_t i = 0; i < observations.size(); i++) {
retval = addObservation(visible, observations[i], points[i]);
}
return retval;
}
pair<vector<vector<Point2d>>, vector<reference_wrapper<Point3d>>> CalibrationData::_getVisible(const vector<int> &cameras) {
int n_points = npoints();
vector<vector<Point2d>> observations(cameras.size());
vector<reference_wrapper<Point3d>> points;
for (size_t k = 0; k < cameras.size(); k++) {
observations[k].reserve(n_points);
}
points.reserve(n_points);
for (int i = 0; i < n_points; i++) {
if (!isVisible(cameras, i)) { continue; }
for (size_t k = 0; k < cameras.size(); k++) {
observations[k].push_back(getObservation(cameras[k], i));
}
points.push_back(getPoint(i));
}
return make_pair(observations, points);
}
vector<vector<Point2d>> CalibrationData::getObservations(const vector<int> &cameras) {
return _getVisible(cameras).first;
}
vector<reference_wrapper<Point3d>> CalibrationData::getPoints(const vector<int> &cameras) {
return _getVisible(cameras).second;
}
#pragma once
#ifndef _FTL_CALIBRATION_DATA_HPP_
#define _FTL_CALIBRATION_DATA_HPP_
#include <vector>
#include <opencv2/core/core.hpp>
#include <ftl/calibration/optimize.hpp>
#define _NDISTORTION_PARAMETERS 3
#define _NCAMERA_PARAMETERS (9 + _NDISTORTION_PARAMETERS)
namespace ftl {
namespace calibration {
class CalibrationData {
public:
CalibrationData() {};
explicit CalibrationData(int n_cameras);
void init(int n_cameras);
int addObservation(const std::vector<bool>& visible, const std::vector<cv::Point2d>& observations);
int addObservation(const std::vector<bool>& visible, const std::vector<cv::Point2d>& observations, const cv::Point3d& point);
int addObservations(const std::vector<bool>& visible, const std::vector<std::vector<cv::Point2d>>& observations, const std::vector<cv::Point3d>& point);
void reserve(int n_points) {
points_.reserve(n_points);
points_camera_.reserve(n_points*n_cameras_);
observations_.reserve(n_points*n_cameras_);
visible_.reserve(n_points*n_cameras_);
}
// TODO: method to save points_camera_; could return (for example)
// vector<pair<Point3d*>, vector<Point3d*>>
std::vector<std::vector<cv::Point2d>> getObservations(const std::vector<int> &cameras);
/* Get points corresponding to observations returned by getObservations() or getObservationsPtr() */
std::vector<std::reference_wrapper<cv::Point3d>> getPoints(const std::vector<int> &cameras);
/* get reference/pointer to data */
inline Camera& getCamera(int k) { return cameras_[k]; }
inline std::vector<Camera>& getCameras() { return cameras_; }
inline cv::Point3d& getPoint(int i) { return points_[i]; }
inline cv::Point2d& getObservation(int k, int i) { return observations_[n_cameras_*i+k]; }
inline bool isVisible(int k, int i) { return visible_[n_cameras_*i+k]; }
inline bool isVisible(const std::vector<int> &cameras, int i) {
for (const auto &k : cameras) { if (!isVisible(k, i)) { return false; } }
return true;
}
int npoints() const { return points_.size(); }
int ncameras() const { return n_cameras_; }
private:
std::pair<std::vector<std::vector<cv::Point2d>>, std::vector<std::reference_wrapper<cv::Point3d>>> _getVisible(const std::vector<int> &cameras);
int n_cameras_;
// cameras
std::vector<Camera> cameras_;
// points for each observation
std::vector<cv::Point3d> points_;
// points estimated from cameras' observations
std::vector<cv::Point3d> points_camera_;
// observations
std::vector<cv::Point2d> observations_;
// visibility
std::vector<bool> visible_;
};
}
}
#endif
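The accessors above rely on one flattening convention: per-point, per-camera data is stored in a single vector indexed as n_cameras_*i + k. A small sketch of that layout:

// Layout sketch for observations_ and visible_ with n_cameras_ = 3:
//   [p0c0, p0c1, p0c2, p1c0, p1c1, p1c2, ...]
// so point i = 2 seen by camera k = 1 lives at index 3*2 + 1 = 7,
// matching getObservation() and isVisible() above.
inline std::size_t obs_index(std::size_t n_cameras, std::size_t i, std::size_t k) {
	return n_cameras * i + k;
}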
#include "loguru.hpp"
#include <tuple>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/opencv.hpp>
#include <ceres/ceres.h>
#include <ceres/rotation.h>
#include <ftl/calibration.hpp>
#include "calibration_data.hpp"
#include "visibility.hpp"
#include "calibration.hpp"
using std::string;
using std::vector;
using std::map;
using std::reference_wrapper;
using std::tuple;
using std::pair;
using std::make_pair;
using cv::Mat;
using cv::Size;
using cv::Point2d;
using cv::Point3d;
using namespace ftl::calibration;
void loadData( const string &fname,
Visibility &visibility,
CalibrationData &data) {
vector<Mat> K;
vector<vector<int>> visible;
vector<vector<Point2d>> points;
cv::FileStorage fs(fname, cv::FileStorage::READ);
fs["K"] >> K;
fs["points2d"] >> points;
fs["visible"] >> visible;
fs.release();
int ncameras = K.size();
int npoints = points[0].size();
visibility.init(ncameras);
data.init(ncameras);
for (int i = 0; i < npoints; i+= 1) {
vector<bool> v(ncameras, 0);
vector<Point2d> p(ncameras);
for (int k = 0; k < ncameras; k++) {
v[k] = visible[k][i];
p[k] = points[k][i];
}
visibility.update(v);
data.addObservation(v, p);
}
for (size_t i = 1; i < K.size(); i += 2) {
// mask right cameras
visibility.mask(i-1, i);
}
for (int i = 0; i < ncameras; i++) {
data.getCamera(i).setIntrinsic(K[i]);
}
}
void calibrate(const string &fname) {
Visibility visibility;
CalibrationData data;
loadData(fname, visibility, data);
// Two 15 cm squares (ArUco tags) 10 cm apart: the first spans x = 0.00-0.15 m, the second x = 0.25-0.40 m
vector<Point3d> object_points = {
Point3d(0.0, 0.0, 0.0),
Point3d(0.15, 0.0, 0.0),
Point3d(0.15, 0.15, 0.0),
Point3d(0.0, 0.15, 0.0),
Point3d(0.25, 0.0, 0.0),
Point3d(0.40, 0.0, 0.0),
Point3d(0.40, 0.15, 0.0),
Point3d(0.25, 0.15, 0.0)
};
int refcamera = 0;
auto path = visibility.shortestPath(refcamera);
map<pair<int, int>, pair<Mat, Mat>> transformations;
// Needs a better solution that allows simple access to all estimations.
// Required for calculating average coordinates and for knowing which
// points are missing.
vector<tuple<int, vector<Point3d>>> points_all;
transformations[make_pair(refcamera, refcamera)] =
make_pair(Mat::eye(3, 3, CV_64FC1), Mat::zeros(3, 1, CV_64FC1));
for (int i = 0; i < data.ncameras(); i++) {
// Calculate initial translation T from refcam. Visibility graph is
// used to create a chain of transformations from refcam to i.
int current = refcamera;
if (i == refcamera) { continue; }
Mat D = Mat::zeros(1, 5, CV_64FC1);
Mat R = Mat::eye(3, 3, CV_64FC1);
Mat t = Mat::zeros(3, 1, CV_64FC1);
for (const int &to : path.to(i)) {
auto edge = make_pair(current, to);
if (transformations.find(edge) == transformations.end()) {
Mat R_;
Mat t_;
vector<Point3d> points;
auto observations = data.getObservations({current, to});
double err = computeExtrinsicParameters(
data.getCamera(current).intrinsicMatrix(),
data.getCamera(current).distortionCoefficients(),
data.getCamera(to).intrinsicMatrix(),
data.getCamera(to).distortionCoefficients(),
observations[0], observations[1],
object_points, R_, t_, points);
LOG(INFO) << current << " -> " << to << " (RMS error: " << err << ")";
transformations[edge] = make_pair(R_, t_);
points_all.push_back(make_tuple(current, points));
}
const auto [R_update, t_update] = transformations[edge];
R = R * R_update;
t = R_update * t + t_update;
current = to;
}
transformations[make_pair(refcamera, i)] = make_pair(R.clone(), t.clone());
data.getCamera(i).setExtrinsic(R, t);
}
// TODO: see points_all comment
/*
for (auto& [i, points] : points_all) {
Mat R = data.getCamera(i).rmat();
Mat t = data.getCamera(i).tvec();
transform::inverse(R, t); // R, t: refcam -> i
CHECK(points.size() == points_ref.size());
for (size_t j = 0; j < points.size(); j++) {
Point3d &point = points_ref[j];
if (point != Point3d(0,0,0)) {
point = (transform::apply(points[j], R, t) + point) / 2.0;
}
else {
point = transform::apply(points[j], R, t);
}
}
}
*/
vector<int> idx;
BundleAdjustment ba;
ba.addCameras(data.getCameras());
vector<bool> visible(data.ncameras());
vector<Point2d> observations(data.ncameras());
int missing = 0;
for (int i = 0; i < data.npoints(); i++) {
for (int j = 0; j < data.ncameras(); j++) {
visible[j] = data.isVisible(j, i);
observations[j] = data.getObservation(j, i);
}
// TODO: see points_all comment
if (data.getPoint(i) == Point3d(0.,0.,0.)) {
missing++;
continue;
}
ba.addPoint(visible, observations, data.getPoint(i));
}
LOG(INFO) << "missing points: " << missing;
LOG(INFO) << "Initial reprojection error: " << ba.reprojectionError();
BundleAdjustment::Options options;
options.verbose = false;
options.optimize_intrinsic = true;
ba.run(options);
LOG(INFO) << "Finale reprojection error: " << ba.reprojectionError();
// calibration done, updated values in data
}
int main(int argc, char* argv[]) {
return 0;
}
add_executable(visibility_unit
./tests.cpp
../src/visibility.cpp
./visibility_unit.cpp
)
target_include_directories(visibility_unit PUBLIC ../src/)
target_link_libraries(visibility_unit Threads::Threads dl ftlcommon)
add_test(VisibilityTest visibility_unit)
################################################################################
#add_executable(calibration_unit
# ./tests.cpp
# ../src/calibration.cpp
# ../src/optimization.cpp
# ../src/calibration_data.cpp
# ./calibration_unit.cpp
#)
#target_include_directories(calibration_unit PUBLIC ../src/)
#target_link_libraries(calibration_unit Threads::Threads dl ftlcommon Eigen3::Eigen ceres)
#add_test(CalibrationTest calibration_unit WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
#include "catch.hpp"
#include "calibration.hpp"
#include "optimization.hpp"
#include <vector>
#include <opencv2/core/core.hpp>
using std::vector;
using std::string;
using cv::Mat;
using cv::Point3d;
using cv::Point2d;
using namespace ftl::calibration;
void loadData(const string &fname,vector<Mat> &K, vector<vector<int>> &visible,
vector<vector<Point2d>> &points) {
cv::FileStorage fs(fname, cv::FileStorage::READ);
fs["K"] >> K;
fs["points2d"] >> points;
fs["visible"] >> visible;
fs.release();
}
/* TEST_CASE("Camera calibration and parameter optimization", "") {
vector<Mat> K;
vector<vector<int>> visible;
vector<vector<Point2d>> observations;
SECTION("Load data") {
loadData("data/points.yml", K, visible, observations);
//REQUIRE(K.size() != 0);
}
//int ncameras = K.size();
//int npoints = observations[0].size();
}*/
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
\ No newline at end of file
set(CALIBMULTI
src/main.cpp
src/visibility.cpp
src/util.cpp
src/multicalibrate.cpp
../calibration-ceres/src/calibration.cpp
)
add_executable(ftl-calibrate-multi ${CALIBMULTI})
target_include_directories(ftl-calibrate-multi PRIVATE src ../calibration-ceres/src)
target_link_libraries(ftl-calibrate-multi ftlcalibration ftlcommon ftlnet ftlrgbd ftlstreams Threads::Threads ${OpenCV_LIBS} ceres)
#include <loguru.hpp>
#include <ftl/threads.hpp>
#include <ftl/configuration.hpp>
#include <ftl/net/universe.hpp>
#include <ftl/rgbd/source.hpp>
#include <ftl/rgbd/group.hpp>
#include <ftl/calibration.hpp>
#include <ftl/master.hpp>
#include <ftl/streams/receiver.hpp>
#include <ftl/streams/netstream.hpp>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/aruco.hpp>
#include <opencv2/core/eigen.hpp>
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <numeric>
#include <fstream>
#include "util.hpp"
#include "multicalibrate.hpp"
using std::string;
using std::optional;
using std::list;
using std::vector;
using std::map;
using std::pair;
using std::make_pair;
using cv::Mat;
using cv::Scalar;
using cv::Size;
using cv::Point2f;
using cv::Point2d;
using cv::Point3f;
using cv::Point3d;
using cv::Vec4f;
using cv::Vec4d;
using ftl::net::Universe;
using ftl::rgbd::Source;
using ftl::codecs::Channel;
vector<Point3d> calibration_target = {
Point3d(0.0, 0.0, 0.0),
Point3d(0.15, 0.0, 0.0),
Point3d(0.15, 0.15, 0.0),
Point3d(0.0, 0.15, 0.0),
Point3d(0.25, 0.0, 0.0),
Point3d(0.40, 0.0, 0.0),
Point3d(0.40, 0.15, 0.0),
Point3d(0.25, 0.15, 0.0)
};
Mat createCameraMatrix(const ftl::rgbd::Camera &parameters) {
Mat m = (cv::Mat_<double>(3,3) <<
parameters.fx, 0.0, -parameters.cx,
0.0, parameters.fy, -parameters.cy,
0.0, 0.0, 1.0);
return m;
}
struct CalibrationParams {
string output_path;
string registration_file;
vector<size_t> idx_cameras;
bool save_extrinsic = true;
bool save_intrinsic = false;
bool optimize_intrinsic = false;
int reference_camera = -1;
double alpha = 0.0;
Size size;
bool offline = false;
};
////////////////////////////////////////////////////////////////////////////////
// Visualization
////////////////////////////////////////////////////////////////////////////////
void stack(const vector<Mat> &img, Mat &out, const int rows, const int cols) {
Size size = img[0].size();
Size size_out = Size(size.width * cols, size.height * rows);
if (size_out != out.size() || out.type() != CV_8UC3) {
out = Mat(size_out, CV_8UC3, Scalar(0, 0, 0));
}
for (size_t i = 0; i < img.size(); i++) {
int row = i % rows;
int col = i / rows;
auto rect = cv::Rect(size.width * col, size.height * row, size.width, size.height);
img[i].copyTo(out(rect));
}
}
void stack(const vector<Mat> &img, Mat &out) {
// TODO
int rows = 2;
int cols = (img.size() + 1) / 2;
stack(img, out, rows, cols);
}
void visualizeCalibration( MultiCameraCalibrationNew &calib, Mat &out,
vector<Mat> &rgb, const vector<Mat> &map1,
const vector<Mat> &map2, const vector<cv::Rect> &roi)
{
vector<Scalar> colors = {
Scalar(64, 64, 255),
Scalar(64, 64, 255),
Scalar(64, 255, 64),
Scalar(64, 255, 64),
};
vector<int> markers = {cv::MARKER_SQUARE, cv::MARKER_DIAMOND};
for (size_t c = 0; c < rgb.size(); c++) {
cv::remap(rgb[c], rgb[c], map1[c], map2[c], cv::INTER_CUBIC);
cv::rectangle(rgb[c], roi[c], Scalar(24, 224, 24), 2);
for (int r = 50; r < rgb[c].rows; r = r+50) {
cv::line(rgb[c], cv::Point(0, r), cv::Point(rgb[c].cols-1, r), cv::Scalar(0,0,255), 1);
}
cv::putText(rgb[c],
"Camera " + std::to_string(c),
Point2i(roi[c].x + 10, roi[c].y + 30),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
}
stack(rgb, out);
}
////////////////////////////////////////////////////////////////////////////////
// RPC
////////////////////////////////////////////////////////////////////////////////
// Using Mat directly
vector<Mat> getDistortionParametersRPC(ftl::net::Universe* net, ftl::stream::Net* nstream) {
return net->call<vector<Mat>>(nstream->getPeer(), "get_distortion");
}
bool setRectifyRPC( ftl::net::Universe* net, ftl::stream::Net* nstream,
bool enabled) {
return net->call<bool>(nstream->getPeer(), "set_rectify", enabled);
}
bool setIntrinsicsRPC( ftl::net::Universe* net, ftl::stream::Net* nstream,
const Size &size, const vector<Mat> &K, const vector<Mat> &D) {
return net->call<bool>(nstream->getPeer(), "set_intrinsics",
size, K[0], D[0], K[1], D[1] );
}
bool setExtrinsicsRPC( ftl::net::Universe* net, ftl::stream::Net* nstream,
const Mat &R, const Mat &t) {
return net->call<bool>(nstream->getPeer(), "set_extrinsics", R, t);
}
bool setPoseRPC(ftl::net::Universe* net, ftl::stream::Net* nstream,
const Mat &pose) {
return net->call<bool>(nstream->getPeer(), "set_pose", pose);
}
bool saveCalibrationRPC(ftl::net::Universe* net, ftl::stream::Net* nstream) {
return net->call<bool>(nstream->getPeer(), "save_calibration");
}
////////////////////////////////////////////////////////////////////////////////
/* run calibration and perform RPC to update calibration on nodes */
void calibrateRPC( ftl::net::Universe* net,
MultiCameraCalibrationNew &calib,
const CalibrationParams &params,
vector<ftl::stream::Net*> &nstreams,
vector<Mat> &map1,
vector<Mat> &map2,
vector<cv::Rect> &roi) {
int reference_camera = params.reference_camera;
if (params.reference_camera < 0) {
reference_camera = calib.getOptimalReferenceCamera();
reference_camera -= (reference_camera & 1);
LOG(INFO) << "optimal camera (automatic): " << reference_camera;
}
LOG(INFO) << "reference camera: " << reference_camera;
if (params.optimize_intrinsic) calib.setFixIntrinsic(0);
calib.calibrateAll(reference_camera);
vector<Mat> R, t;
calib.getCalibration(R, t);
size_t n_cameras = calib.getCamerasCount();
vector<Mat> R_rect(n_cameras), t_rect(n_cameras);
vector<Mat> Rt_out(n_cameras);
map1.resize(n_cameras);
map2.resize(n_cameras);
roi.resize(n_cameras);
for (size_t c = 0; c < n_cameras; c += 2) {
Mat K1 = calib.getCameraMat(c);
Mat K2 = calib.getCameraMat(c + 1);
Mat D1 = calib.getDistCoeffs(c);
Mat D2 = calib.getDistCoeffs(c + 1);
Mat P1, P2, Q;
Mat R1, R2;
Mat R_c1c2, T_c1c2;
calculateTransform(R[c], t[c], R[c + 1], t[c + 1], R_c1c2, T_c1c2);
cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, params.alpha);
R_c1c2 = R_c1c2.clone();
T_c1c2 = T_c1c2.clone();
// calculate extrinsics from rectified parameters (currently disabled)
//Mat _t = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
//Rt_out[c] = getMat4x4(R[c], t[c]) * getMat4x4(R1, _t).inv();
//Rt_out[c + 1] = getMat4x4(R[c + 1], t[c + 1]) * getMat4x4(R2, _t).inv();
LOG(INFO) << K1;
LOG(INFO) << K2;
LOG(INFO) << R_c1c2;
LOG(INFO) << T_c1c2;
LOG(INFO) << "--------------------------------------------------------";
auto *nstream = nstreams[c/2];
while(true) {
try {
if (params.save_intrinsic && params.optimize_intrinsic) {
// never update distortion during extrinsic calibration
setIntrinsicsRPC(net, nstream, params.size, {K1, K2}, {Mat(0, 0, CV_64FC1), Mat(0, 0, CV_64FC1)});
}
if (params.save_extrinsic) {
setExtrinsicsRPC(net, nstream, R_c1c2, T_c1c2);
setPoseRPC(net, nstream, getMat4x4(R[c], t[c]));
}
if (params.save_intrinsic || params.save_extrinsic) {
saveCalibrationRPC(net, nstream);
LOG(INFO) << "CALIBRATION SENT";
}
break;
} catch (std::exception &ex) {
LOG(ERROR) << "RPC failed: " << ex.what();
std::this_thread::sleep_for(std::chrono::seconds(1));
}
}
// for visualization: recompute rectification with alpha = 1.0 to obtain the valid-pixel ROIs
Size new_size;
cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, 1.0, new_size, &roi[c], &roi[c + 1]);
//roi[c] = cv::Rect(0, 0, params.size.width, params.size.height);
//roi[c+1] = cv::Rect(0, 0, params.size.width, params.size.height);
cv::initUndistortRectifyMap(K1, D1, R1, P1, params.size, CV_16SC2, map1[c], map2[c]);
cv::initUndistortRectifyMap(K2, D2, R2, P2, params.size, CV_16SC2, map1[c + 1], map2[c + 1]);
}
}
std::vector<cv::Point2d> findCalibrationTarget(const cv::Mat &im, const cv::Mat &K) {
// Detect a two-marker ArUco target (IDs 0 and 1, DICT_5X5_100) and return its
// 8 corner points, tag 0 first. Returns an empty vector if not fully visible.
std::vector<std::vector<cv::Point2f>> corners;
std::vector<int> ids;
auto params = cv::aruco::DetectorParameters::create();
params->cornerRefinementMinAccuracy = 0.01;
params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
cv::aruco::detectMarkers(im, cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_100), corners, ids, params, cv::noArray(), K);
if (corners.size() > 2) { LOG(ERROR) << "Too many ArUco tags in image"; }
if (corners.size() != 2) { return {}; }
const size_t ncorners = 4;
const size_t ntags = ids.size();
std::vector<cv::Point2d> points;
if (ids[0] == 1) {
std::swap(ids[0], ids[1]);
std::swap(corners[0], corners[1]);
}
if (ids[0] != 0) {
LOG(ERROR) << "Tags ID0 and ID1 expected";
return {};
}
points.reserve(ntags*ncorners);
for (size_t i = 0; i < ntags; i++) {
for (size_t j = 0; j < ncorners; j++) {
points.push_back(corners[i][j]);
}
}
return points;
}
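// Live calibration: capture frames from all connected sources, accumulate
// target observations until every camera has n_views of them, solve via
// calibrateRPC() and show the rectified result until ESC is pressed.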
void runCameraCalibration( ftl::Configurable* root,
int n_views, int min_visible,
string path, string filename,
bool save_input,
CalibrationParams &params)
{
Universe *net = ftl::create<Universe>(root, "net");
ftl::ctrl::Master ctrl(root, net);
net->start();
net->waitConnections();
ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver");
gen->setStream(stream);
auto stream_uris = net->findAll<std::string>("list_streams");
std::sort(stream_uris.begin(), stream_uris.end());
std::vector<ftl::stream::Net*> nstreams;
int count = 0;
for (auto &s : stream_uris) {
LOG(INFO) << " --- found stream: " << s;
auto *nstream = ftl::create<ftl::stream::Net>(stream, std::to_string(count), net);
std::string name = *(nstream->get<std::string>("name"));
nstream->set("uri", s);
nstreams.push_back(nstream);
stream->add(nstream);
++count;
}
const size_t n_sources = nstreams.size();
const size_t n_cameras = n_sources * 2;
size_t reference_camera = 0;
std::mutex mutex;
std::atomic<bool> new_frames = false;
vector<Mat> rgb_new(n_cameras);
vector<Mat> camera_parameters(n_cameras);
Size res;
gen->onFrameSet([stream, &mutex, &new_frames, &rgb_new, &camera_parameters, &res](ftl::rgbd::FrameSet &fs) {
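// Runs for every incoming frame set: download the left/right pair of each
// source, convert BGRA -> BGR into rgb_new and record the scaled camera
// matrices, all under the mutex; `new_frames` signals the capture loop.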
stream->select(fs.id, Channel::Left + Channel::Right);
if (fs.frames.size() != (rgb_new.size()/2)) {
// nstreams.size() == (rgb_new.size()/2)
LOG(ERROR) << "frames.size() != nstreams.size(), "
<< fs.frames.size() << " != " << (rgb_new.size()/2);
}
UNIQUE_LOCK(mutex, CALLBACK);
bool good = true;
try {
for (size_t i = 0; i < fs.frames.size(); i ++) {
if (!fs.frames[i].hasChannel(Channel::Left)) {
good = false;
LOG(ERROR) << "No left channel";
break;
}
if (!fs.frames[i].hasChannel(Channel::Right)) {
good = false;
LOG(ERROR) << "No right channel";
break;
}
auto idx = stream->originStream(0, i);
CHECK(idx >= 0) << "negative index";
fs.frames[i].download(Channel::Left+Channel::Right);
Mat &left = fs.frames[i].get<Mat>(Channel::Left);
Mat &right = fs.frames[i].get<Mat>(Channel::Right);
/*
// note: also returns empty sometimes
fs.frames[i].upload(Channel::Left+Channel::Right);
Mat left, right;
fs.frames[i].get<cv::cuda::GpuMat>(Channel::Left).download(left);
fs.frames[i].get<cv::cuda::GpuMat>(Channel::Right).download(right);
*/
CHECK(!left.empty() && !right.empty());
cv::cvtColor(left, rgb_new[2*idx], cv::COLOR_BGRA2BGR);
cv::cvtColor(right, rgb_new[2*idx+1], cv::COLOR_BGRA2BGR);
camera_parameters[2*idx] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getLeftCamera()),
rgb_new[2*idx].size(), Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height));
camera_parameters[2*idx+1] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getRightCamera()),
rgb_new[2*idx].size(), Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height));
if (res.empty()) res = rgb_new[2*idx].size();
}
}
catch (std::exception &ex) {
LOG(ERROR) << "exception: " << ex.what();
good = false;
}
catch (...) {
LOG(ERROR) << "unknown exception";
good = false;
}
new_frames = good;
return true;
});
stream->begin();
ftl::timer::start(false);
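// wait for the first frame set so the true capture resolution is known
// before the calibration object is constructed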
while(true) {
if (!res.empty()) {
params.size = res;
LOG(INFO) << "Camera resolution: " << params.size;
break;
}
std::this_thread::sleep_for(std::chrono::seconds(1));
}
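// Disable rectification on every node so detection runs on raw images.
// setRectifyRPC() is assumed to return the resulting state, so `false`
// here means rectification was successfully switched off.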
for (auto *nstream: nstreams) {
bool enabled = true;	// renamed from `res`, which shadowed the Size above
while(enabled) {
try { enabled = setRectifyRPC(net, nstream, false); }
catch (...) {}
if (enabled) {
LOG(ERROR) << "set_rectify() failed for " << *(nstream->get<string>("uri"));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
else {
LOG(INFO) << "rectification disabled for " << *(nstream->get<string>("uri"));
}
}
}
// TODO: parameter for calibration target type
auto calib = MultiCameraCalibrationNew( n_cameras, reference_camera,
params.size, CalibrationTarget(0.15)
);
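// `calibration_target` (defined earlier in this file) holds the 3D object
// points matching, in order, the 8 image points findCalibrationTarget() returns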
calib.object_points_ = calibration_target;
int iter = 0;
Mat show;
vector<int> visible;
vector<vector<Point2d>> points(n_cameras);
vector<Mat> rgb(n_cameras);
std::this_thread::sleep_for(std::chrono::seconds(3)); // allow the nodes time to apply the rectification change
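// Capture loop: wait for a fresh frame set, detect the target in every
// camera and record the observation when at least min_visible cameras saw
// it; annotated camera views are displayed while capturing.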
while(calib.getMinVisibility() < static_cast<size_t>(n_views)) {
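// jump target: restart the wait whenever any camera delivered an empty image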
loop:
cv::waitKey(10);
while (true) {
if (new_frames) {
UNIQUE_LOCK(mutex, LOCK);
rgb.swap(rgb_new);
new_frames = false;
break;
}
cv::waitKey(10);
}
for (Mat &im : rgb) {
if (im.empty()) {
LOG(ERROR) << "EMPTY";
goto loop;
}
}
visible.clear();
visible.resize(n_cameras, 0);
int n_found = 0;
for (size_t i = 0; i < n_cameras; i++) {
auto new_points = findCalibrationTarget(rgb[i], camera_parameters[i]);
if (new_points.size() == 0) {
points[i] = vector<Point2d>(8, Point2d(0.0, 0.0));
}
else {
points[i] = new_points;
visible[i] = 1;
n_found++;
}
}
if (n_found >= min_visible) {
calib.addPoints(points, visible);
if (save_input) {
for (size_t i = 0; i < n_cameras; i++) {
cv::imwrite(path + std::to_string(i) + "_" + std::to_string(iter) + ".jpg", rgb[i]);
}
}
iter++;
}
for (size_t i = 0; i < n_cameras; i++) {
if (visible[i]) {
for (size_t j = 0; j < points[i].size(); j++) {
cv::drawMarker( rgb[i], points[i][j], Scalar(42, 255, 42), cv::MARKER_CROSS, 25, 1);
cv::putText(rgb[i], std::to_string(j), Point2i(points[i][j]),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
}
}
// index
cv::putText(rgb[i],
"Camera " + std::to_string(i),
Point2i(10, 30),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
// resolution
cv::putText(rgb[i],
"[" + std::to_string(rgb[i].size().width) + "x" + std::to_string(rgb[i].size().height) + "]",
Point2i(rgb[i].size().width-150, 30),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
// uri
cv::putText(rgb[i],
stream_uris[i/2],
Point2i(10, rgb[i].rows-10),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
// remaining frames
cv::putText(rgb[i],
std::to_string(std::max(0, (int) (n_views - calib.getViewsCount(i)))),
Point2i(rgb[i].size().width-150, rgb[i].rows-10),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
}
stack(rgb, show);
cv::namedWindow("Cameras", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
//cv::resize(show, show2, cv::Size(float(show.cols) / float(show.rows) * 1080.0f, 1080));
cv::resizeWindow("Cameras", cv::Size(2*1920,1080));
cv::imshow("Cameras", show);
}
cv::destroyWindow("Cameras");
for (size_t i = 0; i < nstreams.size(); i++) {
while(true) {
try {
vector<Mat> D = getDistortionParametersRPC(net, nstreams[i]);
LOG(INFO) << "K[" << 2*i << "] = \n" << camera_parameters[2*i];
LOG(INFO) << "D[" << 2*i << "] = " << D[0];
LOG(INFO) << "K[" << 2*i+1 << "] = \n" << camera_parameters[2*i+1];
LOG(INFO) << "D[" << 2*i+1 << "] = " << D[1];
calib.setCameraParameters(2*i, camera_parameters[2*i], D[0]);
calib.setCameraParameters(2*i+1, camera_parameters[2*i+1], D[1]);
break;
}
catch (...) {}
}
}
Mat out;
vector<Mat> map1, map2;
vector<cv::Rect> roi;
calibrateRPC(net, calib, params, nstreams, map1, map2, roi);
if (save_input) {
cv::FileStorage fs(path + filename, cv::FileStorage::WRITE);
calib.saveInput(fs);
fs.release();
}
// visualize
while(cv::waitKey(10) != 27) {
while (!new_frames) {
// abort the wait if a key is pressed while no new frames are arriving
if (cv::waitKey(50) != -1) { ftl::running = false; break; }
}
{
UNIQUE_LOCK(mutex, LOCK);
rgb.swap(rgb_new);
new_frames = false;
}
visualizeCalibration(calib, out, rgb, map1, map2, roi);
cv::namedWindow("Calibration", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
cv::imshow("Calibration", out);
}
for (size_t i = 0; i < nstreams.size(); i++) {
while(true) {
try {
setRectifyRPC(net, nstreams[i], true);
break;
}
catch (...) {}
}
}
ftl::running = false;
ftl::timer::stop();
ftl::pool.stop(true);
}
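// Offline variant: replays the 2D observations saved by a previous run
// (save_input) instead of capturing new views; the streams are still
// needed so the updated calibration can be pushed back to the nodes.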
void runCameraCalibrationPath( ftl::Configurable* root,
string path, string filename,
CalibrationParams &params)
{
Universe *net = ftl::create<Universe>(root, "net");
ftl::ctrl::Master ctrl(root, net);
net->start();
net->waitConnections();
ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver");
gen->setStream(stream);
auto stream_uris = net->findAll<std::string>("list_streams");
std::sort(stream_uris.begin(), stream_uris.end());
std::vector<ftl::stream::Net*> nstreams;
int count = 0;
for (auto &s : stream_uris) {
LOG(INFO) << " --- found stream: " << s;
auto *nstream = ftl::create<ftl::stream::Net>(stream, std::to_string(count), net);
std::string name = *(nstream->get<std::string>("name"));
nstream->set("uri", s);
nstreams.push_back(nstream);
stream->add(nstream);
++count;
}
const size_t n_sources = nstreams.size();
const size_t n_cameras = n_sources * 2;
size_t reference_camera = 0;
std::mutex mutex;
std::atomic<bool> new_frames = false;
vector<Mat> rgb_new(n_cameras);
vector<Mat> camera_parameters(n_cameras);
Size res;
gen->onFrameSet([stream, &mutex, &new_frames, &rgb_new, &camera_parameters, &res](ftl::rgbd::FrameSet &fs) {
stream->select(fs.id, Channel::Left + Channel::Right);
if (fs.frames.size() != (rgb_new.size()/2)) {
// nstreams.size() == (rgb_new.size()/2)
LOG(ERROR) << "frames.size() != nstreams.size(), "
<< fs.frames.size() << " != " << (rgb_new.size()/2);
}
UNIQUE_LOCK(mutex, CALLBACK);
bool good = true;
try {
for (size_t i = 0; i < fs.frames.size(); i ++) {
if (!fs.frames[i].hasChannel(Channel::Left)) {
good = false;
LOG(ERROR) << "No left channel";
break;
}
if (!fs.frames[i].hasChannel(Channel::Right)) {
good = false;
LOG(ERROR) << "No right channel";
break;
}
auto idx = stream->originStream(0, i);
CHECK(idx >= 0) << "negative index";
fs.frames[i].download(Channel::Left+Channel::Right);
Mat &left = fs.frames[i].get<Mat>(Channel::Left);
Mat &right = fs.frames[i].get<Mat>(Channel::Right);
CHECK(!left.empty() && !right.empty());
cv::cvtColor(left, rgb_new[2*idx], cv::COLOR_BGRA2BGR);
cv::cvtColor(right, rgb_new[2*idx+1], cv::COLOR_BGRA2BGR);
/*
camera_parameters[2*idx] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getLeftCamera()),
rgb_new[2*idx].size(), Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height));
camera_parameters[2*idx+1] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getRightCamera()),
rgb_new[2*idx].size(), Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height));
if (res.empty()) res = rgb_new[2*idx].size();*/
}
}
catch (std::exception &ex) {
LOG(ERROR) << "exception: " << ex.what();
good = false;
}
catch (...) {
LOG(ERROR) << "unknown exception";
good = false;
}
new_frames = good;
return true;
});
stream->begin();
ftl::timer::start(false);
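// as in the live path: disable rectification so the final visualization
// shows the effect of the new calibration on raw images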
for (auto *nstream: nstreams) {
bool enabled = true;	// renamed from `res`, which shadowed the Size above
while(enabled) {
try { enabled = setRectifyRPC(net, nstream, false); }
catch (...) {}
if (enabled) {
LOG(ERROR) << "set_rectify() failed for " << *(nstream->get<string>("uri"));
std::this_thread::sleep_for(std::chrono::milliseconds(100));
}
else {
LOG(INFO) << "rectification disabled for " << *(nstream->get<string>("uri"));
}
}
}
// TODO: parameter for calibration target type
auto calib = MultiCameraCalibrationNew( n_cameras, reference_camera,
params.size, CalibrationTarget(0.150)
);
calib.object_points_ = calibration_target;
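// observations are replayed from the data file; the stored resolution
// overrides whatever the live streams report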
cv::FileStorage fs(path + filename, cv::FileStorage::READ);
fs["resolution"] >> params.size;
params.idx_cameras.resize(n_cameras);
std::iota(params.idx_cameras.begin(), params.idx_cameras.end(), 0);
calib.loadInput(path + filename, params.idx_cameras);
/*for (size_t i = 0; i < nstreams.size(); i++) {
while(true) {
try {
if (camera_parameters[2*i].empty() || camera_parameters[2*i+1].empty()) {
std::this_thread::sleep_for(std::chrono::seconds(1));
continue;
}
vector<Mat> D = getDistortionParametersRPC(net, nstreams[i]);
LOG(INFO) << "K[" << 2*i << "] = \n" << camera_parameters[2*i];
LOG(INFO) << "D[" << 2*i << "] = " << D[0];
LOG(INFO) << "K[" << 2*i+1 << "] = \n" << camera_parameters[2*i+1];
LOG(INFO) << "D[" << 2*i+1 << "] = " << D[1];
calib.setCameraParameters(2*i, camera_parameters[2*i], D[0]);
calib.setCameraParameters(2*i+1, camera_parameters[2*i+1], D[1]);
break;
}
catch (...) {}
}
}*/
Mat out;
vector<Mat> map1, map2;
vector<cv::Rect> roi;
calibrateRPC(net, calib, params, nstreams, map1, map2, roi);
vector<Mat> rgb(n_cameras);
// visualize
while(cv::waitKey(10) != 27) {
while (!new_frames) {
// abort the wait if a key is pressed while no new frames are arriving
if (cv::waitKey(50) != -1) { ftl::running = false; break; }
}
{
UNIQUE_LOCK(mutex, LOCK);
rgb.swap(rgb_new);
new_frames = false;
}
visualizeCalibration(calib, out, rgb, map1, map2, roi);
cv::namedWindow("Calibration", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
cv::imshow("Calibration", out);
}
for (size_t i = 0; i < nstreams.size(); i++) {
while(true) {
try {
setRectifyRPC(net, nstreams[i], true);
break;
}
catch (...) {}
}
}
ftl::running = false;
ftl::timer::stop();
ftl::pool.stop(true);
}
int main(int argc, char **argv) {
auto options = ftl::config::read_options(&argv, &argc);
auto root = ftl::configure(argc, argv, "registration_default");
// run calibration from saved input?
const bool load_input = root->value<bool>("load_input", false);
// should calibration input be saved
const bool save_input = root->value<bool>("save_input", false);
// should extrinsic calibration be saved (only used with load_input)
const bool save_extrinsic = root->value<bool>("save_extrinsic", true);
// should intrinsic calibration be saved
const bool save_intrinsic = root->value<bool>("save_intrinsic", false);
const bool optimize_intrinsic = root->value<bool>("optimize_intrinsic", false);
// directory where calibration data and images are saved, if save_input enabled
const string calibration_data_dir = root->value<string>("calibration_data_dir", "./");
// file to save calibration input (2d points and visibility)
const string calibration_data_file = root->value<string>("calibration_data_file", "data.yml");
// minimum number of cameras that must see the pattern in a single view
const int min_visible = root->value<int>("min_visible", 3);
// minimum number of views of the pattern required per camera
const int n_views = root->value<int>("n_views", 500);
// reference camera, -1 for automatic
const int ref_camera = root->value<int>("reference_camera", -1);
// registration file path
const string registration_file = root->value<string>("registration_file", FTL_LOCAL_CONFIG_ROOT "/registration.json");
// directory where extrinsic calibration files are saved
const string output_directory = root->value<string>("output_directory", "./");
const bool offline = root->value<bool>("offline", false);
CalibrationParams params;
params.offline = offline;
params.save_extrinsic = save_extrinsic;
params.save_intrinsic = save_intrinsic;
params.optimize_intrinsic = optimize_intrinsic;
params.output_path = output_directory;
params.registration_file = registration_file;
params.reference_camera = ref_camera;
LOG(INFO) << "\n"
<< "\n"
<< "\n save_input: " << (int) save_input
// << "\n load_input: " << (int) load_input
// << "\n save_extrinsic: " << (int) save_extrinsic
// << "\n save_intrinsic: " << (int) save_intrinsic
<< "\n optimize_intrinsic: " << (int) optimize_intrinsic
// << "\n calibration_data_dir: " << calibration_data_dir
// << "\n calibration_data_file: " << calibration_data_file
<< "\n min_visible: " << min_visible
<< "\n n_views: " << n_views
<< "\n reference_camera: " << ref_camera << (ref_camera != -1 ? "" : " (automatic)")
// << "\n registration_file: " << registration_file
// << "\n output_directory: " << output_directory
<< "\n";
if (load_input) {
runCameraCalibrationPath(root, calibration_data_dir, calibration_data_file, params);
}
else {
runCameraCalibration(root, n_views, min_visible, calibration_data_dir, calibration_data_file, save_input, params);
}
return 0;
}