diff --git a/CMakeLists.txt b/CMakeLists.txt
index 25fd1d0b59a4fdab623b4a1929a83005d2ee0bfc..42198650d9b9835f14a0b72fb6f82050f6f2ed3c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -22,7 +22,6 @@ option(BUILD_VISION "Enable the vision component" ON)
 option(BUILD_RECONSTRUCT "Enable the reconstruction component" ON)
 option(BUILD_RENDERER "Enable the renderer component" ON)
 option(BUILD_GUI "Enable the GUI" ON)
-option(BUILD_CALIBRATION "Enable the calibration component" OFF)
 option(BUILD_TOOLS "Compile developer and research tools" ON)
 option(BUILD_TESTS "Compile all unit and integration tests" ON)
 option(ENABLE_PROFILER "Enable builtin performance profiling" OFF)
@@ -444,7 +443,6 @@ endif()
 
 
 add_subdirectory(components/common/cpp)
-#add_subdirectory(applications/calibration)
 add_subdirectory(components/codecs)
 add_subdirectory(components/structures)
 add_subdirectory(components/net)
diff --git a/applications/calibration-ceres/CMakeLists.txt b/applications/calibration-ceres/CMakeLists.txt
deleted file mode 100644
index d994acaa990d35355f4f891f8a539dd5335782ed..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/CMakeLists.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-add_executable (ftl-calibration-ceres
-                src/main.cpp
-                src/calibration_data.cpp
-                src/visibility.cpp
-                src/calibration.cpp
-)
-
-target_include_directories(ftl-calibration-ceres PRIVATE src/)
-target_include_directories(ftl-calibration-ceres PUBLIC ${OpenCV_INCLUDE_DIRS})
-target_link_libraries(ftl-calibration-ceres ftlcalibration Threads::Threads ftlcommon Eigen3::Eigen ceres)
-
-add_subdirectory(test)
diff --git a/applications/calibration-ceres/src/calibration.cpp b/applications/calibration-ceres/src/calibration.cpp
deleted file mode 100644
index a78905a2cfb762ffc1081994f0d09c8ee6463a9a..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/calibration.cpp
+++ /dev/null
@@ -1,135 +0,0 @@
-#include "calibration.hpp"
-#include "ftl/calibration/optimize.hpp"
-
-#include "loguru.hpp"
-
-#include <opencv2/core.hpp>
-#include <opencv2/calib3d.hpp>
-
-using std::vector;
-
-using cv::Mat;
-using cv::Size;
-using cv::Point2d;
-using cv::Point3d;
-using cv::Vec3d;
-
-using cv::norm;
-
-using ftl::calibration::BundleAdjustment;
-
-using namespace ftl::calibration;
-
-int ftl::calibration::recoverPose(const Mat &E, const vector<Point2d> &_points1,
-	const vector<Point2d> &_points2, const Mat &_cameraMatrix1,
-	const Mat &_cameraMatrix2, Mat &_R, Mat &_t, double distanceThresh,
-	Mat &triangulatedPoints) {
-
-	Mat cameraMatrix1;
-	Mat cameraMatrix2;
-	Mat cameraMatrix;
-
-	Mat points1(_points1.size(), 2, CV_64FC1);
-	Mat points2(_points2.size(), 2, CV_64FC1);
-
-	CHECK(points1.size() == points2.size());
-
-	for (size_t i = 0; i < _points1.size(); i++) {
-		auto p1 = points1.ptr<double>(i);
-		p1[0] = _points1[i].x;
-		p1[1] = _points1[i].y;
-
-		auto p2 = points2.ptr<double>(i);
-		p2[0] = _points2[i].x;
-		p2[1] = _points2[i].y;
-	}
-
-	_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
-	_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
-	cameraMatrix = Mat::eye(Size(3, 3), CV_64FC1);
-
-	double fx1 = cameraMatrix1.at<double>(0,0);
-	double fy1 = cameraMatrix1.at<double>(1,1);
-	double cx1 = cameraMatrix1.at<double>(0,2);
-	double cy1 = cameraMatrix1.at<double>(1,2);
-
-	double fx2 = cameraMatrix2.at<double>(0,0);
-	double fy2 = cameraMatrix2.at<double>(1,1);
-	double cx2 = cameraMatrix2.at<double>(0,2);
-	double cy2 = cameraMatrix2.at<double>(1,2);
-
-	points1.col(0) = (points1.col(0) - cx1) / fx1;
-	points1.col(1) = (points1.col(1) - cy1) / fy1;
-
-	points2.col(0) = (points2.col(0) - cx2) / fx2;
-	points2.col(1) = (points2.col(1) - cy2) / fy2;
-
-	// TODO mask
-	// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
-	// modules/calib3d/src/five-point.cpp (461)
-	//
-	// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
-
-	return cv::recoverPose(E, points1, points2, cameraMatrix, _R, _t, distanceThresh, cv::noArray(), triangulatedPoints);
-}
-
-double ftl::calibration::computeExtrinsicParameters(const Mat &K1, const Mat &D1,
-	const Mat &K2, const Mat &D2, const vector<Point2d> &points1,
-	const vector<Point2d> &points2, const vector<Point3d> &object_points, Mat &R,
-	Mat &t, vector<Point3d> &points_out) {
-
-	Mat F = cv::findFundamentalMat(points1, points2, cv::noArray(), cv::FM_8POINT);
-	Mat E = K2.t() * F * K1;
-
-	Mat points3dh;
-	// distanceThresh unit?
-	recoverPose(E, points1, points2, K1, K2, R, t, 1000.0, points3dh);
-
-	points_out.clear();
-	points_out.reserve(points3dh.cols);
-
-	for (int col = 0; col < points3dh.cols; col++) {
-		CHECK(points3dh.at<double>(3, col) != 0);
-		Point3d p = Point3d(points3dh.at<double>(0, col),
-							points3dh.at<double>(1, col),
-							points3dh.at<double>(2, col))
-							/ points3dh.at<double>(3, col);
-		points_out.push_back(p);
-	}
-
-	double s = ftl::calibration::optimizeScale(object_points, points_out);
-	t = t * s;
-
-	auto params1 = Camera(K1, D1, Mat::eye(3, 3, CV_64FC1), Mat::zeros(3, 1, CV_64FC1));
-	auto params2 = Camera(K2, D2, R, t);
-
-	auto ba = BundleAdjustment();
-	ba.addCamera(params1);
-	ba.addCamera(params2);
-
-	for (size_t i = 0; i < points_out.size(); i++) {
-		ba.addPoint({points1[i], points2[i]}, points_out[i]);
-	}
-	
-	ba.addObject(object_points);
-
-	double error_before = ba.reprojectionError();
-
-	BundleAdjustment::Options options;
-	options.optimize_intrinsic = false;
-	options.fix_camera_extrinsic = {0};
-	ba.run(options);
-
-	double error_after = ba.reprojectionError();
-
-	// if these checks fail, bundle adjustment did not work correctly
-	if (error_before < error_after) {
-		LOG(WARNING) << "error before < error_after (" << error_before << "  <" << error_after << ")";
-	}
-	CHECK((cv::countNonZero(params1.rvec()) == 0) && (cv::countNonZero(params1.tvec()) == 0));
-
-	R = params2.rmat();
-	t = params2.tvec();
-
-	return sqrt(error_after);
-}
diff --git a/applications/calibration-ceres/src/calibration.hpp b/applications/calibration-ceres/src/calibration.hpp
deleted file mode 100644
index 11bb36d920da1e638ce49ad699a955fe5032acec..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/calibration.hpp
+++ /dev/null
@@ -1,37 +0,0 @@
-#pragma once
-#ifndef _FTL_CALIBRATION_HPP_
-#define _FTL_CALIBRATION_HPP_
-
-#include <vector>
-
-#include <opencv2/core/core.hpp>
-
-namespace ftl {
-namespace calibration {
-
-/**
- * Same as OpenCV's recoverPose(), but does not assume the same intrinsic parameters
- * for both cameras.
- *
- * @todo Write unit tests to check that intrinsic parameters work as expected.
- */
-int recoverPose(const cv::Mat &E, const std::vector<cv::Point2d> &_points1,
-	const std::vector<cv::Point2d> &_points2, const cv::Mat &_cameraMatrix1,
-	const cv::Mat &_cameraMatrix2, cv::Mat &_R, cv::Mat &_t,
-	double distanceThresh, cv::Mat &triangulatedPoints);
-
-/**
- * Find the camera rotation and translation from the first to the second camera.
- * Uses a modified version of OpenCV's recoverPose() to estimate the camera pose
- * and triangulate point locations. Scale is estimated from object_points. The
- * fundamental matrix is first estimated with OpenCV's 8-point algorithm.
- */
-double computeExtrinsicParameters(const cv::Mat &K1, const cv::Mat &D1,
-	const cv::Mat &K2, const cv::Mat &D2, const std::vector<cv::Point2d> &points1,
-	const std::vector<cv::Point2d> &points2, const std::vector<cv::Point3d> &object_points,
-	cv::Mat &R, cv::Mat &t, std::vector<cv::Point3d> &points_out);
-
-}
-}
-
-#endif
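
A minimal sketch of how the removed computeExtrinsicParameters() declared above was meant to be called; the wrapper function, its name, and its inputs are illustrative placeholders rather than code from this repository.

#include <vector>
#include <opencv2/core.hpp>
#include "calibration.hpp"

// Illustrative only: points1/points2 are matched 2D detections of the same
// calibration target seen by camera 1 and camera 2, object_points holds the
// known 3D target coordinates, and K1/D1, K2/D2 are the camera intrinsics.
static double exampleExtrinsics(const std::vector<cv::Point2d> &points1,
                                const std::vector<cv::Point2d> &points2,
                                const std::vector<cv::Point3d> &object_points,
                                const cv::Mat &K1, const cv::Mat &D1,
                                const cv::Mat &K2, const cv::Mat &D2) {
	cv::Mat R, t;                       // output: pose of camera 2 relative to camera 1
	std::vector<cv::Point3d> points3d;  // output: triangulated target points

	// Returns the RMS reprojection error after bundle adjustment.
	return ftl::calibration::computeExtrinsicParameters(
		K1, D1, K2, D2, points1, points2, object_points, R, t, points3d);
}
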
diff --git a/applications/calibration-ceres/src/calibration_data.cpp b/applications/calibration-ceres/src/calibration_data.cpp
deleted file mode 100644
index 1e634a3921330ba6b2b8a04d75ecdd5352596409..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/calibration_data.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-#include "calibration_data.hpp"
-
-#include <opencv2/calib3d.hpp>
-
-using std::vector;
-using std::reference_wrapper;
-using std::pair;
-using std::make_pair;
-
-using cv::Mat;
-using cv::Point2d;
-using cv::Point3d;
-using cv::Vec3d;
-using cv::Rect;
-using cv::Size;
-
-using ftl::calibration::CalibrationData;
-
-
-CalibrationData::CalibrationData(int n_cameras) { init(n_cameras); }
-
-void CalibrationData::init(int n_cameras) {
-	n_cameras_ = n_cameras;
-	cameras_ = vector<Camera>(n_cameras);
-}
-
-int CalibrationData::addObservation(const vector<bool> &visible, const vector<Point2d> &observations) {
-	if ((int) observations.size() != n_cameras_) { throw std::exception(); }
-	if ((int) visible.size() != n_cameras_) { throw std::exception(); }
-	visible_.insert(visible_.end(), visible.begin(), visible.end());
-	observations_.insert(observations_.end(), observations.begin(), observations.end());
-	points_.push_back(Point3d(0.0, 0.0, 0.0));
-
-	return npoints() - 1;
-}
-
-int CalibrationData::addObservation(const vector<bool> &visible, const vector<Point2d> &observations, const Point3d &point) {
-	if ((int) observations.size() != n_cameras_) { throw std::exception(); }
-	if ((int) visible.size() != n_cameras_) { throw std::exception(); }
-	visible_.insert(visible_.end(), visible.begin(), visible.end());
-	observations_.insert(observations_.end(), observations.begin(), observations.end());
-	points_.push_back(point);
-
-	return npoints() - 1;
-}
-
-int CalibrationData::addObservations(const vector<bool>& visible, const vector<vector<Point2d>>& observations, const vector<Point3d>& points) {
-	int retval = -1;
-	for (size_t i = 0; i < observations.size(); i++) {
-		retval = addObservation(visible, observations[i], points[i]);
-	}
-	return retval;
-}
-
-pair<vector<vector<Point2d>>, vector<reference_wrapper<Point3d>>> CalibrationData::_getVisible(const vector<int> &cameras) {
-
-	int n_points = npoints();
-	vector<vector<Point2d>> observations(cameras.size());
-	vector<reference_wrapper<Point3d>> points;
-
-	for (size_t k = 0; k < cameras.size(); k++) {
-		observations[k].reserve(n_points);
-	}
-	points.reserve(n_points);
-
-	for (int i = 0; i < n_points; i++) {
-		if (!isVisible(cameras, i)) { continue; }
-
-		for (size_t k = 0; k < cameras.size(); k++) {
-			observations[k].push_back(getObservation(cameras[k], i));
-		}
-		points.push_back(getPoint(i));
-	}
-
-	return make_pair(observations, points);
-}
-
-vector<vector<Point2d>> CalibrationData::getObservations(const vector<int> &cameras) {
-	return _getVisible(cameras).first;
-}
-
-vector<reference_wrapper<Point3d>> CalibrationData::getPoints(const vector<int> &cameras) {
-	return _getVisible(cameras).second;
-}
diff --git a/applications/calibration-ceres/src/calibration_data.hpp b/applications/calibration-ceres/src/calibration_data.hpp
deleted file mode 100644
index 9e5871bf2ce6407ec58a938c0574dd4d6374f6a7..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/calibration_data.hpp
+++ /dev/null
@@ -1,75 +0,0 @@
-#pragma once
-#ifndef _FTL_CALIBRATION_DATA_HPP_
-#define _FTL_CALIBRATION_DATA_HPP_
-
-#include <vector>
-#include <opencv2/core/core.hpp>
-#include <ftl/calibration/optimize.hpp>
-
-#define _NDISTORTION_PARAMETERS 3
-#define _NCAMERA_PARAMETERS (9 + _NDISTORTION_PARAMETERS)
-
-namespace ftl {
-namespace calibration {
-
-class CalibrationData {
-public:
-	CalibrationData() {};
-	explicit CalibrationData(int n_cameras);
-
-	void init(int n_cameras);
-
-	int addObservation(const std::vector<bool>& visible, const std::vector<cv::Point2d>& observations);
-	int addObservation(const std::vector<bool>& visible, const std::vector<cv::Point2d>& observations, const cv::Point3d& point);
-
-	int addObservations(const std::vector<bool>& visible, const std::vector<std::vector<cv::Point2d>>& observations, const std::vector<cv::Point3d>& point);
-
-	void reserve(int n_points) {
-		points_.reserve(n_points);
-		points_camera_.reserve(n_points*n_cameras_);
-		observations_.reserve(n_points*n_cameras_);
-		visible_.reserve(n_points*n_cameras_);
-	}
-
-	// TODO: method for saving points_camera_; could return (for example)
-	// pair<vector<Point3d*>, vector<Point3d*>>
-	
-	std::vector<std::vector<cv::Point2d>> getObservations(const std::vector<int> &cameras);
-	/* Get points corresponding to observations returned by getObservations() */
-	std::vector<std::reference_wrapper<cv::Point3d>> getPoints(const std::vector<int> &cameras);
-
-	/* get reference/pointer to data */
-	inline Camera& getCamera(int k) { return cameras_[k]; }
-	inline std::vector<Camera>& getCameras() { return cameras_; }
-	inline cv::Point3d& getPoint(int i) { return points_[i]; }
-	inline cv::Point2d& getObservation(int k, int i) { return observations_[n_cameras_*i+k]; }
-	inline bool isVisible(int k, int i) { return visible_[n_cameras_*i+k]; }
-	inline bool isVisible(const std::vector<int> &cameras, int i) {
-		for (const auto &k : cameras) { if (!isVisible(k, i)) { return false; } }
-		return true;
-	}
-
-	int npoints() const { return points_.size(); }
-	int ncameras() const { return n_cameras_; }
-
-private:
-	std::pair<std::vector<std::vector<cv::Point2d>>, std::vector<std::reference_wrapper<cv::Point3d>>> _getVisible(const std::vector<int> &cameras);
-	
-	int n_cameras_;
-
-	// cameras
-	std::vector<Camera> cameras_;
-	// points for each observation
-	std::vector<cv::Point3d> points_;
-	// points estimated from cameras' observations
-	std::vector<cv::Point3d> points_camera_;
-	// observations
-	std::vector<cv::Point2d> observations_;
-	// visibility
-	std::vector<bool> visible_;
-};
-
-}
-}
-
-#endif
diff --git a/applications/calibration-ceres/src/main.cpp b/applications/calibration-ceres/src/main.cpp
deleted file mode 100644
index 9020d0e7737f43a7eff44406ad7853243161a4e5..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/main.cpp
+++ /dev/null
@@ -1,225 +0,0 @@
-#include "loguru.hpp"
-
-#include <tuple>
-
-#include <opencv2/core.hpp>
-#include <opencv2/calib3d.hpp>
-
-#include <opencv2/opencv.hpp>
-
-#include <ceres/ceres.h>
-#include <ceres/rotation.h>
-
-#include <ftl/calibration.hpp>
-
-#include "calibration_data.hpp"
-#include "visibility.hpp"
-#include "calibration.hpp"
-
-using std::string;
-using std::vector;
-using std::map;
-using std::reference_wrapper;
-
-using std::tuple;
-using std::pair;
-using std::make_pair;
-
-using cv::Mat;
-using cv::Size;
-
-using cv::Point2d;
-using cv::Point3d;
-
-using namespace ftl::calibration;
-
-void loadData(	const string &fname,
-				Visibility &visibility,
-				CalibrationData &data) {
-
-	vector<Mat> K;
-	vector<vector<int>> visible;
-	vector<vector<Point2d>> points;
-
-	cv::FileStorage fs(fname, cv::FileStorage::READ);
-
-	fs["K"] >> K;
-	fs["points2d"] >> points;
-	fs["visible"] >> visible;
-	fs.release();
-
-	int ncameras = K.size();
-	int npoints = points[0].size();
-
-	visibility.init(ncameras);
-	data.init(ncameras);
-
-	for (int i = 0; i < npoints; i+= 1) {
-
-		vector<bool> v(ncameras, 0);
-		vector<Point2d> p(ncameras);
-
-		for (int k = 0; k < ncameras; k++) {
-			v[k] = visible[k][i];
-			p[k] = points[k][i];
-		}
-
-		visibility.update(v);
-		data.addObservation(v, p);
-	}
-
-	for (size_t i = 1; i < K.size(); i += 2) {
-		// mask right cameras
-		visibility.mask(i-1, i);
-	}
-
-	for (int i = 0; i < ncameras; i++) {
-		data.getCamera(i).setIntrinsic(K[i]);
-	}
-}
-
-void calibrate(const string &fname) {
-
-	Visibility visibility;
-	CalibrationData data;
-
-	loadData(fname, visibility, data);
-
-	// 2x 15cm squares (ArUco tags) 10cm apart
-	vector<Point3d> object_points = {
-			Point3d(0.0, 0.0, 0.0),
-			Point3d(0.15, 0.0, 0.0),
-			Point3d(0.15, 0.15, 0.0),
-			Point3d(0.0, 0.15, 0.0),
-
-			Point3d(0.25, 0.0, 0.0),
-			Point3d(0.40, 0.0, 0.0),
-			Point3d(0.40, 0.15, 0.0),
-			Point3d(0.25, 0.15, 0.0)
-	};
-
-	int refcamera = 0;
-	auto path = visibility.shortestPath(refcamera);
-
-	map<pair<int, int>, pair<Mat, Mat>> transformations;
-
-	// Needs a better solution that allows simple access to all estimates.
-	// Required for calculating average coordinates and for knowing which
-	// points are missing.
-
-	vector<tuple<int, vector<Point3d>>> points_all;
-
-	transformations[make_pair(refcamera, refcamera)] =
-		make_pair(Mat::eye(3, 3, CV_64FC1), Mat::zeros(3, 1, CV_64FC1));
-
-	for (int i = 0; i < data.ncameras(); i++) {
-
-		// Calculate initial translation T from refcam. Visibility graph is
-		// used to create a chain of transformations from refcam to i.
-
-		int current =  refcamera;
-		if (i == refcamera) { continue; }
-
-		Mat D = Mat::zeros(1, 5, CV_64FC1);
-		Mat R = Mat::eye(3, 3, CV_64FC1);
-		Mat t = Mat::zeros(3, 1, CV_64FC1);
-
-		for (const int &to : path.to(i)) {
-			auto edge = make_pair(current, to);
-
-			if (transformations.find(edge) == transformations.end()) {
-				Mat R_;
-				Mat t_;
-				vector<Point3d> points;
-
-				auto observations = data.getObservations({current, to});
-				double err = computeExtrinsicParameters(
-					data.getCamera(current).intrinsicMatrix(),
-					data.getCamera(current).distortionCoefficients(),
-					data.getCamera(to).intrinsicMatrix(),
-					data.getCamera(to).distortionCoefficients(),
-					observations[0], observations[1],
-					object_points, R_, t_, points);
-
-				LOG(INFO) << current << " -> " << to << " (RMS error: " << err << ")";
-
-				transformations[edge] = make_pair(R_, t_);
-				points_all.push_back(make_tuple(current, points));
-			}
-
-			const auto [R_update, t_update] = transformations[edge];
-
-			R = R * R_update;
-			t = R_update * t + t_update;
-
-			current = to;
-		}
-
-		transformations[make_pair(refcamera, i)] = make_pair(R.clone(), t.clone());
-
-		data.getCamera(i).setExtrinsic(R, t);
-	}
-
-	// TODO: see points_all comment
-	/*
-	for (auto& [i, points] : points_all) {
-
-		Mat R = data.getCamera(i).rmat();
-		Mat t = data.getCamera(i).tvec();
-		transform::inverse(R, t); // R, t: refcam -> i
-
-		CHECK(points.size() == points_ref.size());
-
-		for (size_t j = 0; j < points.size(); j++) {
-			Point3d &point = points_ref[j];
-
-			if (point != Point3d(0,0,0)) {
-				point = (transform::apply(points[j], R, t) + point) / 2.0;
-			}
-			else {
-				point = transform::apply(points[j], R, t);
-			}
-		}
-	}
-	*/
-	vector<int> idx;
-	BundleAdjustment ba;
-
-	ba.addCameras(data.getCameras());
-
-	vector<bool> visible(data.ncameras());
-	vector<Point2d> observations(data.ncameras());
-
-	int missing = 0;
-	for (int i = 0; i < data.npoints(); i++) {
-		for (int j = 0; j < data.ncameras(); j++) {
-			visible[j] = data.isVisible(j, i);
-			observations[j] = data.getObservation(j, i);
-		}
-
-		// TODO: see points_all comment
-		if (data.getPoint(i) == Point3d(0.,0.,0.)) {
-			missing++;
-			continue;
-		}
-
-		ba.addPoint(visible, observations, data.getPoint(i));
-	}
-
-	LOG(INFO) << "missing points: " << missing;
-	LOG(INFO) << "Initial reprojection error: " << ba.reprojectionError();
-
-	BundleAdjustment::Options options;
-	options.verbose = false;
-	options.optimize_intrinsic = true;
-
-	ba.run(options);
-
-	LOG(INFO) << "Finale reprojection error: " << ba.reprojectionError();
-
-	// calibration done, updated values in data
-}
-
-int main(int argc, char* argv[]) {
-	return 0;
-}
diff --git a/applications/calibration-ceres/src/visibility.cpp b/applications/calibration-ceres/src/visibility.cpp
deleted file mode 100644
index c941cbb12c96dbdb5166f0af373cc486af938b2d..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/visibility.cpp
+++ /dev/null
@@ -1,226 +0,0 @@
-#include "visibility.hpp"
-#include "loguru.hpp"
-
-#include <limits>
-#include <algorithm>
-#include <queue>
-
-using std::vector;
-using std::pair;
-using std::make_pair;
-
-using ftl::calibration::Paths;
-using ftl::calibration::Visibility;
-
-template<typename T>
-Paths<T>::Paths(const vector<int> &previous, const vector<T> &distances) :
-	previous_(previous), distances_(distances) {}
-
-template<typename T>
-vector<int> Paths<T>::from(int i) const {
-	vector<int> path;
-
-	if (previous_[i] == -1) { return {}; }
-
-	int current = i;
-	do {
-		if (distance(i) == std::numeric_limits<T>::max()) { return {}; } // no path
-
-		path.push_back(current);
-		current = previous_[current];
-	}
-	while (previous_[current] != -1);
-
-	return path;
-}
-
-template<typename T>
-vector<int> Paths<T>::to(int i) const {
-	vector<int> path = from(i);
-	std::reverse(path.begin(), path.end());
-	return path;
-}
-
-template<typename T>
-T Paths<T>::distance(int i) const {
-	return distances_[i];
-}
-
-template<typename T>
-bool Paths<T>::connected() const {
-	for (const auto &distance : distances_) {
-		if (distance == std::numeric_limits<T>::max()) { return false; }
-	}
-	return true;
-}
-
-template<typename T>
-std::string Paths<T>::to_string() const {
-	std::stringstream sb;
-
-	int node = -1;
-	for (size_t i = 0; i < distances_.size(); i++) {
-		if (previous_[i] == -1) {
-			node = i;
-			break;
-		}
-	}
-
-	for (size_t i = 0; i < distances_.size(); i++) {
-		sb << node;
-
-		for (const auto &p : to(i)) {
-			sb << " -> " << p;
-		}
-
-		sb << " (" << distances_[i] << ")\n";
-	}
-	return sb.str();
-}
-
-template class Paths<int>;
-template class Paths<float>;
-template class Paths<double>;
-
-////////////////////////////////////////////////////////////////////////////////
-
-template<typename T>
-static bool isValidAdjacencyMatrix(const vector<vector<T>> &graph) {
-	size_t nrows = graph.size();
-	for (const auto &col : graph) {
-		if (nrows != col.size()) { return false; }
-	}
-
-	for (size_t r = 0; r < nrows; r++) {
-		for (size_t c = r; c < nrows; c++) {
-			if (graph[r][c] != graph[c][r]) {
-				return false; }
-		}
-	}
-
-	return true;
-}
-
-template<typename T>
-static vector<int> find_neighbors(int i, const vector<vector<T>> &graph) {
-	vector<int> res;
-
-	for (size_t j = 0; j < graph.size(); j++) {
-		if (graph[i][j] > 0) { res.push_back(j); }
-	}
-
-	return res;
-}
-
-template<typename T>
-static pair<vector<int>, vector<T>> dijstra_impl(const int i, const vector<vector<T>> &graph) {
-	// distance, index
-	std::priority_queue<pair<T, int>, vector<pair<T, int>>, std::greater<pair<T, int>>> pq;
-
-	pq.push(make_pair(0, i));
-
-	vector<T> distance_path(graph.size(), std::numeric_limits<T>::max());
-	vector<int> previous(graph.size(), -1);
-
-	distance_path[i] = 0;
-
-	while(!pq.empty()) {
-		int current = pq.top().second;
-		pq.pop();
-
-		for (int n : find_neighbors(current, graph)) {
-			T cost = graph[current][n] + distance_path[current];
-			if (distance_path[n] > cost) {
-				distance_path[n] = cost;
-				pq.push(make_pair(cost, n));
-				previous[n] = current;
-			}
-		}
-	}
-
-	return make_pair(previous, distance_path);
-}
-
-template<typename T>
-Paths<T> ftl::calibration::dijstra(const int i, const vector<vector<T>> &graph) {
-	auto tmp = dijstra_impl(i, graph);
-	return Paths<T>(tmp.first, tmp.second);
-}
-
-template Paths<int> ftl::calibration::dijstra(const int i, const vector<vector<int>> &graph);
-template Paths<float> ftl::calibration::dijstra(const int i, const vector<vector<float>> &graph);
-template Paths<double> ftl::calibration::dijstra(const int i, const vector<vector<double>> &graph);
-
-////////////////////////////////////////////////////////////////////////////////
-
-Visibility::Visibility(int n_cameras) :
-	n_cameras_(n_cameras),
-	graph_(n_cameras, vector(n_cameras, 0)),
-	mask_(n_cameras, vector(n_cameras, false))
-	{}
-
-Visibility::Visibility(const vector<vector<int>> &graph) :
-	n_cameras_(graph.size()),
-	graph_(graph),
-	mask_(graph.size(), vector(graph.size(), false)) {
-
-		if (!isValidAdjacencyMatrix(graph)) {
-			throw std::exception();
-		}
-}
-
-void Visibility::init(int n_cameras) {
-	n_cameras_ = n_cameras;
-	graph_ = vector(n_cameras, vector(n_cameras, 0));
-	mask_ = vector(n_cameras, vector(n_cameras, false));
-}
-
-template<typename T>
-void Visibility::update(const vector<T> &add) {
-	if ((int) add.size() != n_cameras_) { throw std::exception(); }
-
-	for (int i = 0; i < n_cameras_; i++) {
-		if (!add[i]) { continue; }
-
-		for (int j = 0; j < n_cameras_; j++) {
-			if (i == j) { continue; }
-			if (add[j]) { graph_[i][j] += 1; }
-		}
-	}
-}
-
-template void Visibility::update(const std::vector<int> &add);
-template void Visibility::update(const std::vector<bool> &add);
-
-void Visibility::mask(int a, int b) {
-	mask_[a][b] = true;
-	mask_[b][a] = true;
-}
-
-void Visibility::unmask(int a, int b) {
-	mask_[a][b] = false;
-	mask_[b][a] = false;
-}
-
-int Visibility::distance(int a, int b) const {
-	return graph_[a][b];
-}
-
-Paths<float> Visibility::shortestPath(int i) const {
-	if ((i < 0) || (i >= n_cameras_)) { return Paths<float>({}, {}); /* throw exception */}
-
-	vector<vector<float>> graph(n_cameras_, vector<float>(n_cameras_, 0.0f));
-	for (int r = 0; r < n_cameras_; r++) {
-		for (int c = 0; c < n_cameras_; c++) {
-			float v = graph_[r][c];
-			if ((v != 0) && !mask_[r][c]) { graph[r][c] = 1.0f / v; }
-		}
-	}
-
-	auto res = dijstra_impl(i, graph);
-	for (float &distance : res.second) {
-		distance = 1.0f / distance;
-	}
-
-	return Paths<float>(res.first, res.second);
-}
diff --git a/applications/calibration-ceres/src/visibility.hpp b/applications/calibration-ceres/src/visibility.hpp
deleted file mode 100644
index 31c86dca399e194bc81c7342065c75556461175d..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/src/visibility.hpp
+++ /dev/null
@@ -1,91 +0,0 @@
-#pragma once
-#ifndef _FTL_VISIBILITY_HPP_
-#define _FTL_VISIBILITY_HPP_
-
-#include <vector>
-#include <string>
-
-namespace ftl {
-namespace calibration {
-
-/**
- * @brief Result from Dijkstra's algorithm.
- */
-template<typename T>
-class Paths {
-public:
-	Paths(const std::vector<int> &previous, const std::vector<T> &distances);
-
-	/**
-	 * @brief Shortest path from node i. Same as to(i) in reverse order.
-	 */
-	std::vector<int> from(int i) const;
-
-	/**
-	 * @brief Shortest path to node i. Same as from(i) in reverse order.
-	 */
-	std::vector<int> to(int i) const;
-
-	/**
-	 * @brief Check whether the graph is connected, i.e. all nodes are reachable.
-	 */
-	bool connected() const;
-
-	/**
-	 * @brief Distance to node i
-	 */
-	T distance(int i) const;
-
-	std::string to_string() const;
-
-private:
-	std::vector<int> previous_;
-	std::vector<T> distances_;
-};
-
-/**
- * @brief Dijkstra's algorithm: shortest paths from node i.
- * @param i		node index
- * @param graph	adjacency matrix
- *
- * dijstra<int>(), dijstra<float>() and dijstra<double>() are defined.
- */
-template<typename T>
-Paths<T> dijstra(const int i, const std::vector<std::vector<T>> &graph_);
-
-class Visibility {
-	public:
-	Visibility() {};
-	explicit Visibility(int n_cameras);
-	explicit Visibility(const std::vector<std::vector<int>> &graph);
-
-	void init(int n_cameras);
-
-	template<typename T>
-	void update(const std::vector<T> &add);
-
-	void mask(int a, int b);
-	void unmask(int a, int b);
-
-	/**
-	 * @brief Find the shortest path, weighted by visibility, to camera i.
-	 */
-	Paths<float> shortestPath(int i) const;
-
-	protected:
-	std::vector<int> neighbors(int i) const;
-	int distance(int a, int b) const;
-
-	private:
-
-	int n_cameras_;
-	// adjacency matrix
-	std::vector<std::vector<int>> graph_;
-	// masked values (mask_[i][j]) are not used
-	std::vector<std::vector<bool>> mask_;
-};
-
-}
-}
-
-#endif
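
A compact sketch of the intended Visibility workflow from the header above, assuming three cameras; the per-frame visibility flags and the wrapper function are illustrative.

#include <vector>
#include "visibility.hpp"

static void exampleVisibility() {
	ftl::calibration::Visibility visibility(3);  // three cameras

	// One update() per captured frame set; each flag says whether the
	// calibration target was detected by that camera (values made up).
	visibility.update(std::vector<bool>{true, true, false});
	visibility.update(std::vector<bool>{true, false, true});
	visibility.update(std::vector<bool>{false, true, true});

	// Most co-visible (shortest weighted) paths from camera 0.
	auto paths = visibility.shortestPath(0);
	if (paths.connected()) {
		// paths.to(2) lists the chain of cameras ending at camera 2.
		auto chain = paths.to(2);
		(void) chain;
	}
}
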
diff --git a/applications/calibration-ceres/test/CMakeLists.txt b/applications/calibration-ceres/test/CMakeLists.txt
deleted file mode 100644
index 6c90f1d58ff1aa6e8c4d2c7bac0c2fd60f53917b..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/test/CMakeLists.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-add_executable(visibility_unit
-	./tests.cpp
-	../src/visibility.cpp
-	./visibility_unit.cpp
-)
-
-target_include_directories(visibility_unit PUBLIC ../src/)
-target_link_libraries(visibility_unit Threads::Threads dl ftlcommon)
-
-add_test(VisibilityTest visibility_unit)
-
-################################################################################
-
-#add_executable(calibration_unit
-#	./tests.cpp
-#	../src/calibration.cpp
-#	../src/optimization.cpp
-#	../src/calibration_data.cpp
-#	./calibration_unit.cpp
-#)
-
-#target_include_directories(calibration_unit PUBLIC ../src/)
-#target_link_libraries(calibration_unit Threads::Threads dl ftlcommon Eigen3::Eigen ceres)
-
-#add_test(CalibrationTest calibration_unit WORKING_DIRECTORY ${CMAKE_SOURCE_DIR})
diff --git a/applications/calibration-ceres/test/calibration_unit.cpp b/applications/calibration-ceres/test/calibration_unit.cpp
deleted file mode 100644
index 19e24dc9ff2d9dee317a6433024f4f1f93c92ac5..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/test/calibration_unit.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-#include "catch.hpp"
-
-#include "calibration.hpp"
-#include "optimization.hpp"
-
-#include <vector>
-#include <opencv2/core/core.hpp>
-
-using std::vector;
-using std::string;
-
-using cv::Mat;
-using cv::Point3d;
-using cv::Point2d;
-
-using namespace ftl::calibration;
-
-void loadData(const string &fname, vector<Mat> &K, vector<vector<int>> &visible,
-	vector<vector<Point2d>> &points) {
-
-	cv::FileStorage fs(fname, cv::FileStorage::READ);
-
-	fs["K"] >> K;
-	fs["points2d"] >> points;
-	fs["visible"] >> visible;
-	fs.release();
-}
-
-/* TEST_CASE("Camera calibration and parameter optimization", "") {
-
-    vector<Mat> K;
-    vector<vector<int>> visible;
-	vector<vector<Point2d>> observations;
-
-	SECTION("Load data") {
-		loadData("data/points.yml", K, visible, observations);
-		//REQUIRE(K.size() != 0);
-	}
-
-	//int ncameras = K.size();
-	//int npoints = observations[0].size();
-}*/
diff --git a/applications/calibration-ceres/test/tests.cpp b/applications/calibration-ceres/test/tests.cpp
deleted file mode 100644
index 49d9bf522ea5c5ba028e1e38be57f5174cff0f67..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/test/tests.cpp
+++ /dev/null
@@ -1,3 +0,0 @@
-#define CATCH_CONFIG_MAIN
-
-#include "catch.hpp"
\ No newline at end of file
diff --git a/applications/calibration-ceres/test/visibility_unit.cpp b/applications/calibration-ceres/test/visibility_unit.cpp
deleted file mode 100644
index ebdfc178b7fc465f1114620e46a3587865f91180..0000000000000000000000000000000000000000
--- a/applications/calibration-ceres/test/visibility_unit.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include "catch.hpp"
-#include "visibility.hpp"
-
-using std::vector;
-using ftl::calibration::dijstra;
-
-/// https://www.geeksforgeeks.org/dijkstras-shortest-path-algorithm-greedy-algo-7/
-static const vector<vector<int>> graph1 = {
-		{0, 4, 0, 0, 0, 0, 0, 8, 0},
-		{4, 0, 8, 0, 0, 0, 0,11, 0},
-		{0, 8, 0, 7, 0, 4, 0, 0, 2},
-		{0, 0, 7, 0, 9,14, 0, 0, 0},
-		{0, 0, 0, 9, 0,10, 0, 0, 0},
-		{0, 0, 4,14,10, 0, 2, 0, 0},
-		{0, 0, 0, 0, 0, 2, 0, 1, 6},
-		{8,11, 0, 0, 0, 0, 1, 0, 7},
-		{0, 0, 2, 0, 0, 0, 6, 7, 0}
-};
-
-TEST_CASE("Dijstra's Algorithm", "") {
-	SECTION("Find shortest paths") {
-		auto path = dijstra(0, graph1);
-
-		REQUIRE(path.distance(1) == 4);
-		REQUIRE(path.distance(2) == 12);
-		REQUIRE(path.distance(3) == 19);
-		REQUIRE(path.distance(4) == 21);
-		REQUIRE(path.distance(5) == 11);
-		REQUIRE(path.distance(6) == 9);
-		REQUIRE(path.distance(7) == 8);
-		REQUIRE(path.distance(8) == 14);
-
-		REQUIRE((path.to(1) == vector {1}));
-		REQUIRE((path.to(2) == vector {1, 2}));
-		REQUIRE((path.to(3) == vector {1, 2, 3}));
-		REQUIRE((path.to(4) == vector {7, 6, 5, 4}));
-		REQUIRE((path.to(5) == vector {7, 6, 5}));
-		REQUIRE((path.to(6) == vector {7, 6}));
-		REQUIRE((path.to(7) == vector {7}));
-		REQUIRE((path.to(8) == vector {1, 2, 8}));
-	}
-
-	SECTION("Check connectivity") {
-		auto path = dijstra(0, graph1);
-		REQUIRE(path.connected());
-	}
-}
diff --git a/applications/calibration-multi/CMakeLists.txt b/applications/calibration-multi/CMakeLists.txt
deleted file mode 100644
index d8c9c5b40a711dbd3778187163bfd4bba327bf7a..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/CMakeLists.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-set(CALIBMULTI
-	src/main.cpp
-	src/visibility.cpp
-	src/util.cpp
-	src/multicalibrate.cpp
-	../calibration-ceres/src/calibration.cpp
-)
-
-add_executable(ftl-calibrate-multi ${CALIBMULTI})
-
-target_include_directories(ftl-calibrate-multi PRIVATE src ../calibration-ceres/src)
-
-target_link_libraries(ftl-calibrate-multi ftlcalibration ftlcommon ftlnet ftlrgbd ftlstreams Threads::Threads ${OpenCV_LIBS} ceres)
diff --git a/applications/calibration-multi/src/main.cpp b/applications/calibration-multi/src/main.cpp
deleted file mode 100644
index ddd41ceb1b22f87e4aee84c19e52e9d5b0c8faf0..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/main.cpp
+++ /dev/null
@@ -1,861 +0,0 @@
-#include <loguru.hpp>
-#include <ftl/threads.hpp>
-#include <ftl/configuration.hpp>
-#include <ftl/net/universe.hpp>
-#include <ftl/rgbd/source.hpp>
-#include <ftl/rgbd/group.hpp>
-
-#include <ftl/calibration.hpp>
-
-#include <ftl/master.hpp>
-#include <ftl/streams/receiver.hpp>
-#include <ftl/streams/netstream.hpp>
-
-#include <opencv2/core.hpp>
-#include <opencv2/calib3d.hpp>
-#include <opencv2/aruco.hpp>
-#include <opencv2/core/eigen.hpp>
-#include <opencv2/opencv.hpp>
-
-#include <algorithm>
-#include <numeric>
-#include <fstream>
-
-#include "util.hpp"
-#include "multicalibrate.hpp"
-
-using std::string;
-using std::optional;
-
-using std::list;
-using std::vector;
-using std::map;
-using std::pair;
-using std::make_pair;
-
-using cv::Mat;
-using cv::Scalar;
-using cv::Size;
-using cv::Point2f;
-using cv::Point2d;
-using cv::Point3f;
-using cv::Point3d;
-using cv::Vec4f;
-using cv::Vec4d;
-
-using ftl::net::Universe;
-using ftl::rgbd::Source;
-using ftl::codecs::Channel;
-
-vector<Point3d> calibration_target =  {
-			Point3d(0.0, 0.0, 0.0),
-			Point3d(0.15, 0.0, 0.0),
-			Point3d(0.15, 0.15, 0.0),
-			Point3d(0.0, 0.15, 0.0),
-
-			Point3d(0.25, 0.0, 0.0),
-			Point3d(0.40, 0.0, 0.0),
-			Point3d(0.40, 0.15, 0.0),
-			Point3d(0.25, 0.15, 0.0)
-};
-
-Mat createCameraMatrix(const ftl::rgbd::Camera &parameters) {
-	Mat m = (cv::Mat_<double>(3,3) <<
-				parameters.fx,	0.0,			-parameters.cx,
-				0.0, 			parameters.fy,	-parameters.cy,
-				0.0,			0.0,			 1.0);
-	return m;
-}
-
-struct CalibrationParams {
-	string output_path;
-	string registration_file;
-	vector<size_t> idx_cameras;
-	bool save_extrinsic = true;
-	bool save_intrinsic = false;
-	bool optimize_intrinsic = false;
-	int reference_camera = -1;
-	double alpha = 0.0;
-	Size size;
-	bool offline = false;
-};
-
-////////////////////////////////////////////////////////////////////////////////
-// Visualization
-////////////////////////////////////////////////////////////////////////////////
-
-void stack(const vector<Mat> &img, Mat &out, const int rows, const int cols) {
-	Size size = img[0].size();
-	Size size_out = Size(size.width * cols, size.height * rows);
-	if (size_out != out.size() || out.type() != CV_8UC3) {
-		out = Mat(size_out, CV_8UC3, Scalar(0, 0, 0));
-	}
-
-	for (size_t i = 0; i < img.size(); i++) {
-		int row = i % rows;
-		int col = i / rows;
-		auto rect = cv::Rect(size.width * col, size.height * row, size.width, size.height);
-		img[i].copyTo(out(rect));
-	}
-}
-
-void stack(const vector<Mat> &img, Mat &out) {
-	// TODO
-	int rows = 2;
-	int cols = (img.size() + 1) / 2;
-	stack(img, out, rows, cols);
-}
-
-void visualizeCalibration(	MultiCameraCalibrationNew &calib, Mat &out,
-				 			vector<Mat> &rgb, const vector<Mat> &map1,
-							const vector<Mat> &map2, const vector<cv::Rect> &roi)
-{
-	vector<Scalar> colors = {
-		Scalar(64, 64, 255),
-		Scalar(64, 64, 255),
-		Scalar(64, 255, 64),
-		Scalar(64, 255, 64),
-	};
-
-	vector<int> markers = {cv::MARKER_SQUARE, cv::MARKER_DIAMOND};
-
-	for (size_t c = 0; c < rgb.size(); c++) {
-		cv::remap(rgb[c], rgb[c], map1[c], map2[c], cv::INTER_CUBIC);
-		cv::rectangle(rgb[c], roi[c], Scalar(24, 224, 24), 2);
-
-		for (int r = 50; r < rgb[c].rows; r = r+50) {
-			cv::line(rgb[c], cv::Point(0, r), cv::Point(rgb[c].cols-1, r), cv::Scalar(0,0,255), 1);
-		}
-
-		cv::putText(rgb[c],
-			"Camera " + std::to_string(c),
-			Point2i(roi[c].x + 10, roi[c].y + 30),
-			cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-	}
-	stack(rgb, out);
-}
-
-////////////////////////////////////////////////////////////////////////////////
-// RPC
-////////////////////////////////////////////////////////////////////////////////
-// Using Mat directly
-
-vector<Mat> getDistortionParametersRPC(ftl::net::Universe* net, ftl::stream::Net* nstream) {
-	return net->call<vector<Mat>>(nstream->getPeer(), "get_distortion");
-}
-
-bool setRectifyRPC(	ftl::net::Universe* net, ftl::stream::Net* nstream,
-					bool enabled) {
-	return net->call<bool>(nstream->getPeer(), "set_rectify", enabled);
-}
-
-bool setIntrinsicsRPC(	ftl::net::Universe* net, ftl::stream::Net* nstream,
-						const Size &size, const vector<Mat> &K, const vector<Mat> &D) {
-
-	return net->call<bool>(nstream->getPeer(), "set_intrinsics",
-							size, K[0], D[0], K[1], D[1] );
-}
-
-bool setExtrinsicsRPC(	ftl::net::Universe* net, ftl::stream::Net* nstream,
-						const Mat &R, const Mat &t) {
-	return net->call<bool>(nstream->getPeer(), "set_extrinsics", R, t);
-}
-
-bool setPoseRPC(ftl::net::Universe* net, ftl::stream::Net* nstream,
-				const Mat &pose) {
-	return net->call<bool>(nstream->getPeer(), "set_pose",  pose);
-}
-
-bool saveCalibrationRPC(ftl::net::Universe* net, ftl::stream::Net* nstream) {
-	return net->call<bool>(nstream->getPeer(), "save_calibration");
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-/* run calibration and perform RPC to update calibration on nodes */
-
-void calibrateRPC(	ftl::net::Universe* net,
-					MultiCameraCalibrationNew &calib,
-					const CalibrationParams &params,
-					vector<ftl::stream::Net*> &nstreams,
-					vector<Mat> &map1,
-					vector<Mat> &map2,
-					vector<cv::Rect> &roi) {
-	int reference_camera = params.reference_camera;
-	if (params.reference_camera < 0) {
-		reference_camera = calib.getOptimalReferenceCamera();
-		reference_camera -= (reference_camera & 1);
-		LOG(INFO) << "optimal camera (automatic): " << reference_camera;
-	}
-	LOG(INFO) << "reference camera: " << reference_camera;
-
-	if (params.optimize_intrinsic) calib.setFixIntrinsic(0);
-
-	calib.calibrateAll(reference_camera);
-	vector<Mat> R, t;
-	calib.getCalibration(R, t);
-
-	size_t n_cameras = calib.getCamerasCount();
-
-	vector<Mat> R_rect(n_cameras), t_rect(n_cameras);
-	vector<Mat> Rt_out(n_cameras);
-	map1.resize(n_cameras);
-	map2.resize(n_cameras);
-	roi.resize(n_cameras);
-
-	for (size_t c = 0; c < n_cameras; c += 2) {
-		Mat K1 = calib.getCameraMat(c);
-		Mat K2 = calib.getCameraMat(c + 1);
-		Mat D1 = calib.getDistCoeffs(c);
-		Mat D2 = calib.getDistCoeffs(c + 1);
-		Mat P1, P2, Q;
-		Mat R1, R2;
-		Mat R_c1c2, T_c1c2;
-
-		calculateTransform(R[c], t[c], R[c + 1], t[c + 1], R_c1c2, T_c1c2);
-		cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, params.alpha);
-
-		R_c1c2 = R_c1c2.clone();
-		T_c1c2 = T_c1c2.clone();
-
-		// calculate extrinsics from rectified parameters
-		Mat _t = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
-		//Rt_out[c] = getMat4x4(R[c], t[c]) * getMat4x4(R1, _t).inv();
-		//Rt_out[c + 1] = getMat4x4(R[c + 1], t[c + 1]) * getMat4x4(R2, _t).inv();
-
-		LOG(INFO) << K1;
-		LOG(INFO) << K2;
-		LOG(INFO) << R_c1c2;
-		LOG(INFO) << T_c1c2;
-
-		LOG(INFO) << "--------------------------------------------------------";
-
-		auto *nstream = nstreams[c/2];
-		while(true) {
-			try {
-				if (params.save_intrinsic && params.optimize_intrinsic) {
-					// never update distortion during extrinsic calibration
-					setIntrinsicsRPC(net, nstream, params.size, {K1, K2}, {Mat(0, 0, CV_64FC1), Mat(0, 0, CV_64FC1)});
-				}
-				if (params.save_extrinsic) {
-					setExtrinsicsRPC(net, nstream, R_c1c2, T_c1c2);
-					setPoseRPC(net, nstream, getMat4x4(R[c], t[c]));
-				}
-				if (params.save_intrinsic || params.save_extrinsic) {
-					saveCalibrationRPC(net, nstream);
-					LOG(INFO) << "CALIBRATION SENT";
-				}
-				break;
-
-			} catch (std::exception &ex) {
-				LOG(ERROR) << "RPC failed: " << ex.what();
-				std::this_thread::sleep_for(std::chrono::seconds(1));
-			}
-		}
-
-		// for visualization
-		Size new_size;
-		cv::stereoRectify(K1, D1, K2, D2, params.size, R_c1c2, T_c1c2, R1, R2, P1, P2, Q, 0, 1.0, new_size, &roi[c], &roi[c + 1]);
-		//roi[c] = cv::Rect(0, 0, params.size.width, params.size.height);
-		//roi[c+1] = cv::Rect(0, 0, params.size.width, params.size.height);
-		cv::initUndistortRectifyMap(K1, D1, R1, P1, params.size, CV_16SC2, map1[c], map2[c]);
-		cv::initUndistortRectifyMap(K2, D2, R2, P2, params.size, CV_16SC2, map1[c + 1], map2[c + 1]);
-	}
-}
-
-std::vector<cv::Point2d> findCalibrationTarget(const cv::Mat &im, const cv::Mat &K) {
-	// check input type
-	std::vector<std::vector<cv::Point2f>> corners;
-	std::vector<int> ids;
-
-	auto params = cv::aruco::DetectorParameters::create();
-	params->cornerRefinementMinAccuracy = 0.01;
-	params->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
-	cv::aruco::detectMarkers(im, cv::aruco::getPredefinedDictionary(cv::aruco::DICT_5X5_100), corners, ids, params, cv::noArray(), K);
-
-	if (corners.size() > 2) { LOG(ERROR) << "Too many ArUco tags in image"; }
-	if (corners.size() != 2) { return {}; }
-
-	const size_t ncorners = 4;
-	const size_t ntags = ids.size();
-
-	std::vector<cv::Point2d> points;
-
-	if (ids[0] == 1) {
-		std::swap(ids[0], ids[1]);
-		std::swap(corners[0], corners[1]);
-	}
-
-	if (ids[0] != 0) {
-		LOG(ERROR) << "Tags ID0 and ID1 expected";
-		return {};
-	}
-
-	points.reserve(ntags*ncorners);
-
-	for (size_t i = 0; i < ntags; i++) {
-		for (size_t j = 0; j < ncorners; j++) {
-			points.push_back(corners[i][j]);
-		}
-	}
-
-	return points;
-}
-
-
-void runCameraCalibration(	ftl::Configurable* root,
-							int n_views, int min_visible,
-							string path, string filename,
-							bool save_input,
-							CalibrationParams &params)
-{
-	Universe *net = ftl::create<Universe>(root, "net");
-	ftl::ctrl::Master ctrl(root, net);
-
-	net->start();
-	net->waitConnections();
-
-	ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
-	ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver");
-	gen->setStream(stream);
-	auto stream_uris = net->findAll<std::string>("list_streams");
-	std::sort(stream_uris.begin(), stream_uris.end());
-	std::vector<ftl::stream::Net*> nstreams;
-
-	int count = 0;
-	for (auto &s : stream_uris) {
-		LOG(INFO) << " --- found stream: " << s;
-		auto *nstream = ftl::create<ftl::stream::Net>(stream, std::to_string(count), net);
-		std::string name = *(nstream->get<std::string>("name"));
-		nstream->set("uri", s);
-		nstreams.push_back(nstream);
-		stream->add(nstream);
-
-		++count;
-	}
-
-	const size_t n_sources = nstreams.size();
-	const size_t n_cameras = n_sources * 2;
-	size_t reference_camera = 0;
-
-	std::mutex mutex;
-	std::atomic<bool> new_frames = false;
-	vector<Mat> rgb_(n_cameras), rgb_new(n_cameras);
-	vector<Mat> camera_parameters(n_cameras);
-	Size res;
-
-	gen->onFrameSet([stream, &mutex, &new_frames, &rgb_new, &camera_parameters, &res](ftl::rgbd::FrameSet &fs) {
-		stream->select(fs.id, Channel::Left + Channel::Right);
-		if (fs.frames.size() != (rgb_new.size()/2)) {
-			// nstreams.size() == (rgb_new.size()/2)
-			LOG(ERROR)	<< "frames.size() != nstreams.size(), "
-						<< fs.frames.size() << " != " << (rgb_new.size()/2);
-		}
-
-		UNIQUE_LOCK(mutex, CALLBACK);
-		bool good = true;
-		try {
-			for (size_t i = 0; i < fs.frames.size(); i ++) {
-				if (!fs.frames[i].hasChannel(Channel::Left)) {
-					good = false;
-					LOG(ERROR) << "No left channel";
-					break;
-				}
-
-				if (!fs.frames[i].hasChannel(Channel::Right)) {
-					good = false;
-					LOG(ERROR) << "No right channel";
-					break;
-				}
-
-				auto idx = stream->originStream(0, i);
-				CHECK(idx >= 0) << "negative index";
-
-				fs.frames[i].download(Channel::Left+Channel::Right);
-				Mat &left = fs.frames[i].get<Mat>(Channel::Left);
-				Mat &right = fs.frames[i].get<Mat>(Channel::Right);
-
-				/*
-				// note: also returns empty sometimes
-				fs.frames[i].upload(Channel::Left+Channel::Right);
-				Mat left, right;
-				fs.frames[i].get<cv::cuda::GpuMat>(Channel::Left).download(left);
-				fs.frames[i].get<cv::cuda::GpuMat>(Channel::Right).download(right);
-				*/
-
-				CHECK(!left.empty() && !right.empty());
-
-				cv::cvtColor(left, rgb_new[2*idx], cv::COLOR_BGRA2BGR);
-				cv::cvtColor(right, rgb_new[2*idx+1], cv::COLOR_BGRA2BGR);
-
-				camera_parameters[2*idx] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getLeftCamera()),
-					Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height), rgb_new[2*idx].size());
-				camera_parameters[2*idx+1] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getRightCamera()),
-					Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height), rgb_new[2*idx].size());
-
-				if (res.empty()) res = rgb_new[2*idx].size();
-			}
-		}
-	catch (const std::exception &ex) {
-			LOG(ERROR) << "exception: " << ex.what();
-			good = false;
-		}
-		catch (...)  {
-			LOG(ERROR) << "unknown exception";
-			good = false;
-		}
-		new_frames = good;
-		return true;
-	});
-
-	stream->begin();
-	ftl::timer::start(false);
-
-	while(true) {
-		if (!res.empty()) {
-			params.size = res;
-			LOG(INFO) << "Camera resolution: " << params.size;
-			break;
-		}
-		std::this_thread::sleep_for(std::chrono::seconds(1));
-	}
-
-	for (auto *nstream: nstreams) {
-		bool res = true;
-		while(res) {
-			try { res = setRectifyRPC(net, nstream, false); }
-			catch (...) {}
-
-			if (res) {
-				LOG(ERROR) << "set_rectify() failed for " << *(nstream->get<string>("uri"));
-				std::this_thread::sleep_for(std::chrono::milliseconds(100));
-			}
-			else {
-				LOG(INFO) << "rectification disabled for " << *(nstream->get<string>("uri"));
-			}
-		}
-	}
-
-	// TODO: parameter for calibration target type
-	auto calib = MultiCameraCalibrationNew(	n_cameras, reference_camera,
-											params.size, CalibrationTarget(0.15)
-	);
-	calib.object_points_ = calibration_target;
-
-	int iter = 0;
-	Mat show;
-	cv::Mat show2;
-
-	vector<int> visible;
-	vector<vector<Point2d>> points(n_cameras);
-
-	vector<Mat> rgb(n_cameras);
-	std::this_thread::sleep_for(std::chrono::seconds(3)); // rectification disabled, has some delay
-
-	while(calib.getMinVisibility() < static_cast<size_t>(n_views)) {
-		loop:
-		cv::waitKey(10);
-
-		while (true) {
-			if (new_frames) {
-				UNIQUE_LOCK(mutex, LOCK);
-				rgb.swap(rgb_new);
-				new_frames = false;
-				break;
-			}
-			cv::waitKey(10);
-		}
-
-		for (Mat &im : rgb) {
-			if (im.empty()) {
-				LOG(ERROR) << "EMPTY";
-				goto loop;
-			}
-		}
-
-		visible.clear();
-		visible.resize(n_cameras, 0);
-		int n_found = 0;
-
-		for (size_t i = 0; i < n_cameras; i++) {
-			auto new_points = findCalibrationTarget(rgb[i], camera_parameters[i]);
-			if (new_points.size() == 0) {
-				points[i] = vector(8, Point2d(0.0,0.0));
-			}
-			else {
-				points[i] = new_points;
-				visible[i] = 1;
-				n_found++;
-			}
-		}
-
-		if (n_found >= min_visible) {
-			calib.addPoints(points, visible);
-
-			if (save_input) {
-				for (size_t i = 0; i < n_cameras; i++) {
-					cv::imwrite(path + std::to_string(i) + "_" + std::to_string(iter) + ".jpg", rgb[i]);
-				}
-			}
-			iter++;
-		}
-
-		for (size_t i = 0; i < n_cameras; i++) {
-			if (visible[i]) {
-				for (size_t j = 0; j < points[i].size(); j++) {
-					cv::drawMarker(	rgb[i], points[i][j], Scalar(42, 255, 42), cv::MARKER_CROSS, 25, 1);
-					cv::putText(rgb[i], std::to_string(j), Point2i(points[i][j]),
-						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-				}
-			}
-
-			// index
-			cv::putText(rgb[i],
-						"Camera " + std::to_string(i),
-						Point2i(10, 30),
-						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-
-			// resolution
-			cv::putText(rgb[i],
-						"[" + std::to_string(rgb[i].size().width) + "x" + std::to_string(rgb[i].size().height) + "]",
-						Point2i(rgb[i].size().width-150, 30),
-						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-
-			// uri
-			cv::putText(rgb[i],
-						stream_uris[i/2],
-						Point2i(10, rgb[i].rows-10),
-						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-
-			// remaining frames
-			cv::putText(rgb[i],
-						std::to_string(std::max(0, (int) (n_views - calib.getViewsCount(i)))),
-						Point2i(rgb[i].size().width-150, rgb[i].rows-10),
-						cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, Scalar(64, 64, 255), 1);
-
-		}
-
-		stack(rgb, show);
-		cv::namedWindow("Cameras", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
-		//cv::resize(show, show2, cv::Size(float(show.cols) / float(show.rows) * 1080.0f, 1080));
-		cv::resizeWindow("Cameras", cv::Size(2*1920,1080));
-		cv::imshow("Cameras", show);
-	}
-	cv::destroyWindow("Cameras");
-
-	for (size_t i = 0; i < nstreams.size(); i++) {
-		while(true) {
-			try {
-				vector<Mat> D = getDistortionParametersRPC(net, nstreams[i]);
-				LOG(INFO) << "K[" << 2*i << "] = \n" << camera_parameters[2*i];
-				LOG(INFO) << "D[" << 2*i << "] = " << D[0];
-				LOG(INFO) << "K[" << 2*i+1 << "] = \n" << camera_parameters[2*i+1];
-				LOG(INFO) << "D[" << 2*i+1 << "] = " << D[1];
-				calib.setCameraParameters(2*i, camera_parameters[2*i], D[0]);
-				calib.setCameraParameters(2*i+1, camera_parameters[2*i+1], D[1]);
-				break;
-			}
-			catch (...) {}
-		}
-	}
-
-	Mat out;
-	vector<Mat> map1, map2;
-	vector<cv::Rect> roi;
-	vector<size_t> idx;
-	calibrateRPC(net, calib, params, nstreams, map1, map2, roi);
-
-	if (save_input) {
-		cv::FileStorage fs(path + filename, cv::FileStorage::WRITE);
-		calib.saveInput(fs);
-		fs.release();
-	}
-
-
-	// visualize
-	while(cv::waitKey(10) != 27) {
-
-		while (!new_frames) {
-			if (cv::waitKey(50) != -1) { ftl::running = false; }
-		}
-
-		{
-			UNIQUE_LOCK(mutex, LOCK)
-			rgb.swap(rgb_new);
-			new_frames = false;
-		}
-
-		visualizeCalibration(calib, out, rgb, map1, map2, roi);
-		cv::namedWindow("Calibration", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
-		cv::imshow("Calibration", out);
-	}
-
-	for (size_t i = 0; i < nstreams.size(); i++) {
-		while(true) {
-			try {
-				setRectifyRPC(net, nstreams[i], true);
-				break;
-			}
-			catch (...) {}
-		}
-	}
-
-	ftl::running = false;
-	ftl::timer::stop();
-	ftl::pool.stop(true);
-}
-
-void runCameraCalibrationPath(	ftl::Configurable* root,
-								string path, string filename,
-								CalibrationParams &params)
-{
-	Universe *net = ftl::create<Universe>(root, "net");
-	ftl::ctrl::Master ctrl(root, net);
-
-	net->start();
-	net->waitConnections();
-
-	ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
-	ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver");
-	gen->setStream(stream);
-	auto stream_uris = net->findAll<std::string>("list_streams");
-	std::sort(stream_uris.begin(), stream_uris.end());
-	std::vector<ftl::stream::Net*> nstreams;
-
-	int count = 0;
-	for (auto &s : stream_uris) {
-		LOG(INFO) << " --- found stream: " << s;
-		auto *nstream = ftl::create<ftl::stream::Net>(stream, std::to_string(count), net);
-		std::string name = *(nstream->get<std::string>("name"));
-		nstream->set("uri", s);
-		nstreams.push_back(nstream);
-		stream->add(nstream);
-
-		++count;
-	}
-
-	const size_t n_sources = nstreams.size();
-	const size_t n_cameras = n_sources * 2;
-	size_t reference_camera = 0;
-
-	std::mutex mutex;
-	std::atomic<bool> new_frames = false;
-	vector<Mat> rgb_(n_cameras), rgb_new(n_cameras);
-	vector<Mat> camera_parameters(n_cameras);
-	Size res;
-
-	gen->onFrameSet([stream, &mutex, &new_frames, &rgb_new, &camera_parameters, &res](ftl::rgbd::FrameSet &fs) {
-		stream->select(fs.id, Channel::Left + Channel::Right);
-		if (fs.frames.size() != (rgb_new.size()/2)) {
-			// nstreams.size() == (rgb_new.size()/2)
-			LOG(ERROR)	<< "frames.size() != nstreams.size(), "
-						<< fs.frames.size() << " != " << (rgb_new.size()/2);
-		}
-
-		UNIQUE_LOCK(mutex, CALLBACK);
-		bool good = true;
-		try {
-			for (size_t i = 0; i < fs.frames.size(); i ++) {
-				if (!fs.frames[i].hasChannel(Channel::Left)) {
-					good = false;
-					LOG(ERROR) << "No left channel";
-					break;
-				}
-
-				if (!fs.frames[i].hasChannel(Channel::Right)) {
-					good = false;
-					LOG(ERROR) << "No right channel";
-					break;
-				}
-
-				auto idx = stream->originStream(0, i);
-				CHECK(idx >= 0) << "negative index";
-
-				fs.frames[i].download(Channel::Left+Channel::Right);
-				Mat &left = fs.frames[i].get<Mat>(Channel::Left);
-				Mat &right = fs.frames[i].get<Mat>(Channel::Right);
-
-				CHECK(!left.empty() && !right.empty());
-
-				cv::cvtColor(left, rgb_new[2*idx], cv::COLOR_BGRA2BGR);
-				cv::cvtColor(right, rgb_new[2*idx+1], cv::COLOR_BGRA2BGR);
-				/*
-				camera_parameters[2*idx] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getLeftCamera()),
-					rgb_new[2*idx].size(), Size(fs.frames[i].getLeftCamera().width, fs.frames[i].getLeftCamera().height));
-				camera_parameters[2*idx+1] = ftl::calibration::scaleCameraMatrix(createCameraMatrix(fs.frames[i].getRightCamera()),
-					rgb_new[2*idx].size(), Size(fs.frames[i].getRightCamera().width, fs.frames[i].getRightCamera().height));
-
-				if (res.empty()) res = rgb_new[2*idx].size();*/
-			}
-		}
-	catch (const std::exception &ex) {
-			LOG(ERROR) << "exception: " << ex.what();
-			good = false;
-		}
-		catch (...)  {
-			LOG(ERROR) << "unknown exception";
-			good = false;
-		}
-		new_frames = good;
-		return true;
-	});
-
-	stream->begin();
-	ftl::timer::start(false);
-
-	for (auto *nstream: nstreams) {
-		bool res = true;
-		while(res) {
-			try { res = setRectifyRPC(net, nstream, false); }
-			catch (...) {}
-
-			if (res) {
-				LOG(ERROR) << "set_rectify() failed for " << *(nstream->get<string>("uri"));
-				std::this_thread::sleep_for(std::chrono::milliseconds(100));
-			}
-			else {
-				LOG(INFO) << "rectification disabled for " << *(nstream->get<string>("uri"));
-			}
-		}
-	}
-
-	// TODO: parameter for calibration target type
-	auto calib = MultiCameraCalibrationNew(	n_cameras, reference_camera,
-											params.size, CalibrationTarget(0.150)
-	);
-	calib.object_points_ = calibration_target;
-
-	cv::FileStorage fs(path + filename, cv::FileStorage::READ);
-	fs["resolution"] >> params.size;
-	params.idx_cameras.resize(n_cameras);
-	std::iota(params.idx_cameras.begin(), params.idx_cameras.end(), 0);
-	calib.loadInput(path + filename, params.idx_cameras);
-
-	/*for (size_t i = 0; i < nstreams.size(); i++) {
-		while(true) {
-			try {
-				if (camera_parameters[2*i].empty() || camera_parameters[2*i+1].empty()) {
-					std::this_thread::sleep_for(std::chrono::seconds(1));
-					continue;
-				}
-				vector<Mat> D = getDistortionParametersRPC(net, nstreams[i]);
-				LOG(INFO) << "K[" << 2*i << "] = \n" << camera_parameters[2*i];
-				LOG(INFO) << "D[" << 2*i << "] = " << D[0];
-				LOG(INFO) << "K[" << 2*i+1 << "] = \n" << camera_parameters[2*i+1];
-				LOG(INFO) << "D[" << 2*i+1 << "] = " << D[1];
-				calib.setCameraParameters(2*i, camera_parameters[2*i], D[0]);
-				calib.setCameraParameters(2*i+1, camera_parameters[2*i+1], D[1]);
-				break;
-			}
-			catch (...) {}
-		}
-	}*/
-
-
-	Mat out;
-	vector<Mat> map1, map2;
-	vector<cv::Rect> roi;
-	vector<size_t> idx;
-	calibrateRPC(net, calib, params, nstreams, map1, map2, roi);
-
-
-	vector<Mat> rgb(n_cameras);
-
-	// visualize
-	while(cv::waitKey(10) != 27) {
-
-		while (!new_frames) {
-			if (cv::waitKey(50) != -1) { ftl::running = false; }
-		}
-
-		{
-			UNIQUE_LOCK(mutex, LOCK)
-			rgb.swap(rgb_new);
-			new_frames = false;
-		}
-
-		visualizeCalibration(calib, out, rgb, map1, map2, roi);
-		cv::namedWindow("Calibration", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
-		cv::imshow("Calibration", out);
-	}
-
-	for (size_t i = 0; i < nstreams.size(); i++) {
-		while(true) {
-			try {
-				setRectifyRPC(net, nstreams[i], true);
-				break;
-			}
-			catch (...) {}
-		}
-	}
-
-	ftl::running = false;
-	ftl::timer::stop();
-	ftl::pool.stop(true);
-}
-
-
-int main(int argc, char **argv) {
-	auto options = ftl::config::read_options(&argv, &argc);
-	auto root = ftl::configure(argc, argv, "registration_default");
-
-	// run calibration from saved input?
-	const bool load_input = root->value<bool>("load_input", false);
-	// should calibration input be saved
-	const bool save_input = root->value<bool>("save_input", false);
-	// should extrinsic calibration be saved (only used with load_input)
-	const bool save_extrinsic = root->value<bool>("save_extrinsic", true);
-	// should intrinsic calibration be saved
-	const bool save_intrinsic = root->value<bool>("save_intrinsic", false);
-	const bool optimize_intrinsic = root->value<bool>("optimize_intrinsic", false);
-	// directory where calibration data and images are saved, if save_input enabled
-	const string calibration_data_dir = root->value<string>("calibration_data_dir", "./");
-	// file to save calibration input (2d points and visibility)
-	const string calibration_data_file = root->value<string>("calibration_data_file", "data.yml");
-	// in how many cameras should the pattern be visible
-	const int min_visible = root->value<int>("min_visible", 3);
-	// minimum for how many times pattern is seen per camera
-	const int n_views = root->value<int>("n_views", 500);
-	// reference camera, -1 for automatic
-	const int ref_camera = root->value<int>("reference_camera", -1);
-	// registration file path
-	const string registration_file = root->value<string>("registration_file", FTL_LOCAL_CONFIG_ROOT "/registration.json");
-	// location where extrinsic calibration files saved
-	const string output_directory = root->value<string>("output_directory", "./");
-
-	const bool offline = root->value<bool>("offline", false);
-
-	CalibrationParams params;
-	params.offline = offline;
-	params.save_extrinsic = save_extrinsic;
-	params.save_intrinsic = save_intrinsic;
-	params.optimize_intrinsic = optimize_intrinsic;
-	params.output_path = output_directory;
-	params.registration_file = registration_file;
-	params.reference_camera = ref_camera;
-
-	LOG(INFO)	<< "\n"
-				<< "\n"
-				<< "\n                save_input: " << (int) save_input
-//				<< "\n                load_input: " << (int) load_input
-//				<< "\n            save_extrinsic: " << (int) save_extrinsic
-//				<< "\n            save_intrinsic: " << (int) save_intrinsic
-				<< "\n        optimize_intrinsic: " << (int) optimize_intrinsic
-//				<< "\n      calibration_data_dir: " << calibration_data_dir
-//				<< "\n     calibration_data_file: " << calibration_data_file
-				<< "\n               min_visible: " << min_visible
-				<< "\n                   n_views: " << n_views
-				<< "\n          reference_camera: " << ref_camera << (ref_camera != -1 ? "" : " (automatic)")
-//				<< "\n         registration_file: " << registration_file
-//				<< "\n          output_directory: " << output_directory
-				<< "\n";
-
-	if (load_input) {
-		runCameraCalibrationPath(root, calibration_data_dir, calibration_data_file, params);
-	}
-	else {
-		runCameraCalibration(root, n_views, min_visible, calibration_data_dir, calibration_data_file, save_input, params);
-	}
-
-	return 0;
-}
diff --git a/applications/calibration-multi/src/multicalibrate.cpp b/applications/calibration-multi/src/multicalibrate.cpp
deleted file mode 100644
index 090cce9da524ba8f2fdc47d05bfaad985a969ce0..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/multicalibrate.cpp
+++ /dev/null
@@ -1,687 +0,0 @@
-#include "multicalibrate.hpp"
-
-#include "calibration_data.hpp"
-#include "calibration.hpp"
-
-#include <ftl/calibration/optimize.hpp>
-
-#include <opencv2/core.hpp>
-#include <opencv2/calib3d.hpp>
-
-#include <loguru.hpp>
-
-#include <map>
-
-using cv::Mat;
-using cv::Size;
-using cv::Point2d;
-using cv::Point3d;
-using cv::Vec4d;
-using cv::Scalar;
-
-using std::string;
-using std::vector;
-using std::map;
-using std::pair;
-using std::make_pair;
-
-double CalibrationTarget::estimateScale(vector<Point3d> points) {
-
-	// 1. calculate statistics
-	// 2. reject possible outliers
-	// 3. calculate scale factor
-
-	double f = 0.0;
-	double S = 0.0;
-	double m = 0.0;
-
-	vector<double> d(points.size() / 2, 0.0);
-
-	for (size_t i = 0; i < points.size(); i += 2) {
-		const Point3d &p1 = points[i];
-		const Point3d &p2 = points[i + 1];
-
-		Point3d p = p1 - p2;
-
-		double x = sqrt(p.x * p.x + p.y * p.y + p.z * p.z);
-		double prev_mean = m;
-		d[i/2] = x;
-
-		f = f + 1.0;
-		m = m + (x - m) / f;
-		S = S + (x - m) * (x - prev_mean);
-
-	}
-
-	double stddev = sqrt(S / f);
-	f = 0.0;
-
-	int outliers = 0;
-	double scale = 0.0;
-
-	for (double l : d) {
-		// TODO:	* Parameterize how large deviation allowed
-		//			* Validate this actually improves quality
-
-		if (std::abs(l - m) > 3.0 * stddev) {
-			outliers++;
-		}
-		else {
-			f += 1.0;
-			scale += 1.0 / l;
-		}
-		DCHECK(scale != INFINITY);
-	}
-
-	if (outliers != 0) {
-		LOG(WARNING) << "Outliers (large std. deviation in scale): " << outliers;
-	}
-
-	LOG(INFO) << "calibration target std. dev. " <<  stddev << " (" << (int) f << " samples), scale: " << scale * calibration_bar_length_ / f;
-
-	return scale * calibration_bar_length_ / f;
-
-	// TODO:	LM-optimization for scale.
-}
-
-MultiCameraCalibrationNew::MultiCameraCalibrationNew(
-			size_t n_cameras, size_t reference_camera, Size resolution,
-			CalibrationTarget target, int fix_intrinsics) :
-
-	target_(target),
-	visibility_graph_(n_cameras),
-	is_calibrated_(false),
-	n_cameras_(n_cameras),
-	reference_camera_(reference_camera),
-	min_visible_points_(50),
-
-	fix_intrinsics_(fix_intrinsics == 1 ? 5 : 0),
-	resolution_(resolution),
-	K_(n_cameras),
-	dist_coeffs_(n_cameras),
-	R_(n_cameras),
-	t_(n_cameras),
-
-	points3d_optimized_(n_cameras),
-	points3d_(n_cameras),
-	points2d_(n_cameras),
-	visible_(n_cameras),
-
-	fm_method_(cv::FM_8POINT), // RANSAC/LMEDS results need validation (does not work)
-	fm_ransac_threshold_(0.95),
-	fm_confidence_(0.90)
-{
-	for (auto &K : K_) { K = Mat::eye(Size(3, 3), CV_64FC1); }
-	for (auto &d : dist_coeffs_) { d = Mat(Size(5, 1), CV_64FC1, Scalar(0.0)); }
-}
-
-Mat MultiCameraCalibrationNew::getCameraMat(size_t idx) {
-	DCHECK(idx < n_cameras_);
-	Mat K;
-	K_[idx].copyTo(K);
-	return K;
-}
-
-Mat MultiCameraCalibrationNew::getCameraMatNormalized(size_t idx, double scale_x, double scale_y)
-{
-	Mat K = getCameraMat(idx);
-	CHECK((scale_x != 0.0 && scale_y != 0.0) || ((scale_x == 0.0) && scale_y == 0.0));
-
-	scale_x = scale_x / (double) resolution_.width;
-	scale_y = scale_y / (double) resolution_.height;
-
-	Mat scale(Size(3, 3), CV_64F, 0.0);
-	scale.at<double>(0, 0) = scale_x;
-	scale.at<double>(1, 1) = scale_y;
-	scale.at<double>(2, 2) = 1.0;
-
-	return (scale * K);
-}
-
-Mat MultiCameraCalibrationNew::getDistCoeffs(size_t idx) {
-	DCHECK(idx < n_cameras_);
-	Mat D;
-	dist_coeffs_[idx].copyTo(D);
-	return D;
-}
-
-void MultiCameraCalibrationNew::setCameraParameters(size_t idx, const Mat &K, const Mat &distCoeffs) {
-	CHECK(idx < n_cameras_);
-	CHECK(K.size() == Size(3, 3));
-	CHECK(distCoeffs.total() == 5);
-	K.convertTo(K_[idx], CV_64FC1);
-	distCoeffs.convertTo(dist_coeffs_[idx], CV_64FC1);
-}
-
-void MultiCameraCalibrationNew::setCameraParameters(size_t idx, const Mat &K) {
-	DCHECK(idx < n_cameras_);
-	setCameraParameters(idx, K, dist_coeffs_[idx]);
-}
-
-void MultiCameraCalibrationNew::addPoints(vector<vector<Point2d>> points, vector<int> visible) {
-	DCHECK(points.size() == visible.size());
-	DCHECK(visible.size() == n_cameras_);
-
-	for (size_t i = 0; i < n_cameras_; i++) {
-		visible_[i].insert(visible_[i].end(), points[i].size(), visible[i]);
-		points2d_[i].insert(points2d_[i].end(), points[i].begin(), points[i].end());
-	}
-	visibility_graph_.update(visible);
-}
-
-void MultiCameraCalibrationNew::reset() {
-	is_calibrated_ = false;
-	weights_ = vector(n_cameras_, vector(points2d_[0].size(), 0.0));
-	inlier_ = vector(n_cameras_, vector(points2d_[0].size(), 0));
-	points3d_ = vector(n_cameras_, vector(points2d_[0].size(), Point3d()));
-	points3d_optimized_ = vector(points2d_[0].size(), Point3d());
-	R_ = vector<Mat>(n_cameras_, Mat::eye(Size(3, 3), CV_64FC1));
-	t_ = vector<Mat>(n_cameras_, Mat(Size(1, 3), CV_64FC1, Scalar(0.0)));
-}
-
-void MultiCameraCalibrationNew::saveInput(const string &filename) {
-	cv::FileStorage fs(filename, cv::FileStorage::WRITE);
-	saveInput(fs);
-	fs.release();
-}
-
-void MultiCameraCalibrationNew::saveInput(cv::FileStorage &fs) {
-	fs << "resolution" << resolution_;
-	fs << "K" << K_;
-	fs << "D" << dist_coeffs_;
-	fs << "points2d" << points2d_;
-	fs << "visible" << visible_;
-}
-
-void MultiCameraCalibrationNew::loadInput(const std::string &filename, const vector<size_t> &cameras_in) {
-	points2d_.clear();
-	points3d_.clear();
-	points3d_optimized_.clear();
-	visible_.clear();
-	inlier_.clear();
-	K_.clear();
-	dist_coeffs_.clear();
-	cv::FileStorage fs(filename, cv::FileStorage::READ);
-	vector<Mat> K;
-	vector<vector<Point2d>> points2d;
-	vector<vector<int>> visible;
-	fs["K"] >> K;
-	fs["D"] >> dist_coeffs_;
-	fs["points2d"] >> points2d;
-	fs["visible"] >> visible;
-	fs["resolution"] >> resolution_;
-	fs.release();
-
-	vector<size_t> cameras;
-	if (cameras_in.size() == 0) {
-		cameras.resize(K.size());
-		size_t i = 0;
-		for (auto &c : cameras) { c = i++; }
-	}
-	else {
-		cameras.reserve(cameras_in.size());
-		for (auto &c : cameras_in) { cameras.push_back(c); }
-	}
-
-	n_cameras_ = cameras.size();
-
-	points2d_.resize(n_cameras_);
-	points3d_.resize(n_cameras_);
-	visible_.resize(n_cameras_);
-
-	for (auto const &c : cameras) {
-		K_.push_back(K[c]);
-		LOG(INFO) << K[c];
-	}
-	for (size_t c = 0; c < n_cameras_; c++) {
-		points2d_[c].reserve(visible[0].size());
-		points3d_[c].reserve(visible[0].size());
-		visible_[c].reserve(visible[0].size());
-		points3d_optimized_.reserve(visible[0].size());
-	}
-
-	visibility_graph_ = Visibility(n_cameras_);
-	dist_coeffs_.resize(n_cameras_);
-
-	vector<vector<Point2d>> points2d_add(n_cameras_, vector<Point2d>());
-	vector<int> visible_add(n_cameras_);
-	for (size_t i = 0; i < visible[0].size(); i += target_.n_points) {
-		int count = 0;
-		for (size_t c = 0; c < n_cameras_; c++) {
-			count += visible[c][i];
-			points2d_add[c].clear();
-			points2d_add[c].insert(
-								points2d_add[c].begin(),
-								points2d[cameras[c]].begin() + i,
-								points2d[cameras[c]].begin() + i + target_.n_points);
-			visible_add[c] = visible[cameras[c]][i];
-		}
-		if (count >= 2) {
-			addPoints(points2d_add, visible_add);
-		}
-	}
-	reset();
-
-	DCHECK(points2d_.size() == n_cameras_);
-	DCHECK(points2d_.size() == visible_.size());
-	size_t len = visible_[0].size();
-	for (size_t i = 0; i < n_cameras_; i++) {
-		DCHECK(visible_[i].size() == len);
-		DCHECK(points2d_[i].size() == visible_[i].size());
-	}
-}
-
-size_t MultiCameraCalibrationNew::getViewsCount() {
-	return points2d_[0].size() / target_.n_points;
-}
-
-size_t MultiCameraCalibrationNew::getOptimalReferenceCamera() {
-	return (size_t) visibility_graph_.getOptimalCamera();
-}
-
-bool MultiCameraCalibrationNew::isVisible(size_t camera, size_t idx) {
-	return visible_[camera][idx] == 1;
-}
-
-bool MultiCameraCalibrationNew::isValid(size_t camera, size_t idx) {
-	return inlier_[camera][idx] >= 0;
-}
-
-bool MultiCameraCalibrationNew::isValid(size_t idx) {
-	for (auto camera : inlier_) {
-		if (camera[idx] > 0) return true;
-	}
-	return false;
-}
-
-vector<Point2d> MultiCameraCalibrationNew::getPoints(size_t camera, size_t idx) {
-	return vector<Point2d> (points2d_[camera].begin() + idx * target_.n_points,
-							points2d_[camera].begin() + (idx + 1) * target_.n_points);
-}
-
-
-void MultiCameraCalibrationNew::updatePoints3D(size_t camera, Point3d new_point,
-		size_t idx, const Mat &R, const Mat &t) {
-
-	int &f = inlier_[camera][idx];
-	Point3d &point = points3d_[camera][idx];
-	new_point = transformPoint(new_point, R, t);
-
-	if (f == -1) return;
-
-	if (f > 0) {
-		// TODO:	remove parameter (10.0 cm - 1.0m); over 0.25m difference
-		//			would most likely suggest very bad triangulation (sync? wrong match?)
-		// 			instead store all triangulations and handle outliers
-		//			(perhaps inverse variance weighted mean?)
-
-		if (euclideanDistance(point, new_point) > 10.0) {
-			LOG(ERROR) << "bad value (skipping) " << "(" << point << " vs " << new_point << ")";
-			f = -1;
-		}
-		else {
-			point = (point * f + new_point) / (double) (f + 1);
-			f++;
-		}
-	}
-	else {
-		point = new_point;
-		f = 1;
-	}
-}
-
-void MultiCameraCalibrationNew::updatePoints3D(size_t camera, vector<Point3d> points,
-		vector<size_t> idx, const Mat &R, const Mat &t) {
-
-	for (size_t i = 0; i < idx.size(); i++) {
-		updatePoints3D(camera, points[i], idx[i], R, t);
-	}
-}
-
-void MultiCameraCalibrationNew::getVisiblePoints(
-		vector<size_t> cameras, vector<vector<Point2d>> &points, vector<size_t> &idx) {
-
-	size_t n_points_total = points2d_[0].size();
-	DCHECK(cameras.size() <= n_cameras_);
-	DCHECK(n_points_total % target_.n_points == 0);
-
-	idx.clear();
-	idx.reserve(n_points_total);
-	points.clear();
-	points.resize(cameras.size(), {});
-
-	for (size_t i = 0; i < n_points_total; i += target_.n_points) {
-		bool visible_all = true;
-
-		for (auto c : cameras) {
-			for (size_t j = 0; j < target_.n_points; j++) {
-				visible_all &= isVisible(c, i + j);
-			}
-		}
-
-		if (!visible_all) { continue; }
-
-		for (size_t j = 0; j < target_.n_points; j++) {
-			idx.push_back(i + j);
-		}
-
-		for (size_t c = 0; c < cameras.size(); c++) {
-			points[c].insert(points[c].end(),
-							 points2d_[cameras[c]].begin() + i,
-							 points2d_[cameras[c]].begin() + i + target_.n_points
-			);
-		}
-	}
-
-	for (auto p : points) {	DCHECK(idx.size() == p.size()); }
-}
-
-double MultiCameraCalibrationNew::calibratePair(size_t camera_from, size_t camera_to, Mat &rmat, Mat &tvec) {
-
-
-	vector<size_t> idx;
-	vector<Point2d> points1, points2;
-	{
-		vector<vector<Point2d>> points2d;
-		getVisiblePoints({camera_from, camera_to}, points2d, idx);
-
-		points1 = points2d[0];
-		points2 = points2d[1];
-	}
-	DCHECK(points1.size() % target_.n_points == 0);
-	DCHECK(points1.size() == points2.size());
-
-	// cameras possibly lack line of sight?
-	DCHECK(points1.size() > 8);
-
-	Mat &K1 = K_[camera_from];
-	Mat &K2 = K_[camera_to];
-
-	Mat R1, R2, t1, t2;
-	R1 = Mat::eye(Size(3, 3), CV_64FC1);
-	t1 = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
-
-	vector<Point3d> points3d;
-
-	double err = ftl::calibration::computeExtrinsicParameters(K1, dist_coeffs_[camera_from], K2, dist_coeffs_[camera_to], points1, points2, object_points_, R2, t2, points3d);
-	calculateTransform(R2, t2, R1, t1, rmat, tvec);
-
-
-	// Store and average 3D points for both cameras (skip garbage)
-	if (err < 10.0) {
-		Mat rmat1, tvec1;
-		updatePoints3D(camera_from, points3d, idx, R1, t1);
-		updatePoints3D(camera_to, points3d, idx, R2, t2);
-	}
-	else {
-		LOG(ERROR)	<< "Large RMS error ("
-					<< reprojectionError(points3d, points2, K2, rmat, tvec)
-					<< "), not updating points!";
-	}
-
-	//LOG(INFO) << reprojectionError(points3d, points1, K1, R1, t1);
-	//LOG(INFO) << reprojectionError(points3d, points2, K2, R2, t2);
-
-	return err;
-}
-
-Point3d MultiCameraCalibrationNew::getPoint3D(size_t camera, size_t idx) {
-	return points3d_[camera][idx];
-}
-
-void MultiCameraCalibrationNew::calculateMissingPoints3D() {
-	points3d_optimized_.clear();
-	points3d_optimized_.resize(points3d_[reference_camera_].size());
-
-	for (size_t i = 0; i < points3d_optimized_.size(); i++) {
-		if (inlier_[reference_camera_][i] > 0) {
-			points3d_optimized_[i] = points3d_[reference_camera_][i];
-			continue;
-		}
-
-		if (!isValid(i)) continue;
-
-		double f = 0.0;
-		Point3d point(0.0, 0.0, 0.0);
-		for (size_t c = 0; c < n_cameras_; c++) {
-			if (inlier_[c][i] <= 0) { continue; }
-			point += transformPoint(getPoint3D(c, i), R_[c], t_[c]);
-			f += 1.0;
-		}
-
-		DCHECK(f != 0.0);
-
-		points3d_optimized_[i] = point / f;
-	}
-}
-
-double MultiCameraCalibrationNew::getReprojectionError(size_t c_from, size_t c_to, const Mat &K, const Mat &R, const Mat &t) {
-
-	vector<Point2d> points2d;
-	vector<Point3d> points3d;
-
-	for (size_t i = 0; i < points2d_[c_from].size(); i++) {
-		if (!isValid(i) || !isVisible(c_from, i) || !isVisible(c_to, i)) continue;
-		points2d.push_back(points2d_[c_from][i]);
-		points3d.push_back(points3d_[c_to][i]);
-	}
-
-	return reprojectionError(points3d, points2d, K, R, t);
-}
-
-double MultiCameraCalibrationNew::getReprojectionErrorOptimized(size_t c_from, const Mat &K, const Mat &R, const Mat &t) {
-
-	vector<Point2d> points2d;
-	vector<Point3d> points3d;
-
-	for (size_t i = 0; i < points2d_[c_from].size(); i++) {
-		if (!isValid(i) || !isVisible(c_from, i)) continue;
-		points2d.push_back(points2d_[c_from][i]);
-		points3d.push_back(points3d_optimized_[i]);
-	}
-
-	return reprojectionError(points3d, points2d, K, R, t);
-}
-
-
-double MultiCameraCalibrationNew::calibrateAll(int reference_camera) {
-	if (reference_camera != -1) {
-		DCHECK(reference_camera >= 0 && reference_camera < static_cast<int>(n_cameras_));
-		reference_camera_ = reference_camera;
-	}
-
-	for (const auto &K : K_) {
-		LOG(INFO) << K;
-	}
-
-	reset(); // remove all old calibration results
-	map<pair<size_t, size_t>, pair<Mat, Mat>> transformations;
-
-	// All cameras should be calibrated pairwise; otherwise all possible 3D
-	// points are not necessarily triangulated
-
-	auto paths = visibility_graph_.findShortestPaths(reference_camera_);
-
-	for (size_t c1 = 0; c1 < n_cameras_; c1++) {
-	for (size_t c2 = c1; c2 < n_cameras_; c2++) {
-		if (c1 == c2) {
-			transformations[make_pair(c1, c2)] =
-				make_pair(Mat::eye(Size(3, 3), CV_64FC1),
-				Mat(Size(1, 3), CV_64FC1, Scalar(0.0))
-			);
-			continue;
-		}
-
-		size_t n_visible = getVisiblePointsCount({c1, c2});
-
-		if (n_visible < min_visible_points_) {
-			LOG(INFO)	<< "Not enough points (" << min_visible_points_ << " required) between "
-						<< "cameras " << c1 << " and " << c2 << " (" << n_visible << " points), "
-						<< "skipping";
-			continue;
-		}
-		LOG(INFO)	<< "Running pairwise calibration for cameras "
-					<< c1 << " and " << c2 << " (" << n_visible << " points)";
-
-		if (transformations.find(make_pair(c2, c1)) != transformations.end()) {
-			continue;
-		}
-		Mat R, t, R_i, t_i;
-
-		// TODO: threshold parameter, 16.0 possibly too high
-
-		if (calibratePair(c1, c2, R, t) > 16.0) {
-			LOG(ERROR)	<< "Pairwise calibration failed, skipping cameras "
-						<< c1 << " and " << c2;
-			visibility_graph_.deleteEdge(c1, c2);
-			continue;
-		}
-
-		calculateInverse(R, t, R_i, t_i);
-
-		transformations[make_pair(c2, c1)] = make_pair(R, t);
-		transformations[make_pair(c1, c2)] = make_pair(R_i, t_i);
-	}}
-
-	for (size_t c = 0; c < paths.size(); c++) {
-		Mat R_chain = Mat::eye(Size(3, 3), CV_64FC1);
-		Mat t_chain = Mat(Size(1, 3), CV_64FC1, Scalar(0.0));
-		LOG(INFO) << "Chain for camera " << c;
-		for (auto e: paths[c]) {
-			CHECK(transformations.find(e) != transformations.end()) << "chain not calculated; pairwise calibration possibly failed earlier?";
-			LOG(INFO) << e.first << " -> " << e.second;
-			Mat R = transformations[e].first;
-			Mat t = transformations[e].second;
-			R_chain = R * R_chain;
-			t_chain = t + R * t_chain;
-		}
-
-		R_[c] = R_chain;
-		t_[c] = t_chain;
-		/*R_[c] = transformations[make_pair(reference_camera_, c)].first;
-		t_[c] = transformations[make_pair(reference_camera_, c)].second;
-		DCHECK(R_[c].size() == Size(3, 3));
-		DCHECK(t_[c].size() == Size(1, 3));*/
-	}
-
-	calculateMissingPoints3D();
-
-	for (size_t c_from = 0; c_from < n_cameras_; c_from++) {
-		if (c_from == reference_camera_) continue;
-		Mat R, t;
-		calculateInverse(R_[c_from], t_[c_from], R, t);
-		LOG(INFO)	<< "Error before BA, cameras " << reference_camera_ << " and " << c_from << ": "
-					<< getReprojectionErrorOptimized(c_from, K_[c_from], R, t);
-	}
-
-	double err;
-	{
-		auto cameras = vector<ftl::calibration::Camera>();
-
-		for (size_t i = 0; i < n_cameras_; i++) {
-			calculateInverse(R_[i], t_[i], R_[i], t_[i]);
-			cameras.push_back(ftl::calibration::Camera(K_[i], dist_coeffs_[i], R_[i], t_[i]));
-		}
-
-		ftl::calibration::BundleAdjustment ba;
-		ba.addCameras(cameras);
-
-		for (size_t i = 0; i < points3d_optimized_.size(); i++) {
-
-			auto &p = points3d_optimized_[i];
-			DCHECK(!isnanl(p.x) && !isnanl(p.y) && !isnanl(p.z));
-
-			int count = 0;
-			for (size_t c = 0; c < n_cameras_; c++) {
-				if (isVisible(c, i) && isValid(c, i)) { count++; }
-			}
-
-			if (count < 2) continue;
-
-			vector<bool> visible(n_cameras_);
-			vector<Point2d> points2d(n_cameras_);
-
-			for (size_t c = 0; c < n_cameras_; c++) {
-				bool good = isVisible(c, i) && isValid(c, i);
-				visible[c] = good;
-				points2d[c] = points2d_[c][i];
-			}
-
-			ba.addPoint(visible, points2d, p);
-		}
-
-		ba.addObject(object_points_);
-
-		ftl::calibration::BundleAdjustment::Options options;
-		options.loss = ftl::calibration::BundleAdjustment::Options::Loss::CAUCHY;
-		options.optimize_intrinsic = !fix_intrinsics_;
-		options.fix_distortion = true;
-		options.max_iter = 50;
-		options.fix_camera_extrinsic = {reference_camera};
-		options.verbose = true;
-		options.max_iter = 500;
-
-		err = ba.reprojectionError();
-		ba.run(options);
-
-		for (size_t i = 0; i < n_cameras_; i++) {
-			R_[i] = cameras[i].rmat();
-			t_[i] = cameras[i].tvec();
-			K_[i] = cameras[i].intrinsicMatrix();
-			//dist_coeffs_[i] = D; // not updated
-			calculateInverse(R_[i], t_[i], R_[i], t_[i]);
-		}
-	}
-
-	for (size_t c_from = 0; c_from < n_cameras_; c_from++) {
-		if (c_from == reference_camera_) continue;
-		Mat R, t;
-		calculateInverse(R_[c_from], t_[c_from], R, t);
-		LOG(INFO)	<< "Error (RMS) after BA, cameras " << reference_camera_ << " and " << c_from << ": "
-					<< getReprojectionErrorOptimized(c_from, K_[c_from], R, t);
-
-	}
-
-	is_calibrated_ = true;
-	return err;
-}
-
-void MultiCameraCalibrationNew::projectPointsOriginal(size_t camera_src, size_t camera_dst, size_t idx, vector<Point2d> &points) {
-
-}
-
-void MultiCameraCalibrationNew::projectPointsOptimized(size_t camera_dst, size_t idx, vector<Point2d> &points) {
-	// TODO:	indexing does not match input (points may be skipped in loadInput())
-
-	points.clear();
-	size_t i = target_.n_points * idx;
-
-	if (!isValid(i)) return;
-
-	Point3d p1(points3d_optimized_[i]);
-	Point3d p2(points3d_optimized_[i + 1]);
-
-	if (!std::isfinite(p1.x) || !std::isfinite(p2.x)) {
-		// DEBUG: should not happen
-		LOG(ERROR) << "Bad point! (no valid triangulation)";
-		return;
-	}
-
-	Mat R, tvec, rvec;
-	calculateTransform(R_[reference_camera_], t_[reference_camera_], R_[camera_dst], t_[camera_dst], R, tvec);
-
-	cv::Rodrigues(R, rvec);
-	cv::projectPoints(	vector<Point3d> { p1, p2 },
-						rvec, tvec, K_[camera_dst], dist_coeffs_[camera_dst], points);
-}
-
-void MultiCameraCalibrationNew::getCalibration(vector<Mat> &R, vector<Mat> &t) {
-	DCHECK(is_calibrated_);
-	R.resize(n_cameras_);
-	t.resize(n_cameras_);
-
-	for (size_t i = 0; i < n_cameras_; i++) {
-		R_[i].copyTo(R[i]);
-		t_[i].copyTo(t[i]);
-	}
-}
diff --git a/applications/calibration-multi/src/multicalibrate.hpp b/applications/calibration-multi/src/multicalibrate.hpp
deleted file mode 100644
index f696e6328d69126cc864c9df97909b9f3154e841..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/multicalibrate.hpp
+++ /dev/null
@@ -1,159 +0,0 @@
-#pragma once
-
-#include <opencv2/core.hpp>
-
-#include "visibility.hpp"
-#include "util.hpp"
-
-using cv::Mat;
-using cv::Size;
-using cv::Point2d;
-using cv::Point3d;
-using cv::Vec4d;
-using cv::Scalar;
-
-using std::vector;
-using std::pair;
-
-class CalibrationTarget {
-public:
-	explicit CalibrationTarget(double length):
-		n_points(2),
-		calibration_bar_length_(length)
-	{}
-	
-	/* @brief	Estimate scale factor.
-	 * @param	3D points (can pass n views)
-	 */
-	double estimateScale(vector<Point3d> points3d);
-	size_t n_points;
-
-private:
-	double calibration_bar_length_;
-};
-
-class MultiCameraCalibrationNew {
-public:
-	MultiCameraCalibrationNew(	size_t n_cameras, size_t reference_camera,
-								Size resolution, CalibrationTarget target,
-								int fix_intrinsics=1);
-	
-	void setCameraParameters(size_t idx, const Mat &K, const Mat &distCoeffs);
-	void setCameraParameters(size_t idx, const Mat &K);
-
-	void addPoints(vector<vector<Point2d>> points2d, vector<int> visibility);
-
-	size_t getViewsCount();
-	size_t getCamerasCount() { return n_cameras_; }
-	size_t getOptimalReferenceCamera();
-
-	size_t getMinVisibility() { return visibility_graph_.getMinVisibility(); }
-	size_t getViewsCount(size_t camera) { return visibility_graph_.getViewsCount(camera); }
-
-	void setFixIntrinsic(int value) { fix_intrinsics_ = (value == 1 ? 5 : 0); }
-
-	void loadInput(const std::string &filename, const vector<size_t> &cameras = {});
-
-	void saveInput(cv::FileStorage &fs);
-	void saveInput(const std::string &filename);
-
-	Mat getCameraMat(size_t idx);
-	Mat getCameraMatNormalized(size_t idx, double scale_x = 1.0, double scale_y = 1.0);
-
-	Mat getDistCoeffs(size_t idx);
-
-	double calibrateAll(int reference_camera = -1);
-	double getReprojectionError();
-	void getCalibration(vector<Mat> &R, vector<Mat> &t);
-
-	void projectPointsOriginal(size_t camera_src, size_t camera_dst, size_t idx, vector<Point2d> &points);
-	void projectPointsOptimized(size_t camera_dst, size_t idx, vector<Point2d> &points);
-
-	std::vector<cv::Point3d> object_points_;
-
-protected:
-	bool isVisible(size_t camera, size_t idx);
-	bool isValid(size_t camera, size_t idx);
-	bool isValid(size_t idx);
-
-	Point3d getPoint3D(size_t camera, size_t i);
-
-	vector<Point2d> getPoints(size_t camera, size_t idx);
-	vector<vector<Point2d>> getAllPoints(size_t camera, vector<size_t> idx);
-
-	void getVisiblePoints(	vector<size_t> cameras,
-							vector<vector<Point2d>> &points,
-							vector<size_t> &idx);
-
-	size_t getVisiblePointsCount(vector<size_t> cameras) {
-		// TODO: for pairs can use visibility graph adjacency matrix
-		vector<vector<Point2d>> points2d;
-		vector<size_t> idx;
-		getVisiblePoints(cameras, points2d, idx);
-		return idx.size();
-	}
-
-	size_t getTotalPointsCount() {
-		return points2d_[0].size();
-	}
-
-	vector<Point3d> getPoints3D(size_t idx);
-
-	/* @brief	Find points which are visible on all cameras. Returns
-	 * 			corresponding indices in idx vector.
-	 */
-	void getVisiblePoints3D(vector<size_t> cameras,
-							vector<vector<Point3d>> &points,
-							vector<size_t> &idx);
-
-	/* @brief	Update 3D points with new values. If there is no earlier data, the
-	 *			new data is used as is; otherwise a running average is calculated.
-	 */
-	void updatePoints3D(size_t camera, Point3d new_point, size_t idx, const Mat &R, const Mat &t);
-	void updatePoints3D(size_t camera, vector<Point3d> new_points, vector<size_t> idx, const Mat &R, const Mat &t);
-
-	/* @brief	Calculates 3D points that are not visible in reference camera
-	 *			from transformations in visible cameras.
-	 */
-	void calculateMissingPoints3D();
-
-	void getTransformation(size_t camera_from, size_t camera_to, Mat &R, Mat &T);
-	double calibratePair(size_t camera_from, size_t camera_to, Mat &R, Mat &T);
-
-	/* @brief	Calculate reprojection error of visible points (triangulation) */
-	double getReprojectionError(size_t c_from, size_t c_to, const Mat &K, const Mat &R, const Mat &T);
-
-	/* @brief	Calculate reprojection error of visible points (optimized/averaged points) */
-	double getReprojectionErrorOptimized(size_t c_from, const Mat &K, const Mat &R, const Mat &T);
-
-	/* @brief	Remove old calibration data calculated by calibrateAll */
-	void reset();
-
-	
-private:
-	CalibrationTarget target_;
-	Visibility visibility_graph_; 
-
-	bool is_calibrated_;
-	size_t n_cameras_;
-	size_t reference_camera_;
-	size_t min_visible_points_;
-	int fix_intrinsics_;
-
-	Size resolution_;
-	vector<Mat> K_;
-	vector<Mat> dist_coeffs_;
-	vector<Mat> R_;
-	vector<Mat> t_;
-
-	vector<Point3d> points3d_optimized_;
-	vector<vector<Point3d>> points3d_;
-	vector<vector<Point2d>> points2d_;
-	vector<vector<int>> visible_;
-	vector<vector<int>> inlier_;	// per-point triangulation count; -1 marks a rejected point
-	vector<vector<double>> weights_;
-
-	int fm_method_;
-	double fm_ransac_threshold_;
-	double fm_confidence_;
-};
diff --git a/applications/calibration-multi/src/util.cpp b/applications/calibration-multi/src/util.cpp
deleted file mode 100644
index 0019516e4027472d2be733899bde75a50a94dd0f..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/util.cpp
+++ /dev/null
@@ -1,174 +0,0 @@
-#include "util.hpp"
-
-#include <loguru.hpp>
-
-#include <opencv2/core.hpp>
-#include <opencv2/calib3d.hpp>
-#include <opencv2/imgproc.hpp>
-#include <opencv2/aruco.hpp>
-
-using std::vector;
-
-using cv::Mat;
-using cv::Point2i;
-using cv::Point2d;
-using cv::Point3d;
-using cv::Size;
-using cv::Scalar;
-
-/* @brief	Visualize epipolar lines for given points in the other image.
- * @param	Points in image
- * @param	Corresponding image where to draw the lines
- * @param	Fundamental matrix
- * @param	Line color
- * @param	Which image (1 or 2), see OpenCV's computeCorrespondEpilines()
- */
-void drawEpipolarLines(vector<Point2d> const &points, Mat &img, Mat const &F, Scalar color, int image) {
-	Mat lines;
-	cv::computeCorrespondEpilines(points, image, F, lines);
-
-	for (int i = 0; i < lines.rows; i++) {
-		cv::Vec3f l = lines.at<cv::Vec3f>(i);
-		float a = l[0];
-		float b = l[1];
-		float c = l[2];
-		float x0, y0, x1, y1;
-		x0 = 0;
-		y0 = (-c -a * x0) / b;
-		x1 = img.cols;
-		y1 = (-c -a * x1) / b;
-		cv::line(img, cv::Point(x0, y0), cv::Point(x1,y1), color, 1);
-	}
-}
-
-/* @brief	Find calibration points: ArUco markers, two per image.
- *			visible parameter is used as both input and output
- */
-int findCorrespondingPoints(vector<Mat> imgs, vector<vector<Point2d>> &points,
-							vector<int> &visible) {
-	using namespace cv;
-	int count = 0;
-
-	visible.resize(imgs.size(), 1);
-
-	points.clear();
-	points.resize(imgs.size(), vector<Point2d>(2, Point2d(0.0, 0.0)));
-
-	auto dictionary = aruco::getPredefinedDictionary(aruco::DICT_5X5_50);
-	vector<vector<Point2f>> corners;
-	vector<int> ids;
-	
-	for (size_t i = 0; i < imgs.size(); i++) {
-		if (visible[i] == 0) continue;
-
-		aruco::detectMarkers(imgs[i], dictionary, corners, ids);
-		if (corners.size() == 2) {
-			Point2d center0((corners[0][0] + corners[0][1] + corners[0][2] + corners[0][3]) / 4.0);
-			Point2d center1((corners[1][0] + corners[1][1] + corners[1][2] + corners[1][3]) / 4.0);
-			if (ids[0] != 0) { std::swap(center0, center1); }
-
-			points[i][0] = center0; points[i][1] = center1;
-			visible[i] = 1;
-
-			count++;
-		}
-		else {
-			visible[i] = 0;
-		}
-	}
-
-	return count;
-}
-
-/* @brief	Find ArUco marker centers.
- * @param	(input) image
- * @param	(output) found centers
- * @param	(output) marker IDs
- */
-void findMarkerCenters(Mat &img, vector<Point2d> &points, vector<int> &ids, int dict) {
-	using namespace cv;
-
-	points.clear();
-
-	auto dictionary = aruco::getPredefinedDictionary(dict);
-	vector<vector<Point2f>> corners;
-
-	aruco::detectMarkers(img, dictionary, corners, ids);
-	for (size_t j = 0; j < corners.size(); j++) {
-		Point2f center((corners[j][0] + corners[j][1] + corners[j][2] + corners[j][3]) / 4.0);
-		points.push_back(center);
-	}
-}
-
-/* OpenCV's recoverPose() expects both cameras to have identical intrinsic
- * parameters.
- */
-int recoverPose(Mat &E, vector<Point2d> &_points1, vector<Point2d> &_points2,
-				Mat &_cameraMatrix1, Mat &_cameraMatrix2,
-				Mat &_R, Mat &_t, double distanceThresh,
-				Mat &triangulatedPoints) {
-
-	Mat points1, points2, cameraMatrix1, cameraMatrix2, cameraMatrix;
-	
-	Mat(_points1.size(), 2, CV_64FC1, _points1.data()).convertTo(points1, CV_64F);
-	Mat(_points2.size(), 2, CV_64FC1, _points2.data()).convertTo(points2, CV_64F);
-	_cameraMatrix1.convertTo(cameraMatrix1, CV_64F);
-	_cameraMatrix2.convertTo(cameraMatrix2, CV_64F);
-	cameraMatrix = Mat::eye(Size(3, 3), CV_64FC1);
-
-	double fx1 = cameraMatrix1.at<double>(0,0);
-	double fy1 = cameraMatrix1.at<double>(1,1);
-	double cx1 = cameraMatrix1.at<double>(0,2);
-	double cy1 = cameraMatrix1.at<double>(1,2);
-
-	double fx2 = cameraMatrix2.at<double>(0,0);
-	double fy2 = cameraMatrix2.at<double>(1,1);
-	double cx2 = cameraMatrix2.at<double>(0,2);
-	double cy2 = cameraMatrix2.at<double>(1,2);
-
-	points1.col(0) = (points1.col(0) - cx1) / fx1;
-	points1.col(1) = (points1.col(1) - cy1) / fy1;
-
-	points2.col(0) = (points2.col(0) - cx2) / fx2;
-	points2.col(1) = (points2.col(1) - cy2) / fy2;
-
-	// TODO mask
-	// cameraMatrix = I (for details, see OpenCV's recoverPose() source code)
-	// modules/calib3d/src/five-point.cpp (461)
-	//
-	// https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
-
-	return cv::recoverPose(E, points1, points2, cameraMatrix, _R, _t, distanceThresh, cv::noArray(), triangulatedPoints);
-}
-
-/* @brief	Calculate RMS reprojection error
- * @param	3D points
- * @param	Expected 2D points
- * @param	Camera matrix
- * @param	Rotation matrix/vector
- * @param	Translation vector
- */
-double reprojectionError(	const vector<Point3d> &points3d, const vector<Point2d> &points2d,
-							const Mat &K, const Mat &rvec, const Mat &tvec) {
-	
-	DCHECK(points3d.size() == points2d.size());
-	
-	Mat _rvec;
-	if (rvec.size() == Size(3, 3)) { cv::Rodrigues(rvec, _rvec); }
-	else { _rvec = rvec; }
-
-	DCHECK(_rvec.size() == Size(1, 3) || _rvec.size() == Size(3, 1));
-
-	vector<Point2d> points_reprojected;
-	cv::projectPoints(points3d, _rvec, tvec, K, cv::noArray(), points_reprojected);
-	
-	int n_points = points2d.size();
-	double err = 0.0;
-
-	for (int i = 0; i < n_points; i++) {
-		Point2d a = points2d[i] - points_reprojected[i];
-		err += a.x * a.x + a.y * a.y;
-	}
-
-	return sqrt(err / n_points);
-}
\ No newline at end of file
diff --git a/applications/calibration-multi/src/util.hpp b/applications/calibration-multi/src/util.hpp
deleted file mode 100644
index 7c2b5702feb1b518fd4662560f349c2aa4de9fed..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/util.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-#pragma once
-
-#include <loguru.hpp>
-
-#include <opencv2/core.hpp>
-#include <opencv2/aruco.hpp>
-
-using std::vector;
-
-using cv::Mat;
-using cv::Point2i;
-using cv::Point2d;
-using cv::Point3d;
-using cv::Size;
-using cv::Scalar;
-
-/* @brief	Visualize epipolar lines for given points in the other image.
- * @param	Points in image
- * @param	Corresponding image where to draw the lines
- * @param	Fundamental matrix
- * @param	Line color
- * @param	Which image (1 or 2), see OpenCV's computeCorrespondEpilines()
- */
-void drawEpipolarLines(vector<Point2d> const &points, Mat &img, Mat const &F, Scalar color, int image=1);
-
-
-/* @brief	Find calibration points: ArUco markers, two per image.
- */
-int findCorrespondingPoints(vector<Mat> imgs, vector<vector<Point2d>> &points,
-							vector<int> &visible);
-
-/* @brief	Find ArUco marker centers.
- * @param	(input) image
- * @param	(output) found centers
- * @param	(output) marker IDs
- */
-void findMarkerCenters(Mat &img, vector<Point2d> &points, vector<int> &ids, int dict=cv::aruco::DICT_4X4_50);
-
-/* OpenCV's recoverPose() expects both cameras to have identical intrinsic
- * parameters.
- * 
- * https://github.com/opencv/opencv/blob/371bba8f54560b374fbcd47e7e02f015ac4969ad/modules/calib3d/src/five-point.cpp#L461
- */
-int recoverPose(Mat &E, vector<Point2d> &_points1, vector<Point2d> &_points2,
-				Mat &_cameraMatrix1, Mat &_cameraMatrix2,
-				Mat &_R, Mat &_t, double distanceThresh,
-				Mat &triangulatedPoints);
-
-/* @brief	Calculate RMS reprojection error
- * @param	3D points
- * @param	Expected 2D points
- * @param	Camera matrix
- * @param	Rotation matrix/vector
- * @param	Translation vector
- */
-double reprojectionError(	const vector<Point3d> &points3d, const vector<Point2d> &points2d,
-							const Mat &K, const Mat &rvec, const Mat &tvec);
-
-inline double euclideanDistance(Point3d a, Point3d b) {
-	Point3d c = a - b;
-	return sqrt(c.x*c.x + c.y*c.y + c.z*c.z);
-}
-
-inline Point3d transformPoint(Point3d p, Mat R, Mat t) {
-	DCHECK(R.size() == Size(3, 3));
-	DCHECK(t.size() == Size(1, 3));
-	return Point3d(Mat(R * Mat(p) + t));
-}
-
-inline Point3d inverseTransformPoint(Point3d p, Mat R, Mat t) {
-	DCHECK(R.size() == Size(3, 3));
-	DCHECK(t.size() == Size(1, 3));
-	return Point3d(Mat(R.t() * (Mat(p) - t)));
-}
-
-inline Mat getMat4x4(const Mat &R, const Mat &t) {
-	DCHECK(R.size() == Size(3, 3));
-	DCHECK(t.size() == Size(1, 3));
-	Mat M = Mat::eye(Size(4, 4), CV_64FC1);
-	R.copyTo(M(cv::Rect(0, 0, 3, 3)));
-	t.copyTo(M(cv::Rect(3, 0, 1, 3)));
-	return M;
-}
-
-inline void getRT(const Mat RT, Mat &R, Mat &t) {
-	R = RT(cv::Rect(0, 0, 3, 3));
-	t = RT(cv::Rect(3, 0, 1, 3));
-}
-
-// calculate transforms from (R1, t1) to (R2, t2), where parameters
-// (R1, t1) and (R2, t2) map to same (target) coordinate system
-
-inline void calculateTransform(const Mat &R1, const Mat &T1, const Mat &R2, const Mat &T2, Mat &R, Mat &tvec, Mat &M) {
-	Mat M_src = getMat4x4(R1, T1);
-	Mat M_dst = getMat4x4(R2, T2);
-	M = M_dst.inv() * M_src;	
-	R = M(cv::Rect(0, 0, 3, 3));
-	tvec = M(cv::Rect(3, 0, 1, 3));
-}
-
-inline void calculateTransform(const Mat &R1, const Mat &T1, const Mat &R2, const Mat &T2,Mat &R, Mat &tvec) {
-	Mat M;
-	calculateTransform(R1, T1, R2, T2, R, tvec, M);
-}
-
-inline void calculateInverse(const Mat &R2, const Mat &T2, Mat &R, Mat &T) {
-	Mat R1 = Mat::eye(Size(3, 3), CV_64FC1);
-	Mat T1(Size(1, 3), CV_64FC1, Scalar(0.0));
-	calculateTransform(R1, T1, R2, T2, R, T);
-}
\ No newline at end of file
diff --git a/applications/calibration-multi/src/visibility.cpp b/applications/calibration-multi/src/visibility.cpp
deleted file mode 100644
index df0427c5ddc466ba037dcbe8bdba2adc406ed89d..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/visibility.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-#include <numeric>
-#include <loguru.hpp>
-#include <queue>
-
-#include "visibility.hpp"
-
-using cv::Mat;
-using cv::Scalar;
-using cv::Size;
-using std::vector;
-using std::pair;
-using std::make_pair;
-
-Visibility::Visibility(int n_cameras) : n_cameras_(n_cameras) {
-	visibility_ = Mat(Size(n_cameras, n_cameras), CV_32SC1, Scalar(0));
-	count_ = vector(n_cameras, 0);
-}
-
-void Visibility::update(vector<int> &visible) {
-	DCHECK(visible.size() == (size_t) n_cameras_);
-
-	for (int i = 0; i < n_cameras_; i++) {
-		if (visible[i] == 0) continue;
-		count_[i]++;
-
-		for (int j = 0; j < n_cameras_; j++) {
-			if (i == j) continue;
-			if (visible[j] == 1) visibility_.at<int>(i, j)++;
-		}
-	}
-}
-
-int Visibility::getOptimalCamera() {
-	// most visible on average
-	int best_i = 0;
-	double best_score = -INFINITY;
-	for (int i = 0; i < visibility_.rows; i++) {
-		double score = 0.0;
-		for (int x = 0; x < visibility_.cols; x++) {
-			score += visibility_.at<int>(i, x);
-		}
-		score = score / (double) visibility_.cols;
-		if (score > best_score) {
-			best_i = i;
-			best_score = score;
-		}
-	}
-	
-	return best_i;
-}
-
-void Visibility::deleteEdge(int camera1, int camera2)
-{
-	visibility_.at<int>(camera1, camera2) = 0;
-	visibility_.at<int>(camera2, camera1) = 0;
-}
-
-int Visibility::getMinVisibility() {
-	int min_count = INT_MAX;
-
-	for (int i = 0; i < n_cameras_; i++) {
-		if (count_[i] < min_count) {
-			min_count = count_[i];
-		}
-	}
-	
-	return min_count;
-}
-
-int Visibility::getViewsCount(int camera) {
-	return count_[camera];
-}
-
-vector<vector<pair<int, int>>> Visibility::findShortestPaths(int reference) {
-	DCHECK(reference < n_cameras_);
-
-	vector<vector<pair<int, int>>> res(n_cameras_);
-	for (int i = 0; i < n_cameras_; i++) {
-		res[i] = findShortestPath(i, reference);
-	}
-	
-	return res;
-}
-
-vector<pair<int, int>> Visibility::findShortestPath(int from, int to) {
-	if (from == to) return vector<pair<int, int>>();
-
-	vector<bool> visited(n_cameras_, false);
-	vector<double> distances(n_cameras_, INFINITY);
-	vector<int> previous(n_cameras_, -1);
-	
-	distances[from] = 0.0;
-
-	auto cmp = [](pair<int, double> u, pair<int, double> v) { return u.second > v.second; };
-	std::priority_queue<pair<int, double>, vector<pair<int, double>>, decltype(cmp)> pq(cmp);
-
-	pq.push(make_pair(from, distances[from]));
-
-	while(!pq.empty()) {
-		pair<int, double> current = pq.top();
-		pq.pop();
-
-		int current_id = current.first;
-		double current_distance = distances[current_id];
-
-		visited[current_id] = true;
-
-		for (int i = 0; i < n_cameras_; i++) {
-			int count = visibility_.at<int>(current_id, i);
-			if (count == 0) continue; // not connected
-
-			double distance = 1.0 / (double) count;
-			double new_distance = current_distance + distance;
-			
-			if (distances[i] > new_distance) {
-				distances[i] = new_distance;
-				previous[i] = current_id;
-
-				pq.push(make_pair(i, distances[i]));
-			}
-		}
-	}
-
-	vector<pair<int, int>> res;
-	int prev = previous[to];
-	int current = to;
-
-	do {
-		res.push_back(make_pair(current, prev));
-		current = prev;
-		prev = previous[prev];
-	}
-	while(prev != -1);
-
-	std::reverse(res.begin(), res.end());
-	return res;
-}
-
-vector<int> Visibility::getClosestCameras(int c) {
-
-	// initialize original index locations
-	vector<int> idx(n_cameras_);
-	iota(idx.begin(), idx.end(), 0);
-	int* views = visibility_.ptr<int>(c);
-	
-	// sort camera indices by ascending shared view count with camera c
-	sort(idx.begin(), idx.end(),
-		[views](size_t i1, size_t i2) { return views[i1] < views[i2]; });
-
-	return idx;
-}
\ No newline at end of file
diff --git a/applications/calibration-multi/src/visibility.hpp b/applications/calibration-multi/src/visibility.hpp
deleted file mode 100644
index 569e51c1bf77e861d1f789bea14b3107b23bd999..0000000000000000000000000000000000000000
--- a/applications/calibration-multi/src/visibility.hpp
+++ /dev/null
@@ -1,56 +0,0 @@
-#pragma once
-
-#include <opencv2/core.hpp>
-
-using cv::Mat;
-using std::vector;
-using std::pair;
-
-class Visibility {
-public:
-	explicit Visibility(int n_cameras);
-
-	/**
-	 * @brief	Update visibility graph.
-	 * @param	Which cameras see the feature(s) in this iteration
-	 */
-	void update(vector<int> &visible);
-
-	/**
-	 * @brief	For all cameras, find shortest (optimal) paths to reference
-	 * 			camera
-	 * @param	Id of reference camera
-	 *
-	 * Calculates the shortest path in a weighted graph using Dijkstra's
-	 * algorithm. Edge weights are the inverse of the shared view count between cameras (nodes).
-	 *
-	 * @todo	Add constant weight for each edge (prefer less edges)
-	 */
-	vector<vector<pair<int, int>>> findShortestPaths(int reference);
-
-	vector<int> getClosestCameras(int c);
-	void deleteEdge(int camera1, int camera2);
-
-	int getOptimalCamera();
-
-	/** @brief Returns the smallest visibility count (any camera)
-	 */
-	int getMinVisibility();
-
-	/** @brief Returns the number of views recorded for the given camera
-	 */
-	int getViewsCount(int camera);
-
-protected:
-	/**
-	 * @brief	Find shortest path between nodes
-	 * @param	Source node id
-	 * @param	Destination node id
-	 */
-	vector<pair<int, int>> findShortestPath(int from, int to);
-
-private:
-	int n_cameras_;			// number of cameras
-	cv::Mat visibility_;	// adjacency matrix (pairwise shared view counts)
-	vector<int> count_;		// per-camera view count
-};
diff --git a/applications/calibration/CMakeLists.txt b/applications/calibration/CMakeLists.txt
deleted file mode 100644
index 2452f392a9cb6d0dbcac4737ba99ff106996f699..0000000000000000000000000000000000000000
--- a/applications/calibration/CMakeLists.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-set(CALIBSRC
-	src/main.cpp
-	src/lens.cpp
-	src/stereo.cpp
-	src/align.cpp
-	src/common.cpp
-)
-
-add_executable(ftl-calibrate ${CALIBSRC})
-
-target_include_directories(ftl-calibrate PRIVATE src)
-
-target_link_libraries(ftl-calibrate ftlcalibration ftlcommon Threads::Threads ${OpenCV_LIBS})
-
-
diff --git a/applications/calibration/src/align.cpp b/applications/calibration/src/align.cpp
deleted file mode 100644
index 01123aea98cfa094be7e12076f7d222697b61569..0000000000000000000000000000000000000000
--- a/applications/calibration/src/align.cpp
+++ /dev/null
@@ -1,391 +0,0 @@
-#include "align.hpp"
-
-#include <loguru.hpp>
-
-#include <opencv2/core.hpp>
-#include <opencv2/core/utility.hpp>
-#include <opencv2/imgproc.hpp>
-#include <opencv2/calib3d.hpp>
-#include <opencv2/imgcodecs.hpp>
-#include <opencv2/videoio.hpp>
-#include <opencv2/highgui.hpp>
-
-using std::map;
-using std::string;
-using cv::Mat;
-using std::vector;
-using cv::Point2f;
-using cv::Size;
-
-struct Rec4f {
-	float left;
-	float right;
-	float top;
-	float bottom;
-};
-
-bool loadIntrinsicsMap(const std::string &ifile, const cv::Size &imageSize, Mat &map1, Mat &map2, Mat &cameraMatrix, float scale) {
-    using namespace cv;
-
-    FileStorage fs;
-
-    // reading intrinsic parameters
-    fs.open((ifile).c_str(), FileStorage::READ);
-    if (!fs.isOpened()) {
-        LOG(WARNING) << "Could not open intrinsics file : " << ifile;
-        return false;
-    }
-
-    LOG(INFO) << "Intrinsics from: " << ifile;
-
-
-    Mat D1;
-    fs["M"] >> cameraMatrix;
-    fs["D"] >> D1;
-
-    //cameraMatrix *= scale;
-
-	initUndistortRectifyMap(cameraMatrix, D1, Mat::eye(3,3, CV_64F), cameraMatrix, imageSize, CV_16SC2,
-    		map1, map2);
-
-    return true;
-}
-
-inline bool hasOption(const map<string, string> &options, const std::string &opt) {
-    return options.find(opt) != options.end();
-}
-
-inline std::string getOption(map<string, string> &options, const std::string &opt) {
-    auto str = options[opt];
-    return str.substr(1,str.size()-2);
-}
-
-static const float kPI = 3.14159f;
-
-/*static float calculateZRotation(const vector<Point2f> &points, Size &boardSize) {
-    Point2f tl = points[boardSize.width * (boardSize.height / 2)];
-    Point2f tr = points[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
-
-    float dx = tr.x - tl.x;
-    float dy = tr.y - tl.y;
-    float angle = atan2(dy,  dx) * (180.0f / kPI);
-    return angle;
-}
-
-static Point2f parallaxDistortion(const vector<Point2f> &points, Size &boardSize) {
-    Point2f tl = points[0];
-    Point2f tr = points[boardSize.width-1];
-    Point2f bl = points[(boardSize.height-1)*boardSize.width];
-    Point2f br = points[points.size()-1];
-
-    float dx1 = tr.x - tl.x;
-    float dx2 = br.x - bl.x;
-    float ddx = dx1 - dx2;
-
-    float dy1 = bl.y - tl.y;
-    float dy2 = br.y - tr.y;
-    float ddy = dy1 - dy2;
-
-    return Point2f(ddx, ddy);
-}*/
-
-static float distanceTop(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
-    Point2f tl = points[0];
-    Point2f tr = points[boardSize.width-1];
-
-    float pixSize = tr.x - tl.x;
-    float mmSize = boardSize.width * squareSize;
-    float focal = camMatrix.at<double>(0,0);
-
-    return ((mmSize / pixSize) * focal) / 1000.0f;
-}
-
-static float distanceBottom(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
-    Point2f bl = points[(boardSize.height-1)*boardSize.width];
-    Point2f br = points[points.size()-1];
-
-    float pixSize = br.x - bl.x;
-    float mmSize = boardSize.width * squareSize;
-    float focal = camMatrix.at<double>(0,0);
-
-    return ((mmSize / pixSize) * focal) / 1000.0f;
-}
-
-static float distanceLeft(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
-    Point2f bl = points[(boardSize.height-1)*boardSize.width];
-    Point2f tl = points[0];
-
-    float pixSize = bl.y - tl.y;
-    float mmSize = boardSize.height * squareSize;
-    float focal = camMatrix.at<double>(0,0);
-
-    return ((mmSize / pixSize) * focal) / 1000.0f;
-}
-
-static float distanceRight(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
-    Point2f tr = points[boardSize.width-1];
-    Point2f br = points[points.size()-1];
-
-    float pixSize = br.y - tr.y;
-    float mmSize = boardSize.height * squareSize;
-    float focal = camMatrix.at<double>(0,0);
-
-    return ((mmSize / pixSize) * focal) / 1000.0f;
-}
-
-static Rec4f distances(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
-	return {
-		-distanceLeft(camMatrix, points, boardSize, squareSize),
-		-distanceRight(camMatrix, points, boardSize, squareSize),
-		-distanceTop(camMatrix, points, boardSize, squareSize),
-		-distanceBottom(camMatrix, points, boardSize, squareSize)
-	};
-}
-
-/*static float distance(const Mat &camMatrix, const vector<Point2f> &points, Size &boardSize, float squareSize) {
-    Point2f tl = points[boardSize.width * (boardSize.height / 2)];
-    Point2f tr = points[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
-
-    float pixSize = tr.x - tl.x;
-    float mmSize = boardSize.width * squareSize;
-    float focal = camMatrix.at<double>(0,0);
-
-    return ((mmSize / pixSize) * focal) / 1000.0f;
-}
-
-static Point2f diffY(const vector<Point2f> &pointsA, const vector<Point2f> &pointsB, Size &boardSize) {
-    Point2f tlA = pointsA[boardSize.width * (boardSize.height / 2)];
-    Point2f trA = pointsA[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
-
-    Point2f tlB = pointsB[boardSize.width * (boardSize.height / 2)];
-    Point2f trB = pointsB[boardSize.width * (boardSize.height / 2) + boardSize.width-1];
-
-    float d1 = tlA.y - tlB.y;
-    float d2 = trA.y - trB.y;
-
-    return Point2f(d1,d2);
-}*/
-
-static const float kDistanceThreshold = 0.005f;
-
-
-static void showAnaglyph(const Mat &frame_l, const Mat &frame_r, Mat &img3d) {
-    using namespace cv;
-
-    float data[] = {0.299f, 0.587f, 0.114f, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.299f, 0.587f, 0.114f};
-    Mat m(2, 9, CV_32FC1, data);
-
-    //Mat img3d;
-
-    img3d = Mat(frame_l.size(), CV_8UC3);
-  
-    for (int y=0; y<img3d.rows; y++) {
-        unsigned char *row3d = img3d.ptr(y);
-        const unsigned char *rowL = frame_l.ptr(y);
-        const unsigned char *rowR = frame_r.ptr(y);
-
-        for (int x=0; x<img3d.cols*3; x+=3) {
-            const uchar lb = rowL[x+0];
-            const uchar lg = rowL[x+1];
-            const uchar lr = rowL[x+2];
-            const uchar rb = rowR[x+0];
-            const uchar rg = rowR[x+1];
-            const uchar rr = rowR[x+2];
-
-            row3d[x+0] = lb*m.at<float>(0,6) + lg*m.at<float>(0,7) + lr*m.at<float>(0,8) + rb*m.at<float>(1,6) + rg*m.at<float>(1,7) + rr*m.at<float>(1,8);
-            row3d[x+1] = lb*m.at<float>(0,3) + lg*m.at<float>(0,4) + lr*m.at<float>(0,5) + rb*m.at<float>(1,3) + rg*m.at<float>(1,4) + rr*m.at<float>(1,5);
-            row3d[x+2] = lb*m.at<float>(0,0) + lg*m.at<float>(0,1) + lr*m.at<float>(0,2) + rb*m.at<float>(1,0) + rg*m.at<float>(1,1) + rr*m.at<float>(1,2);
-        }
-    }
-
-    //imshow("Anaglyph", img3d);
-    //return img3d;
-}
-
-void ftl::calibration::align(map<string, string> &opt) {
-    using namespace cv;
-
-    float squareSize = 36.0f;
-
-    VideoCapture camA(0);
-    VideoCapture camB(1);
-
-    if (!camA.isOpened() || !camB.isOpened()) {
-        LOG(ERROR) << "Could not open a camera device";
-        return;
-    }
-
-    camA.set(cv::CAP_PROP_FRAME_WIDTH, 1280);  // TODO Use settings
-	camA.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
-    camB.set(cv::CAP_PROP_FRAME_WIDTH, 1280);
-	camB.set(cv::CAP_PROP_FRAME_HEIGHT, 720);
-
-    Mat map1, map2, cameraMatrix;
-    Size imgSize(1280,720);
-    loadIntrinsicsMap((hasOption(opt, "profile")) ? getOption(opt,"profile") : "./panasonic.yml", imgSize, map1, map2, cameraMatrix, 1.0f);
-
-    Size boardSize(9,6);
-
-#if CV_VERSION_MAJOR >= 4
-	int chessBoardFlags = CALIB_CB_NORMALIZE_IMAGE;
-#else
-	int chessBoardFlags = CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE;
-
-	if (!settings.useFisheye) {
-		// fast check erroneously fails with high distortions like fisheye
-		chessBoardFlags |= CALIB_CB_FAST_CHECK;
-	}
-#endif
-
-    bool anaglyph = true;
-
-    while (true) {
-        Mat frameA, fA;
-        Mat frameB, fB;
-
-        camA.grab();
-        camB.grab();
-        camA.retrieve(frameA);
-        camB.retrieve(frameB);
-
-        remap(frameA, fA, map1, map2, INTER_LINEAR);
-        remap(frameB, fB, map1, map2, INTER_LINEAR);
-
-        // Get the chessboard
-        vector<Point2f> pointBufA;
-        vector<Point2f> pointBufB;
-		bool foundA, foundB;
-        foundA = findChessboardCornersSB(fA, boardSize,
-				pointBufA, chessBoardFlags);
-        foundB = findChessboardCornersSB(fB, boardSize,
-				pointBufB, chessBoardFlags);
-
-    // Step 1: Position cameras correctly with respect to chessboard
-    //      - print distance estimate etc
-
-    // Step 2: Show individual camera tilt degrees with left / right indicators
-
-    // Show also up down tilt perspective error
-
-    // Step 3: Display current baseline in mm
-
-        if (foundA) {
-            // Draw the corners.
-			//drawChessboardCorners(fA, boardSize,
-			//		Mat(pointBufA), foundA);
-        }
-
-        if (foundB) {
-            // Draw the corners.
-			//drawChessboardCorners(fB, boardSize,
-			//		Mat(pointBufB), foundB);
-        }
-
-        Mat anag;
-        showAnaglyph(fA, fB, anag);
-
-        if (foundA) {
-			Rec4f dists = distances(cameraMatrix, pointBufA, boardSize, squareSize);
-			//Rec4f angs = angles(pointBufA, boardSize);
-
-			// TODO Check angles also...
-			bool lrValid = std::abs(dists.left-dists.right) <= kDistanceThreshold;
-			bool tbValid = std::abs(dists.top-dists.bottom) <= kDistanceThreshold;
-			bool tiltUp = dists.top < dists.bottom && !tbValid;
-			bool tiltDown = dists.top > dists.bottom && !tbValid;
-			bool rotLeft = dists.left > dists.right && !lrValid;
-			bool rotRight = dists.left < dists.right && !lrValid;
-
-			// TODO Draw lines
-            Point2f bl = pointBufA[(boardSize.height-1)*boardSize.width];
-            Point2f tl = pointBufA[0];
-            Point2f tr = pointBufA[boardSize.width-1];
-            Point2f br = pointBufA[pointBufA.size()-1];
-
-
-            line(anag, tl, tr, (!lrValid && tiltUp) ? Scalar(0,0,255) : Scalar(0,255,0));
-            line(anag, bl, br, (!lrValid && tiltDown) ? Scalar(0,0,255) : Scalar(0,255,0));
-            line(anag, tl, bl, (!tbValid && rotLeft) ? Scalar(0,0,255) : Scalar(0,255,0));
-            line(anag, tr, br, (!tbValid && rotRight) ? Scalar(0,0,255) : Scalar(0,255,0));
-
-            //fA = fA(Rect(tl.x - 100, tl.y- 100, br.x-tl.x + 200, br.y-tl.y + 200));
-
-			// Show distance error between cameras
-
-			// Show estimated baseline
-
-            //if (step == 0) {
-            //    Point2f pd = parallaxDistortion(pointBufA, boardSize);
-            //    putText(fA, string("Distort: ") + std::to_string(pd.x) + string(",") + std::to_string(pd.y), Point(10,50), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-            //} else if (step == 1) {
-                //float d = distance(cameraMatrix, pointBufA, boardSize, squareSize);
-                //putText(anag, string("Distance: ") + std::to_string(-d) + string("m"), Point(10,50), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-           // } else if (step == 2) {
-                //float angle = calculateZRotation(pointBufA, boardSize) - 180.0f;
-                //putText(anag, string("Angle: ") + std::to_string(angle), Point(10,150), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-            //} else if (step == 3) {
-            //    Point2f vd = diffY(pointBufA, pointBufB, boardSize);
-            //    putText(fA, string("Vertical: ") + std::to_string(vd.x) + string(",") + std::to_string(vd.y), Point(10,200), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-           // }
-
-            if (foundB) {
-                //if (step == 0) {
-                    //Point2f pd = parallaxDistortion(pointBufB, boardSize);
-                    //putText(fB, string("Distort: ") + std::to_string(pd.x) + string(",") + std::to_string(pd.y), Point(10,50), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-                //} else if (step == 1) {
-                    //float d = distance(cameraMatrix, pointBufB, boardSize, squareSize);
-                    //putText(fB, string("Distance: ") + std::to_string(-d) + string("m"), Point(10,100), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-                //} else if (step == 2) {
-                    //float angle = calculateZRotation(pointBufB, boardSize) - 180.0f;
-                    //putText(fB, string("Angle: ") + std::to_string(angle), Point(10,150), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,0,255), 3);
-                //}
-
-                Rec4f dists = distances(cameraMatrix, pointBufB, boardSize, squareSize);
-                //Rec4f angs = angles(pointBufA, boardSize);
-
-                // TODO Check angles also...
-                bool lrValid = std::abs(dists.left-dists.right) <= kDistanceThreshold;
-                bool tbValid = std::abs(dists.top-dists.bottom) <= kDistanceThreshold;
-                bool tiltUp = dists.top < dists.bottom && !tbValid;
-                bool tiltDown = dists.top > dists.bottom && !tbValid;
-                bool rotLeft = dists.left > dists.right && !lrValid;
-                bool rotRight = dists.left < dists.right && !lrValid;
-
-                // TODO Draw lines
-                Point2f bbl = pointBufB[(boardSize.height-1)*boardSize.width];
-                Point2f btl = pointBufB[0];
-                Point2f btr = pointBufB[boardSize.width-1];
-                Point2f bbr = pointBufB[pointBufB.size()-1];
-
-
-                line(anag, btl, btr, (!tbValid && tiltUp) ? Scalar(0,0,255) : Scalar(0,255,0));
-                line(anag, bbl, bbr, (!tbValid && tiltDown) ? Scalar(0,0,255) : Scalar(0,255,0));
-                line(anag, btl, bbl, (!lrValid && rotLeft) ? Scalar(0,0,255) : Scalar(0,255,0));
-                line(anag, btr, bbr, (!lrValid && rotRight) ? Scalar(0,0,255) : Scalar(0,255,0));
-
-                float baseline1 = std::abs(tl.x - btl.x);
-                float baseline2 = std::abs(tr.x - btr.x);
-                float baseline3 = std::abs(bl.x - bbl.x);
-                float baseline4 = std::abs(br.x - bbr.x);
-                float boardWidth = (std::abs(tl.x-tr.x) + std::abs(btl.x - btr.x)) / 2.0f;
-                float baseline = ((baseline1 + baseline2 + baseline3 + baseline4) / 4.0f) / boardWidth * (boardSize.width*squareSize);
-
-                putText(anag, string("Baseline: ") + std::to_string(baseline) + string("mm"), Point(10,150), FONT_HERSHEY_PLAIN, 2.0, Scalar(0,255,0), 2);
-            }
-
-        }
-
-        /*if (anaglyph) {
-            showAnaglyph(fA,fB);
-        } else {
-            imshow("Left", fA);
-            imshow("Right", fB);
-        }*/
-        imshow("Anaglyph", anag);
-
-		char key = static_cast<char>(waitKey(20));
-		if (key == 27)
-			break;
-        if (key == 32) anaglyph = !anaglyph;
-    }
-}
\ No newline at end of file
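Note on the removed alignment tool: the baseline readout above is a purely geometric estimate, converting the horizontal shift of matching chessboard corners between the two views into millimetres via the board's known physical width. A minimal standalone sketch of that arithmetic follows, assuming OpenCV types; the function name is hypothetical and, unlike the removed code, it averages only the two top corners.

#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

// cornersA/cornersB: chessboard corners found in the left/right image,
// boardSize: inner-corner grid size, squareSize: square edge length in mm.
float estimateBaselineMm(const std::vector<cv::Point2f> &cornersA,
                         const std::vector<cv::Point2f> &cornersB,
                         cv::Size boardSize, float squareSize) {
	const cv::Point2f tlA = cornersA[0], trA = cornersA[boardSize.width - 1];
	const cv::Point2f tlB = cornersB[0], trB = cornersB[boardSize.width - 1];

	// Horizontal disparity of the board between the views, averaged over the top corners.
	float disparityPx = (std::abs(tlA.x - tlB.x) + std::abs(trA.x - trB.x)) / 2.0f;
	// Apparent board width in pixels, averaged over both views.
	float boardWidthPx = (std::abs(tlA.x - trA.x) + std::abs(tlB.x - trB.x)) / 2.0f;
	// The known physical board width gives the pixel-to-millimetre scale.
	return disparityPx / boardWidthPx * (boardSize.width * squareSize);
}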
diff --git a/applications/calibration/src/align.hpp b/applications/calibration/src/align.hpp
deleted file mode 100644
index 8a2097c14c9b541299fd112c1f6f279387a1aede..0000000000000000000000000000000000000000
--- a/applications/calibration/src/align.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _FTL_CALIBRATION_ALIGN_HPP_
-#define _FTL_CALIBRATION_ALIGN_HPP_
-
-#include <map>
-#include <string>
-
-namespace ftl {
-namespace calibration {
-
-void align(std::map<std::string, std::string> &opt);
-
-}
-}
-
-#endif  // _FTL_CALIBRATION_ALIGN_HPP_
diff --git a/applications/calibration/src/common.cpp b/applications/calibration/src/common.cpp
deleted file mode 100644
index 8a678e4b9ba808dccea146c2ce4c8c1c6942a7f0..0000000000000000000000000000000000000000
--- a/applications/calibration/src/common.cpp
+++ /dev/null
@@ -1,241 +0,0 @@
-#include <loguru.hpp>
-#include <ftl/config.h>
-
-#include <opencv2/calib3d.hpp>
-#include <opencv2/imgproc.hpp>
-#include <opencv2/videoio.hpp>
-#include <opencv2/highgui.hpp>
-
-#include "common.hpp"
-
-using std::vector;
-using std::map;
-using std::string;
-
-using cv::Mat;
-using cv::Vec2f, cv::Vec3f;
-using cv::Size;
-
-using cv::stereoCalibrate;
-
-namespace ftl {
-namespace calibration {
-
-// Options
-string getOption(const map<string, string> &options, const string &opt) {
-	const string str = options.at(opt);
-	return str.substr(1, str.size() - 2);
-}
-
-bool hasOption(const map<string, string> &options, const string &opt) {
-	return options.find(opt) != options.end();
-}
-
-int getOptionInt(const map<string, string> &options, const string &opt, int default_value) {
-	if (!hasOption(options, opt)) return default_value;
-	return std::stoi(options.at(opt));
-}
-
-double getOptionDouble(const map<string, string> &options, const string &opt, double default_value) {
-	if (!hasOption(options, opt)) return default_value;
-	return std::stod(options.at(opt));
-}
-
-string getOptionString(const map<string, string> &options, const string &opt, string default_value) {
-	if (!hasOption(options, opt)) return default_value;
-	return getOption(options, opt);
-}
-
-// Save/load files
-
-bool saveExtrinsics(const string &ofile, Mat &R, Mat &T, Mat &R1, Mat &R2, Mat &P1, Mat &P2, Mat &Q) {
-	cv::FileStorage fs;
-	fs.open(ofile, cv::FileStorage::WRITE);
-	if (fs.isOpened()) {
-		fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1"
-			<< P1 << "P2" << P2 << "Q" << Q;
-		fs.release();
-		return true;
-	} else {
-		LOG(ERROR) << "Error: can not save the extrinsic parameters";
-	}
-	return false;
-}
-
-bool saveIntrinsics(const string &ofile, const vector<Mat> &M, const vector<Mat>& D, const Size &size)
-{
-	cv::FileStorage fs(ofile, cv::FileStorage::WRITE);
-	if (fs.isOpened())
-	{
-		fs << "resolution" << size;
-		fs << "K" << M << "D" << D;
-		fs.release();
-		return true;
-	}
-	else
-	{
-		LOG(ERROR) << "Error: can not save the intrinsic parameters to '" << ofile << "'";
-	}
-	return false;
-}
-
-bool loadIntrinsics(const string &ifile, vector<Mat> &K1, vector<Mat> &D1, Size &size) {
-	using namespace cv;
-
-	FileStorage fs;
-
-	// reading intrinsic parameters
-	fs.open((ifile).c_str(), FileStorage::READ);
-	if (!fs.isOpened()) {
-		LOG(WARNING) << "Could not open intrinsics file : " << ifile;
-		return false;
-	}
-	
-	LOG(INFO) << "Intrinsics from: " << ifile;
-
-	fs["resolution"] >> size;
-	fs["K"] >> K1;
-	fs["D"] >> D1;
-	
-	return true;
-}
-
-Grid::Grid(int rows, int cols, int width, int height, 
-		   int offset_x, int offset_y) {
-	rows_ = rows;
-	cols_ = cols;
-	width_ = width;
-	height_ = height;
-	offset_x_ = offset_x;
-	offset_y_ = offset_y;
-	cell_width_ = width_ / cols_;
-	cell_height_ = height_ / rows_;
-	reset();
-
-	corners_ = vector<std::pair<cv::Point, cv::Point>>();
-
-	for (int r = 0; r < rows_; r++) {
-	for (int c = 0; c < cols_; c++) {
-		int x1 = offset_x_ + c * cell_width_;
-		int y1 = offset_y_ + r * cell_height_;
-		int x2 = offset_x_ + (c + 1) * cell_width_ - 1;
-		int y2 = offset_y_ + (r + 1) * cell_height_ - 1;
-		corners_.push_back(std::pair(cv::Point(x1, y1), cv::Point(x2, y2)));
-	}}
-}
-
-void Grid::drawGrid(Mat &rgb) {
-	for (int i = 0; i < rows_ * cols_; ++i) {	
-		bool visited = visited_[i];
-		cv::Scalar color = visited ? cv::Scalar(24, 255, 24) : cv::Scalar(24, 24, 255);
-		cv::rectangle(rgb, corners_[i].first, corners_[i].second, color, 2);
-	}
-}
-
-int Grid::checkGrid(cv::Point p1, cv::Point p2) {
-	// TODO calculate directly
-
-	for (int i = 0; i < rows_ * cols_; ++i) {
-		auto &corners = corners_[i];
-		if (p1.x >= corners.first.x &&
-			p1.x <= corners.second.x &&
-			p1.y >= corners.first.y &&
-			p1.y <= corners.second.y && 
-			p2.x >= corners.first.x &&
-			p2.x <= corners.second.x &&
-			p2.y >= corners.first.y &&
-			p2.y <= corners.second.y) {
-			return i;
-		}
-	}
-
-	return -1;
-}
-
-void Grid::updateGrid(int i) {
-	if (i >= 0 && i < static_cast<int>(visited_.size()) && !visited_[i]) {
-		visited_[i] = true;
-		visited_count_ += 1;
-	}
-}
-
-bool Grid::isVisited(int i) {
-	if (i >= 0 && i < static_cast<int>(visited_.size())) {
-		return visited_[i];
-	}
-	return false;
-}
-
-bool Grid::isComplete() {
-	return visited_count_ == static_cast<int>(visited_.size());
-}
-
-void Grid::reset() {
-	visited_count_ = 0;
-	visited_ = vector<bool>(rows_ * cols_, false);
-	// reset visited
-}
-
-// Calibration classes for different patterns
-
-CalibrationChessboard::CalibrationChessboard(const map<string, string> &opt) {
-	pattern_size_ = Size(	getOptionInt(opt, "cols", 9),
-							getOptionInt(opt, "rows", 6));
-	image_size_ = Size(	getOptionInt(opt, "width", 1280),
-						getOptionInt(opt, "height", 720));
-	pattern_square_size_ = getOptionDouble(opt, "square_size", 0.036);
-
-	LOG(INFO) << "Chessboard calibration parameters";
-	LOG(INFO) << "         rows: " << pattern_size_.height;
-	LOG(INFO) << "         cols: " << pattern_size_.width;
-	LOG(INFO) << "        width: " << image_size_.width;
-	LOG(INFO) << "       height: " << image_size_.height;
-	LOG(INFO) << "  square_size: " << pattern_square_size_;
-	LOG(INFO) << "-----------------------------------";
-
-	// From OpenCV (4.1.0) Documentation
-	//
-	// CALIB_CB_NORMALIZE_IMAGE	Normalize the image gamma with equalizeHist before detection.
-	// CALIB_CB_EXHAUSTIVE		Run an exhaustive search to improve detection rate.
-	// CALIB_CB_ACCURACY		Up sample input image to improve sub-pixel accuracy due to
-	//							aliasing effects. This should be used if an accurate camera
-	//							calibration is required.
-
-	chessboard_flags_ = cv::CALIB_CB_NORMALIZE_IMAGE | cv::CALIB_CB_ACCURACY;
-}
-
-void CalibrationChessboard::objectPoints(vector<Vec3f> &out) {
-	out.reserve(pattern_size_.width * pattern_size_.height);
-	for (int row = 0; row < pattern_size_.height; ++row) {
-	for (int col = 0; col < pattern_size_.width; ++col) {
-		out.push_back(Vec3f(col * pattern_square_size_, row * pattern_square_size_, 0));
-	}}
-}
-
-bool CalibrationChessboard::findPoints(Mat &img, vector<Vec2f> &points) {
-	return cv::findChessboardCornersSB(img, pattern_size_, points, chessboard_flags_);
-}
-
-
-void CalibrationChessboard::drawCorners(Mat &img, const vector<Vec2f> &points) {
-	using cv::Point2i;
-	vector<Point2i> corners(4);
-	corners[1] = Point2i(points[0]);
-	corners[0] = Point2i(points[pattern_size_.width - 1]);
-	corners[2] = Point2i(points[pattern_size_.width * (pattern_size_.height - 1)]);
-	corners[3] = Point2i(points.back());
-	
-	cv::Scalar color = cv::Scalar(200, 200, 200);
-	
-	for (int i = 0; i < 4; i++)
-	{
-		cv::line(img, corners[i % 4], corners[(i + 1) % 4], color, 2);
-	}
-}
-
-void CalibrationChessboard::drawPoints(Mat &img, const vector<Vec2f> &points) {
-	cv::drawChessboardCorners(img, pattern_size_, points, true);
-}
-
-}
-}
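Note on the removed Grid helper: it splits the image into cells and tracks which cells have already contained an accepted pattern, so that captured views end up spread over the whole field of view. A hedged usage sketch against the removed common.hpp interface follows; the function name and the acceptance policy are my own, not part of the original tool.

#include <vector>
#include <opencv2/core.hpp>
#include "common.hpp"  // Grid, from the removed common.hpp

// Accept a detection only when both extreme pattern corners land in a single
// grid cell that has not been visited yet.
bool acceptCapture(ftl::calibration::Grid &grid, cv::Mat &display,
                   const std::vector<cv::Vec2f> &points) {
	int cell = grid.checkGrid(cv::Point(points.front()), cv::Point(points.back()));
	bool accepted = (cell >= 0) && !grid.isVisited(cell);
	if (accepted) grid.updateGrid(cell);  // mark the cell as covered
	grid.drawGrid(display);               // green = visited, red = still needed
	return accepted;
}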
diff --git a/applications/calibration/src/common.hpp b/applications/calibration/src/common.hpp
deleted file mode 100644
index c84f25d249b49eee094e3e898090ffb9ff129f03..0000000000000000000000000000000000000000
--- a/applications/calibration/src/common.hpp
+++ /dev/null
@@ -1,111 +0,0 @@
-#ifndef _FTL_CALIBRATION_COMMON_HPP_
-#define _FTL_CALIBRATION_COMMON_HPP_
-
-#include <map>
-#include <string>
-
-#include <opencv2/core.hpp>
-
-namespace ftl {
-namespace calibration {
-
-std::string getOption(const std::map<std::string, std::string> &options, const std::string &opt);
-bool hasOption(const std::map<std::string, std::string> &options, const std::string &opt);
-int getOptionInt(const std::map<std::string, std::string> &options, const std::string &opt, int default_value);
-double getOptionDouble(const std::map<std::string, std::string> &options, const std::string &opt, double default_value);
-std::string getOptionString(const std::map<std::string, std::string> &options, const std::string &opt, std::string default_value);
-
-bool loadIntrinsics(const std::string &ifile, std::vector<cv::Mat> &K, std::vector<cv::Mat> &D, cv::Size &size);
-bool saveIntrinsics(const std::string &ofile, const std::vector<cv::Mat> &K, const std::vector<cv::Mat> &D, const cv::Size &size);
-
-// TODO loadExtrinsics()
-bool saveExtrinsics(const std::string &ofile, cv::Mat &R, cv::Mat &T, cv::Mat &R1, cv::Mat &R2, cv::Mat &P1, cv::Mat &P2, cv::Mat &Q);
-
-class Grid {
-private:
-	int rows_;
-	int cols_;
-	int width_;
-	int height_;
-	int cell_width_;
-	int cell_height_;
-	int offset_x_;
-	int offset_y_;
-	int visited_count_;
-
-	std::vector<std::pair<cv::Point, cv::Point>> corners_;
-	std::vector<bool> visited_;
-
-public:
-	Grid(int rows, int cols, int width, int height, int offset_x, int offset_y);
-	void drawGrid(cv::Mat &rgb);
-	int checkGrid(cv::Point p1, cv::Point p2);
-	void updateGrid(int i);
-	bool isVisited(int i);
-	bool isComplete();
-	void reset(); 
-};
-
-/**
- * @brief	Wrapper for OpenCV's calibration methods. Parameters depend on
- * 			implementation (different types of patterns).
- *
- * Calibration objects may store state, e.g. from previous views of calibration
- * images.
- */
-class Calibration {
-public:
-	/**
-	 * @brief	Calculate reference points for given pattern
-	 * @param	Output parameter
-	 */
-	void objectPoints(std::vector<cv::Vec3f> &out);
-
-	/**
-	 * @brief	Try to find calibration pattern in input image
-	 * @param	Input image
-	 * @param	Output parameter for found point image coordinates
-	 * @returns	true if pattern found, otherwise false
-	 */
-	bool findPoints(cv::Mat &in, std::vector<cv::Vec2f> &out);
-
-	/**
-	 * @brief	Draw points to image
-	 * @param	Image to draw to
-	 * @param	Pattern points (in image coordinates)
-	 */
-	void drawPoints(cv::Mat &img, const std::vector<cv::Vec2f> &points);
-};
-
-/**
- * @brief	Chessboard calibration pattern. Uses OpenCV's
- * 			findChessboardCornersSB function.
- * @todo	Parameters hardcoded in constructor
- *
- * All parameters (command line parameters):
- * 	- rows, cols: pattern size (inner corners)
- * 	- square_size: millimeters (TODO: meters)
- * 	- width, height: image size, pixels
- * 	- flags: see findChessboardCornersSB documentation (TODO: not implemented)
- */
-class CalibrationChessboard : Calibration {
-public:
-	explicit CalibrationChessboard(const std::map<std::string, std::string> &opt);
-	void objectPoints(std::vector<cv::Vec3f> &out);
-	bool findPoints(cv::Mat &in, std::vector<cv::Vec2f> &out);
-	void drawPoints(cv::Mat &img, const std::vector<cv::Vec2f> &points);
-	void drawCorners(cv::Mat &img, const std::vector<cv::Vec2f> &points);
-
-private:
-	int chessboard_flags_ = 0;
-	float pattern_square_size_;
-	cv::Size pattern_size_;
-	cv::Size image_size_;
-};
-
-// TODO other patterns, circles ...
-
-}
-}
-
-#endif // _FTL_CALIBRATION_COMMON_HPP_
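Note on the "other patterns" TODO above: a symmetric circles grid could have been added against the same interface roughly as sketched below. This is illustrative only; the class, its spacing parameter and the reuse of drawChessboardCorners for drawing are assumptions, not part of the removed code base.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>
#include "common.hpp"  // Calibration, from the removed header

// Hypothetical symmetric circles-grid pattern, following the same convention
// as CalibrationChessboard (non-virtual base, methods shadowed).
class CalibrationCircles : ftl::calibration::Calibration {
public:
	CalibrationCircles(cv::Size pattern_size, float spacing) :
		pattern_size_(pattern_size), spacing_(spacing) {}

	void objectPoints(std::vector<cv::Vec3f> &out) {
		out.reserve(pattern_size_.width * pattern_size_.height);
		for (int row = 0; row < pattern_size_.height; ++row) {
		for (int col = 0; col < pattern_size_.width; ++col) {
			out.push_back(cv::Vec3f(col * spacing_, row * spacing_, 0.0f));
		}}
	}

	bool findPoints(cv::Mat &in, std::vector<cv::Vec2f> &out) {
		return cv::findCirclesGrid(in, pattern_size_, out, cv::CALIB_CB_SYMMETRIC_GRID);
	}

	void drawPoints(cv::Mat &img, const std::vector<cv::Vec2f> &points) {
		cv::drawChessboardCorners(img, pattern_size_, points, true);
	}

private:
	cv::Size pattern_size_;
	float spacing_;  // circle centre spacing, same unit as the object points
};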
diff --git a/applications/calibration/src/lens.cpp b/applications/calibration/src/lens.cpp
deleted file mode 100644
index ef00ba17779fe3a61327e67b5b3da5e8a4ff0f5e..0000000000000000000000000000000000000000
--- a/applications/calibration/src/lens.cpp
+++ /dev/null
@@ -1,261 +0,0 @@
-#include "common.hpp"
-#include "lens.hpp"
-
-#include <ftl/config.h>
-#include <ftl/calibration/parameters.hpp>
-
-#include <loguru.hpp>
-
-#include <opencv2/core.hpp>
-#include <opencv2/core/utility.hpp>
-#include <opencv2/imgproc.hpp>
-#include <opencv2/calib3d.hpp>
-#include <opencv2/imgcodecs.hpp>
-#include <opencv2/videoio.hpp>
-#include <opencv2/highgui.hpp>
-
-#include <vector>
-#include <atomic>
-#include <thread>
-
-using std::map;
-using std::string;
-using std::vector;
-
-using cv::Mat;
-using cv::Vec2f;
-using cv::Vec3f;
-using cv::Size;
-
-using namespace ftl::calibration;
-
-void ftl::calibration::intrinsic(map<string, string> &opt) {
-	LOG(INFO) << "Begin intrinsic calibration";
-
-	// TODO PARAMETERS TO CONFIG FILE
-	const Size image_size = Size(	getOptionInt(opt, "width", 1920),
-							getOptionInt(opt, "height", 1080));
-	const int n_cameras = getOptionInt(opt, "n_cameras", 2);
-	const int iter = getOptionInt(opt, "iter", 20);
-	const int delay = getOptionInt(opt, "delay", 1000);
-	const double aperture_width = getOptionDouble(opt, "aperture_width", 6.2);
-	const double aperture_height = getOptionDouble(opt, "aperture_height", 4.6);
-	const string filename_intrinsics = getOptionString(opt, "profile", FTL_LOCAL_CONFIG_ROOT "/calibration.yml");
-	CalibrationChessboard calib(opt);
-	bool use_guess = getOptionInt(opt, "use_guess", 1);
-	//bool use_guess_distortion = getOptionInt(opt, "use_guess_distortion", 0);
-
-	LOG(INFO) << "Intrinsic calibration parameters";
-	LOG(INFO) << "               profile: " << filename_intrinsics;
-	LOG(INFO) << "             n_cameras: " << n_cameras;
-	LOG(INFO) << "                 width: " << image_size.width;
-	LOG(INFO) << "                height: " << image_size.height;
-	LOG(INFO) << "                  iter: " << iter;
-	LOG(INFO) << "                 delay: " << delay;
-	LOG(INFO) << "        aperture_width: " << aperture_width;
-	LOG(INFO) << "       aperture_height: " << aperture_height;
-	LOG(INFO) << "             use_guess: " << use_guess << "\n";
-	LOG(WARNING) << "WARNING: This application overwrites existing files and does not preserve previous values!";
-	//LOG(INFO) << "  use_guess_distortion: " << use_guess_distortion;
-
-	LOG(INFO) << "-----------------------------------";
-
-	// assume no tangential and thin prism distortion and only estimate first
-	// three radial distortion coefficients
-
-	int calibrate_flags = 	cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5 | cv::CALIB_FIX_K6 |
-							cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_S1_S2_S3_S4 | cv::CALIB_FIX_ASPECT_RATIO;
-
-	vector<Mat> camera_matrix(n_cameras), dist_coeffs(n_cameras);
-
-	for (Mat &d : dist_coeffs) {
-		d = Mat(Size(8, 1), CV_64FC1, cv::Scalar(0.0));
-	}
-
-	if (use_guess) {
-		camera_matrix.clear();
-		vector<Mat> tmp;
-		Size tmp_size;
-
-		loadIntrinsics(filename_intrinsics, camera_matrix, tmp, tmp_size);
-		CHECK(camera_matrix.size() == static_cast<unsigned int>(n_cameras));
-
-		if ((tmp_size != image_size) && (!tmp_size.empty())) {
-			for (Mat &K : camera_matrix) {
-				K = ftl::calibration::scaleCameraMatrix(K, tmp_size, image_size);
-			}
-		}
-
-		if (tmp_size.empty()) {
-			LOG(ERROR) << "No valid calibration found.";
-		}
-		else {
-			calibrate_flags |= cv::CALIB_USE_INTRINSIC_GUESS;
-		}
-	}
-
-	vector<cv::VideoCapture> cameras;
-	cameras.reserve(n_cameras);
-
-	for (int c = 0; c < n_cameras; c++) { cameras.emplace_back(c); }
-	for (auto &camera : cameras)
-	{
-		if (!camera.isOpened()) {
-			LOG(ERROR) << "Could not open camera device";
-			return;
-		}
-		camera.set(cv::CAP_PROP_FRAME_WIDTH, image_size.width);
-		camera.set(cv::CAP_PROP_FRAME_HEIGHT, image_size.height);
-	}
-
-	vector<vector<vector<Vec2f>>> image_points(n_cameras);
-	vector<vector<vector<Vec3f>>> object_points(n_cameras);
-
-	vector<Mat> img(n_cameras);
-	vector<Mat> img_display(n_cameras);
-	vector<int> count(n_cameras, 0);
-	Mat display(Size(image_size.width * n_cameras, image_size.height), CV_8UC3);
-
-	for (int c = 0; c < n_cameras; c++) {
-		img_display[c] = Mat(display, cv::Rect(c * image_size.width, 0, image_size.width, image_size.height));
-	}
-
-	std::mutex m;
-	std::atomic<bool> ready = false;
-	auto capture = std::thread(
-		[n_cameras, delay, &m, &ready, &count, &calib, &img, &image_points, &object_points]() {
-
-		vector<Mat> tmp(n_cameras);
-		while(true) {
-			if (!ready) {
-				std::this_thread::sleep_for(std::chrono::milliseconds(delay));
-				continue;
-			}
-
-			m.lock();
-			ready = false;
-			for (int c = 0; c < n_cameras; c++) {
-				img[c].copyTo(tmp[c]);
-			}
-			m.unlock();
-
-			for (int c = 0; c < n_cameras; c++) {
-				vector<Vec2f> points;
-				if (calib.findPoints(tmp[c], points)) {
-					count[c]++;
-				}
-				else { continue; }
-
-				vector<Vec3f> points_ref;
-				calib.objectPoints(points_ref);
-				Mat camera_matrix, dist_coeffs;
-				image_points[c].push_back(points);
-				object_points[c].push_back(points_ref);
-			}
-
-			std::this_thread::sleep_for(std::chrono::milliseconds(delay));
-		}
-	});
-
-	while (iter > *std::min_element(count.begin(), count.end())) {
-		if (m.try_lock()) {
-			for (auto &camera : cameras) { camera.grab(); }
-
-			for (int c = 0; c < n_cameras; c++) {
-				cameras[c].retrieve(img[c]);
-			}
-
-			ready = true;
-			m.unlock();
-		}
-
-		for (int c = 0; c < n_cameras; c++) {
-			img[c].copyTo(img_display[c]);
-			m.lock();
-
-			if (image_points[c].size() > 0) {
-
-				for (auto &points : image_points[c]) {
-					calib.drawCorners(img_display[c], points);
-				}
-
-				calib.drawPoints(img_display[c], image_points[c].back());
-			}
-
-			m.unlock();
-		}
-
-		cv::namedWindow("Cameras", cv::WINDOW_KEEPRATIO | cv::WINDOW_NORMAL);
-		cv::imshow("Cameras", display);
-
-		cv::waitKey(10);
-	}
-
-	cv::destroyAllWindows();
-
-	//bool calib_ok = true;
-
-	for (int c = 0; c < n_cameras; c++) {
-		LOG(INFO) << "Calculating intrinsic parameters for camera " << std::to_string(c);
-		vector<Mat> rvecs, tvecs;
-
-		double rms = cv::calibrateCamera(
-							object_points[c], image_points[c],
-							image_size, camera_matrix[c], dist_coeffs[c],
-							rvecs, tvecs, calibrate_flags
-		);
-
-		LOG(INFO) << "final reprojection RMS error: " << rms;
-
-		if (!ftl::calibration::validate::distortionCoefficients(dist_coeffs[c], image_size)) {
-			LOG(ERROR)	<< "Calibration failed: invalid distortion coefficients:\n"
-						<< dist_coeffs[c];
-
-			LOG(WARNING) << "Estimating only intrinsic parameters for camera " << std::to_string(c);
-
-			dist_coeffs[c] = Mat(Size(8, 1), CV_64FC1, cv::Scalar(0.0));
-			calibrate_flags |=	cv::CALIB_FIX_K1 | cv::CALIB_FIX_K2 | cv::CALIB_FIX_K3;
-			c--;
-			continue;
-		}
-
-		//calib_ok = true;
-		calibrate_flags &=	~cv::CALIB_FIX_K1 & ~cv::CALIB_FIX_K2 & ~cv::CALIB_FIX_K3;
-
-		double fovx, fovy, focal_length, aspect_ratio;
-		cv::Point2d principal_point;
-
-		// TODO: check for valid aperture width/height; do not run if not valid values
-		cv::calibrationMatrixValues(camera_matrix[c], image_size, aperture_width, aperture_height,
-									fovx, fovy, focal_length, principal_point, aspect_ratio);
-
-		LOG(INFO) << "";
-		LOG(INFO) << "            fovx (deg): " << fovx;
-		LOG(INFO) << "            fovy (deg): " << fovy;
-		LOG(INFO) << "     focal length (mm): " << focal_length;
-		LOG(INFO) << "  principal point (mm): " << principal_point;
-		LOG(INFO) << "          aspect ratio: " << aspect_ratio;
-		LOG(INFO) << "";
-		LOG(INFO) << "Camera matrix:\n" << camera_matrix[c];
-		LOG(INFO) << "Distortion coefficients:\n" << dist_coeffs[c];
-		LOG(INFO) << "";
-	}
-
-	saveIntrinsics(filename_intrinsics, camera_matrix, dist_coeffs, image_size);
-	LOG(INFO) << "intrinsic parameters saved to: " << filename_intrinsics;
-
-	vector<Mat> map1(n_cameras), map2(n_cameras);
-	for (int c = 0; c < n_cameras; c++) {
-		cv::initUndistortRectifyMap(camera_matrix[c], dist_coeffs[c], Mat::eye(3,3, CV_64F), camera_matrix[c],
-									image_size, CV_16SC2, map1[c], map2[c]);
-	}
-
-	while (cv::waitKey(25) != 27) {
-		for (auto &camera : cameras ) {	camera.grab(); }
-		for (int c = 0; c < n_cameras; c++) {
-			cameras[c].retrieve(img[c]);
-			cv::remap(img[c], img[c], map1[c], map2[c], cv::INTER_CUBIC);
-			cv::imshow("Camera " + std::to_string(c), img[c]);
-		}
-	}
-}
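Note on the removed intrinsic stage: once the capture loop has produced per-view object/image point lists, the core is a single cv::calibrateCamera call with the higher-order radial, tangential and thin-prism terms disabled. A condensed sketch under those assumptions follows; the function name is hypothetical, and the intrinsic-guess rescaling and distortion validation of the removed code are omitted.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

// objectPoints/imagePoints: one entry per accepted view of the pattern.
// K and D receive the camera matrix and distortion coefficients; the return
// value is the overall RMS reprojection error.
double calibrateSingleCamera(const std::vector<std::vector<cv::Vec3f>> &objectPoints,
                             const std::vector<std::vector<cv::Vec2f>> &imagePoints,
                             cv::Size imageSize, cv::Mat &K, cv::Mat &D) {
	if (K.empty()) K = cv::Mat::eye(3, 3, CV_64FC1);  // no prior estimate supplied
	D = cv::Mat::zeros(cv::Size(8, 1), CV_64FC1);

	// Only k1..k3 are estimated: no tangential, thin-prism or k4..k6 terms.
	int flags = cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5 | cv::CALIB_FIX_K6 |
	            cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_FIX_S1_S2_S3_S4;

	std::vector<cv::Mat> rvecs, tvecs;  // per-view poses, not needed afterwards
	return cv::calibrateCamera(objectPoints, imagePoints, imageSize,
	                           K, D, rvecs, tvecs, flags);
}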
diff --git a/applications/calibration/src/lens.hpp b/applications/calibration/src/lens.hpp
deleted file mode 100644
index ec455b6bd7b3de6085074e2ea6b6ca7cca3fee9b..0000000000000000000000000000000000000000
--- a/applications/calibration/src/lens.hpp
+++ /dev/null
@@ -1,15 +0,0 @@
-#ifndef _FTL_CALIBRATION_LENS_HPP_
-#define _FTL_CALIBRATION_LENS_HPP_
-
-#include <map>
-#include <string>
-
-namespace ftl {
-namespace calibration {
-
-void intrinsic(std::map<std::string, std::string> &opt);
-
-}
-}
-
-#endif  // _FTL_CALIBRATION_LENS_HPP_
diff --git a/applications/calibration/src/main.cpp b/applications/calibration/src/main.cpp
deleted file mode 100644
index 4bdf85396d969d069e6f44d47de93ff22f5f1a75..0000000000000000000000000000000000000000
--- a/applications/calibration/src/main.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-#include <loguru.hpp>
-#include <ftl/configuration.hpp>
-
-#include "lens.hpp"
-#include "stereo.hpp"
-#include "align.hpp"
-
-int main(int argc, char **argv) {
-	loguru::g_preamble_date = false;
-	loguru::g_preamble_uptime = false;
-	loguru::g_preamble_thread = false;
-	loguru::init(argc, argv, "--verbosity");
-	argc--;
-	argv++;
-
-	// Process Arguments
-	auto options = ftl::config::read_options(&argv, &argc);
-
-	ftl::calibration::intrinsic(options);
-
-	return 0;
-}
diff --git a/applications/calibration/src/stereo.cpp b/applications/calibration/src/stereo.cpp
deleted file mode 100644
index 964cf815300971cf31130e78fae94e1c21a172ad..0000000000000000000000000000000000000000
--- a/applications/calibration/src/stereo.cpp
+++ /dev/null
@@ -1,292 +0,0 @@
-#include <loguru.hpp>
-#include <ftl/config.h>
-
-#include <opencv2/imgproc.hpp>
-#include <opencv2/videoio.hpp>
-#include <opencv2/highgui.hpp>
-
-#include "common.hpp"
-#include "stereo.hpp"
-
-using std::vector;
-using std::map;
-using std::string;
-
-using cv::Mat;
-using cv::Vec2f, cv::Vec3f;
-using cv::Size;
-
-using cv::stereoCalibrate;
-
-using namespace ftl::calibration;
-
-void ftl::calibration::stereo(map<string, string> &opt) {
-	LOG(INFO) << "Begin stereo calibration";
-
-	// TODO PARAMETERS TO CONFIG FILE
-	// image size, also used by CalibrationChessboard
-	Size image_size = Size(	getOptionInt(opt, "width", 1280),
-							getOptionInt(opt, "height", 720));
-	// iterations
-	int iter = getOptionInt(opt, "iter", 50);
-	// delay between images
-	double delay = getOptionInt(opt, "delay", 250);
-	// max_error for a single image; if error larger image discarded
-	double max_error = getOptionDouble(opt, "max_error", 1.0);
-	// scaling/cropping (see OpenCV stereoRectify())
-	float alpha = getOptionDouble(opt, "alpha", 0);
-	// intrinsics filename
-	string filename_intrinsics = getOptionString(opt, "profile", "./panasonic.yml");
-
-	bool use_grid = (bool) getOptionInt(opt, "use_grid", 0);
-
-	LOG(INFO) << "Stereo calibration parameters";
-	LOG(INFO) << "     profile: " << filename_intrinsics;
-	LOG(INFO) << "       width: " << image_size.width;
-	LOG(INFO) << "      height: " << image_size.height;
-	LOG(INFO) << "        iter: " << iter;
-	LOG(INFO) << "       delay: " << delay;
-	LOG(INFO) << "   max_error: " << max_error;
-	LOG(INFO) << "       alpha: " << alpha;
-	LOG(INFO) << "    use_grid: " << use_grid;
-	LOG(INFO) << "-----------------------------------";
-
-	CalibrationChessboard calib(opt);
-	vector<Grid> grids;
-	int grid_i = 0;
-
-	// grid parameters, 3x3 grid; one small grid and one large grid. Grids are cycled until
-	// iter reaches zero
-	grids.push_back(Grid(3, 3,
-						(3.0f/4.0f) * image_size.width, (3.0f/4.0f) * image_size.height,
-						((1.0f/4.0f) * image_size.width) / 2, ((1.0f/4.0f) * image_size.height) / 2));
-
-	grids.push_back(Grid(3, 3, image_size.width, image_size.height, 0, 0));
-	Grid grid = grids[grid_i];
-
-	// PARAMETERS
-
-	int stereocalibrate_flags =
-		cv::CALIB_FIX_INTRINSIC | cv::CALIB_FIX_PRINCIPAL_POINT | cv::CALIB_FIX_ASPECT_RATIO |
-		cv::CALIB_ZERO_TANGENT_DIST | cv::CALIB_SAME_FOCAL_LENGTH | 
-		cv::CALIB_FIX_K3 | cv::CALIB_FIX_K4 | cv::CALIB_FIX_K5;
-
-	vector<cv::VideoCapture> cameras { cv::VideoCapture(0), cv::VideoCapture(1) };
-
-	for (auto &camera : cameras ) {
-		if (!camera.isOpened()) {
-			LOG(ERROR) << "Could not open camera device";
-			return;
-		}
-		camera.set(cv::CAP_PROP_FRAME_WIDTH, image_size.width); 
-		camera.set(cv::CAP_PROP_FRAME_HEIGHT, image_size.height);
-	}
-
-	// image points to calculate the parameters after all input data is captured
-	vector<vector<vector<Vec2f>>> image_points(2);
-	vector<vector<Vec3f>> object_points;
-
-	// image points for each grid, updated to image_points and object_points
-	// after the grid is complete
-	vector<vector<vector<Vec2f>>> image_points_grid(9, vector<vector<Vec2f>>(2));
-	vector<vector<Vec3f>> object_points_grid(9);
-	
-	vector<Mat> dist_coeffs(2);
-	vector<Mat> camera_matrices(2);
-	Size intrinsic_resolution;
-	if (!loadIntrinsics(filename_intrinsics, camera_matrices, dist_coeffs, intrinsic_resolution))
-	{
-		LOG(FATAL) << "Failed to load intrinsic camera parameters from file.";
-	}
-	
-	if (intrinsic_resolution != image_size)
-	{
-		LOG(FATAL) << "Intrinsic resolution is not the same as the input resolution (TODO)";
-	}
-
-	Mat R, T, E, F, per_view_errors;
-	
-	// capture calibration patterns
-	while (iter > 0) {
-		int res = 0;
-		int grid_pos = -1;
-
-		vector<Mat> new_img(2);
-		vector<vector<Vec2f>> new_points(2);
-
-		int delay_remaining = delay;
-		for (; delay_remaining > 50; delay_remaining -= 50) {
-			cv::waitKey(50);
-
-			for (size_t i = 0; i < 2; i++) {
-				auto &camera = cameras[i];
-				auto &img = new_img[i];
-
-				camera.grab();
-				camera.retrieve(img);
-
-				if (use_grid && i == 0) grid.drawGrid(img);
-				cv::imshow("Camera " + std::to_string(i), img);
-			}
-		}
-
-		for (size_t i = 0; i < 2; i++) {
-			auto &img = new_img[i];
-			auto &points = new_points[i];
-
-			// TODO move to "findPoints"-thread
-			if (calib.findPoints(img, points)) {
-				calib.drawPoints(img, points);
-				res++;
-			}
-
-			cv::imshow("Camera " + std::to_string(i), img);
-		}
-
-		if (res != 2) { LOG(WARNING) << "Pattern not detected on all inputs"; continue; }
-		
-		if (use_grid) {
-			// top left and bottom right corners; not perfect but good enough
-			grid_pos = grid.checkGrid(
-				cv::Point(new_points[0][0]),
-				cv::Point(new_points[0][new_points[0].size()-1])
-			);
-
-			if (grid_pos == -1) { LOG(WARNING) << "Captured pattern not inside grid cell"; continue; }
-		}
-
-		vector<Vec3f> points_ref;
-		calib.objectPoints(points_ref);
-		
-		/* doesn't seem to be very helpful (error almost always low enough)
-		// calculate reprojection error with single pair of images
-		// reject it if RMS reprojection error too high
-		int flags = stereocalibrate_flags;
-		
-		double rms_iter = stereoCalibrate(
-					vector<vector<Vec3f>> { points_ref }, 
-					vector<vector<Vec2f>> { new_points[0] },
-					vector<vector<Vec2f>> { new_points[1] },
-					camera_matrices[0], dist_coeffs[0],
-					camera_matrices[1], dist_coeffs[1],
-					image_size, R, T, E, F, per_view_errors,
-					flags);
-		
-		LOG(INFO) << "rms for pattern: " << rms_iter;
-		if (rms_iter > max_error) {
-			LOG(WARNING) << "RMS reprojection error too high, maximum allowed error: " << max_error;
-			continue;
-		}*/
-		
-		if (use_grid) {
-			// store results in result grid
-			object_points_grid[grid_pos] = points_ref;
-			for (size_t i = 0; i < 2; i++) { image_points_grid[grid_pos][i] = new_points[i]; }
-			
-			grid.updateGrid(grid_pos);
-
-			if (grid.isComplete()) {
-				LOG(INFO) << "Grid complete";
-				grid.reset();
-				grid_i = (grid_i + 1) % grids.size();
-				grid = grids[grid_i];
-
-				// copy results
-				object_points.insert(object_points.end(), object_points_grid.begin(), object_points_grid.end());
-				for (size_t i = 0; i < image_points_grid.size(); i++) {
-					for (size_t j = 0; j < 2; j++) { image_points[j].push_back(image_points_grid[i][j]); }
-				}
-				iter--;
-			}
-		}
-		else {
-			object_points.push_back(points_ref);
-			for (size_t i = 0; i < 2; i++) { image_points[i].push_back(new_points[i]); }
-			iter--;
-		}
-	}
-
-	// calculate the full stereo calibration using all image point pairs
-	// captured above (the per-image RMS rejection step is currently disabled)
-
-	LOG(INFO) << "Calculating extrinsic stereo parameters using " << object_points.size() << " samples.";
-
-	CHECK(object_points.size() == image_points[0].size());
-	CHECK(object_points.size() == image_points[1].size());
-
-	double rms = stereoCalibrate(object_points,
-		image_points[0], image_points[1],
-		camera_matrices[0], dist_coeffs[0],
-		camera_matrices[1], dist_coeffs[1],
-		image_size, R, T, E, F, per_view_errors,
-		stereocalibrate_flags,
-		cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 120, 1e-6)
-	);
-
-	LOG(INFO) << "Final extrinsic calibration RMS (reprojection error): " << rms;
-	for (int i = 0; i < per_view_errors.rows * per_view_errors.cols; i++) {
-		LOG(4) << "error for sample " << i << ": " << ((double*) per_view_errors.data)[i];
-	}
-
-	Mat R1, R2, P1, P2, Q;
-	cv::Rect validRoi[2];
-
-	stereoRectify(
-		camera_matrices[0], dist_coeffs[0],
-		camera_matrices[1], dist_coeffs[1],
-		image_size, R, T, R1, R2, P1, P2, Q,
-		0, alpha, image_size,
-		&validRoi[0], &validRoi[1]
-	);
-
-	saveExtrinsics(FTL_LOCAL_CONFIG_ROOT "/extrinsics.yml", R, T, R1, R2, P1, P2, Q);
-	LOG(INFO) << "Stereo camera extrinsics saved to: " << FTL_LOCAL_CONFIG_ROOT "/extrinsics.yml";
-
-	for (size_t i = 0; i < 2; i++) { cv::destroyWindow("Camera " + std::to_string(i)); }
-
-	// Visualize results
-	vector<Mat> map1(2), map2(2);
-	cv::initUndistortRectifyMap(camera_matrices[0], dist_coeffs[0], R1, P1, image_size, CV_16SC2, map1[0], map2[0]);
-	cv::initUndistortRectifyMap(camera_matrices[1], dist_coeffs[1], R2, P2, image_size, CV_16SC2, map1[1], map2[1]);
-
-	vector<Mat> in(2);
-	vector<Mat> out(2);
-	// vector<Mat> out_gray(2);
-	// Mat diff, diff_color;
-
-	while(cv::waitKey(25) == -1) {
-		for(size_t i = 0; i < 2; i++) {
-			auto &camera = cameras[i];
-			camera.grab();
-			camera.retrieve(in[i]);
-	
-			auto p = cv::Point2i(camera_matrices[i].at<double>(0, 2), camera_matrices[i].at<double>(1, 2));
-			cv::drawMarker(in[i], p, cv::Scalar(51, 204, 51), cv::MARKER_CROSS, 40, 1);
-			cv::drawMarker(in[i], p, cv::Scalar(51, 204, 51), cv::MARKER_SQUARE, 25);
-
-			cv::remap(in[i], out[i], map1[i], map2[i], cv::INTER_CUBIC);
-
-			// draw lines
-			for (int r = 50; r < image_size.height; r = r+50) {
-				cv::line(out[i], cv::Point(0, r), cv::Point(image_size.width-1, r), cv::Scalar(0,0,255), 1);
-			}
-
-			if (i == 0) { // left camera
-				auto p_r = cv::Point2i(-Q.at<double>(0, 3), -Q.at<double>(1, 3));
-				cv::drawMarker(out[i], p_r, cv::Scalar(0, 0, 204), cv::MARKER_CROSS, 30);
-				cv::drawMarker(out[i], p_r, cv::Scalar(0, 0, 204), cv::MARKER_SQUARE);
-			}
-			
-			cv::imshow("Camera " + std::to_string(i) + " (unrectified)", in[i]);
-			cv::imshow("Camera " + std::to_string(i) + " (rectified)", out[i]);
-		}
-		
-		/* not useful
-		cv::absdiff(out_gray[0], out_gray[1], diff);
-		cv::applyColorMap(diff, diff_color, cv::COLORMAP_JET);
-		cv::imshow("Difference", diff_color);
-		*/
-	}
-
-	cv::destroyAllWindows();
-}
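Note on the removed stereo stage: with intrinsics loaded from file and held fixed, the remaining work is cv::stereoCalibrate to recover R and T, followed by cv::stereoRectify for the rectification transforms. A hedged sketch of that core follows; names are hypothetical, and capture, grid coverage and the preview windows are omitted.

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

// obj: reference pattern points per view; left/right: matching image points.
// K/D: per-camera intrinsics, kept fixed. Outputs rectification rotations
// R1/R2, projection matrices P1/P2 and the disparity-to-depth matrix Q.
double stereoExtrinsics(const std::vector<std::vector<cv::Vec3f>> &obj,
                        const std::vector<std::vector<cv::Vec2f>> &left,
                        const std::vector<std::vector<cv::Vec2f>> &right,
                        std::vector<cv::Mat> &K, std::vector<cv::Mat> &D,
                        cv::Size size, cv::Mat &R1, cv::Mat &R2,
                        cv::Mat &P1, cv::Mat &P2, cv::Mat &Q) {
	cv::Mat R, T, E, F;
	// R and T give the pose of the second camera relative to the first.
	double rms = cv::stereoCalibrate(obj, left, right,
		K[0], D[0], K[1], D[1], size, R, T, E, F,
		cv::CALIB_FIX_INTRINSIC,
		cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 120, 1e-6));

	// alpha = 0 crops the rectified images to valid pixels only.
	cv::stereoRectify(K[0], D[0], K[1], D[1], size, R, T,
	                  R1, R2, P1, P2, Q, 0, 0.0, size);
	return rms;  // RMS reprojection error over all views
}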
diff --git a/applications/calibration/src/stereo.hpp b/applications/calibration/src/stereo.hpp
deleted file mode 100644
index 0175ea366251fc9003d4571c97b06c9e38ad5a83..0000000000000000000000000000000000000000
--- a/applications/calibration/src/stereo.hpp
+++ /dev/null
@@ -1,18 +0,0 @@
-#ifndef _FTL_CALIBRATION_STEREO_HPP_
-#define _FTL_CALIBRATION_STEREO_HPP_
-
-#include <map>
-#include <string>
-
-#include <opencv2/core.hpp>
-#include <opencv2/calib3d.hpp>
-
-namespace ftl {
-namespace calibration {
-
-void stereo(std::map<std::string, std::string> &opt);
-
-}
-}
-
-#endif  // _FTL_CALIBRATION_STEREO_HPP_