Commit 50876bc7 authored by Sebastian Hahta

set pose from vision node

parent bbac581e
1 merge request: !210 Feature/multiplexer pose
Pipeline #18139 failed
@@ -281,11 +281,14 @@ void ftl::gui::Camera::showSettings() {
#ifdef HAVE_OPENVR
bool ftl::gui::Camera::setVR(bool on) {
if (on == vr_mode_) {
LOG(WARNING) << "VR mode already enabled";
return on;
}
vr_mode_ = on;
/*
if (on) {
setChannel(Channel::Right);
src_->set("baseline", baseline_);
@@ -311,7 +314,7 @@ bool ftl::gui::Camera::setVR(bool on) {
setChannel(Channel::Left); // reset to left channel
// todo restore camera params
}
*/
return vr_mode_;
}
#endif
@@ -447,7 +450,7 @@ const GLTexture &ftl::gui::Camera::captureFrame() {
if (baseline_in != baseline_) {
baseline_ = baseline_in;
src_->set("baseline", baseline_);
//src_->set("baseline", baseline_);
}
Eigen::Matrix4d pose = ConvertSteamVRMatrixToMatrix4(rTrackedDevicePose_[vr::k_unTrackedDeviceIndex_Hmd].mDeviceToAbsoluteTracking);
Eigen::Vector3d ea = pose.block<3, 3>(0, 0).eulerAngles(0, 1, 2);
......
@@ -43,8 +43,8 @@ class Calibrate : public ftl::Configurable {
/* @brief Get disparity to depth matrix
*
* 2020/01/15: Not used, StereoVideoSource creates a Camera object which
* is used to calculate depth from disparity (disp2depth.cu)
* 2020/01/15: StereoVideoSource creates a Camera object which is used to
* calculate depth from disparity (disp2depth.cu)
*/
const cv::Mat &getQ() const { return Q_; }
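The comment above defers the actual depth computation to disp2depth.cu; as a point of reference only, this is how a reprojection matrix like the one returned by getQ() is normally used with plain OpenCV (a sketch, not the ftl code; all names below are illustrative):

#include <opencv2/core.hpp>
#include <opencv2/calib3d.hpp>

// Turn a CV_32F disparity image into per-pixel XYZ points using the 4x4 Q matrix.
// The Z channel is the metric depth; per pixel this encodes
// depth = f * baseline / (disparity + doffs).
cv::Mat disparityToPoints(const cv::Mat &disparity, const cv::Mat &Q) {
	cv::Mat points3d;  // CV_32FC3 output
	cv::reprojectImageTo3D(disparity, points3d, Q, true /* handle missing values */);
	return points3d;
}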
@@ -56,7 +56,7 @@ class Calibrate : public ftl::Configurable {
/* @brief Get camera pose from calibration
*/
cv::Mat getPose() { return pose_; };
const cv::Mat &getPose() const { return pose_; };
/* @brief Enable/disable recitification. If disabled, instance returns
* original camera intrinsic parameters (getCameraMatrixLeft() and
......
#include <loguru.hpp>
#include <Eigen/Eigen>
#include <opencv2/core/eigen.hpp>
#include "stereovideo.hpp"
#include "ftl/configuration.hpp"
@@ -76,25 +79,6 @@ void StereoVideoSource::init(const string &file) {
pipeline_input_->append<ftl::operators::NVOpticalFlow>("optflow");
#endif
//depth_size_ = cv::Size( host_->value("depth_width", 1280),
// host_->value("depth_height", 720));
/*pipeline_depth_ = ftl::config::create<ftl::operators::Graph>(host_, "disparity");
depth_size_ = cv::Size( pipeline_depth_->value("width", color_size_.width),
pipeline_depth_->value("height", color_size_.height));
pipeline_depth_->append<ftl::operators::FixstarsSGM>("algorithm");
#ifdef HAVE_OPTFLOW
pipeline_depth_->append<ftl::operators::OpticalFlowTemporalSmoothing>("optflow_filter");
#endif
pipeline_depth_->append<ftl::operators::DisparityBilateralFilter>("bilateral_filter");
pipeline_depth_->append<ftl::operators::DisparityToDepth>("calculate_depth");
pipeline_depth_->append<ftl::operators::ColourChannels>("colour"); // Convert BGR to BGRA
pipeline_depth_->append<ftl::operators::Normals>("normals"); // Estimate surface normals
pipeline_depth_->append<ftl::operators::CrossSupport>("cross");
pipeline_depth_->append<ftl::operators::DiscontinuityMask>("discontinuity_mask");
pipeline_depth_->append<ftl::operators::AggreMLS>("mls"); // Perform MLS (using smoothing channel)*/
calib_ = ftl::create<Calibrate>(host_, "calibration", cv::Size(lsrc_->fullWidth(), lsrc_->fullHeight()), stream_);
// Generate camera parameters from camera matrix
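The code that actually fills the parameters is collapsed in this hunk; as a sketch only (function and variable names are illustrative, not the ftl API), the usual mapping from a 3x3 camera matrix K to individual intrinsics is:

// Sketch: extract pinhole intrinsics from a 3x3 camera matrix K
// (e.g. one returned by the Calibrate camera-matrix getters).
void intrinsicsFromK(const cv::Mat &K, double &fx, double &fy, double &cx, double &cy) {
	fx = K.at<double>(0, 0);  // focal length in pixels, x
	fy = K.at<double>(1, 1);  // focal length in pixels, y
	cx = K.at<double>(0, 2);  // principal point x
	cy = K.at<double>(1, 2);  // principal point y
}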
@@ -123,7 +107,7 @@ void StereoVideoSource::init(const string &file) {
// TODO: remove (not used, fx/fy/baseline/.. do not change)
// in case they are modified, update using Calibrate
// (requires new method)
/*
host_->on("baseline", [this](const ftl::config::Event &e) {
params_.baseline = host_->value("baseline", params_.baseline);
UNIQUE_LOCK(host_->mutex(), lk);
@@ -136,12 +120,11 @@ void StereoVideoSource::init(const string &file) {
UNIQUE_LOCK(host_->mutex(), lk);
calib_->updateCalibration(params_);
});
//
host_->on("doffs", [this](const ftl::config::Event &e) {
params_.doffs = host_->value("doffs", params_.doffs);
});
//
// left and right masks (areas outside rectified images)
// TODO: remove mask
@@ -152,7 +135,10 @@ void StereoVideoSource::init(const string &file) {
cv::Mat mask_l;
mask_l_gpu.download(mask_l);
mask_l_ = (mask_l == 0);
*/
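// Set this source's pose from the calibration extrinsics: convert the cv::Mat
// returned by Calibrate::getPose() to an Eigen::Matrix4d and register it.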
Eigen::Matrix4d pose;
cv::cv2eigen(calib_->getPose(), pose);
setPose(pose);
LOG(INFO) << "StereoVideo source ready...";
ready_ = true;
@@ -233,43 +219,6 @@ bool StereoVideoSource::compute(int n, int b) {
//stream_.waitForCompletion();
host_->notify(timestamp_, frame);
/*if (chan == Channel::Depth) {
// stereo algorithms assume input same size as output
bool resize = (depth_size_ != color_size_);
cv::cuda::GpuMat& left = frame.get<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat& right = frame.get<cv::cuda::GpuMat>(Channel::Right);
if (left.empty() || right.empty()) {
return false;
}
if (resize) {
cv::cuda::swap(fullres_left_, left);
cv::cuda::swap(fullres_right_, right);
cv::cuda::resize(fullres_left_, left, depth_size_, 0, 0, cv::INTER_CUBIC, stream_);
cv::cuda::resize(fullres_right_, right, depth_size_, 0, 0, cv::INTER_CUBIC, stream_);
}
pipeline_depth_->apply(frame, frame, host_, cv::cuda::StreamAccessor::getStream(stream_));
stream_.waitForCompletion();
if (resize) {
cv::cuda::swap(fullres_left_, left);
cv::cuda::swap(fullres_right_, right);
}
host_->notify(timestamp_, frame);
} else if (chan == Channel::Right) {
stream_.waitForCompletion();
host_->notify(timestamp_, frame);
} else {
stream_.waitForCompletion();
host_->notify(timestamp_, frame);
}*/
return true;
}
......
#include <fstream>
#include <ftl/streams/filestream.hpp>
using ftl::stream::File;
......