Skip to content
Snippets Groups Projects
Commit 43e3f842 authored by Sebastian Hahta's avatar Sebastian Hahta
Browse files

Merge branch 'aruco' into 'master'

aruco tags; headset position adjustment

See merge request nicolas.pope/ftl!340
parents 94280cb5 976ed621
No related branches found
No related tags found
1 merge request: !340 "aruco tags; headset position adjustment"
Pipeline #29356 failed
...@@ -188,11 +188,11 @@ static void run(ftl::Configurable *root) { ...@@ -188,11 +188,11 @@ static void run(ftl::Configurable *root) {
root->on("quiet", quiet, false); root->on("quiet", quiet, false);
auto *pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline"); auto *pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline");
pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false); pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
pipeline->append<ftl::operators::DepthChannel>("depth"); // Ensure there is a depth channel pipeline->append<ftl::operators::DepthChannel>("depth"); // Ensure there is a depth channel
//pipeline->append<ftl::operators::ClipScene>("clipping")->value("enabled", false); //pipeline->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
pipeline->restore("vision_pipeline", { "clipping" }); pipeline->restore("vision_pipeline", { "clipping" });
pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
auto h = creator->onFrameSet([sender,outstream,&stats_count,&latency,&frames,&stats_time,pipeline,&encodable,&previous_encodable](const ftl::data::FrameSetPtr &fs) { auto h = creator->onFrameSet([sender,outstream,&stats_count,&latency,&frames,&stats_time,pipeline,&encodable,&previous_encodable](const ftl::data::FrameSetPtr &fs) {
......
...@@ -121,6 +121,7 @@ class ArUco : public ftl::operators::Operator { ...@@ -121,6 +121,7 @@ class ArUco : public ftl::operators::Operator {
inline Operator::Type type() const override { return Operator::Type::OneToOne; } inline Operator::Type type() const override { return Operator::Type::OneToOne; }
bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override; bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
virtual void wait(cudaStream_t) override;
ftl::codecs::Channel channel_in_; ftl::codecs::Channel channel_in_;
ftl::codecs::Channel channel_out_; ftl::codecs::Channel channel_out_;
...@@ -129,6 +130,7 @@ class ArUco : public ftl::operators::Operator { ...@@ -129,6 +130,7 @@ class ArUco : public ftl::operators::Operator {
bool estimate_pose_; bool estimate_pose_;
float marker_size_; float marker_size_;
cv::Mat tmp_; cv::Mat tmp_;
std::future<void> job_;
cv::Ptr<cv::aruco::Dictionary> dictionary_; cv::Ptr<cv::aruco::Dictionary> dictionary_;
cv::Ptr<cv::aruco::DetectorParameters> params_; cv::Ptr<cv::aruco::DetectorParameters> params_;
......
...@@ -40,17 +40,24 @@ static Eigen::Matrix4d matrix(cv::Vec3d &rvec, cv::Vec3d &tvec) { ...@@ -40,17 +40,24 @@ static Eigen::Matrix4d matrix(cv::Vec3d &rvec, cv::Vec3d &tvec) {
} }
ArUco::ArUco(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) { ArUco::ArUco(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0)); dictionary_ = cv::aruco::getPredefinedDictionary(
cfg->value("dictionary", int(cv::aruco::DICT_4X4_50)));
params_ = cv::aruco::DetectorParameters::create(); params_ = cv::aruco::DetectorParameters::create();
params_->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR; params_->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
params_->cornerRefinementMinAccuracy = 0.01; params_->cornerRefinementMinAccuracy = 0.05;
params_->cornerRefinementMaxIterations = 20; params_->cornerRefinementMaxIterations = 10;
// default values 13, 23, 10, for speed just one thresholding window size
params_->adaptiveThreshWinSizeMin = 13;
params_->adaptiveThreshWinSizeMax = 23;
params_->adaptiveThreshWinSizeStep = 10;
channel_in_ = Channel::Colour; channel_in_ = Channel::Colour;
channel_out_ = Channel::Shapes3D; channel_out_ = Channel::Shapes3D;
cfg->on("dictionary", [this,cfg]() { cfg->on("dictionary", [this,cfg]() {
dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0)); dictionary_ = cv::aruco::getPredefinedDictionary(
cfg->value("dictionary", 0));
}); });
} }
...@@ -60,6 +67,7 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) { ...@@ -60,6 +67,7 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) {
estimate_pose_ = config()->value("estimate_pose", true); estimate_pose_ = config()->value("estimate_pose", true);
marker_size_ = config()->value("marker_size", 0.1f); marker_size_ = config()->value("marker_size", 0.1f);
job_ = ftl::pool.push([this, &in, &out](int) {
std::vector<Vec3d> rvecs; std::vector<Vec3d> rvecs;
std::vector<Vec3d> tvecs; std::vector<Vec3d> tvecs;
std::vector<std::vector<cv::Point2f>> corners; std::vector<std::vector<cv::Point2f>> corners;
...@@ -69,7 +77,7 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) { ...@@ -69,7 +77,7 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) {
FTL_Profile("ArUco", 0.02); FTL_Profile("ArUco", 0.02);
cv::cvtColor(in.get<cv::Mat>(channel_in_), tmp_, cv::COLOR_BGRA2GRAY); cv::cvtColor(in.get<cv::Mat>(channel_in_), tmp_, cv::COLOR_BGRA2GRAY);
const Mat K = in.getLeftCamera().getCameraMatrix(); const Mat K = in.getLeftCamera().getCameraMatrix(tmp_.size());
const Mat dist; const Mat dist;
cv::aruco::detectMarkers(tmp_, dictionary_, cv::aruco::detectMarkers(tmp_, dictionary_,
...@@ -97,5 +105,12 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) { ...@@ -97,5 +105,12 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) {
} }
out.create<list<Shape3D>>(channel_out_).list = result; out.create<list<Shape3D>>(channel_out_).list = result;
});
return true; return true;
} }
// Block until the asynchronous detection job scheduled by apply() has
// completed. Safe to call when no job has been scheduled yet: a
// default-constructed future is not valid and the call is then a no-op.
void ArUco::wait(cudaStream_t) {
	if (!job_.valid()) return;
	job_.wait();
}
\ No newline at end of file
...@@ -87,7 +87,7 @@ struct __align__(16) Camera { ...@@ -87,7 +87,7 @@ struct __align__(16) Camera {
*/ */
static Camera from(ftl::Configurable*); static Camera from(ftl::Configurable*);
cv::Mat getCameraMatrix() const; cv::Mat getCameraMatrix(const cv::Size& sz={0, 0}) const;
#endif #endif
}; };
......
...@@ -35,7 +35,8 @@ Camera Camera::from(ftl::Configurable *cfg) { ...@@ -35,7 +35,8 @@ Camera Camera::from(ftl::Configurable *cfg) {
return r; return r;
} }
cv::Mat Camera::getCameraMatrix() const { cv::Mat Camera::getCameraMatrix(const cv::Size& sz) const {
if (sz == cv::Size{0, 0}) {
cv::Mat K = cv::Mat::eye(cv::Size(3, 3), CV_64FC1); cv::Mat K = cv::Mat::eye(cv::Size(3, 3), CV_64FC1);
K.at<double>(0,0) = fx; K.at<double>(0,0) = fx;
K.at<double>(0,2) = -cx; K.at<double>(0,2) = -cx;
...@@ -43,6 +44,10 @@ cv::Mat Camera::getCameraMatrix() const { ...@@ -43,6 +44,10 @@ cv::Mat Camera::getCameraMatrix() const {
K.at<double>(1,2) = -cy; K.at<double>(1,2) = -cy;
return K; return K;
} }
else {
return scaled(sz.width, sz.height).getCameraMatrix();
}
}
/* /*
* Scale camera parameters to match resolution. * Scale camera parameters to match resolution.
......
...@@ -196,10 +196,10 @@ cv::Mat StereoRectification::cameraMatrix(Channel c) { ...@@ -196,10 +196,10 @@ cv::Mat StereoRectification::cameraMatrix(Channel c) {
} }
else { else {
if (c == Channel::Left) { if (c == Channel::Left) {
return calib_left_.intrinsic.matrix(); return CalibrationData::Intrinsic(calib_left_.intrinsic, image_resolution_).matrix();
} }
else if (c == Channel::Right) { else if (c == Channel::Right) {
return calib_right_.intrinsic.matrix(); return CalibrationData::Intrinsic(calib_right_.intrinsic, image_resolution_).matrix();
} }
} }
throw ftl::exception("Invalid channel, expected Left or Right"); throw ftl::exception("Invalid channel, expected Left or Right");
......
...@@ -329,6 +329,11 @@ bool OpenVRRender::retrieve(ftl::data::Frame &frame_out) { ...@@ -329,6 +329,11 @@ bool OpenVRRender::retrieve(ftl::data::Frame &frame_out) {
if (headset_origin.size() > 0) { if (headset_origin.size() > 0) {
ftl::operators::Poser::get(headset_origin, horigin); ftl::operators::Poser::get(headset_origin, horigin);
double headset_offset = host_->value("headset_offset_z", 0.0);
// move z-axis by offset
Eigen::Vector3d offset =
horigin.block<3, 3>(0, 0)*Eigen::Vector3d(0.0, 0.0, headset_offset);
horigin.block<3, 1>(0, 3) -= offset;
} }
Eigen::Matrix4d new_pose = horigin*viewPose.inverse(); Eigen::Matrix4d new_pose = horigin*viewPose.inverse();
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment