From 40eecf54f0128d5e223999932f00fb2422fb86ca Mon Sep 17 00:00:00 2001
From: Sebastian Hahta <joseha@utu.fi>
Date: Fri, 21 Aug 2020 11:20:04 +0300
Subject: [PATCH] improve aruco detection speed

---
 applications/vision/src/main.cpp                   |  2 +-
 .../include/ftl/operators/detectandtrack.hpp       |  2 +
 components/operators/src/aruco.cpp                 | 74 +++++++++++--------
 3 files changed, 47 insertions(+), 31 deletions(-)

diff --git a/applications/vision/src/main.cpp b/applications/vision/src/main.cpp
index a8c8d9006..4f370c1a0 100644
--- a/applications/vision/src/main.cpp
+++ b/applications/vision/src/main.cpp
@@ -188,11 +188,11 @@ static void run(ftl::Configurable *root) {
 	root->on("quiet", quiet, false);
 
 	auto *pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline");
+	pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
 	pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
 	pipeline->append<ftl::operators::DepthChannel>("depth");  // Ensure there is a depth channel
 	//pipeline->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
 	pipeline->restore("vision_pipeline", { "clipping" });
-	pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
 
 	auto h = creator->onFrameSet([sender,outstream,&stats_count,&latency,&frames,&stats_time,pipeline,&encodable,&previous_encodable](const ftl::data::FrameSetPtr &fs) {
diff --git a/components/operators/include/ftl/operators/detectandtrack.hpp b/components/operators/include/ftl/operators/detectandtrack.hpp
index 2e5ce1713..0d8b063d3 100644
--- a/components/operators/include/ftl/operators/detectandtrack.hpp
+++ b/components/operators/include/ftl/operators/detectandtrack.hpp
@@ -121,6 +121,7 @@ class ArUco : public ftl::operators::Operator {
 	inline Operator::Type type() const override { return Operator::Type::OneToOne; }
 
 	bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override;
+	virtual void wait(cudaStream_t) override;
 
 	ftl::codecs::Channel channel_in_;
 	ftl::codecs::Channel channel_out_;
@@ -129,6 +130,7 @@ class ArUco : public ftl::operators::Operator {
 	bool estimate_pose_;
 	float marker_size_;
 	cv::Mat tmp_;
+	std::future<void> job_;
 
 	cv::Ptr<cv::aruco::Dictionary> dictionary_;
 	cv::Ptr<cv::aruco::DetectorParameters> params_;
diff --git a/components/operators/src/aruco.cpp b/components/operators/src/aruco.cpp
index 0d2873567..63381bfcf 100644
--- a/components/operators/src/aruco.cpp
+++ b/components/operators/src/aruco.cpp
@@ -40,17 +40,23 @@ static Eigen::Matrix4d matrix(cv::Vec3d &rvec, cv::Vec3d &tvec) {
 }
 
 ArUco::ArUco(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
-	dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0));
+	dictionary_ = cv::aruco::getPredefinedDictionary(
+		cfg->value("dictionary", int(cv::aruco::DICT_4X4_50)));
 	params_ = cv::aruco::DetectorParameters::create();
 	params_->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
 	params_->cornerRefinementMinAccuracy = 0.01;
 	params_->cornerRefinementMaxIterations = 20;
 
+	params_->adaptiveThreshWinSizeMin = 7;
+	params_->adaptiveThreshWinSizeMax = 17;
+	params_->adaptiveThreshWinSizeStep = 10;
+
 	channel_in_ = Channel::Colour;
 	channel_out_ = Channel::Shapes3D;
 
 	cfg->on("dictionary", [this,cfg]() {
-		dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0));
+		dictionary_ = cv::aruco::getPredefinedDictionary(
+			cfg->value("dictionary", 0));
 	});
 }
 
@@ -60,42 +66,50 @@ bool ArUco::apply(Frame &in,
 		Frame &out, cudaStream_t) {
 	estimate_pose_ = config()->value("estimate_pose", true);
 	marker_size_ = config()->value("marker_size", 0.1f);
 
-	std::vector<Vec3d> rvecs;
-	std::vector<Vec3d> tvecs;
-	std::vector<std::vector<cv::Point2f>> corners;
-	std::vector<int> ids;
+	job_ = ftl::pool.push([this, &in, &out](int) {
+		std::vector<Vec3d> rvecs;
+		std::vector<Vec3d> tvecs;
+		std::vector<std::vector<cv::Point2f>> corners;
+		std::vector<int> ids;
 
-	{
-		FTL_Profile("ArUco", 0.02);
-		cv::cvtColor(in.get<cv::Mat>(channel_in_), tmp_, cv::COLOR_BGRA2GRAY);
+		{
+			FTL_Profile("ArUco", 0.02);
+			cv::cvtColor(in.get<cv::Mat>(channel_in_), tmp_, cv::COLOR_BGRA2GRAY);
 
-		const Mat K = in.getLeftCamera().getCameraMatrix();
-		const Mat dist;
+			const Mat K = in.getLeftCamera().getCameraMatrix();
+			const Mat dist;
 
-		cv::aruco::detectMarkers(tmp_, dictionary_, corners, ids, params_, cv::noArray(), K, dist);
+			cv::aruco::detectMarkers(tmp_, dictionary_, corners, ids, params_, cv::noArray(), K, dist);
 
-		if (estimate_pose_) {
-			cv::aruco::estimatePoseSingleMarkers(corners, marker_size_, K, dist, rvecs, tvecs);
+			if (estimate_pose_) {
+				cv::aruco::estimatePoseSingleMarkers(corners, marker_size_, K, dist, rvecs, tvecs);
+			}
 		}
-	}
 
-	list<Shape3D> result;
-	if (out.hasChannel(channel_out_)) {
-		result = out.get<list<Shape3D>>(channel_out_);
-	}
+		list<Shape3D> result;
+		if (out.hasChannel(channel_out_)) {
+			result = out.get<list<Shape3D>>(channel_out_);
+		}
 
-	for (size_t i = 0; i < rvecs.size(); i++) {
-		if (estimate_pose_) {
-			auto &t = result.emplace_back();
-			t.id = ids[i];
-			t.type = ftl::codecs::Shape3DType::ARUCO;
-			t.pose = (in.getPose() * matrix(rvecs[i], tvecs[i])).cast<float>();
-			t.size = Eigen::Vector3f(1.0f, 1.0f, 0.0f)*marker_size_;
-			t.label = "Aruco-" + std::to_string(ids[i]);
+		for (size_t i = 0; i < rvecs.size(); i++) {
+			if (estimate_pose_) {
+				auto &t = result.emplace_back();
+				t.id = ids[i];
+				t.type = ftl::codecs::Shape3DType::ARUCO;
+				t.pose = (in.getPose() * matrix(rvecs[i], tvecs[i])).cast<float>();
+				t.size = Eigen::Vector3f(1.0f, 1.0f, 0.0f)*marker_size_;
+				t.label = "Aruco-" + std::to_string(ids[i]);
+			}
 		}
-	}
 
-	out.create<list<Shape3D>>(channel_out_).list = result;
+		out.create<list<Shape3D>>(channel_out_).list = result;
+	});
 	return true;
 }
+
+void ArUco::wait(cudaStream_t) {
+	if (job_.valid()) {
+		job_.wait();
+	}
+}
\ No newline at end of file
--
GitLab
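
Not part of the patch: the following is a minimal, self-contained sketch of the pattern the diff introduces, i.e. launching the ArUco detection on a worker thread in apply() and blocking on the stored std::future in wait(). It uses std::async as a stand-in for ftl::pool.push and a bare cv::Mat in place of the frame/channel plumbing, so the class and method names (AsyncArucoDetector, start) are illustrative only, not part of the FTL codebase.

// Illustrative sketch only: std::async stands in for ftl::pool.push, and a
// plain cv::Mat replaces ftl::rgbd::Frame channels used by the real operator.
#include <future>
#include <vector>
#include <opencv2/aruco.hpp>
#include <opencv2/imgproc.hpp>

class AsyncArucoDetector {
public:
	AsyncArucoDetector()
		: dictionary_(cv::aruco::getPredefinedDictionary(cv::aruco::DICT_4X4_50)),
		  params_(cv::aruco::DetectorParameters::create()) {}

	// Analogue of apply(): kick off detection in the background and return immediately.
	void start(const cv::Mat &bgra) {
		job_ = std::async(std::launch::async, [this, bgra]() {
			cv::Mat grey;
			cv::cvtColor(bgra, grey, cv::COLOR_BGRA2GRAY);
			cv::aruco::detectMarkers(grey, dictionary_, corners_, ids_, params_);
		});
	}

	// Analogue of wait(): block until the background detection has finished.
	void wait() {
		if (job_.valid()) job_.wait();
	}

	// Results are only safe to read after wait() has returned.
	const std::vector<int> &ids() const { return ids_; }

private:
	cv::Ptr<cv::aruco::Dictionary> dictionary_;
	cv::Ptr<cv::aruco::DetectorParameters> params_;
	std::vector<std::vector<cv::Point2f>> corners_;
	std::vector<int> ids_;
	std::future<void> job_;
};

As in the patch, the results written by the background job must not be consumed before wait() is called; moving the operator to the front of the pipeline gives the detection the longest possible overlap with the rest of the frame processing.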