
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Target project: nicolaspope/ftl
Showing 374 additions and 262 deletions
......@@ -6,6 +6,7 @@
#include <opencv2/calib3d.hpp>
#define LOGURU_REPLACE_GLOG 1
#include <ftl/profiler.hpp>
#include <loguru.hpp>
using ftl::operators::ArUco;
......@@ -19,6 +20,7 @@ using cv::Point2f;
using cv::Vec3d;
using std::vector;
using std::list;
static cv::Mat rmat(cv::Vec3d &rvec) {
cv::Mat R(cv::Size(3, 3), CV_64FC1);
......@@ -37,70 +39,58 @@ static Eigen::Matrix4d matrix(cv::Vec3d &rvec, cv::Vec3d &tvec) {
return r;
}
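For context, the bodies of rmat() and matrix() are elided by the hunk above; a minimal sketch of what such helpers typically do, assuming cv::Rodrigues is used to expand the rotation vector (an illustration, not the elided implementation):

static Eigen::Matrix4d pose_from_rvec_tvec(const cv::Vec3d &rvec, const cv::Vec3d &tvec) {
	cv::Mat R;
	cv::Rodrigues(rvec, R);                        // 3x3 rotation matrix, CV_64FC1

	Eigen::Matrix4d r = Eigen::Matrix4d::Identity();
	for (int i = 0; i < 3; ++i) {
		for (int j = 0; j < 3; ++j) r(i, j) = R.at<double>(i, j);
		r(i, 3) = tvec[i];                         // translation in the last column
	}
	return r;
}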
ArUco::ArUco(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0));
ArUco::ArUco(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
dictionary_ = cv::aruco::getPredefinedDictionary(
cfg->value("dictionary", int(cv::aruco::DICT_4X4_50)));
params_ = cv::aruco::DetectorParameters::create();
params_->cornerRefinementMethod = cv::aruco::CORNER_REFINE_CONTOUR;
params_->cornerRefinementMinAccuracy = 0.05;
params_->cornerRefinementMaxIterations = 10;
debug_ = cfg->value("debug", false);
//estimate_pose_ = cfg->value("estimate_pose", false);
//auto marker_size = cfg->get<float>("marker_size");
//if (!marker_size || (*marker_size <= 0.0f)) {
// marker_size_ = 0.1f;
// estimate_pose_ = false;
//}
//else {
// marker_size_ = *marker_size;
//}
// default values 13, 23, 10, for speed just one thresholding window size
params_->adaptiveThreshWinSizeMin = 13;
params_->adaptiveThreshWinSizeMax = 23;
params_->adaptiveThreshWinSizeStep = 10;
channel_in_ = Channel::Colour;
channel_out_ = Channel::Shapes3D;
cfg->on("dictionary", [this,cfg](const ftl::config::Event &e) {
dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0));
cfg->on("dictionary", [this,cfg]() {
dictionary_ = cv::aruco::getPredefinedDictionary(
cfg->value("dictionary", 0));
});
}
bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
bool ArUco::apply(Frame &in, Frame &out, cudaStream_t) {
if (!in.hasChannel(channel_in_)) { return false; }
Frame *inptr = &in;
Frame *outptr = &out;
estimate_pose_ = config()->value("estimate_pose", true);
debug_ = config()->value("debug", false);
marker_size_ = config()->value("marker_size", 0.1f);
job_ = std::move(ftl::pool.push([this,inptr,outptr,stream](int id) {
Frame &in = *inptr;
Frame &out = *outptr;
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
//in.download(channel_in_);
//Mat im = in.get<Mat>(channel_in_);
// FIXME: Use internal stream here.
Mat im; // = in.fastDownload(channel_in_, cv::cuda::Stream::Null());
cv::cvtColor(in.fastDownload(channel_in_, cv::cuda::Stream::Null()), im, cv::COLOR_BGRA2BGR);
Mat K = in.getLeftCamera().getCameraMatrix();
Mat dist = cv::Mat::zeros(cv::Size(5, 1), CV_64FC1);
job_ = ftl::pool.push([this, &in, &out](int) {
std::vector<Vec3d> rvecs;
std::vector<Vec3d> tvecs;
std::vector<std::vector<cv::Point2f>> corners;
std::vector<int> ids;
cv::aruco::detectMarkers( im, dictionary_,
corners, ids, params_, cv::noArray(), K);
{
FTL_Profile("ArUco", 0.02);
cv::cvtColor(in.get<cv::Mat>(channel_in_), tmp_, cv::COLOR_BGRA2GRAY);
std::vector<Vec3d> rvecs;
std::vector<Vec3d> tvecs;
const Mat K = in.getLeftCamera().getCameraMatrix(tmp_.size());
const Mat dist;
cv::aruco::detectMarkers(tmp_, dictionary_,
corners, ids, params_, cv::noArray(), K, dist);
if (estimate_pose_) {
cv::aruco::estimatePoseSingleMarkers(corners, marker_size_, K, dist, rvecs, tvecs);
}
}
vector<Shape3D> result;
list<Shape3D> result;
if (out.hasChannel(channel_out_)) {
out.get(channel_out_, result);
result = out.get<list<Shape3D>>(channel_out_);
}
for (size_t i = 0; i < rvecs.size(); i++) {
......@@ -109,39 +99,18 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) {
t.id = ids[i];
t.type = ftl::codecs::Shape3DType::ARUCO;
t.pose = (in.getPose() * matrix(rvecs[i], tvecs[i])).cast<float>();
t.size = Eigen::Vector3f(0.1f,0.1f,0.1f);
t.label = "Aruco";
t.size = Eigen::Vector3f(1.0f, 1.0f, 0.0f)*marker_size_;
t.label = "Aruco-" + std::to_string(ids[i]);
}
}
out.create(channel_out_, result);
if (debug_) {
cv::aruco::drawDetectedMarkers(im, corners, ids);
if (estimate_pose_) {
for (size_t i = 0; i < rvecs.size(); i++) {
cv::aruco::drawAxis(im, K, dist, rvecs[i], tvecs[i], marker_size_);
}
}
}
// TODO: should be uploaded by operator which requires data on GPU
//in.upload(channel_in_);
if (debug_) {
if (in.isGPU(channel_in_)) {
cv::cvtColor(im, im, cv::COLOR_BGR2BGRA);
out.get<cv::cuda::GpuMat>(channel_in_).upload(im);
} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
}
return true;
}));
out.create<list<Shape3D>>(channel_out_).list = result;
});
return true;
}
void ArUco::wait(cudaStream_t s) {
void ArUco::wait(cudaStream_t) {
if (job_.valid()) {
job_.wait();
job_.get();
}
}
\ No newline at end of file
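Marker detection now runs asynchronously: apply() queues the work on the shared thread pool and wait() synchronises on the stored future before the result is consumed. A minimal sketch of that pattern, assuming ftl::pool is a ctpl-style pool whose push() returns a std::future:

std::future<void> job = ftl::pool.push([&](int /*worker id*/) {
	// ... cv::aruco::detectMarkers / estimatePoseSingleMarkers on the frame ...
});

// Later, before downstream operators read Channel::Shapes3D:
if (job.valid()) {
	job.get();   // blocks until detection finishes; rethrows any exception
}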
......@@ -10,7 +10,7 @@ using ftl::operators::ClipScene;
using ftl::codecs::Channel;
using ftl::rgbd::Format;
ClipScene::ClipScene(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
ClipScene::ClipScene(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -59,16 +59,16 @@ bool ClipScene::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStr
bool no_clip = config()->value("no_clip", false);
bool clip_colour = config()->value("clip_colour", false);
std::vector<ftl::codecs::Shape3D> shapes;
std::list<ftl::codecs::Shape3D> shapes;
if (in.hasChannel(Channel::Shapes3D)) {
in.get(Channel::Shapes3D, shapes);
shapes = in.get<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D);
}
shapes.push_back(shape);
in.create(Channel::Shapes3D, shapes);
in.create<std::list<ftl::codecs::Shape3D>>(Channel::Shapes3D).list = shapes;
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i];
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
//auto *s = in.sources[i];
if (f.hasChannel(Channel::Depth)) {
......@@ -77,12 +77,12 @@ bool ClipScene::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStr
auto sclip = clip;
sclip.origin = sclip.origin.getInverse() * pose;
if (!no_clip) {
if (clip_colour) {
f.clearPackets(Channel::Colour);
f.clearPackets(Channel::Depth);
if (clip_colour && f.hasChannel(Channel::Colour)) {
f.set<ftl::rgbd::VideoFrame>(Channel::Colour);
f.set<ftl::rgbd::VideoFrame>(Channel::Depth);
ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), f.getTexture<uchar4>(Channel::Colour), f.getLeftCamera(), sclip, stream);
} else {
f.clearPackets(Channel::Depth);
f.set<ftl::rgbd::VideoFrame>(Channel::Depth);
ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), f.getLeftCamera(), sclip, stream);
}
}
......
......@@ -6,7 +6,7 @@
using ftl::operators::ColourChannels;
using ftl::codecs::Channel;
ColourChannels::ColourChannels(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
ColourChannels::ColourChannels(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -15,6 +15,11 @@ ColourChannels::~ColourChannels() {
}
bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
if (!in.hasChannel(Channel::Colour)) {
in.message(ftl::data::Message::Warning_MISSING_CHANNEL, "No colour channel found");
return false;
}
auto &col = in.get<cv::cuda::GpuMat>(Channel::Colour);
// Convert colour from BGR to BGRA if needed
......@@ -25,6 +30,7 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStre
cv::cuda::swap(col, temp_);
cv::cuda::cvtColor(temp_,col, cv::COLOR_BGR2BGRA, 0, cvstream);*/
in.message(ftl::data::Message::Error_BAD_FORMAT, "Bad colour format");
throw FTL_Error("Left colour must be 4 channels");
}
......@@ -39,17 +45,25 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStre
cv::cuda::swap(col, temp_);
cv::cuda::cvtColor(temp_,col, cv::COLOR_BGR2BGRA, 0, cvstream);*/
in.message(ftl::data::Message::Error_BAD_FORMAT, "Bad colour format");
throw FTL_Error("Right colour must be 4 channels");
}
}
//in.resetTexture(Channel::Colour);
const auto &vf = in.get<ftl::rgbd::VideoFrame>(Channel::Colour);
if (vf.isGPU()) {
in.createTexture<uchar4>(Channel::Colour, true);
}
if (in.hasChannel(Channel::Right)) {
const auto &vf = in.get<ftl::rgbd::VideoFrame>(Channel::Right);
if (vf.isGPU()) {
in.createTexture<uchar4>(Channel::Right, true);
}
}
if (in.hasChannel(Channel::Depth)) {
/*if (in.hasChannel(Channel::Depth)) {
auto &depth = in.get<cv::cuda::GpuMat>(Channel::Depth);
if (depth.size() != col.size()) {
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
......@@ -71,7 +85,7 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStre
throw FTL_Error("Depth and colour channels and different resolutions: " << depth.size() << " vs " << right.size());
}
}
}
}*/
return true;
}
......@@ -9,6 +9,7 @@
#include "ftl/operators/depth.hpp"
#include "ftl/operators/mask.hpp"
#include "ftl/operators/opticalflow.hpp"
#include <ftl/calibration/structures.hpp>
#include "./disparity/opencv/disparity_bilateral_filter.hpp"
......@@ -48,8 +49,8 @@ static void calc_space_weighted_filter(GpuMat& table_space, int win_size, float
// ==== Depth Bilateral Filter =================================================
DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
ftl::operators::Operator(cfg) {
DepthBilateralFilter::DepthBilateralFilter(ftl::operators::Graph *g, ftl::Configurable* cfg) :
ftl::operators::Operator(g, cfg) {
scale_ = 16.0;
radius_ = cfg->value("radius", 7);
......@@ -59,10 +60,10 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
max_disc_ = cfg->value("max_discontinuity", 0.1f);
channel_ = Channel::Depth;
cfg->on("edge_discontinuity", [this](const ftl::config::Event &e) {
cfg->on("edge_discontinuity", [this]() {
edge_disc_ = config()->value("edge_discontinuity", 0.04f);
});
cfg->on("max_discontinuity", [this](const ftl::config::Event &e) {
cfg->on("max_discontinuity", [this]() {
max_disc_ = config()->value("max_discontinuity", 0.1f);
});
......@@ -71,8 +72,8 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg) :
calc_space_weighted_filter(table_space_, radius_ * 2 + 1, radius_ + 1.0f);
}
DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg, const std::tuple<ftl::codecs::Channel> &p) :
ftl::operators::Operator(cfg) {
DepthBilateralFilter::DepthBilateralFilter(ftl::operators::Graph *g, ftl::Configurable* cfg, const std::tuple<ftl::codecs::Channel> &p) :
ftl::operators::Operator(g, cfg) {
scale_ = 16.0;
radius_ = cfg->value("radius", 7);
......@@ -82,10 +83,10 @@ DepthBilateralFilter::DepthBilateralFilter(ftl::Configurable* cfg, const std::tu
max_disc_ = cfg->value("max_discontinuity", 0.1f);
channel_ = std::get<0>(p);
cfg->on("edge_discontinuity", [this](const ftl::config::Event &e) {
cfg->on("edge_discontinuity", [this]() {
edge_disc_ = config()->value("edge_discontinuity", 0.04f);
});
cfg->on("max_discontinuity", [this](const ftl::config::Event &e) {
cfg->on("max_discontinuity", [this]() {
max_disc_ = config()->value("max_discontinuity", 0.1f);
});
......@@ -105,7 +106,7 @@ bool DepthBilateralFilter::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out,
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
const GpuMat &rgb = in.get<GpuMat>(Channel::Colour);
GpuMat &depth = in.get<GpuMat>(channel_);
const GpuMat &depth = in.get<GpuMat>(channel_);
UNUSED(rgb);
UNUSED(depth);
......@@ -123,25 +124,28 @@ bool DepthBilateralFilter::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out,
// =============================================================================
DepthChannel::DepthChannel(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
DepthChannel::DepthChannel(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
pipe_ = nullptr;
}
DepthChannel::~DepthChannel() {
if (pipe_) delete pipe_;
}
void DepthChannel::_createPipeline(size_t size) {
if (pipe_ != nullptr) return;
pipe_ = ftl::config::create<ftl::operators::Graph>(config(), "depth");
depth_size_ = cv::Size( config()->value("width", 1280),
config()->value("height", 720));
//depth_size_ = cv::Size( config()->value("width", 1280),
// config()->value("height", 720));
depth_size_ = cv::Size(0,0);
pipe_->append<ftl::operators::ColourChannels>("colour"); // Convert BGR to BGRA
pipe_->append<ftl::operators::CrossSupport>("cross");
#ifdef HAVE_OPTFLOW
pipe_->append<ftl::operators::NVOpticalFlow>("optflow", Channel::Colour, Channel::Flow, Channel::Colour2, Channel::Flow2);
// FIXME: OpenCV Nvidia OptFlow has a horrible implementation that causes device syncs
//pipe_->append<ftl::operators::NVOpticalFlow>("optflow", Channel::Colour, Channel::Flow, Channel::Colour2, Channel::Flow2);
//if (size == 1) pipe_->append<ftl::operators::OpticalFlowTemporalSmoothing>("optflow_filter", Channel::Disparity);
#endif
#ifdef HAVE_LIBSGM
......@@ -168,33 +172,34 @@ bool DepthChannel::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
rbuf_.resize(in.frames.size());
int valid_count = 0;
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i];
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
if (!f.hasChannel(Channel::Depth) && f.hasChannel(Channel::Right)) {
_createPipeline(in.frames.size());
cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
depth.create(left.size(), CV_32FC1);
if (f.hasChannel(Channel::CalibrationData)) {
auto &cdata = f.get<ftl::calibration::CalibrationData>(Channel::CalibrationData);
if (!cdata.enabled) continue;
}
const cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
const cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
if (left.empty() || right.empty()) continue;
/*if (depth_size_ != left.size()) {
auto &col2 = f.create<cv::cuda::GpuMat>(Channel::ColourHighRes);
cv::cuda::resize(left, col2, depth_size_, 0.0, 0.0, cv::INTER_CUBIC, cvstream);
f.createTexture<uchar4>(Channel::ColourHighRes, true);
f.swapChannels(Channel::Colour, Channel::ColourHighRes);
}
if (depth_size_ != right.size()) {
cv::cuda::resize(right, rbuf_[i], depth_size_, 0.0, 0.0, cv::INTER_CUBIC, cvstream);
cv::cuda::swap(right, rbuf_[i]);
}*/
cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
pipe_->apply(f, f, stream);
const auto &intrin = f.getLeft();
depth.create(intrin.height, intrin.width, CV_32FC1);
++valid_count;
}
}
if (valid_count > 0) {
_createPipeline(in.frames.size());
pipe_->apply(in, out);
}
return true;
......@@ -207,28 +212,21 @@ bool DepthChannel::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream
auto &f = in;
if (!f.hasChannel(Channel::Depth) && f.hasChannel(Channel::Right)) {
_createPipeline(1);
cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
depth.create(depth_size_, CV_32FC1);
if (f.hasChannel(Channel::CalibrationData)) {
auto &cdata = f.get<ftl::calibration::CalibrationData>(Channel::CalibrationData);
if (!cdata.enabled) return true;
}
const cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
const cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
if (left.empty() || right.empty()) return false;
/*if (depth_size_ != left.size()) {
auto &col2 = f.create<cv::cuda::GpuMat>(Channel::ColourHighRes);
cv::cuda::resize(left, col2, depth_size_, 0.0, 0.0, cv::INTER_CUBIC, cvstream);
f.createTexture<uchar4>(Channel::ColourHighRes, true);
f.swapChannels(Channel::Colour, Channel::ColourHighRes);
}
_createPipeline(1);
if (depth_size_ != right.size()) {
cv::cuda::resize(right, rbuf_[i], depth_size_, 0.0, 0.0, cv::INTER_CUBIC, cvstream);
cv::cuda::swap(right, rbuf_[i]);
}*/
cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
depth.create(depth_size_, CV_32FC1);
pipe_->apply(f, f, stream);
pipe_->apply(f, f);
}
return true;
......
......@@ -20,7 +20,7 @@ using ftl::codecs::Channel;
using ftl::rgbd::Frame;
using ftl::operators::DetectAndTrack;
DetectAndTrack::DetectAndTrack(ftl::Configurable *cfg) : ftl::operators::Operator(cfg), detecting_(false) {
DetectAndTrack::DetectAndTrack(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg), detecting_(false) {
init();
}
......@@ -28,7 +28,7 @@ bool DetectAndTrack::init() {
fname_ = config()->value<string>("filename", FTL_LOCAL_DATA_ROOT "/haarcascades/haarcascade_frontalface_default.xml");
debug_ = config()->value<bool>("debug", false);
config()->on("debug", [this](const ftl::config::Event &e) {
config()->on("debug", [this]() {
debug_ = config()->value<bool>("debug", false);
});
......@@ -229,7 +229,7 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
Mat im; // TODO: Keep this as an internal buffer? Perhaps page locked.
// FIXME: Use internal stream here.
cv::cvtColor(in.fastDownload(channel_in_, cv::cuda::Stream::Null()), im, cv::COLOR_BGRA2BGR);
cv::cvtColor(in.get<cv::Mat>(channel_in_), im, cv::COLOR_BGRA2BGR);
if (im.empty()) {
throw FTL_Error("Empty image in face detection");
......@@ -272,7 +272,7 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
cv::Mat depth;
if (in.hasChannel(Channel::Depth)) {
depth = in.fastDownload(Channel::Depth, cv::cuda::Stream::Null());
depth = in.get<cv::Mat>(Channel::Depth);
}
std::vector<ftl::codecs::Face> result;
......@@ -292,15 +292,15 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
cv::rectangle(im, tracked.object, cv::Scalar(0, 0, 255), 1);
}
}
out.create(channel_out_, result);
out.create<std::vector<ftl::codecs::Face>>(channel_out_) = result;
//in.upload(channel_in_);
// FIXME: This is a bad idea.
if (debug_) {
if (in.isGPU(channel_in_)) {
//if (in.isGPU(channel_in_)) {
cv::cvtColor(im, im, cv::COLOR_BGR2BGRA);
out.get<cv::cuda::GpuMat>(channel_in_).upload(im);
} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
out.set<cv::cuda::GpuMat>(channel_in_).upload(im);
//} else cv::cvtColor(im, in.get<cv::Mat>(channel_in_), cv::COLOR_BGR2BGRA);
}
return true;
......
......@@ -4,15 +4,17 @@
#include <ftl/operators/cuda/disparity.hpp>
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudawarping.hpp>
using cv::cuda::GpuMat;
using cv::Size;
using ftl::codecs::Channel;
using ftl::operators::DisparityBilateralFilter;
using ftl::operators::Buffer;
DisparityBilateralFilter::DisparityBilateralFilter(ftl::Configurable* cfg) :
ftl::operators::Operator(cfg) {
DisparityBilateralFilter::DisparityBilateralFilter(ftl::operators::Graph *g, ftl::Configurable* cfg) :
ftl::operators::Operator(g, cfg) {
scale_ = 16.0;
n_disp_ = cfg->value("n_disp", 256);
......@@ -27,14 +29,16 @@ bool DisparityBilateralFilter::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out
if (!in.hasChannel(Channel::Colour)) {
throw FTL_Error("Joint Bilateral Filter is missing Colour");
return false;
} else if (!in.hasChannel(Channel::Disparity)) {
}
if (!graph()->hasBuffer(Buffer::Disparity, in.source())) {
// Have depth, so calculate disparity...
if (in.hasChannel(Channel::Depth)) {
// No disparity, so create it.
const auto params = in.getLeftCamera();
const GpuMat &depth = in.get<GpuMat>(Channel::Depth);
GpuMat &disp = out.create<GpuMat>(Channel::Disparity);
GpuMat &disp = graph()->createBuffer(Buffer::Disparity, in.source());
disp.create(depth.size(), CV_32FC1);
//LOG(ERROR) << "Calculated disparity from depth";
......@@ -56,14 +60,31 @@ bool DisparityBilateralFilter::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
const GpuMat &rgb = in.get<GpuMat>(Channel::Colour);
GpuMat &disp_in = in.get<GpuMat>(Channel::Disparity);
GpuMat &disp_out = out.create<GpuMat>(Channel::Disparity);
//const GpuMat &disp_in = in.get<GpuMat>(Channel::Disparity);
//GpuMat &disp_out = out.create<GpuMat>(Channel::Disparity);
GpuMat disp_in = graph()->getBuffer(Buffer::Disparity, in.source());
disp_int_.create(disp_in.size(), disp_in.type());
GpuMat rgb_buf;
if (rgb.size() != disp_in.size()) {
if (graph()->hasBuffer(Buffer::LowLeft, in.source())) {
rgb_buf = graph()->getBuffer(Buffer::LowLeft, in.source());
} else {
auto &t = graph()->createBuffer(Buffer::LowLeft, in.source());
cv::cuda::resize(rgb, t, disp_in.size(), 0, 0, cv::INTER_LINEAR, cvstream);
rgb_buf = t;
}
} else {
rgb_buf = rgb;
}
//LOG(INFO) << "DISP = " << disp_in.size() << "," << disp_in.type() << " - RGBBUF = " << rgb_buf.size() << "," << rgb_buf.type() << " - RGB = " << rgb.size() << "," << rgb.type();
//disp_in.convertTo(disp_int_, CV_16SC1, scale_, cvstream);
//cv::cuda::cvtColor(rgb, bw_, cv::COLOR_BGRA2GRAY, 0, cvstream);
filter_->apply(disp_in, rgb, disp_int_, cvstream);
cv::cuda::swap(disp_out, disp_int_);
filter_->apply(disp_in, rgb_buf, disp_int_, cvstream);
cv::cuda::swap(disp_in, disp_int_);
//disp_int_result_.convertTo(disp_out, disp_in.type(), 1.0/scale_, cvstream);
return true;
}
\ No newline at end of file
......@@ -3,17 +3,18 @@
using ftl::operators::DisparityToDepth;
using ftl::codecs::Channel;
using ftl::operators::Buffer;
using cv::cuda::GpuMat;
bool DisparityToDepth::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out,
cudaStream_t stream) {
if (!in.hasChannel(Channel::Disparity)) {
if (!graph()->hasBuffer(Buffer::Disparity, in.source())) {
throw FTL_Error("Missing disparity before convert to depth");
}
const GpuMat &disp = in.get<GpuMat>(Channel::Disparity);
const GpuMat &disp = graph()->getBuffer(Buffer::Disparity, in.source());
const auto params = in.getLeftCamera().scaled(disp.cols, disp.rows);
GpuMat &depth = out.create<GpuMat>(Channel::Depth);
......
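The actual conversion kernel call is cut off by the hunk; what it presumably applies per pixel is the standard stereo relation depth = (fx * baseline) / disparity, with intrinsics taken from the scaled camera parameters above. A hypothetical per-pixel sketch, for illustration only:

__device__ inline float disparity_to_depth(float disp, float fx, float baseline) {
	// Invalid or zero disparity maps to an invalid (0) depth.
	return (disp > 1e-5f) ? (fx * baseline) / disp : 0.0f;
}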
......@@ -6,6 +6,7 @@
#include <opencv2/cudaimgproc.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudafilters.hpp>
#include <opencv2/cudawarping.hpp>
using cv::Size;
using cv::cuda::GpuMat;
......@@ -15,29 +16,25 @@ using ftl::codecs::Channel;
using ftl::rgbd::Frame;
using ftl::rgbd::Source;
using ftl::operators::FixstarsSGM;
using ftl::operators::Buffer;
static void variance_mask(cv::InputArray in, cv::OutputArray out, int wsize, cv::cuda::Stream &cvstream) {
void FixstarsSGM::_variance_mask(cv::InputArray in, cv::OutputArray out, int wsize, cv::cuda::Stream &cvstream) {
if (in.isGpuMat() && out.isGpuMat()) {
cv::cuda::GpuMat im;
cv::cuda::GpuMat im2;
cv::cuda::GpuMat mean;
cv::cuda::GpuMat mean2;
mean.create(in.size(), CV_32FC1);
mean2.create(in.size(), CV_32FC1);
im2.create(in.size(), CV_32FC1);
in.getGpuMat().convertTo(im, CV_32FC1, cvstream);
cv::cuda::multiply(im, im, im2, 1.0, CV_32FC1, cvstream);
auto filter = cv::cuda::createBoxFilter(CV_32FC1, CV_32FC1, cv::Size(wsize,wsize));
filter->apply(im, mean, cvstream); // E[X]
filter->apply(im2, mean2, cvstream); // E[X^2]
cv::cuda::multiply(mean, mean, mean, 1.0, -1, cvstream); // (E[X])^2
mean_.create(in.size(), CV_32FC1);
mean2_.create(in.size(), CV_32FC1);
im2_.create(in.size(), CV_32FC1);
in.getGpuMat().convertTo(im_, CV_32FC1, cvstream);
cv::cuda::multiply(im_, im_, im2_, 1.0, CV_32FC1, cvstream);
if (!filter_) filter_ = cv::cuda::createBoxFilter(CV_32FC1, CV_32FC1, cv::Size(wsize,wsize));
filter_->apply(im_, mean_, cvstream); // E[X]
filter_->apply(im2_, mean2_, cvstream); // E[X^2]
cv::cuda::multiply(mean_, mean_, mean_, 1.0, -1, cvstream); // (E[X])^2
// NOTE: floating point accuracy in subtraction
// (cv::cuda::createBoxFilter only supports float and 8 bit integer types)
cv::cuda::subtract(mean2, mean, out.getGpuMatRef(), cv::noArray(), -1, cvstream); // E[X^2] - (E[X])^2
cv::cuda::subtract(mean2_, mean_, out.getGpuMatRef(), cv::noArray(), -1, cvstream); // E[X^2] - (E[X])^2
}
else { throw std::exception(); /* todo CPU version */ }
}
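For reference, the identity behind the two box-filter passes in _variance_mask(), and the source of the floating-point cancellation flagged in the NOTE above:

	Var(X) = E[X^2] - (E[X])^2

When E[X^2] and (E[X])^2 are large and nearly equal (low-variance regions), the subtraction loses precision in 32-bit floats.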
......@@ -56,8 +53,8 @@ void FixstarsSGM::computeP2(cudaStream_t &stream) {
}
}
FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
ftl::operators::Operator(cfg) {
FixstarsSGM::FixstarsSGM(ftl::operators::Graph *g, ftl::Configurable* cfg) :
ftl::operators::Operator(g, cfg) {
ssgm_ = nullptr;
size_ = Size(0, 0);
......@@ -65,7 +62,7 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
uniqueness_ = cfg->value("uniqueness", 0.95f);
P1_ = cfg->value("P1", 10);
P2_ = cfg->value("P2", 120);
max_disp_ = cfg->value("max_disp", 256);
max_disp_ = cfg->value("num_disp", 256);
if (uniqueness_ < 0.0 || uniqueness_ > 1.0) {
uniqueness_ = 1.0;
......@@ -82,12 +79,12 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
LOG(WARNING) << "Invalid value for P2, using value of P1 (" << P1_ << ")";
}
if (!(max_disp_ == 256 || max_disp_ == 128)) {
if (!(max_disp_ == 256 || max_disp_ == 128 || max_disp_ == 192)) {
max_disp_ = 256;
LOG(WARNING) << "Invalid value for max_disp, using default value (256)";
}
cfg->on("P1", [this, cfg](const ftl::config::Event&) {
cfg->on("P1", [this, cfg]() {
int P1 = cfg->value("P1", 0);
if (P1 <= 0) {
LOG(WARNING) << "Invalid value for P1 (" << P1 << ")";
......@@ -98,7 +95,7 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
}
});
cfg->on("P2", [this, cfg](const ftl::config::Event&) {
cfg->on("P2", [this, cfg]() {
int P2 = cfg->value("P2", 0);
if (P2 < P1_) {
LOG(WARNING) << "Invalid value for P2 (" << P2 << ")";
......@@ -109,7 +106,7 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
}
});
cfg->on("uniqueness", [this, cfg](const ftl::config::Event&) {
cfg->on("uniqueness", [this, cfg]() {
double uniqueness = cfg->value("uniqueness", 0.0);
if (uniqueness < 0.0 || uniqueness > 1.0) {
LOG(WARNING) << "Invalid value for uniqueness (" << uniqueness << ")";
......@@ -122,11 +119,11 @@ FixstarsSGM::FixstarsSGM(ftl::Configurable* cfg) :
updateP2Parameters();
cfg->on("canny_low", [this, cfg](const ftl::config::Event&) {
cfg->on("canny_low", [this, cfg]() {
updateP2Parameters();
});
cfg->on("canny_high", [this, cfg](const ftl::config::Event&) {
cfg->on("canny_high", [this, cfg]() {
updateP2Parameters();
});
}
......@@ -180,32 +177,48 @@ bool FixstarsSGM::apply(Frame &in, Frame &out, cudaStream_t stream) {
auto &l = in.get<GpuMat>(Channel::Left);
const auto &r = in.get<GpuMat>(Channel::Right);
if (l.size() != size_) {
size_ = l.size();
const auto &intrin = in.getLeft();
if (l.empty() || r.empty() || intrin.width == 0) {
LOG(ERROR) << "Missing data for Fixstars";
return false;
}
if (size_.width != intrin.width) {
size_ = cv::Size(intrin.width, intrin.height);
if (!init()) { return false; }
}
bool has_estimate = in.hasChannel(Channel::Disparity);
auto &disp = (!has_estimate) ? out.create<GpuMat>(Channel::Disparity, Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
bool has_estimate = graph()->hasBuffer(Buffer::Disparity, in.source()); //in.hasChannel(Channel::Disparity);
auto &disp = graph()->createBuffer(Buffer::Disparity, in.source());
disp.create(size_, CV_16SC1);
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
cv::cuda::cvtColor(l, lbw_, cv::COLOR_BGRA2GRAY, 0, cvstream);
cv::cuda::cvtColor(r, rbw_, cv::COLOR_BGRA2GRAY, 0, cvstream);
cv::cuda::cvtColor(l, lbw_full_, cv::COLOR_BGRA2GRAY, 0, cvstream);
cv::cuda::cvtColor(r, rbw_full_, cv::COLOR_BGRA2GRAY, 0, cvstream);
if (l.size() != size_) {
cv::cuda::resize(lbw_full_, lbw_, size_, 0, 0, cv::INTER_CUBIC, cvstream);
cv::cuda::resize(rbw_full_, rbw_, size_, 0, 0, cv::INTER_CUBIC, cvstream);
} else {
lbw_ = lbw_full_;
rbw_ = rbw_full_;
}
//cvstream.waitForCompletion();
computeP2(stream);
bool use_variance = config()->value("use_variance", true);
if (use_variance) {
variance_mask(lbw_, weightsF_, config()->value("var_wsize", 11), cvstream);
_variance_mask(lbw_, weightsF_, config()->value("var_wsize", 11), cvstream);
float minweight = std::min(1.0f, std::max(0.0f, config()->value("var_minweight", 0.5f)));
cv::cuda::normalize(weightsF_, weightsF_, minweight, 1.0, cv::NORM_MINMAX, -1, cv::noArray(), cvstream);
weightsF_.convertTo(weights_, CV_8UC1, 255.0f);
weightsF_.convertTo(weights_, CV_8UC1, 255.0f, cvstream);
//if ((int)P2_map_.step != P2_map_.cols) LOG(ERROR) << "P2 map step error: " << P2_map_.cols << "," << P2_map_.step;
ssgm_->execute(lbw_.data, rbw_.data, disp_int_.data, P2_map_.data, (uint8_t*) weights_.data, weights_.step1(), stream);
ssgm_->execute(lbw_.data, rbw_.data, disp_int_.data, P2_map_.data, (uint8_t*) weights_.data, weights_.step1(), config()->value("min_disp", 60), stream);
} else {
ssgm_->execute(lbw_.data, rbw_.data, disp_int_.data, P2_map_.data, nullptr, 0, stream);
ssgm_->execute(lbw_.data, rbw_.data, disp_int_.data, P2_map_.data, nullptr, 0, config()->value("min_disp", 60), stream);
}
// GpuMat left_pixels(dispt_, cv::Rect(0, 0, max_disp_, dispt_.rows));
......@@ -215,7 +228,7 @@ bool FixstarsSGM::apply(Frame &in, Frame &out, cudaStream_t stream) {
ftl::cuda::merge_disparities(disp_int_, disp, stream);
}
cv::cuda::threshold(disp_int_, disp, 4096.0f, 0.0f, cv::THRESH_TOZERO_INV, cvstream);
cv::cuda::threshold(disp_int_, disp, 16383.0f, 0.0f, cv::THRESH_TOZERO_INV, cvstream);
if (config()->value("check_reprojection", false)) {
ftl::cuda::check_reprojection(disp, in.getTexture<uchar4>(Channel::Colour),
......@@ -224,13 +237,13 @@ bool FixstarsSGM::apply(Frame &in, Frame &out, cudaStream_t stream) {
}
if (config()->value("show_P2_map", false)) {
cv::cuda::cvtColor(P2_map_, out.get<GpuMat>(Channel::Colour), cv::COLOR_GRAY2BGRA);
cv::cuda::cvtColor(P2_map_, out.get<GpuMat>(Channel::Colour), cv::COLOR_GRAY2BGRA, 0, cvstream);
}
if (config()->value("show_rpe", false)) {
ftl::cuda::show_rpe(disp, l, r, 100.0f, stream);
ftl::cuda::show_rpe(disp, in.set<GpuMat>(Channel::Left), r, 100.0f, stream);
}
if (config()->value("show_disp_density", false)) {
ftl::cuda::show_disp_density(disp, l, 100.0f, stream);
ftl::cuda::show_disp_density(disp, in.set<GpuMat>(Channel::Left), 100.0f, stream);
}
//disp_int_.convertTo(disp, CV_32F, 1.0f / 16.0f, cvstream);
......
......@@ -21,8 +21,8 @@ struct StereoDisparity::Impl {
StereoCensusSgm sgm;
};
StereoDisparity::StereoDisparity(ftl::Configurable* cfg) :
ftl::operators::Operator(cfg), impl_(nullptr) {
StereoDisparity::StereoDisparity(ftl::operators::Graph *g, ftl::Configurable* cfg) :
ftl::operators::Operator(g, cfg), impl_(nullptr) {
init();
}
......@@ -56,7 +56,7 @@ bool StereoDisparity::apply(Frame &in, Frame &out, cudaStream_t stream) {
disp32f_.create(l.size(), CV_32FC1);
bool has_estimate = in.hasChannel(Channel::Disparity);
auto &disp = (!has_estimate) ? out.create<GpuMat>(Channel::Disparity, Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
auto &disp = (!has_estimate) ? out.create<ftl::rgbd::VideoFrame>(Channel::Disparity).createGPU(Format<short>(l.size())) : in.get<GpuMat>(Channel::Disparity);
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
......
......@@ -328,8 +328,8 @@ namespace ftl { namespace cuda { namespace device
}
if (stream == 0)
cudaSafeCall( cudaDeviceSynchronize() );
//if (stream == 0)
// cudaSafeCall( cudaDeviceSynchronize() );
}
// These are commented out since we don't use them and it slows compile
......
......@@ -18,14 +18,14 @@ using std::vector;
template<typename T> static bool inline isValidDisparity(T d) { return (0.0 < d) && (d < 256.0); } // TODO
OpticalFlowTemporalSmoothing::OpticalFlowTemporalSmoothing(ftl::Configurable* cfg, const std::tuple<ftl::codecs::Channel> &params) :
ftl::operators::Operator(cfg) {
OpticalFlowTemporalSmoothing::OpticalFlowTemporalSmoothing(ftl::operators::Graph *g, ftl::Configurable* cfg, const std::tuple<ftl::codecs::Channel> &params) :
ftl::operators::Operator(g, cfg) {
channel_ = std::get<0>(params);
_init(cfg);
}
OpticalFlowTemporalSmoothing::OpticalFlowTemporalSmoothing(ftl::Configurable* cfg) :
ftl::operators::Operator(cfg) {
OpticalFlowTemporalSmoothing::OpticalFlowTemporalSmoothing(ftl::operators::Graph *g, ftl::Configurable* cfg) :
ftl::operators::Operator(g, cfg) {
_init(cfg);
}
......@@ -47,7 +47,7 @@ void OpticalFlowTemporalSmoothing::_init(ftl::Configurable* cfg) {
threshold_ = cfg->value("threshold", 5.0f);
cfg->on("threshold", [this](const ftl::config::Event&) {
cfg->on("threshold", [this]() {
float threshold = config()->value("threshold", 5.0f);
if (threshold < 0.0) {
LOG(WARNING) << "invalid threshold " << threshold << ", value must be positive";
......@@ -58,7 +58,7 @@ void OpticalFlowTemporalSmoothing::_init(ftl::Configurable* cfg) {
}
});
cfg->on("history_size", [this, &cfg](const ftl::config::Event&) {
cfg->on("history_size", [this, &cfg]() {
int n_max = cfg->value("history_size", 7);
if (n_max < 1) {
......@@ -89,14 +89,14 @@ bool OpticalFlowTemporalSmoothing::apply(Frame &in, Frame &out, cudaStream_t str
auto cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
const cv::cuda::GpuMat &optflow = in.get<cv::cuda::GpuMat>(Channel::Flow);
cv::cuda::GpuMat &data = out.get<cv::cuda::GpuMat>(channel_);
cv::cuda::GpuMat &data = out.set<cv::cuda::GpuMat>(channel_);
if (data.size() != size_) {
size_ = data.size();
if (!init()) { return false; }
}
ftl::cuda::optflow_filter(data, optflow, history_, in.get<cv::cuda::GpuMat>(Channel::Support1), n_max_, threshold_, config()->value("filling", false), cvstream);
ftl::cuda::optflow_filter(data, optflow, history_, in.set<cv::cuda::GpuMat>(Channel::Support1), n_max_, threshold_, config()->value("filling", false), cvstream);
return true;
}
......
......@@ -6,7 +6,7 @@ using ftl::operators::ScanFieldFill;
using ftl::operators::CrossSupportFill;
using ftl::codecs::Channel;
ScanFieldFill::ScanFieldFill(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
ScanFieldFill::ScanFieldFill(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -29,7 +29,7 @@ bool ScanFieldFill::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStrea
}
CrossSupportFill::CrossSupportFill(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
CrossSupportFill::CrossSupportFill(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......
......@@ -117,9 +117,16 @@ __global__ void corresponding_depth_kernel(
if (depth1 > cam1.minDepth && depth1 < cam1.maxDepth && bestcost < 1.0f) {
// Delay making the depth change until later.
conf(pt) = bestadjust;
mask(pt) = mask(pt) | Mask::kMask_Correspondence;
auto m = mask(pt);
m &= ~Mask::kMask_Bad;
mask(pt) = m | Mask::kMask_Correspondence;
screenOut(pt) = bestScreen;
}
if (depth1 > cam1.minDepth && depth1 < cam1.maxDepth && bestcost > 2.0f) {
auto m = mask(pt);
mask(pt) = (m & Mask::kMask_Correspondence) ? m : m | Mask::kMask_Bad;
}
}
}
......
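In words, the new mask logic above: a confident correspondence (bestcost < 1) clears kMask_Bad and sets kMask_Correspondence, while a very poor cost (bestcost > 2) marks the pixel kMask_Bad only if it has never been flagged as a correspondence. A condensed equivalent (depth-range guard and the conf/screenOut writes omitted):

auto m = mask(pt);
if (bestcost < 1.0f) {
	mask(pt) = (m & ~Mask::kMask_Bad) | Mask::kMask_Correspondence;   // good match
} else if (bestcost > 2.0f && !(m & Mask::kMask_Correspondence)) {
	mask(pt) = m | Mask::kMask_Bad;                                   // consistently poor match
}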
......@@ -53,6 +53,8 @@ __global__ void show_cor_error_kernel(
if (x < colour.width() && y < colour.height()) {
short2 s1 = screen1.tex2D(x,y);
//colour(x,y) = make_uchar4(0,0,0,0);
if (s1.x >= 0 && s1.x < screen2.width() && s1.y < screen2.height()) {
short2 s2 = screen2.tex2D(s1.x, s1.y);
......@@ -120,6 +122,8 @@ __global__ void show_depth_adjust_kernel(
float a = adjust.tex2D(x,y);
short2 s = screen.tex2D(x,y);
//colour(x,y) = make_uchar4(0,0,0,0);
if (s.x >= 0) {
float ncG = min(1.0f, fabsf(a)/scale);
float ncB = -max(-1.0f, min(0.0f, a/scale));
......
......@@ -5,13 +5,14 @@
#include <ftl/cuda/normals.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudawarping.hpp>
using ftl::operators::MultiViewMLS;
using ftl::codecs::Channel;
using cv::cuda::GpuMat;
using ftl::rgbd::Format;
MultiViewMLS::MultiViewMLS(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
MultiViewMLS::MultiViewMLS(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -47,8 +48,19 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
bool show_consistency = config()->value("show_consistency", false);
bool show_adjustment = config()->value("show_adjustment", false);
if (in.frames.size() < 1) return false;
auto size = in.firstFrame().get<GpuMat>(Channel::Depth).size();
if (in.frames.size() < 1 || in.mask == 0) return false;
cv::Size size(0,0);
for (auto &f : in.frames) {
if (f.hasChannel(Channel::Depth)) {
size = f.get<GpuMat>(Channel::Depth).size();
break;
}
}
if (size.width == 0) {
in.firstFrame().message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in MVMLS operator");
return false;
}
// Make sure we have enough buffers
while (normals_horiz_.size() < in.frames.size()) {
......@@ -62,7 +74,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i];
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
auto size = f.get<GpuMat>(Channel::Depth).size();
centroid_horiz_[i]->create(size.height, size.width);
normals_horiz_[i]->create(size.height, size.width);
......@@ -77,12 +89,19 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
}
// Create required channels
f.create<GpuMat>(Channel::Confidence, Format<float>(size));
f.create<ftl::rgbd::VideoFrame>(Channel::Confidence).createGPU(Format<float>(size));
f.createTexture<float>(Channel::Confidence);
f.create<GpuMat>(Channel::Screen, Format<short2>(size));
f.create<ftl::rgbd::VideoFrame>(Channel::Screen).createGPU(Format<short2>(size));
f.createTexture<short2>(Channel::Screen);
f.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
f.set<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
if (show_adjustment || show_consistency) {
if (!f.hasChannel(Channel::Overlay)) {
auto &t = f.createTexture<uchar4>(Channel::Overlay, ftl::rgbd::Format<uchar4>(size));
cudaMemset2DAsync(t.devicePtr(), t.pitch(), 0, t.width()*4, t.height(), stream);
}
}
}
//for (int iter=0; iter<iters; ++iter) {
......@@ -95,7 +114,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f1 = in.frames[i];
auto &f1 = in.frames[i].cast<ftl::rgbd::Frame>();
//f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream);
//f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
......@@ -108,7 +127,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
//LOG(INFO) << "Running phase1";
auto &f2 = in.frames[j];
auto &f2 = in.frames[j].cast<ftl::rgbd::Frame>();
//auto s1 = in.sources[i];
//auto s2 = in.sources[j];
......@@ -261,8 +280,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
);*/
if (show_consistency) {
ftl::cuda::show_cor_error(f1.getTexture<uchar4>(Channel::Colour), f1.getTexture<short2>(Channel::Screen), f2.getTexture<short2>(Channel::Screen), 5.0f, stream);
ftl::cuda::show_cor_error(f2.getTexture<uchar4>(Channel::Colour), f2.getTexture<short2>(Channel::Screen), f1.getTexture<short2>(Channel::Screen), 5.0f, stream);
ftl::cuda::show_cor_error(f1.getTexture<uchar4>(Channel::Overlay), f1.getTexture<short2>(Channel::Screen), f2.getTexture<short2>(Channel::Screen), 5.0f, stream);
ftl::cuda::show_cor_error(f2.getTexture<uchar4>(Channel::Overlay), f2.getTexture<short2>(Channel::Screen), f1.getTexture<short2>(Channel::Screen), 5.0f, stream);
}
/*ftl::cuda::remove_cor_error(
......@@ -286,8 +305,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
//}
if (show_adjustment) {
ftl::cuda::show_depth_adjustment(f1.getTexture<uchar4>(Channel::Colour), f1.getTexture<short2>(Channel::Screen), f1.getTexture<float>(Channel::Confidence), 0.04f, stream);
ftl::cuda::show_depth_adjustment(f2.getTexture<uchar4>(Channel::Colour), f2.getTexture<short2>(Channel::Screen), f2.getTexture<float>(Channel::Confidence), 0.04f, stream);
ftl::cuda::show_depth_adjustment(f1.getTexture<uchar4>(Channel::Overlay), f1.getTexture<short2>(Channel::Screen), f1.getTexture<float>(Channel::Confidence), 0.04f, stream);
ftl::cuda::show_depth_adjustment(f2.getTexture<uchar4>(Channel::Overlay), f2.getTexture<short2>(Channel::Screen), f2.getTexture<float>(Channel::Confidence), 0.04f, stream);
}
//} //else {
/*ftl::cuda::correspondence(
......@@ -345,7 +364,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
// Redo normals
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i];
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
ftl::cuda::normals(
f.getTexture<half4>(Channel::Normals),
f.getTexture<float>(Channel::Depth),
......@@ -411,7 +430,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i];
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
//auto *s = in.sources[i];
// Clear data
......@@ -428,13 +447,27 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
float thresh = (1.0f / f.getLeft().fx) * disconPixels;
const GpuMat &rgb = f.get<GpuMat>(Channel::Colour);
GpuMat rgb_buf;
if (rgb.size() != size) {
if (graph()->hasBuffer(Buffer::LowLeft, f.source())) {
rgb_buf = graph()->getBuffer(Buffer::LowLeft, f.source());
} else {
rgb_buf = graph()->createBuffer(Buffer::LowLeft, f.source());
cv::cuda::resize(rgb, rgb_buf, size, 0, 0, cv::INTER_LINEAR, cvstream);
}
} else {
rgb_buf = rgb;
}
ftl::cuda::mls_aggr_horiz(
f.createTexture<uchar4>((f.hasChannel(Channel::Support2)) ? Channel::Support2 : Channel::Support1),
f.createTexture<half4>(Channel::Normals),
*normals_horiz_[i],
f.createTexture<float>(Channel::Depth),
*centroid_horiz_[i],
f.createTexture<uchar4>(Channel::Colour),
//f.createTexture<uchar4>(Channel::Colour),
rgb_buf,
thresh,
col_smooth,
radius,
......@@ -465,7 +498,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
if (do_aggr) {
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f1 = in.frames[i];
auto &f1 = in.frames[i].cast<ftl::rgbd::Frame>();
//f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream);
//f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream);
......@@ -478,7 +511,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
//LOG(INFO) << "Running phase1";
auto &f2 = in.frames[j];
auto &f2 = in.frames[j].cast<ftl::rgbd::Frame>();
//auto s1 = in.sources[i];
//auto s2 = in.sources[j];
......@@ -520,7 +553,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
// Normalise aggregations and move the points
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i];
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
//auto *s = in.sources[i];
auto size = f.get<GpuMat>(Channel::Depth).size();
......
......@@ -5,7 +5,7 @@ using ftl::operators::GTAnalysis;
using ftl::codecs::Channel;
using std::string;
GTAnalysis::GTAnalysis(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
GTAnalysis::GTAnalysis(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
cudaMalloc(&output_, sizeof(ftl::cuda::GTAnalysisData));
}
......@@ -60,7 +60,7 @@ bool GTAnalysis::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
}
std::vector<std::string> msgs;
if (in.hasChannel(Channel::Messages)) { in.get(Channel::Messages, msgs); }
if (in.hasChannel(Channel::Messages)) { msgs = in.get<std::vector<std::string>>(Channel::Messages); }
bool use_disp = config()->value("use_disparity", true);
auto &dmat = in.get<cv::cuda::GpuMat>(Channel::Depth);
......@@ -103,7 +103,7 @@ bool GTAnalysis::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
else { report(msgs, err, o, npixels, "mm", 1000.0); }
}
in.create(Channel::Messages, msgs);
in.create<std::vector<std::string>>(Channel::Messages) = msgs;
return true;
}
......@@ -4,10 +4,11 @@
using ftl::operators::DiscontinuityMask;
using ftl::operators::BorderMask;
using ftl::operators::CullDiscontinuity;
using ftl::operators::DisplayMask;
using ftl::codecs::Channel;
using ftl::rgbd::Format;
DiscontinuityMask::DiscontinuityMask(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
DiscontinuityMask::DiscontinuityMask(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -25,12 +26,16 @@ bool DiscontinuityMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS
float noiseThresh = config()->value("noise_thresh", 0.8f);
float areaMax = config()->value("area_max", 26.0f); // Cross support radius squared + 1
if (!in.hasChannel(Channel::Depth) || !in.hasChannel(Channel::Support1)) return false;
if (!in.hasChannel(Channel::Depth) || !in.hasChannel(Channel::Support1)) {
out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth or Support Channel in Mask Operator");
return false;
}
if (!out.hasChannel(Channel::Mask)) {
cv::cuda::Stream cvstream = cv::cuda::StreamAccessor::wrapStream(stream);
auto &m = out.create<cv::cuda::GpuMat>(Channel::Mask);
m.create(in.get<cv::cuda::GpuMat>(Channel::Depth).size(), CV_8UC1);
m.setTo(cv::Scalar(0));
m.setTo(cv::Scalar(0), cvstream);
}
/*ftl::cuda::discontinuity(
......@@ -56,7 +61,7 @@ bool DiscontinuityMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS
BorderMask::BorderMask(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
BorderMask::BorderMask(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -84,7 +89,7 @@ bool BorderMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
CullDiscontinuity::CullDiscontinuity(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
CullDiscontinuity::CullDiscontinuity(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -96,10 +101,10 @@ bool CullDiscontinuity::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS
if (!in.hasChannel(Channel::Depth) || !in.hasChannel(Channel::Mask)) return false;
uint8_t maskID = config()->value("mask_id", (unsigned int)(ftl::cuda::Mask::kMask_Discontinuity | ftl::cuda::Mask::kMask_Bad));
unsigned int radius = config()->value("radius", 0);
unsigned int radius = config()->value("radius", 2);
bool inverted = config()->value("invert", false);
out.clearPackets(Channel::Depth); // Force reset
out.set<ftl::rgbd::VideoFrame>(Channel::Depth); // Force reset
ftl::cuda::cull_mask(
in.createTexture<uint8_t>(Channel::Mask),
out.createTexture<float>(Channel::Depth),
......@@ -111,3 +116,34 @@ bool CullDiscontinuity::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS
return true;
}
DisplayMask::DisplayMask(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
DisplayMask::~DisplayMask() {
}
bool DisplayMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
if (!in.hasChannel(Channel::Mask)) {
return true;
}
uint8_t mask = config()->value("mask", 0);
bool invert = config()->value("invert", false);
auto &masktex = in.getTexture<uint8_t>(Channel::Mask);
if (!in.hasChannel(Channel::Overlay)) {
auto &t = in.createTexture<uchar4>(Channel::Overlay, ftl::rgbd::Format<uchar4>(masktex.width(), masktex.height()));
cudaMemset2DAsync(t.devicePtr(), t.pitch(), 0, t.width()*4, t.height(), stream);
}
ftl::cuda::show_mask(in.getTexture<uchar4>(Channel::Overlay), masktex, mask, make_uchar4(255,0,255,255), stream);
return true;
}
......@@ -223,6 +223,10 @@ __device__ inline int segmentID(int u, int v) {
return 0;
}
__device__ inline float4 make_float4(const uchar4 &v) {
return make_float4(float(v.x), float(v.y), float(v.z), float(v.w));
}
/*
* Smooth depth map using Moving Least Squares. This version uses colour
* similarity weights to adjust the spatial smoothing factor. It is naive in
......@@ -237,7 +241,9 @@ __device__ inline int segmentID(int u, int v) {
TextureObject<half4> normals_out,
TextureObject<float> depth_in, // Virtual depth map
TextureObject<float> depth_out, // Accumulated output
TextureObject<uchar4> colour_in,
//TextureObject<uchar4> colour_in,
const uchar4* __restrict__ colour_in,
int colour_pitch,
float smoothing,
float colour_smoothing,
ftl::rgbd::Camera camera) {
......@@ -260,7 +266,8 @@ __device__ inline int segmentID(int u, int v) {
}
float3 X = camera.screenToCam((int)(x),(int)(y),d0);
float4 c0 = colour_in.tex2D((float)x+0.5f, (float)y+0.5f);
//float4 c0 = colour_in.tex2D((float)x+0.5f, (float)y+0.5f);
float4 c0 = make_float4(colour_in[x+y*colour_pitch]);
// Neighbourhood
uchar4 base = region.tex2D(x,y);
......@@ -274,6 +281,7 @@ __device__ inline int segmentID(int u, int v) {
#pragma unroll
for (int u=-RADIUS; u<=RADIUS; ++u) {
if (x+u >= 0 && x+u < depth_in.width() && y+v >= 0 && y+v < depth_in.height()) {
const float d = depth_in.tex2D(x+u, y+v);
//if (d > camera.minDepth && d < camera.maxDepth) {
......@@ -286,7 +294,8 @@ __device__ inline int segmentID(int u, int v) {
// FIXME: Ensure bad normals are removed by setting depth invalid
//if (Ni.x+Ni.y+Ni.z == 0.0f) continue;
const float4 c = colour_in.tex2D(float(x+u) + 0.5f, float(y+v) + 0.5f);
//const float4 c = colour_in.tex2D(float(x+u) + 0.5f, float(y+v) + 0.5f);
const float4 c = make_float4(colour_in[x+u+(y+v)*colour_pitch]);
w *= ftl::cuda::colourWeighting(c0,c,colour_smoothing);
// Allow missing point to borrow z value
......@@ -300,7 +309,7 @@ __device__ inline int segmentID(int u, int v) {
nX += Ni*w;
contrib += w;
//if (FILLING && w > 0.0f && v > -base.z+1 && v < base.w-1 && u > -baseY.x+1 && u < baseY.y-1) segment_check |= segmentID(u,v);
//}
}
}
}
......@@ -335,7 +344,8 @@ void ftl::cuda::colour_mls_smooth_csr(
ftl::cuda::TextureObject<half4> &normals_out,
ftl::cuda::TextureObject<float> &depth_in,
ftl::cuda::TextureObject<float> &depth_out,
ftl::cuda::TextureObject<uchar4> &colour_in,
//ftl::cuda::TextureObject<uchar4> &colour_in,
const cv::cuda::GpuMat &colour_in,
float smoothing,
float colour_smoothing,
bool filling,
......@@ -346,9 +356,9 @@ void ftl::cuda::colour_mls_smooth_csr(
const dim3 blockSize(T_PER_BLOCK, T_PER_BLOCK);
if (filling) {
colour_mls_smooth_csr_kernel<true,5><<<gridSize, blockSize, 0, stream>>>(region, normals_in, normals_out, depth_in, depth_out, colour_in, smoothing, colour_smoothing, camera);
colour_mls_smooth_csr_kernel<true,5><<<gridSize, blockSize, 0, stream>>>(region, normals_in, normals_out, depth_in, depth_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera);
} else {
colour_mls_smooth_csr_kernel<false,5><<<gridSize, blockSize, 0, stream>>>(region, normals_in, normals_out, depth_in, depth_out, colour_in, smoothing, colour_smoothing, camera);
colour_mls_smooth_csr_kernel<false,5><<<gridSize, blockSize, 0, stream>>>(region, normals_in, normals_out, depth_in, depth_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera);
}
cudaSafeCall( cudaGetLastError() );
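A note on the colour_in.step/4 arguments introduced above: cv::cuda::GpuMat::step is the row pitch in bytes, while the kernel now indexes the image as uchar4 elements (colour_in[x + y*colour_pitch]), so the byte pitch is divided by sizeof(uchar4) == 4 before being passed in. A minimal sketch of the addressing this assumes:

__device__ inline uchar4 load_colour(const uchar4* __restrict__ img, int pitch_elems, int x, int y) {
	return img[x + y * pitch_elems];   // pitch_elems = GpuMat::step / sizeof(uchar4)
}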
......@@ -593,7 +603,8 @@ void ftl::cuda::mls_aggr_horiz(
ftl::cuda::TextureObject<half4> &normals_out,
ftl::cuda::TextureObject<float> &depth_in,
ftl::cuda::TextureObject<float4> &centroid_out,
ftl::cuda::TextureObject<uchar4> &colour_in,
//ftl::cuda::TextureObject<uchar4> &colour_in,
const cv::cuda::GpuMat &colour_in,
float smoothing,
float colour_smoothing,
int radius,
......@@ -607,13 +618,13 @@ void ftl::cuda::mls_aggr_horiz(
const dim3 blockSize(THREADS_X, THREADS_Y);
switch(radius) {
case 1: mls_aggr_horiz_kernel<1><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 2: mls_aggr_horiz_kernel<2><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 3: mls_aggr_horiz_kernel<3><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 5: mls_aggr_horiz_kernel<5><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 10: mls_aggr_horiz_kernel<10><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 15: mls_aggr_horiz_kernel<15><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 20: mls_aggr_horiz_kernel<20><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, colour_in.devicePtr(), colour_in.pixelPitch(), smoothing, colour_smoothing, camera); break;
case 1: mls_aggr_horiz_kernel<1><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
case 2: mls_aggr_horiz_kernel<2><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
case 3: mls_aggr_horiz_kernel<3><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
case 5: mls_aggr_horiz_kernel<5><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
case 10: mls_aggr_horiz_kernel<10><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
case 15: mls_aggr_horiz_kernel<15><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
case 20: mls_aggr_horiz_kernel<20><<<gridSize, blockSize, 0, stream>>>(region.devicePtr(), region.pixelPitch(), normals_in.devicePtr(), normals_in.pixelPitch(), normals_out, depth_in.devicePtr(), depth_in.pixelPitch(), centroid_out, (uchar4*)colour_in.data, colour_in.step/4, smoothing, colour_smoothing, camera); break;
default: return;
}
cudaSafeCall( cudaGetLastError() );
......
......@@ -8,7 +8,7 @@ using ftl::operators::SmoothNormals;
using ftl::codecs::Channel;
using ftl::rgbd::Format;
Normals::Normals(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
Normals::Normals(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -18,7 +18,9 @@ Normals::~Normals() {
bool Normals::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
if (!in.hasChannel(Channel::Depth)) {
throw FTL_Error("Missing depth channel in Normals operator");
out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in Normals operator");
//throw FTL_Error("Missing depth channel in Normals operator");
return false;
}
if (out.hasChannel(Channel::Normals)) {
......@@ -37,7 +39,7 @@ bool Normals::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t st
// =============================================================================
NormalDot::NormalDot(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
NormalDot::NormalDot(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -47,7 +49,9 @@ NormalDot::~NormalDot() {
bool NormalDot::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) {
if (!in.hasChannel(Channel::Depth)) {
throw FTL_Error("Missing depth channel in Normals operator");
out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in Normals operator");
//throw FTL_Error("Missing depth channel in Normals operator");
return false;
}
if (out.hasChannel(Channel::Normals)) {
......@@ -67,7 +71,7 @@ bool NormalDot::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t
// =============================================================================
SmoothNormals::SmoothNormals(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) {
SmoothNormals::SmoothNormals(ftl::operators::Graph *g, ftl::Configurable *cfg) : ftl::operators::Operator(g, cfg) {
}
......@@ -80,6 +84,7 @@ bool SmoothNormals::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStrea
int radius = max(0, min(config()->value("radius",1), 5));
if (!in.hasChannel(Channel::Depth)) {
out.message(ftl::data::Message::Warning_MISSING_CHANNEL, "Missing Depth Channel in Normals operator");
throw FTL_Error("Missing depth channel in SmoothNormals operator");
}
......
......@@ -18,13 +18,13 @@ using ftl::operators::NVOpticalFlow;
using cv::Size;
using cv::cuda::GpuMat;
NVOpticalFlow::NVOpticalFlow(ftl::Configurable* cfg) :
ftl::operators::Operator(cfg), channel_in_{ftl::codecs::Channel::Colour,ftl::codecs::Channel::Colour2}, channel_out_{ftl::codecs::Channel::Flow,ftl::codecs::Channel::Flow2} {
NVOpticalFlow::NVOpticalFlow(ftl::operators::Graph *g, ftl::Configurable* cfg) :
ftl::operators::Operator(g, cfg), channel_in_{ftl::codecs::Channel::Colour,ftl::codecs::Channel::Colour2}, channel_out_{ftl::codecs::Channel::Flow,ftl::codecs::Channel::Flow2} {
size_ = Size(0, 0);
}
NVOpticalFlow::NVOpticalFlow(ftl::Configurable*cfg, const std::tuple<ftl::codecs::Channel,ftl::codecs::Channel,ftl::codecs::Channel,ftl::codecs::Channel> &channels) : ftl::operators::Operator(cfg) {
NVOpticalFlow::NVOpticalFlow(ftl::operators::Graph *g, ftl::Configurable*cfg, const std::tuple<ftl::codecs::Channel,ftl::codecs::Channel,ftl::codecs::Channel,ftl::codecs::Channel> &channels) : ftl::operators::Operator(g, cfg) {
channel_in_[0] = std::get<0>(channels);
channel_out_[0] = std::get<1>(channels);
channel_in_[1] = std::get<2>(channels);
......@@ -74,7 +74,7 @@ bool NVOpticalFlow::apply(Frame &in, Frame &out, cudaStream_t stream) {
auto &flow1 = out.create<GpuMat>(channel_out_[0]);
cv::cuda::cvtColor(in.get<GpuMat>(channel_in_[0]), left_gray_, cv::COLOR_BGRA2GRAY, 0, cvstream);
cv::cuda::cvtColor(in.get<GpuMat>(channel_in_[1]), right_gray_, cv::COLOR_BGRA2GRAY, 0, cvstream);
if (both_channels) cv::cuda::cvtColor(in.get<GpuMat>(channel_in_[1]), right_gray_, cv::COLOR_BGRA2GRAY, 0, cvstream);
// TODO: Use optical flow confidence output, perhaps combined with a
// sensitivity adjustment
......