Skip to content
Snippets Groups Projects
Commit f5edca6e authored by Nicolas Pope's avatar Nicolas Pope
Browse files

Merge branch 'bug/downscaledepth' into 'master'

Correctly downsize colour to match depth

See merge request nicolas.pope/ftl!206
parents 2f2c1ce1 78887eb5
No related branches found
No related tags found
1 merge request!206Correctly downsize colour to match depth
Pipeline #17747 passed
......@@ -54,7 +54,7 @@ bool ColourChannels::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, ftl::rgb
if (in.hasChannel(Channel::Right)) {
auto &right = in.get<cv::cuda::GpuMat>(Channel::Right);
if (depth.size() != right.size()) {
cv::cuda::resize(right, rbuf_, depth.size(), 0.0, 0.0, cv::INTER_CUBIC, cvstream);
cv::cuda::resize(right, rbuf_, depth.size(), 0.0, 0.0, cv::INTER_LINEAR, cvstream);
cv::cuda::swap(right, rbuf_);
}
}
......
......@@ -23,8 +23,8 @@ void DepthChannel::_createPipeline() {
if (pipe_ != nullptr) return;
pipe_ = ftl::config::create<ftl::operators::Graph>(config(), "depth");
depth_size_ = cv::Size( pipe_->value("width", 1280),
pipe_->value("height", 720));
depth_size_ = cv::Size( config()->value("width", 1280),
config()->value("height", 720));
pipe_->append<ftl::operators::ColourChannels>("colour"); // Convert BGR to BGRA
pipe_->append<ftl::operators::FixstarsSGM>("algorithm");
......@@ -52,7 +52,7 @@ bool DepthChannel::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat& right = f.get<cv::cuda::GpuMat>(Channel::Right);
cv::cuda::GpuMat& depth = f.create<cv::cuda::GpuMat>(Channel::Depth);
depth.create(left.size(), CV_32FC1);
depth.create(depth_size_, CV_32FC1);
if (left.empty() || right.empty()) continue;
......
......@@ -71,14 +71,19 @@ LocalSource::LocalSource(nlohmann::json &config)
stereo_ = true;
}
dwidth_ = value("depth_width", width_);
dheight_ = value("depth_height", height_);
// Allocate page locked host memory for fast GPU transfer
left_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC3);
right_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC3);
left_hm_ = cv::cuda::HostMem(dheight_, dwidth_, CV_8UC3);
right_hm_ = cv::cuda::HostMem(dheight_, dwidth_, CV_8UC3);
hres_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC3);
}
LocalSource::LocalSource(nlohmann::json &config, const string &vid)
: Configurable(config), timestamp_(0.0) {
LOG(FATAL) << "Stereo video file sources no longer supported";
/*
//flip_ = value("flip", false);
//flip_v_ = value("flip_vert", false);
nostereo_ = value("nostereo", false);
......@@ -119,11 +124,16 @@ LocalSource::LocalSource(nlohmann::json &config, const string &vid)
stereo_ = false;
}
dwidth_ = value("depth_width", width_);
dheight_ = value("depth_height", height_);
// Allocate page locked host memory for fast GPU transfer
left_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC3);
right_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC3);
left_hm_ = cv::cuda::HostMem(dheight_, dwidth_, CV_8UC3);
right_hm_ = cv::cuda::HostMem(dheight_, dwidth_, CV_8UC3);
hres_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC3);
//tps_ = 1.0 / value("max_fps", 25.0);
*/
}
/*bool LocalSource::left(cv::Mat &l) {
......@@ -225,26 +235,32 @@ bool LocalSource::grab() {
return true;
}
bool LocalSource::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, Calibrate *c, cv::cuda::Stream &stream) {
Mat l, r;
bool LocalSource::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, cv::cuda::GpuMat &hres_out, Calibrate *c, cv::cuda::Stream &stream) {
Mat l, r ,hres;
// Use page locked memory
l = left_hm_.createMatHeader();
r = right_hm_.createMatHeader();
hres = hres_hm_.createMatHeader();
Mat &lfull = (!hasHigherRes()) ? l : hres;
Mat &rfull = (!hasHigherRes()) ? r : rtmp_;
if (!camera_a_) return false;
if (camera_b_ || !stereo_) {
if (!camera_a_->retrieve(l)) {
// TODO: Use threads here?
if (!camera_a_->retrieve(lfull)) {
LOG(ERROR) << "Unable to read frame from camera A";
return false;
}
if (camera_b_ && !camera_b_->retrieve(r)) {
if (camera_b_ && !camera_b_->retrieve(rfull)) {
LOG(ERROR) << "Unable to read frame from camera B";
return false;
}
} else {
Mat frame;
LOG(FATAL) << "Stereo video no longer supported";
/*Mat frame;
if (!camera_a_->retrieve(frame)) {
LOG(ERROR) << "Unable to read frame from video";
return false;
......@@ -257,7 +273,7 @@ bool LocalSource::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, Calibrat
//} else {
l = Mat(frame, Rect(0, 0, resx, frame.rows));
r = Mat(frame, Rect(resx, 0, frame.cols-resx, frame.rows));
//}
//}*/
}
/*if (downsize_ != 1.0f) {
......@@ -283,7 +299,18 @@ bool LocalSource::get(cv::cuda::GpuMat &l_out, cv::cuda::GpuMat &r_out, Calibrat
r = tr;
}*/
c->rectifyStereo(l, r);
c->rectifyStereo(lfull, rfull);
// Need to resize
if (hasHigherRes()) {
// TODO: Use threads?
cv::resize(lfull, l, l.size(), 0.0, 0.0, cv::INTER_CUBIC);
cv::resize(rfull, r, r.size(), 0.0, 0.0, cv::INTER_CUBIC);
hres_out.upload(hres, stream);
//LOG(INFO) << "Early Resize: " << l.size() << " from " << lfull.size();
} else {
hres_out = cv::cuda::GpuMat();
}
l_out.upload(l, stream);
r_out.upload(r, stream);
......
......@@ -25,10 +25,15 @@ class LocalSource : public Configurable {
//bool left(cv::Mat &m);
//bool right(cv::Mat &m);
bool grab();
bool get(cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, Calibrate *c, cv::cuda::Stream &stream);
bool get(cv::cuda::GpuMat &l, cv::cuda::GpuMat &r, cv::cuda::GpuMat &h, Calibrate *c, cv::cuda::Stream &stream);
unsigned int width() const { return width_; }
unsigned int height() const { return height_; }
unsigned int width() const { return dwidth_; }
unsigned int height() const { return dheight_; }
unsigned int fullWidth() const { return width_; }
unsigned int fullHeight() const { return height_; }
inline bool hasHigherRes() const { return dwidth_ != width_; }
//void setFramerate(float fps);
//float getFramerate() const;
......@@ -50,9 +55,13 @@ class LocalSource : public Configurable {
cv::VideoCapture *camera_b_;
unsigned int width_;
unsigned int height_;
unsigned int dwidth_;
unsigned int dheight_;
cv::cuda::HostMem left_hm_;
cv::cuda::HostMem right_hm_;
cv::cuda::HostMem hres_hm_;
cv::Mat rtmp_;
};
}
......
......@@ -76,7 +76,10 @@ void StereoVideoSource::init(const string &file) {
pipeline_input_->append<ftl::operators::NVOpticalFlow>("optflow");
#endif
pipeline_depth_ = ftl::config::create<ftl::operators::Graph>(host_, "disparity");
//depth_size_ = cv::Size( host_->value("depth_width", 1280),
// host_->value("depth_height", 720));
/*pipeline_depth_ = ftl::config::create<ftl::operators::Graph>(host_, "disparity");
depth_size_ = cv::Size( pipeline_depth_->value("width", color_size_.width),
pipeline_depth_->value("height", color_size_.height));
......@@ -90,13 +93,13 @@ void StereoVideoSource::init(const string &file) {
pipeline_depth_->append<ftl::operators::Normals>("normals"); // Estimate surface normals
pipeline_depth_->append<ftl::operators::CrossSupport>("cross");
pipeline_depth_->append<ftl::operators::DiscontinuityMask>("discontinuity_mask");
pipeline_depth_->append<ftl::operators::AggreMLS>("mls"); // Perform MLS (using smoothing channel)
pipeline_depth_->append<ftl::operators::AggreMLS>("mls"); // Perform MLS (using smoothing channel)*/
calib_ = ftl::create<Calibrate>(host_, "calibration", color_size_, stream_);
calib_ = ftl::create<Calibrate>(host_, "calibration", cv::Size(lsrc_->fullWidth(), lsrc_->fullHeight()), stream_);
if (!calib_->isCalibrated()) LOG(WARNING) << "Cameras are not calibrated!";
// Generate camera parameters from camera matrix
cv::Mat K = calib_->getCameraMatrixLeft(depth_size_);
cv::Mat K = calib_->getCameraMatrixLeft(color_size_);
params_ = {
K.at<double>(0,0), // Fx
K.at<double>(1,1), // Fy
......@@ -154,9 +157,9 @@ ftl::rgbd::Camera StereoVideoSource::parameters(Channel chan) {
cv::Mat K;
if (chan == Channel::Right) {
K = calib_->getCameraMatrixRight(depth_size_);
K = calib_->getCameraMatrixRight(color_size_);
} else {
K = calib_->getCameraMatrixLeft(depth_size_);
K = calib_->getCameraMatrixLeft(color_size_);
}
// TODO: remove hardcoded values (min/max)
......@@ -165,8 +168,8 @@ ftl::rgbd::Camera StereoVideoSource::parameters(Channel chan) {
K.at<double>(1,1), // Fy
-K.at<double>(0,2), // Cx
-K.at<double>(1,2), // Cy
(unsigned int) depth_size_.width,
(unsigned int) depth_size_.height,
(unsigned int) color_size_.width,
(unsigned int) color_size_.height,
0.0f, // 0m min
15.0f, // 15m max
1.0 / calib_->getQ().at<double>(3,2), // Baseline
......@@ -187,7 +190,12 @@ bool StereoVideoSource::retrieve() {
frame.reset();
auto &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
auto &right = frame.create<cv::cuda::GpuMat>(Channel::Right);
lsrc_->get(left, right, calib_, stream2_);
cv::cuda::GpuMat dummy;
auto &hres = (lsrc_->hasHigherRes()) ? frame.create<cv::cuda::GpuMat>(Channel::ColourHighRes) : dummy;
lsrc_->get(left, right, hres, calib_, stream2_);
//LOG(INFO) << "Channel size: " << hres.size();
pipeline_input_->apply(frame, frame, host_, cv::cuda::StreamAccessor::getStream(stream2_));
stream2_.waitForCompletion();
......
......@@ -535,8 +535,8 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
}
// TODO: Use ColourHighQuality if available
if (fs.frames[j].getPackets(Channel::Colour).size() == 0) {
auto colChan = (fs.frames[j].hasChannel(Channel::ColourHighRes)) ? Channel::ColourHighRes : Channel::Colour;
if (fs.frames[j].getPackets(colChan).size() == 0) {
if (!src->hq_encoder_c1) src->hq_encoder_c1 = ftl::codecs::allocateEncoder(
definition_t::HD1080, hq_devices_, hq_codec_);
auto *enc = src->hq_encoder_c1;
......@@ -544,14 +544,14 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
if (enc) {
// TODO: Stagger the reset between nodes... random phasing
if (insert_iframes_ && fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc->reset();
enc->encode(fs.frames[j].get<cv::cuda::GpuMat>(Channel::Colour), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
enc->encode(fs.frames[j].get<cv::cuda::GpuMat>(colChan), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
_transmitPacket(src, blk, Channel::Colour, hasChan2, Quality::High);
});
} else {
LOG(ERROR) << "Insufficient encoder resources";
}
} else {
const auto &packets = fs.frames[j].getPackets(Channel::Colour);
const auto &packets = fs.frames[j].getPackets(colChan);
// FIXME: Adjust block number and total to match number of packets
// Also requires the receiver to decode in block number order.
LOG(INFO) << "Send existing encoding: " << packets.size();
......
0% Loading.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment