Skip to content
Snippets Groups Projects

Reduce latency in device capture and parallel encoding

Merged Nicolas Pope requested to merge feature/send-parallel into master
15 files
+ 157
− 115
Compare changes
  • Side-by-side
  • Inline
Files
15
@@ -11,7 +11,7 @@ using ftl::codecs::OpenCVEncoder;
using std::vector;
OpenCVEncoder::OpenCVEncoder(ftl::codecs::definition_t maxdef,
ftl::codecs::definition_t mindef) : Encoder(maxdef, mindef, ftl::codecs::device_t::Software) {
ftl::codecs::definition_t mindef) : Encoder(maxdef, mindef, ftl::codecs::device_t::OpenCV) {
jobs_ = 0;
}
@@ -38,8 +38,6 @@ bool OpenCVEncoder::encode(const cv::cuda::GpuMat &in, ftl::codecs::Packet &pkt)
return false;
}
LOG(WARNING) << "Using Software Encoder!";
in.download(tmp_);
if (!is_colour && in.type() == CV_32F) {
@@ -9,6 +9,7 @@
#include "ftl/operators/depth.hpp"
#include "ftl/operators/mask.hpp"
#include "ftl/operators/opticalflow.hpp"
#include <ftl/calibration/structures.hpp>
#include "./disparity/opencv/disparity_bilateral_filter.hpp"
@@ -179,7 +180,14 @@ bool DepthChannel::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
for (size_t i=0; i<in.frames.size(); ++i) {
if (!in.hasFrame(i)) continue;
auto &f = in.frames[i].cast<ftl::rgbd::Frame>();
if (!f.hasChannel(Channel::Depth) && f.hasChannel(Channel::Right)) {
if (f.hasChannel(Channel::CalibrationData)) {
auto &cdata = f.get<ftl::calibration::CalibrationData>(Channel::CalibrationData);
if (!cdata.enabled) continue;
}
_createPipeline(in.frames.size());
const cv::cuda::GpuMat& left = f.get<cv::cuda::GpuMat>(Channel::Left);
@@ -188,19 +196,6 @@ bool DepthChannel::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda
depth.create(left.size(), CV_32FC1);
if (left.empty() || right.empty()) continue;
/*if (depth_size_ != left.size()) {
auto &col2 = f.create<cv::cuda::GpuMat>(Channel::ColourHighRes);
cv::cuda::resize(left, col2, depth_size_, 0.0, 0.0, cv::INTER_CUBIC, cvstream);
f.createTexture<uchar4>(Channel::ColourHighRes, true);
f.swapChannels(Channel::Colour, Channel::ColourHighRes);
}
if (depth_size_ != right.size()) {
cv::cuda::resize(right, rbuf_[i], depth_size_, 0.0, 0.0, cv::INTER_CUBIC, cvstream);
cv::cuda::swap(right, rbuf_[i]);
}*/
pipe_->apply(f, f, stream);
}
}
@@ -149,9 +149,9 @@ OpenCVDevice::OpenCVDevice(nlohmann::json &config, bool stereo)
right_hm_ = cv::cuda::HostMem(dheight_, dwidth_, CV_8UC4);
hres_hm_ = cv::cuda::HostMem(height_, width_, CV_8UC4);
interpolation_ = value("inter_cubic", false) ? cv::INTER_CUBIC : cv::INTER_LINEAR;
interpolation_ = value("inter_cubic", true) ? cv::INTER_CUBIC : cv::INTER_LINEAR;
on("inter_cubic", [this](){
interpolation_ = value("inter_cubic_", false) ?
interpolation_ = value("inter_cubic_", true) ?
cv::INTER_CUBIC : cv::INTER_LINEAR;
});
}
@@ -347,32 +347,32 @@ bool OpenCVDevice::get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l_out, cv::cud
if (!camera_a_) return false;
std::future<bool> future_b;
//std::future<bool> future_b;
if (camera_b_) {
future_b = std::move(ftl::pool.push([this,&rfull,&r,c,&r_out,&r_hres_out,&stream](int id) {
//future_b = std::move(ftl::pool.push([this,&rfull,&r,c,&r_out,&r_hres_out,&stream](int id) {
if (!camera_b_->retrieve(frame_r_)) {
LOG(ERROR) << "Unable to read frame from camera B";
return false;
} else {
cv::cvtColor(frame_r_, rtmp2_, cv::COLOR_BGR2BGRA);
//if (stereo_) {
c->rectify(rtmp2_, rfull, Channel::Right);
if (hasHigherRes()) {
// TODO: Use threads?
cv::resize(rfull, r, r.size(), 0.0, 0.0, interpolation_);
r_hres_out = rfull;
}
else {
r_hres_out = Mat();
}
//}
r_out.upload(r, stream);
}
cv::cvtColor(frame_r_, rfull, cv::COLOR_BGR2BGRA);
if (stereo_) {
c->rectify(rfull, Channel::Right);
if (hasHigherRes()) {
// TODO: Use threads?
cv::resize(rfull, r, r.size(), 0.0, 0.0, interpolation_);
r_hres_out = rfull;
}
else {
r_hres_out = Mat();
}
}
r_out.upload(r, stream);
return true;
}));
//return true;
//}));
}
if (camera_b_) {
@@ -394,18 +394,19 @@ bool OpenCVDevice::get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l_out, cv::cud
}
}
cv::cvtColor(frame_l_, lfull, cv::COLOR_BGR2BGRA);
if (stereo_) {
cv::cvtColor(frame_l_, ltmp_, cv::COLOR_BGR2BGRA);
//FTL_Profile("Rectification", 0.01);
//c->rectifyStereo(lfull, rfull);
c->rectify(lfull, Channel::Left);
c->rectify(ltmp_, lfull, Channel::Left);
// Need to resize
//if (hasHigherRes()) {
// TODO: Use threads?
// cv::resize(rfull, r, r.size(), 0.0, 0.0, interpolation_);
//}
} else {
cv::cvtColor(frame_l_, lfull, cv::COLOR_BGR2BGRA);
}
if (hasHigherRes()) {
@@ -430,10 +431,10 @@ bool OpenCVDevice::get(ftl::rgbd::Frame &frame, cv::cuda::GpuMat &l_out, cv::cud
cv::imencode(".jpg", thumb, thumbdata, params);
}
if (camera_b_) {
//if (camera_b_) {
//FTL_Profile("WaitCamB", 0.05);
future_b.wait();
}
//future_b.wait();
//}
return true;
}
Loading