Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • nicolaspope/ftl
1 result
Show changes
Showing
with 413 additions and 355 deletions
......@@ -256,7 +256,7 @@ ftl::cuda::TextureObject<T> &Frame::createTexture(ftl::codecs::Channel c) {
//LOG(INFO) << "Creating texture object";
m.tex = ftl::cuda::TextureObject<T>(m.gpu);
} else if (m.tex.cvType() != ftl::traits::OpenCVType<T>::value || m.tex.width() != m.gpu.cols || m.tex.height() != m.gpu.rows || m.tex.devicePtr() != m.gpu.data) {
LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'";
LOG(INFO) << "Recreating texture object for '" << ftl::codecs::name(c) << "'.";
m.tex.free();
m.tex = ftl::cuda::TextureObject<T>(m.gpu);
}
......
......@@ -3,10 +3,10 @@
#include <ftl/cuda_util.hpp>
#include <ftl/configuration.hpp>
#include <ftl/rgbd/camera.hpp>
#include <ftl/threads.hpp>
#include <ftl/net/universe.hpp>
#include <ftl/uri.hpp>
#include <ftl/rgbd/camera.hpp>
#include <ftl/rgbd/detail/source.hpp>
#include <ftl/codecs/packet.hpp>
#include <opencv2/opencv.hpp>
......@@ -46,9 +46,6 @@ class Source : public ftl::Configurable {
friend T *ftl::config::create(ftl::config::json_t &, ARGS ...);
friend class VirtualSource;
//template <typename T, typename... ARGS>
//friend T *ftl::config::create(ftl::Configurable *, const std::string &, ARGS ...);
// This class cannot be constructed directly, use ftl::create
Source()=delete;
......@@ -66,13 +63,16 @@ class Source : public ftl::Configurable {
/**
* Is this source valid and ready to grab?.
*/
bool isReady() { return (impl_) ? impl_->isReady() : params_.width != 0; }
bool isReady() { return (impl_) ? impl_->isReady() : false; }
/**
* Change the second channel source.
*/
bool setChannel(ftl::codecs::Channel c);
/**
* Get the channel allocated to the second source.
*/
ftl::codecs::Channel getChannel() const { return channel_; }
/**
......@@ -114,19 +114,7 @@ class Source : public ftl::Configurable {
* swap rather than a copy, so the parameters should be persistent buffers for
* best performance.
*/
void getFrames(cv::Mat &c, cv::Mat &d);
/**
* Get a copy of the colour frame only.
*/
void getColour(cv::Mat &c);
/**
* Get a copy of the depth frame only.
*/
void getDepth(cv::Mat &d);
int64_t timestamp() const { return timestamp_; }
[[deprecated]] void getFrames(cv::Mat &c, cv::Mat &d);
/**
* Directly upload source RGB and Depth to GPU.
......@@ -136,16 +124,20 @@ class Source : public ftl::Configurable {
void uploadColour(cv::cuda::GpuMat&);
void uploadDepth(cv::cuda::GpuMat&);
bool isVirtual() const { return impl_ == nullptr; }
//bool isVirtual() const { return impl_ == nullptr; }
/**
* Get the source's camera intrinsics.
*/
const Camera &parameters() const {
if (impl_) return impl_->params_;
else return params_;
else throw ftl::exception("Cannot get parameters for bad source");
}
/**
* Get camera intrinsics for another channel. For example the right camera
* in a stereo pair.
*/
const Camera parameters(ftl::codecs::Channel) const;
cv::Mat cameraMatrix() const;
......@@ -169,46 +161,27 @@ class Source : public ftl::Configurable {
capability_t getCapabilities() const;
/**
* Get a point in camera coordinates at specified pixel location.
*/
Eigen::Vector4d point(uint x, uint y);
/**
* Get a point in camera coordinates at specified pixel location, with a
* given depth value.
*/
Eigen::Vector4d point(uint x, uint y, double d);
Eigen::Vector2i point(const Eigen::Vector4d &p);
/**
* Force the internal implementation to be reconstructed.
*/
void reset();
void pause(bool);
bool isPaused() { return paused_; }
void bullet(bool);
bool isBullet() { return bullet_; }
bool thumbnail(cv::Mat &t);
ftl::net::Universe *getNet() const { return net_; }
std::string getURI() { return value("uri", std::string("")); }
void customImplementation(detail::Source *);
//void customImplementation(detail::Source *);
SHARED_MUTEX &mutex() { return mutex_; }
std::function<void(int64_t, cv::Mat &, cv::Mat &)> &callback() { return callback_; }
std::function<void(int64_t, cv::cuda::GpuMat &, cv::cuda::GpuMat &)> &callback() { return callback_; }
/**
* Set the callback that receives decoded frames as they are generated.
* There can be only a single such callback as the buffers can be swapped
* by the callback.
*/
void setCallback(std::function<void(int64_t, cv::Mat &, cv::Mat &)> cb);
void setCallback(std::function<void(int64_t, cv::cuda::GpuMat &, cv::cuda::GpuMat &)> cb);
void removeCallback() { callback_ = nullptr; }
/**
......@@ -218,6 +191,9 @@ class Source : public ftl::Configurable {
*/
void addRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &);
/**
* THIS DOES NOT WORK CURRENTLY.
*/
void removeRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &);
/**
......@@ -225,21 +201,31 @@ class Source : public ftl::Configurable {
*/
void notifyRaw(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt);
/**
* Notify of a decoded or available pair of frames. This calls the source
* callback after having verified the correct resolution of the frames.
*/
void notify(int64_t ts, cv::cuda::GpuMat &c1, cv::cuda::GpuMat &c2);
// ==== Inject Data into stream ============================================
/**
* Generate a stream packet with arbitrary data. The data is packed using
* msgpack and is given the timestamp of the most recent frame.
*/
template <typename... ARGS>
void inject(ftl::codecs::Channel c, ARGS... args);
void inject(const Eigen::Matrix4d &pose);
protected:
detail::Source *impl_;
cv::Mat rgb_;
cv::Mat depth_;
cv::Mat thumb_;
Camera params_; // TODO Find better solution
Eigen::Matrix4d pose_;
ftl::net::Universe *net_;
SHARED_MUTEX mutex_;
bool paused_;
bool bullet_;
ftl::codecs::Channel channel_;
cudaStream_t stream_;
int64_t timestamp_;
std::function<void(int64_t, cv::Mat &, cv::Mat &)> callback_;
std::function<void(int64_t, cv::cuda::GpuMat &, cv::cuda::GpuMat &)> callback_;
std::list<std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)>> rawcallbacks_;
detail::Source *_createImplementation();
......@@ -255,4 +241,40 @@ class Source : public ftl::Configurable {
}
}
class VectorBuffer {
public:
inline explicit VectorBuffer(std::vector<unsigned char> &v) : vector_(v) {}
inline void write(const char *data, std::size_t size) {
vector_.insert(vector_.end(), (const unsigned char*)data, (const unsigned char*)data+size);
}
private:
std::vector<unsigned char> &vector_;
};
template <typename... ARGS>
void ftl::rgbd::Source::inject(ftl::codecs::Channel c, ARGS... args) {
if (!impl_) return;
auto data = std::make_tuple(args...);
ftl::codecs::StreamPacket spkt;
ftl::codecs::Packet pkt;
spkt.timestamp = impl_->timestamp_;
spkt.channel = c;
spkt.channel_count = 0;
spkt.streamID = 0;
pkt.codec = ftl::codecs::codec_t::MSGPACK;
pkt.block_number = 0;
pkt.block_total = 1;
pkt.definition = ftl::codecs::definition_t::Any;
pkt.flags = 0;
VectorBuffer buf(pkt.data);
msgpack::pack(buf, data);
notifyRaw(spkt, pkt);
}
#endif // _FTL_RGBD_SOURCE_HPP_
......@@ -158,6 +158,7 @@ class Streamer : public ftl::Configurable {
ftl::timer::TimerHandle timer_job_;
ftl::codecs::device_t hq_devices_;
ftl::codecs::codec_t hq_codec_;
enum class Quality {
High,
......
......@@ -13,8 +13,6 @@ class VirtualSource : public ftl::rgbd::Source {
void onRender(const std::function<void(ftl::rgbd::Frame &)> &);
void setTimestamp(int64_t ts) { timestamp_ = ts; }
/**
* Write frames into source buffers from an external renderer. Virtual
* sources do not have an internal generator of frames but instead have
......
......@@ -112,7 +112,7 @@ template<> cv::Mat& Frame::get(ftl::codecs::Channel channel) {
// Add channel if not already there
if (!channels_.has(channel)) {
throw ftl::exception("Frame channel does not exist");
throw ftl::exception(ftl::Formatter() << "Frame channel does not exist: " << (int)channel);
}
return _get(channel).host;
......@@ -132,7 +132,7 @@ template<> cv::cuda::GpuMat& Frame::get(ftl::codecs::Channel channel) {
// Add channel if not already there
if (!channels_.has(channel)) {
throw ftl::exception("Frame channel does not exist");
throw ftl::exception(ftl::Formatter() << "Frame channel does not exist: " << (int)channel);
}
return _get(channel).gpu;
......@@ -147,7 +147,7 @@ template<> const cv::Mat& Frame::get(ftl::codecs::Channel channel) const {
LOG(FATAL) << "Getting GPU channel on CPU without explicit 'download'";
}
if (!channels_.has(channel)) throw ftl::exception("Frame channel does not exist");
if (!channels_.has(channel)) throw ftl::exception(ftl::Formatter() << "Frame channel does not exist: " << (int)channel);
return _get(channel).host;
}
......@@ -163,7 +163,7 @@ template<> const cv::cuda::GpuMat& Frame::get(ftl::codecs::Channel channel) cons
// Add channel if not already there
if (!channels_.has(channel)) {
throw ftl::exception("Frame channel does not exist");
throw ftl::exception(ftl::Formatter() << "Frame channel does not exist: " << (int)channel);
}
return _get(channel).gpu;
......
......@@ -50,7 +50,7 @@ void Group::addSource(ftl::rgbd::Source *src) {
size_t ix = sources_.size();
sources_.push_back(src);
src->setCallback([this,ix,src](int64_t timestamp, cv::Mat &rgb, cv::Mat &depth) {
src->setCallback([this,ix,src](int64_t timestamp, cv::cuda::GpuMat &rgb, cv::cuda::GpuMat &depth) {
if (timestamp == 0) return;
auto chan = src->getChannel();
......@@ -78,13 +78,13 @@ void Group::addSource(ftl::rgbd::Source *src) {
// Ensure channels match source mat format
//fs.channel1[ix].create(rgb.size(), rgb.type());
//fs.channel2[ix].create(depth.size(), depth.type());
fs.frames[ix].create<cv::Mat>(Channel::Colour, Format<uchar3>(rgb.size())); //.create(rgb.size(), rgb.type());
if (chan != Channel::None) fs.frames[ix].create<cv::Mat>(chan, ftl::rgbd::FormatBase(depth.cols, depth.rows, depth.type())); //.create(depth.size(), depth.type());
fs.frames[ix].create<cv::cuda::GpuMat>(Channel::Colour, Format<uchar3>(rgb.size())); //.create(rgb.size(), rgb.type());
if (chan != Channel::None) fs.frames[ix].create<cv::cuda::GpuMat>(chan, ftl::rgbd::FormatBase(depth.cols, depth.rows, depth.type())); //.create(depth.size(), depth.type());
//cv::swap(rgb, fs.channel1[ix]);
//cv::swap(depth, fs.channel2[ix]);
cv::swap(rgb, fs.frames[ix].get<cv::Mat>(Channel::Colour));
if (chan != Channel::None) cv::swap(depth, fs.frames[ix].get<cv::Mat>(chan));
cv::cuda::swap(rgb, fs.frames[ix].get<cv::cuda::GpuMat>(Channel::Colour));
if (chan != Channel::None) cv::cuda::swap(depth, fs.frames[ix].get<cv::cuda::GpuMat>(chan));
++fs.count;
fs.mask |= (1 << ix);
......
......@@ -31,14 +31,14 @@ using ftl::rgbd::detail::MiddleburySource;
using ftl::rgbd::capability_t;
using ftl::codecs::Channel;
using ftl::rgbd::detail::FileSource;
using ftl::rgbd::Camera;
std::map<std::string, ftl::rgbd::Player*> Source::readers__;
Source::Source(ftl::config::json_t &cfg) : Configurable(cfg), pose_(Eigen::Matrix4d::Identity()), net_(nullptr) {
impl_ = nullptr;
params_ = {};
//params_ = {};
stream_ = 0;
timestamp_ = 0;
reset();
on("uri", [this](const ftl::config::Event &e) {
......@@ -49,9 +49,8 @@ Source::Source(ftl::config::json_t &cfg) : Configurable(cfg), pose_(Eigen::Matri
Source::Source(ftl::config::json_t &cfg, ftl::net::Universe *net) : Configurable(cfg), pose_(Eigen::Matrix4d::Identity()), net_(net) {
impl_ = nullptr;
params_ = {};
//params_ = {};
stream_ = 0;
timestamp_ = 0;
reset();
on("uri", [this](const ftl::config::Event &e) {
......@@ -69,11 +68,6 @@ cv::Mat Source::cameraMatrix() const {
return m;
}
/*void Source::customImplementation(ftl::rgbd::detail::Source *impl) {
if (impl_) delete impl_;
impl_ = impl;
}*/
ftl::rgbd::detail::Source *Source::_createImplementation() {
auto uristr = get<string>("uri");
if (!uristr) {
......@@ -178,7 +172,7 @@ ftl::rgbd::detail::Source *Source::_createDeviceImpl(const ftl::URI &uri) {
LOG(ERROR) << "You do not have 'librealsense2' installed";
#endif
} else {
params_.width = value("width", 1280);
/*params_.width = value("width", 1280);
params_.height = value("height", 720);
params_.fx = value("focal", 700.0f);
params_.fy = params_.fx;
......@@ -187,7 +181,7 @@ ftl::rgbd::detail::Source *Source::_createDeviceImpl(const ftl::URI &uri) {
params_.minDepth = value("minDepth", 0.1f);
params_.maxDepth = value("maxDepth", 20.0f);
params_.doffs = 0;
params_.baseline = value("baseline", 0.0f);
params_.baseline = value("baseline", 0.0f);*/
}
return nullptr;
}
......@@ -195,45 +189,12 @@ ftl::rgbd::detail::Source *Source::_createDeviceImpl(const ftl::URI &uri) {
void Source::getFrames(cv::Mat &rgb, cv::Mat &depth) {
if (bool(callback_)) LOG(WARNING) << "Cannot use getFrames and callback in source";
SHARED_LOCK(mutex_,lk);
rgb_.copyTo(rgb);
depth_.copyTo(depth);
//rgb_.copyTo(rgb);
//depth_.copyTo(depth);
//rgb = rgb_;
//depth = depth_;
/*cv::Mat tmp;
tmp = rgb;
rgb = rgb_;
rgb_ = tmp;
tmp = depth;
depth = depth_;
depth_ = tmp;*/
}
Eigen::Vector4d Source::point(uint ux, uint uy) {
const auto &params = parameters();
const double x = ((double)ux+params.cx) / params.fx;
const double y = ((double)uy+params.cy) / params.fy;
SHARED_LOCK(mutex_,lk);
const double depth = depth_.at<float>(uy,ux);
return Eigen::Vector4d(x*depth,y*depth,depth,1.0);
}
Eigen::Vector4d Source::point(uint ux, uint uy, double d) {
const auto &params = parameters();
const double x = ((double)ux+params.cx) / params.fx;
const double y = ((double)uy+params.cy) / params.fy;
return Eigen::Vector4d(x*d,y*d,d,1.0);
}
Eigen::Vector2i Source::point(const Eigen::Vector4d &p) {
const auto &params = parameters();
double x = p[0] / p[2];
double y = p[1] / p[2];
x *= params.fx;
y *= params.fy;
return Eigen::Vector2i((int)(x - params.cx), (int)(y - params.cy));
}
void Source::setPose(const Eigen::Matrix4d &pose) {
pose_ = pose;
......@@ -273,54 +234,7 @@ bool Source::retrieve() {
bool Source::compute(int N, int B) {
UNIQUE_LOCK(mutex_,lk);
if (!impl_ && stream_ != 0) {
cudaSafeCall(cudaStreamSynchronize(stream_));
if (depth_.type() == CV_32SC1) depth_.convertTo(depth_, CV_32F, 1.0f / 1000.0f);
stream_ = 0;
return true;
} else if (impl_ && impl_->compute(N,B)) {
timestamp_ = impl_->timestamp_;
/*cv::Mat tmp;
rgb_.create(impl_->rgb_.size(), impl_->rgb_.type());
depth_.create(impl_->depth_.size(), impl_->depth_.type());
tmp = rgb_;
rgb_ = impl_->rgb_;
impl_->rgb_ = tmp;
tmp = depth_;
depth_ = impl_->depth_;
impl_->depth_ = tmp;*/
// TODO:(Nick) Reduce buffer copies
impl_->rgb_.copyTo(rgb_);
impl_->depth_.copyTo(depth_);
//rgb_ = impl_->rgb_;
//depth_ = impl_->depth_;
return true;
}
return false;
}
bool Source::thumbnail(cv::Mat &t) {
if (!impl_ && stream_ != 0) {
cudaSafeCall(cudaStreamSynchronize(stream_));
if (depth_.type() == CV_32SC1) depth_.convertTo(depth_, CV_32F, 1.0f / 1000.0f);
stream_ = 0;
return true;
} else if (impl_) {
UNIQUE_LOCK(mutex_,lk);
impl_->capture(0);
impl_->swap();
impl_->compute(1, 9);
impl_->rgb_.copyTo(rgb_);
impl_->depth_.copyTo(depth_);
}
if (!rgb_.empty()) {
SHARED_LOCK(mutex_,lk);
// Downsize and square the rgb_ image
cv::resize(rgb_, thumb_, cv::Size(320,180));
}
t = thumb_;
return !thumb_.empty();
return impl_ && impl_->compute(N,B);
}
bool Source::setChannel(ftl::codecs::Channel c) {
......@@ -333,14 +247,13 @@ const ftl::rgbd::Camera Source::parameters(ftl::codecs::Channel chan) const {
return (impl_) ? impl_->parameters(chan) : parameters();
}
void Source::setCallback(std::function<void(int64_t, cv::Mat &, cv::Mat &)> cb) {
void Source::setCallback(std::function<void(int64_t, cv::cuda::GpuMat &, cv::cuda::GpuMat &)> cb) {
if (bool(callback_)) LOG(ERROR) << "Source already has a callback: " << getURI();
callback_ = cb;
}
void Source::addRawCallback(const std::function<void(ftl::rgbd::Source*, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt)> &f) {
UNIQUE_LOCK(mutex_,lk);
LOG(INFO) << "ADD RAW";
rawcallbacks_.push_back(f);
}
......@@ -358,7 +271,75 @@ void Source::removeRawCallback(const std::function<void(ftl::rgbd::Source*, cons
void Source::notifyRaw(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
SHARED_LOCK(mutex_,lk);
for (auto &i : rawcallbacks_) {
i(this, spkt, pkt);
}
}
/*
* Scale camera parameters to match resolution.
*/
static Camera scaled(Camera &cam, int width, int height) {
float scaleX = (float)width / (float)cam.width;
float scaleY = (float)height / (float)cam.height;
CHECK( abs(scaleX - scaleY) < 0.000001f );
Camera newcam = cam;
newcam.width = width;
newcam.height = height;
newcam.fx *= scaleX;
newcam.fy *= scaleY;
newcam.cx *= scaleX;
newcam.cy *= scaleY;
return newcam;
}
void Source::notify(int64_t ts, cv::cuda::GpuMat &c1, cv::cuda::GpuMat &c2) {
// Ensure correct scaling of images and parameters.
int max_width = max(impl_->params_.width, max(c1.cols, c2.cols));
int max_height = max(impl_->params_.height, max(c1.rows, c2.rows));
// Do we need to scale camera parameters
if (impl_->params_.width < max_width || impl_->params_.height < max_height) {
impl_->params_ = scaled(impl_->params_, max_width, max_height);
}
// Should channel 1 be scaled?
if (c1.cols < max_width || c1.rows < max_height) {
LOG(WARNING) << "Resizing on GPU";
cv::cuda::resize(c1, c1, cv::Size(max_width, max_height));
}
// Should channel 2 be scaled?
if (!c2.empty() && (c2.cols < max_width || c2.rows < max_height)) {
LOG(WARNING) << "Resizing on GPU";
if (c2.type() == CV_32F) {
cv::cuda::resize(c2, c2, cv::Size(max_width, max_height), 0.0, 0.0, cv::INTER_NEAREST);
} else {
cv::cuda::resize(c2, c2, cv::Size(max_width, max_height));
}
}
if (callback_) callback_(ts, c1, c2);
}
void Source::inject(const Eigen::Matrix4d &pose) {
ftl::codecs::StreamPacket spkt;
ftl::codecs::Packet pkt;
spkt.timestamp = impl_->timestamp_;
spkt.channel_count = 0;
spkt.channel = Channel::Pose;
spkt.streamID = 0;
pkt.codec = ftl::codecs::codec_t::POSE;
pkt.definition = ftl::codecs::definition_t::Any;
pkt.block_number = 0;
pkt.block_total = 1;
pkt.flags = 0;
pkt.data = std::move(std::vector<uint8_t>((uint8_t*)pose.data(), (uint8_t*)pose.data() + 4*4*sizeof(double)));
notifyRaw(spkt, pkt);
}
......@@ -33,25 +33,46 @@ FileSource::FileSource(ftl::rgbd::Source *s, ftl::rgbd::Player *r, int sid) : ft
r->onPacket(sid, [this](const ftl::codecs::StreamPacket &spkt, ftl::codecs::Packet &pkt) {
host_->notifyRaw(spkt, pkt);
// Some channels are to be directly handled by the source object and
// do not proceed to any subsequent step.
// FIXME: Potential problem, these get processed at wrong time
if (spkt.channel == Channel::Configuration) {
std::tuple<std::string, std::string> cfg;
auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
unpacked.get().convert(cfg);
LOG(INFO) << "Config Received: " << std::get<1>(cfg);
return;
}
else if (spkt.channel == Channel::Calibration) {
_processCalibration(pkt);
return;
} else if (spkt.channel == Channel::Pose) {
_processPose(pkt);
return;
}
// FIXME: For bad and old FTL files where wrong channel is used
if (pkt.codec == codec_t::POSE) {
Eigen::Matrix4d p = Eigen::Map<Eigen::Matrix4d>((double*)pkt.data.data());
host_->setPose(p);
_processPose(pkt);
return;
} else if (pkt.codec == codec_t::CALIBRATION) {
ftl::rgbd::Camera *camera = (ftl::rgbd::Camera*)pkt.data.data();
LOG(INFO) << "Have calibration: " << camera->fx;
params_ = *camera;
has_calibration_ = true;
} else {
if (pkt.codec == codec_t::HEVC) {
if (ftl::codecs::hevc::isIFrame(pkt.data)) _removeChannel(spkt.channel);
}
cache_[cache_write_].emplace_back();
auto &c = cache_[cache_write_].back();
// TODO: Attempt to avoid this copy operation
c.spkt = spkt;
c.pkt = pkt;
_processCalibration(pkt);
return;
}
// TODO: Check I-Frames for H264
if (pkt.codec == codec_t::HEVC) {
if (ftl::codecs::hevc::isIFrame(pkt.data)) _removeChannel(spkt.channel);
}
cache_[cache_write_].emplace_back();
auto &c = cache_[cache_write_].back();
// TODO: Attempt to avoid this copy operation
c.spkt = spkt;
c.pkt = pkt;
});
}
......@@ -59,6 +80,38 @@ FileSource::~FileSource() {
}
void FileSource::_processPose(ftl::codecs::Packet &pkt) {
LOG(INFO) << "Got POSE channel";
if (pkt.codec == codec_t::POSE) {
Eigen::Matrix4d p = Eigen::Map<Eigen::Matrix4d>((double*)pkt.data.data());
host_->setPose(p);
} else if (pkt.codec == codec_t::MSGPACK) {
}
}
void FileSource::_processCalibration(ftl::codecs::Packet &pkt) {
if (pkt.codec == codec_t::CALIBRATION) {
ftl::rgbd::Camera *camera = (ftl::rgbd::Camera*)pkt.data.data();
params_ = *camera;
has_calibration_ = true;
} else if (pkt.codec == codec_t::MSGPACK) {
std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, ftl::rgbd::capability_t> params;
auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
unpacked.get().convert(params);
if (std::get<1>(params) == Channel::Left) {
params_ = std::get<0>(params);
capabilities_ = std::get<2>(params);
has_calibration_ = true;
LOG(INFO) << "Got Calibration channel: " << params_.width << "x" << params_.height;
} else {
//params_right_ = std::get<0>(params);
}
}
}
void FileSource::_removeChannel(ftl::codecs::Channel channel) {
int c = 0;
for (auto i=cache_[cache_write_].begin(); i != cache_[cache_write_].end(); ++i) {
......@@ -98,9 +151,13 @@ bool FileSource::compute(int n, int b) {
int64_t lastts = 0;
int lastc = 0;
// Go through previously read and cached frames in sequence
// needs to be done due to P-Frames
for (auto i=cache_[cache_read_].begin(); i!=cache_[cache_read_].end(); ++i) {
auto &c = *i;
// Check for verifying that both channels are received, ie. two frames
// with the same timestamp.
if (c.spkt.timestamp > lastts) {
lastts = c.spkt.timestamp;
lastc = 1;
......@@ -123,6 +180,7 @@ bool FileSource::compute(int n, int b) {
}
}
// FIXME: Consider case of Channel::None
if (lastc != 2) {
LOG(ERROR) << "Channels not in sync (" << sourceid_ << "): " << lastts;
return false;
......@@ -132,8 +190,8 @@ bool FileSource::compute(int n, int b) {
if (rgb_.empty() || depth_.empty()) return false;
auto cb = host_->callback();
if (cb) cb(timestamp_, rgb_, depth_);
// Inform about a decoded frame pair
host_->notify(timestamp_, rgb_, depth_);
return true;
}
......
......@@ -44,6 +44,8 @@ class FileSource : public detail::Source {
bool realtime_;
void _processCalibration(ftl::codecs::Packet &pkt);
void _processPose(ftl::codecs::Packet &pkt);
void _removeChannel(ftl::codecs::Channel channel);
void _createDecoder(int ix, const ftl::codecs::Packet &pkt);
};
......
......@@ -129,30 +129,30 @@ MiddleburySource::MiddleburySource(ftl::rgbd::Source *host, const string &dir)
disp_->setMask(mask_l_);
// Load image files...
cv::Mat right_tmp;
rgb_ = cv::imread(dir+"/im0.png", cv::IMREAD_COLOR);
cv::Mat left_tmp, right_tmp;
left_tmp = cv::imread(dir+"/im0.png", cv::IMREAD_COLOR);
right_tmp = cv::imread(dir+"/im1.png", cv::IMREAD_COLOR);
cv::resize(rgb_, rgb_, cv::Size(params_.width, params_.height));
cv::resize(left_tmp, left_tmp, cv::Size(params_.width, params_.height));
cv::resize(right_tmp, right_tmp, cv::Size(params_.width, params_.height));
left_.upload(rgb_);
right_.upload(right_tmp);
rgb_.upload(left_tmp, stream_);
right_.upload(right_tmp, stream_);
_performDisparity();
ready_ = true;
}
void MiddleburySource::_performDisparity() {
if (depth_tmp_.empty()) depth_tmp_ = cv::cuda::GpuMat(left_.size(), CV_32FC1);
if (disp_tmp_.empty()) disp_tmp_ = cv::cuda::GpuMat(left_.size(), CV_32FC1);
depth_.create(left_.size(), CV_32FC1);
disp_tmp_.create(left_.size(), CV_32FC1);
//calib_->rectifyStereo(left_, right_, stream_);
disp_->compute(left_, right_, disp_tmp_, stream_);
disp_->compute(rgb_, right_, disp_tmp_, stream_);
//disparityToDepth(disp_tmp_, depth_tmp_, params_, stream_);
ftl::cuda::disparity_to_depth(disp_tmp_, depth_tmp_, params_, stream_);
ftl::cuda::disparity_to_depth(disp_tmp_, depth_, params_, stream_);
//left_.download(rgb_, stream_);
//rgb_ = lsrc_->cachedLeft();
depth_tmp_.download(depth_, stream_);
//depth_tmp_.download(depth_, stream_);
stream_.waitForCompletion();
......
......@@ -52,8 +52,8 @@ NetFrame &NetFrameQueue::getFrame(int64_t ts, const cv::Size &s, int c1type, int
f.chunk_total[1] = 0;
f.channel_count = 0;
f.tx_size = 0;
f.channel1.create(s, c1type);
f.channel2.create(s, c2type);
f.channel[0].create(s, c1type);
f.channel[1].create(s, c2type);
return f;
}
oldest = (f.timestamp < oldest) ? f.timestamp : oldest;
......@@ -72,8 +72,8 @@ NetFrame &NetFrameQueue::getFrame(int64_t ts, const cv::Size &s, int c1type, int
f.chunk_total[1] = 0;
f.channel_count = 0;
f.tx_size = 0;
f.channel1.create(s, c1type);
f.channel2.create(s, c2type);
f.channel[0].create(s, c1type);
f.channel[1].create(s, c2type);
return f;
}
}
......@@ -90,55 +90,6 @@ void NetFrameQueue::freeFrame(NetFrame &f) {
// ===== NetSource =============================================================
bool NetSource::_getCalibration(Universe &net, const UUID &peer, const string &src, ftl::rgbd::Camera &p, ftl::codecs::Channel chan) {
try {
while(true) {
auto [cap,buf] = net.call<tuple<unsigned int,vector<unsigned char>>>(peer_, "source_details", src, chan);
capabilities_ = cap;
if (buf.size() > 0) {
memcpy((char*)&p, buf.data(), buf.size());
if (sizeof(p) != buf.size()) {
LOG(ERROR) << "Corrupted calibration";
return false;
}
LOG(INFO) << "Calibration received: " << p.cx << ", " << p.cy << ", " << p.fx << ", " << p.fy;
if (chan == Channel::Left) {
// Put calibration into config manually
host_->getConfig()["focal"] = p.fx;
host_->getConfig()["centre_x"] = p.cx;
host_->getConfig()["centre_y"] = p.cy;
host_->getConfig()["baseline"] = p.baseline;
host_->getConfig()["doffs"] = p.doffs;
} else {
host_->getConfig()["focal_right"] = p.fx;
host_->getConfig()["centre_x_right"] = p.cx;
host_->getConfig()["centre_y_right"] = p.cy;
host_->getConfig()["baseline_right"] = p.baseline;
host_->getConfig()["doffs_right"] = p.doffs;
}
return true;
} else {
LOG(INFO) << "Could not get calibration, retrying";
sleep_for(milliseconds(500));
}
}
} catch (const std::exception& ex) {
LOG(ERROR) << "Exception: " << ex.what();
return false;
} catch (...) {
LOG(ERROR) << "Unknown exception";
return false;
}
}
NetSource::NetSource(ftl::rgbd::Source *host)
: ftl::rgbd::detail::Source(host), active_(false), minB_(9), maxN_(1), adaptive_(0), queue_(3) {
......@@ -147,9 +98,10 @@ NetSource::NetSource(ftl::rgbd::Source *host)
default_quality_ = host->value("quality", 0);
last_bitrate_ = 0;
params_right_.width = 0;
has_calibration_ = false;
decoder_c1_ = nullptr;
decoder_c2_ = nullptr;
decoder_[0] = nullptr;
decoder_[1] = nullptr;
host->on("gamma", [this,host](const ftl::config::Event&) {
gamma_ = host->value("gamma", 1.0f);
......@@ -221,8 +173,8 @@ NetSource::NetSource(ftl::rgbd::Source *host)
}
NetSource::~NetSource() {
if (decoder_c1_) ftl::codecs::free(decoder_c1_);
if (decoder_c2_) ftl::codecs::free(decoder_c2_);
if (decoder_[0]) ftl::codecs::free(decoder_[0]);
if (decoder_[1]) ftl::codecs::free(decoder_[1]);
if (uri_.size() > 0) {
host_->getNet()->unbind(uri_);
......@@ -260,22 +212,47 @@ NetSource::~NetSource() {
void NetSource::_createDecoder(int chan, const ftl::codecs::Packet &pkt) {
UNIQUE_LOCK(mutex_,lk);
auto *decoder = (chan == 0) ? decoder_c1_ : decoder_c2_;
auto *decoder = decoder_[chan];
if (decoder) {
if (!decoder->accepts(pkt)) {
ftl::codecs::free((chan == 0) ? decoder_c1_ : decoder_c2_);
ftl::codecs::free(decoder_[chan]);
} else {
return;
}
}
if (chan == 0) {
decoder_c1_ = ftl::codecs::allocateDecoder(pkt);
decoder_[chan] = ftl::codecs::allocateDecoder(pkt);
}
void NetSource::_processConfig(const ftl::codecs::Packet &pkt) {
std::tuple<std::string, std::string> cfg;
auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
unpacked.get().convert(cfg);
//LOG(INFO) << "Config Received: " << std::get<1>(cfg);
// TODO: This needs to be put in safer / better location
host_->set(std::get<0>(cfg), nlohmann::json::parse(std::get<1>(cfg)));
}
void NetSource::_processCalibration(const ftl::codecs::Packet &pkt) {
std::tuple<ftl::rgbd::Camera, ftl::codecs::Channel, ftl::rgbd::capability_t> params;
auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
unpacked.get().convert(params);
if (std::get<1>(params) == Channel::Left) {
params_ = std::get<0>(params);
capabilities_ = std::get<2>(params);
has_calibration_ = true;
LOG(INFO) << "Got Calibration channel: " << params_.width << "x" << params_.height;
} else {
decoder_c2_ = ftl::codecs::allocateDecoder(pkt);
params_right_ = std::get<0>(params);
}
}
void NetSource::_processPose(const ftl::codecs::Packet &pkt) {
LOG(INFO) << "Got POSE channel";
}
void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
// Capture time here for better net latency estimate
int64_t now = std::chrono::time_point_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now()).time_since_epoch().count();
......@@ -288,6 +265,18 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
const ftl::codecs::Channel rchan = spkt.channel;
const int channum = (rchan == Channel::Colour) ? 0 : 1;
// Deal with the special channels...
switch (rchan) {
case Channel::Configuration : _processConfig(pkt); return;
case Channel::Calibration : _processCalibration(pkt); return;
case Channel::Pose : _processPose(pkt); return;
}
if (!has_calibration_) {
LOG(WARNING) << "Missing calibration, skipping frame";
return;
}
NetFrame &frame = queue_.getFrame(spkt.timestamp, cv::Size(params_.width, params_.height), CV_8UC3, (isFloatChannel(chan) ? CV_32FC1 : CV_8UC3));
// Update frame statistics
......@@ -296,19 +285,19 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
// Only decode if this channel is wanted.
if (rchan == Channel::Colour || rchan == chan) {
_createDecoder(channum, pkt);
auto *decoder = (rchan == Channel::Colour) ? decoder_c1_ : decoder_c2_;
auto *decoder = decoder_[channum];
if (!decoder) {
LOG(ERROR) << "No frame decoder available";
return;
}
decoder->decode(pkt, (rchan == Channel::Colour) ? frame.channel1 : frame.channel2);
decoder->decode(pkt, frame.channel[channum]);
} else if (chan != Channel::None && rchan != Channel::Colour) {
// Didn't receive correct second channel so just clear the images
if (isFloatChannel(chan)) {
frame.channel2.setTo(cv::Scalar(0.0f));
frame.channel[1].setTo(cv::Scalar(0.0f));
} else {
frame.channel2.setTo(cv::Scalar(0,0,0));
frame.channel[1].setTo(cv::Scalar(0,0,0));
}
}
......@@ -328,48 +317,39 @@ void NetSource::_recvPacket(short ttimeoff, const ftl::codecs::StreamPacket &spk
}
++frame.chunk_count[channum];
++frame.channel_count;
if (frame.chunk_count[channum] == frame.chunk_total[channum]) ++frame.channel_count;
if (frame.chunk_count[channum] > frame.chunk_total[channum]) LOG(FATAL) << "TOO MANY CHUNKS";
// Capture tx time of first received chunk
if (frame.channel_count == 1 && frame.chunk_count[channum] == 1) {
// FIXME: This seems broken
if (channum == 1 && frame.chunk_count[channum] == 1) {
UNIQUE_LOCK(frame.mtx, flk);
if (frame.chunk_count[channum] == 1) {
frame.tx_latency = int64_t(ttimeoff);
}
}
// Last chunk of both channels now received
if (frame.channel_count == spkt.channel_count &&
frame.chunk_count[0] == frame.chunk_total[0] &&
frame.chunk_count[1] == frame.chunk_total[1]) {
UNIQUE_LOCK(frame.mtx, flk);
// Last chunk of both channels now received, so we are done.
if (frame.channel_count == spkt.channel_count) {
_completeFrame(frame, now-(spkt.timestamp+frame.tx_latency));
}
}
if (frame.timestamp >= 0 && frame.chunk_count[0] == frame.chunk_total[0] && frame.chunk_count[1] == frame.chunk_total[1]) {
timestamp_ = frame.timestamp;
frame.tx_latency = now-(spkt.timestamp+frame.tx_latency);
adaptive_ = abr_.selectBitrate(frame);
//LOG(INFO) << "Frame finished: " << frame.timestamp;
auto cb = host_->callback();
if (cb) {
try {
cb(frame.timestamp, frame.channel1, frame.channel2);
} catch (...) {
LOG(ERROR) << "Exception in net frame callback";
}
} else {
LOG(ERROR) << "NO FRAME CALLBACK";
}
/*
 * Finalise a fully-received frame: record its transmission latency,
 * notify the host of the two completed channel images, release the
 * frame buffers back to the queue and decrement the expected-frame
 * counter.
 *
 * NOTE(review): this span was an interleaved old/new diff (duplicate
 * queue_.freeFrame / N_-- lines); it has been resolved in favour of the
 * new revision, which guards all work behind frame.timestamp >= 0.
 */
void NetSource::_completeFrame(NetFrame &frame, int64_t latency) {
	UNIQUE_LOCK(frame.mtx, flk);

	// Frame must not have already been freed.
	if (frame.timestamp >= 0) {
		timestamp_ = frame.timestamp;
		frame.tx_latency = latency;

		// Note: Not used currently
		adaptive_ = abr_.selectBitrate(frame);

		host_->notify(frame.timestamp, frame.channel[0], frame.channel[1]);

		queue_.freeFrame(frame);

		// Decrement expected frame counter
		N_--;
	}
}
}
......@@ -389,12 +369,6 @@ void NetSource::setPose(const Eigen::Matrix4d &pose) {
ftl::rgbd::Camera NetSource::parameters(ftl::codecs::Channel chan) {
if (chan == ftl::codecs::Channel::Right) {
if (params_right_.width == 0) {
auto uri = host_->get<string>("uri");
if (!uri) return params_;
_getCalibration(*host_->getNet(), peer_, *uri, params_right_, chan);
}
return params_right_;
} else {
return params_;
......@@ -419,29 +393,11 @@ void NetSource::_updateURI() {
}
peer_ = *p;
has_calibration_ = _getCalibration(*host_->getNet(), peer_, *uri, params_, ftl::codecs::Channel::Left);
_getCalibration(*host_->getNet(), peer_, *uri, params_right_, ftl::codecs::Channel::Right);
host_->getNet()->bind(*uri, [this](short ttimeoff, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
//if (chunk == -1) {
//#ifdef HAVE_NVPIPE
//_recvVideo(frame, ttimeoff, bitrate, jpg, d);
//#else
//LOG(ERROR) << "Cannot receive HEVC, no NvPipe support";
//#endif
//} else {
//_recvChunk(frame, ttimeoff, bitrate, chunk, jpg, d);
_recvPacket(ttimeoff, spkt, pkt);
//}
_recvPacket(ttimeoff, spkt, pkt);
});
N_ = 0;
rgb_ = cv::Mat(cv::Size(params_.width, params_.height), CV_8UC3, cv::Scalar(0,0,0));
depth_ = cv::Mat(cv::Size(params_.width, params_.height), CV_32FC1, 0.0f);
//d_rgb_ = cv::Mat(cv::Size(params_.width, params_.height), CV_8UC3, cv::Scalar(0,0,0));
//d_depth_ = cv::Mat(cv::Size(params_.width, params_.height), CV_32FC1, 0.0f);
uri_ = *uri;
active_ = true;
} else {
......@@ -466,9 +422,9 @@ bool NetSource::compute(int n, int b) {
// Verify depth destination is of required type
if (isFloatChannel(chan) && depth_.type() != CV_32F) {
depth_ = cv::Mat(cv::Size(params_.width, params_.height), CV_32FC1, 0.0f);
depth_.create(cv::Size(params_.width, params_.height), CV_32FC1); // 0.0f
} else if (!isFloatChannel(chan) && depth_.type() != CV_8UC3) {
depth_ = cv::Mat(cv::Size(params_.width, params_.height), CV_8UC3, cv::Scalar(0,0,0));
depth_.create(cv::Size(params_.width, params_.height), CV_8UC3); // cv::Scalar(0,0,0)
}
if (prev_chan_ != chan) {
......@@ -491,5 +447,5 @@ bool NetSource::compute(int n, int b) {
}
/*
 * A network source is ready once calibration data has been received.
 * (The previous revision additionally required the local rgb_ buffer to
 * be non-empty; that check was removed in the new revision, matching
 * Source::isReady() in the header which now defers entirely to impl_.)
 */
bool NetSource::isReady() {
	return has_calibration_;
}
......@@ -68,8 +68,7 @@ class NetSource : public detail::Source {
//NvPipe *nv_channel2_decoder_;
//#endif
ftl::codecs::Decoder *decoder_c1_;
ftl::codecs::Decoder *decoder_c2_;
ftl::codecs::Decoder *decoder_[2];
// Adaptive bitrate functionality
ftl::rgbd::detail::bitrate_t adaptive_; // Current adapted bitrate
......@@ -86,6 +85,10 @@ class NetSource : public detail::Source {
void _updateURI();
//void _checkAdaptive(int64_t);
void _createDecoder(int chan, const ftl::codecs::Packet &);
void _completeFrame(NetFrame &frame, int64_t now);
void _processCalibration(const ftl::codecs::Packet &pkt);
void _processConfig(const ftl::codecs::Packet &pkt);
void _processPose(const ftl::codecs::Packet &pkt);
};
}
......
......@@ -54,9 +54,11 @@ bool RealsenseSource::compute(int n, int b) {
float h = depth.get_height();
rscolour_ = frames.first(RS2_STREAM_COLOR); //.get_color_frame();
cv::Mat tmp(cv::Size((int)w, (int)h), CV_16UC1, (void*)depth.get_data(), depth.get_stride_in_bytes());
tmp.convertTo(depth_, CV_32FC1, scale_);
rgb_ = cv::Mat(cv::Size(w, h), CV_8UC4, (void*)rscolour_.get_data(), cv::Mat::AUTO_STEP);
cv::Mat tmp_depth(cv::Size((int)w, (int)h), CV_16UC1, (void*)depth.get_data(), depth.get_stride_in_bytes());
tmp_depth.convertTo(tmp_depth, CV_32FC1, scale_);
depth_.upload(tmp_depth);
cv::Mat tmp_rgb(cv::Size(w, h), CV_8UC4, (void*)rscolour_.get_data(), cv::Mat::AUTO_STEP);
rgb_.upload(tmp_rgb);
auto cb = host_->callback();
if (cb) cb(timestamp_, rgb_, depth_);
......
......@@ -59,8 +59,10 @@ bool SnapshotSource::compute(int n, int b) {
snapshot_.getLeftRGB(camera_idx_, frame_idx_, snap_rgb_);
snapshot_.getLeftDepth(camera_idx_, frame_idx_, snap_depth_);
snap_rgb_.copyTo(rgb_);
snap_depth_.copyTo(depth_);
//snap_rgb_.copyTo(rgb_);
//snap_depth_.copyTo(depth_);
rgb_.upload(snap_rgb_);
depth_.upload(snap_depth_);
auto cb = host_->callback();
if (cb) cb(timestamp_, rgb_, depth_);
......
......@@ -231,21 +231,24 @@ bool StereoVideoSource::compute(int n, int b) {
ftl::cuda::disparity_to_depth(disp, depth, params_, stream_);
left.download(rgb_, stream_);
depth.download(depth_, stream_);
//left.download(rgb_, stream_);
//depth.download(depth_, stream_);
//frame.download(Channel::Left + Channel::Depth);
stream_.waitForCompletion(); // TODO:(Nick) Move to getFrames
stream_.waitForCompletion();
host_->notify(timestamp_, left, depth);
} else if (chan == Channel::Right) {
left.download(rgb_, stream_);
right.download(depth_, stream_);
//left.download(rgb_, stream_);
//right.download(depth_, stream_);
stream_.waitForCompletion(); // TODO:(Nick) Move to getFrames
host_->notify(timestamp_, left, right);
} else {
left.download(rgb_, stream_);
//left.download(rgb_, stream_);
stream_.waitForCompletion(); // TODO:(Nick) Move to getFrames
//LOG(INFO) << "NO SECOND CHANNEL: " << (bool)depth_.empty();
depth_.create(left.size(), left.type());
host_->notify(timestamp_, left, depth_);
}
auto cb = host_->callback();
if (cb) cb(timestamp_, rgb_, depth_);
return true;
}
......
......@@ -7,8 +7,17 @@ using ftl::rgbd::Camera;
class VirtualImpl : public ftl::rgbd::detail::Source {
public:
explicit VirtualImpl(ftl::rgbd::Source *host, const ftl::rgbd::Camera &params) : ftl::rgbd::detail::Source(host) {
params_ = params;
explicit VirtualImpl(ftl::rgbd::Source *host) : ftl::rgbd::detail::Source(host) {
params_.width = host->value("width", 1280);
params_.height = host->value("height", 720);
params_.fx = host->value("focal", 700.0f);
params_.fy = params_.fx;
params_.cx = -(double)params_.width / 2.0;
params_.cy = -(double)params_.height / 2.0;
params_.minDepth = host->value("minDepth", 0.1f);
params_.maxDepth = host->value("maxDepth", 20.0f);
params_.doffs = 0;
params_.baseline = host->value("baseline", 0.0f);
params_right_.width = host->value("width", 1280);
params_right_.height = host->value("height", 720);
......@@ -82,20 +91,19 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
}
if (frame.hasChannel(Channel::Colour)) {
frame.download(Channel::Colour);
cv::swap(frame.get<cv::Mat>(Channel::Colour), rgb_);
//frame.download(Channel::Colour);
cv::cuda::swap(frame.get<cv::cuda::GpuMat>(Channel::Colour), rgb_);
} else {
LOG(ERROR) << "Channel 1 frame in rendering";
}
if ((host_->getChannel() != Channel::None) &&
frame.hasChannel(host_->getChannel())) {
frame.download(host_->getChannel());
cv::swap(frame.get<cv::Mat>(host_->getChannel()), depth_);
//frame.download(host_->getChannel());
cv::cuda::swap(frame.get<cv::cuda::GpuMat>(host_->getChannel()), depth_);
}
auto cb = host_->callback();
if (cb) cb(timestamp_, rgb_, depth_);
host_->notify(timestamp_, rgb_, depth_);
}
return true;
}
......@@ -113,20 +121,7 @@ class VirtualImpl : public ftl::rgbd::detail::Source {
};
/*
 * Construct a virtual (rendered) RGB-D source. Camera intrinsics are
 * now configured inside VirtualImpl's own constructor, so only the
 * implementation object needs to be created here.
 *
 * NOTE(review): the old code honoured "centre_x"/"centre_y" config
 * overrides for cx/cy; the new VirtualImpl hard-codes -width/2 and
 * -height/2 — confirm that losing those overrides is intended.
 */
VirtualSource::VirtualSource(ftl::config::json_t &cfg) : Source(cfg) {
	impl_ = new VirtualImpl(this);
}
VirtualSource::~VirtualSource() {
......
......@@ -45,6 +45,7 @@ Streamer::Streamer(nlohmann::json &config, Universe *net)
encode_mode_ = ftl::rgbd::kEncodeVideo;
hq_devices_ = (value("disable_hardware_encode", false)) ? device_t::Software : device_t::Any;
hq_codec_ = value("video_codec", ftl::codecs::codec_t::Any);
//group_.setFPS(value("fps", 20));
group_.setLatency(4);
......@@ -187,6 +188,17 @@ void Streamer::add(Source *src) {
sources_[src->getID()] = s;
group_.addSource(src);
src->addRawCallback([this,s](Source *src, const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
if (spkt.channel == Channel::Calibration) {
// Calibration changed, so lets re-check the bitrate presets
const auto &params = src->parameters();
s->hq_bitrate = ftl::codecs::findPreset(params.width, params.height);
}
//LOG(INFO) << "RAW CALLBACK";
_transmitPacket(s, spkt, pkt, Quality::Any);
});
}
LOG(INFO) << "Streaming: " << src->getID();
......@@ -322,6 +334,14 @@ void Streamer::_addClient(const string &source, int N, int rate, const ftl::UUID
}
++s->clientCount;
// Finally, inject calibration and config data
s->src->inject(Channel::Calibration, s->src->parameters(Channel::Left), Channel::Left, s->src->getCapabilities());
s->src->inject(Channel::Calibration, s->src->parameters(Channel::Right), Channel::Right, s->src->getCapabilities());
//s->src->inject(s->src->getPose());
//if (!(*s->src->get<nlohmann::json>("meta")).is_null()) {
s->src->inject(Channel::Configuration, "/original", s->src->getConfig().dump());
//}
}
void Streamer::remove(Source *) {
......@@ -430,9 +450,9 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
// Do we need to do high quality encoding?
if (src->hq_count > 0) {
if (!src->hq_encoder_c1) src->hq_encoder_c1 = ftl::codecs::allocateEncoder(
definition_t::HD1080, hq_devices_);
definition_t::HD1080, hq_devices_, hq_codec_);
if (!src->hq_encoder_c2 && hasChan2) src->hq_encoder_c2 = ftl::codecs::allocateEncoder(
definition_t::HD1080, hq_devices_);
definition_t::HD1080, hq_devices_, hq_codec_);
// Do we have the resources to do a HQ encoding?
if (src->hq_encoder_c1 && (!hasChan2 || src->hq_encoder_c2)) {
......@@ -448,7 +468,7 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
auto chan = fs.sources[j]->getChannel();
enc2->encode(fs.frames[j].get<cv::Mat>(chan), src->hq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
enc2->encode(fs.frames[j].get<cv::cuda::GpuMat>(chan), src->hq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
_transmitPacket(src, blk, chan, hasChan2, Quality::High);
});
} else {
......@@ -457,7 +477,7 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
// TODO: Stagger the reset between nodes... random phasing
if (fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc1->reset();
enc1->encode(fs.frames[j].get<cv::Mat>(Channel::Colour), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
enc1->encode(fs.frames[j].get<cv::cuda::GpuMat>(Channel::Colour), src->hq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
_transmitPacket(src, blk, Channel::Colour, hasChan2, Quality::High);
});
}
......@@ -480,14 +500,14 @@ void Streamer::_process(ftl::rgbd::FrameSet &fs) {
if (hasChan2) {
auto chan = fs.sources[j]->getChannel();
enc2->encode(fs.frames[j].get<cv::Mat>(chan), src->lq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
enc2->encode(fs.frames[j].get<cv::cuda::GpuMat>(chan), src->lq_bitrate, [this,src,hasChan2,chan](const ftl::codecs::Packet &blk){
_transmitPacket(src, blk, chan, hasChan2, Quality::Low);
});
} else {
if (enc2) enc2->reset();
}
enc1->encode(fs.frames[j].get<cv::Mat>(Channel::Colour), src->lq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
enc1->encode(fs.frames[j].get<cv::cuda::GpuMat>(Channel::Colour), src->lq_bitrate, [this,src,hasChan2](const ftl::codecs::Packet &blk){
_transmitPacket(src, blk, Channel::Colour, hasChan2, Quality::Low);
});
}
......
Python support for `.ftl` files. At the moment, only reading the RGB channels
(left/right) is supported.
Required **Python** modules:
* msgpack
* numpy
* skimage **or** OpenCV
Required libraries:
* libde265 (available on most Linux distributions)
*.pyc
__pycache__
from . ftlstream import FTLStream
\ No newline at end of file