diff --git a/applications/gui/src/camera.cpp b/applications/gui/src/camera.cpp index 3afbe42648cd434a7ae9a24efaf6d8b985afa993..61a83e223f8a5b26d2a717c798736adb47f985cf 100644 --- a/applications/gui/src/camera.cpp +++ b/applications/gui/src/camera.cpp @@ -138,7 +138,10 @@ void ftl::gui::Camera::draw(std::vector<ftl::rgbd::FrameSet*> &fss) { ftl::rgbd::Frame *frame = nullptr; if ((size_t)fid_ >= fs->frames.size()) return; + if (!fs->hasFrame(fid_)) return; + frame = &fs->frames[fid_]; + if (!frame->hasChannel(channel_)) return; auto &buf = colouriser_->colourise(*frame, channel_, 0); @@ -390,7 +393,8 @@ void ftl::gui::Camera::_draw(std::vector<ftl::rgbd::FrameSet*> &fss) { auto &f = fs2.frames.emplace_back(); fs2.count = 1; fs2.mask = 1; - fs2.stale = false; + //fs2.stale = false; + fs2.set(ftl::data::FSFlag::STALE); frame_.swapTo(Channels<0>(Channel::Colour), f); // Channel::Colour + Channel::Depth fs2.timestamp = ftl::timer::get_time(); fs2.id = 0; diff --git a/applications/gui/src/gltexture.cpp b/applications/gui/src/gltexture.cpp index 8b54e22efb2b5d51d24d69bfcafcba2cb3201d2a..ae3c3a05d852fbff2dc8423de2533c4d2d6f2f8e 100644 --- a/applications/gui/src/gltexture.cpp +++ b/applications/gui/src/gltexture.cpp @@ -137,6 +137,7 @@ void GLTexture::unmap(cudaStream_t stream) { glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, width_, height_, GL_RED, GL_FLOAT, NULL); } glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); + glBindTexture(GL_TEXTURE_2D, 0); glBindBuffer( GL_PIXEL_UNPACK_BUFFER, 0); } diff --git a/applications/gui/src/src_window.cpp b/applications/gui/src/src_window.cpp index 084d7992c4f38fb4948749aa0b6138c794245173..d445f08484e352a9883c2208e2c2be7997635950 100644 --- a/applications/gui/src/src_window.cpp +++ b/applications/gui/src/src_window.cpp @@ -190,20 +190,25 @@ bool SourceWindow::_processFrameset(ftl::rgbd::FrameSet &fs, bool fromstream) { _checkFrameSets(fs.id); if (!paused_) { - // Enforce interpolated colour and GPU upload - for (int i=0; i<fs.frames.size(); ++i) { - fs.frames[i].createTexture<uchar4>(Channel::Colour, true); + if (!fs.test(ftl::data::FSFlag::PARTIAL) || !screen_->root()->value("drop_partial_framesets", false)) { + // Enforce interpolated colour and GPU upload + for (int i=0; i<fs.frames.size(); ++i) { + if (!fs.hasFrame(i)) continue; + fs.frames[i].createTexture<uchar4>(Channel::Colour, true); + + // TODO: Do all channels. This is a fix for screen capture sources. + if (!fs.frames[i].isGPU(Channel::Colour)) fs.frames[i].upload(Channels<0>(Channel::Colour), pre_pipelines_[fs.id]->getStream()); + } - // TODO: Do all channels. This is a fix for screen capture sources. 
- if (!fs.frames[i].isGPU(Channel::Colour)) fs.frames[i].upload(Channels<0>(Channel::Colour), pre_pipelines_[fs.id]->getStream()); - } + { + FTL_Profile("Prepipe",0.020); + pre_pipelines_[fs.id]->apply(fs, fs, 0); + } - { - FTL_Profile("Prepipe",0.020); - pre_pipelines_[fs.id]->apply(fs, fs, 0); + fs.swapTo(*framesets_[fs.id]); + } else { + LOG(WARNING) << "Dropping frameset: " << fs.timestamp; } - - fs.swapTo(*framesets_[fs.id]); } const auto *cstream = interceptor_; diff --git a/components/audio/src/source.cpp b/components/audio/src/source.cpp index d30d2d045048b70928a6d4624bfe9396d451b2a2..2c976b20721b29dd35bd6a486e7ab55b06ba6569 100644 --- a/components/audio/src/source.cpp +++ b/components/audio/src/source.cpp @@ -153,7 +153,8 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(null frameset_.id = 0; frameset_.count = 1; - frameset_.stale = false; + //frameset_.stale = false; + frameset_.clear(ftl::data::FSFlag::STALE); if (to_read_ < 1 || !buffer_) return true; diff --git a/components/codecs/include/ftl/codecs/codecs.hpp b/components/codecs/include/ftl/codecs/codecs.hpp index 6ab79574fc22615d1622b56d14531a950b2d51f1..5e594429e5dea928ebf1a92d2fc5c0a50f8fcc41 100644 --- a/components/codecs/include/ftl/codecs/codecs.hpp +++ b/components/codecs/include/ftl/codecs/codecs.hpp @@ -18,6 +18,7 @@ enum struct format_t { static constexpr uint8_t kFlagFlipRGB = 0x01; // Swap blue and red channels [deprecated] static constexpr uint8_t kFlagMappedDepth = 0x02; // Use Yuv mapping for float [deprecated] static constexpr uint8_t kFlagFloat = 0x04; // Floating point output +static constexpr uint8_t kFlagPartial = 0x10; // This frameset is not complete static constexpr uint8_t kFlagMultiple = 0x80; // Multiple video frames in single packet /** diff --git a/components/common/cpp/include/ftl/exception.hpp b/components/common/cpp/include/ftl/exception.hpp index d86f9017a45df07de1961ad468712911694161c1..07f7a366adf65cb7baea83624d8f1ff53a3cf848 100644 --- a/components/common/cpp/include/ftl/exception.hpp +++ b/components/common/cpp/include/ftl/exception.hpp @@ -37,8 +37,10 @@ class exception : public std::exception public: explicit exception(const char *msg); explicit exception(const Formatter &msg); + ~exception(); const char * what () const throw () { + processed_ = true; return msg_.c_str(); } @@ -46,9 +48,12 @@ class exception : public std::exception return trace_.c_str(); } + void ignore() const { processed_ = true; } + private: std::string msg_; std::string trace_; + mutable bool processed_; }; } diff --git a/components/common/cpp/src/exception.cpp b/components/common/cpp/src/exception.cpp index 1291adcc5cfba7921cf6acd774a002e28640f3c8..cf74b297ab3218a0786b66127c96ec4cc7d64ea1 100644 --- a/components/common/cpp/src/exception.cpp +++ b/components/common/cpp/src/exception.cpp @@ -1,5 +1,8 @@ #include <ftl/exception.hpp> +#define LOGURU_REPLACE_GLOG 1 +#include <loguru.hpp> + #ifndef WIN32 #include <execinfo.h> #endif @@ -7,26 +10,42 @@ using ftl::exception; using std::string; -exception::exception(const char *msg) : msg_(msg) { - #ifndef WIN32 +static std::string getBackTrace() { +#ifndef WIN32 + + string result; void *trace[16]; - char **messages = (char **)NULL; int trace_size = 0; trace_size = backtrace(trace, 16); - trace_ = "[bt] Trace:\n"; + result = "[bt] Trace:\n"; - messages = backtrace_symbols(trace, trace_size); + char **messages = backtrace_symbols(trace, trace_size); /* skip first stack frame (points here) */ - for (int i=1; i<trace_size; ++i) { - printf("[bt] #%d 
%s\n", i, messages[i]); - trace_ += string("[bt] #") + std::to_string(i) + string(" ") + messages[i]; + for (int i=2; i<trace_size; ++i) { + //printf("[bt] #%d %s\n", i, messages[i]); + result += string("[bt] #") + std::to_string(i-1) + string(" ") + messages[i] + string("\n"); } - #endif + return result; + +#else + return ""; +#endif +} + +exception::exception(const char *msg) : msg_(msg), processed_(false) { + trace_ = std::move(getBackTrace()); } -exception::exception(const ftl::Formatter &msg) : msg_(msg.str()) { +exception::exception(const ftl::Formatter &msg) : msg_(msg.str()), processed_(false) { + trace_ = std::move(getBackTrace()); +} +exception::~exception() { + if (!processed_) { + LOG(ERROR) << "Unreported exception: " << what(); + LOG(ERROR) << trace_; + } } \ No newline at end of file diff --git a/components/operators/src/fusion/mvmls.cpp b/components/operators/src/fusion/mvmls.cpp index 41e0a2eef1f7b2c44e9b3557c445946abe53de3d..200a9d9a50b0358ddc9bd3491dc44a844aa1b924 100644 --- a/components/operators/src/fusion/mvmls.cpp +++ b/components/operators/src/fusion/mvmls.cpp @@ -48,7 +48,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda bool show_adjustment = config()->value("show_adjustment", false); if (in.frames.size() < 1) return false; - auto size = in.frames[0].get<GpuMat>(Channel::Depth).size(); + auto size = in.firstFrame().get<GpuMat>(Channel::Depth).size(); // Make sure we have enough buffers while (normals_horiz_.size() < in.frames.size()) { @@ -60,6 +60,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Make sure all buffers are at correct resolution and are allocated for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; + auto &f = in.frames[i]; auto size = f.get<GpuMat>(Channel::Depth).size(); centroid_horiz_[i]->create(size.height, size.width); @@ -91,6 +93,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // For each camera combination if (do_corr) { for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; + auto &f1 = in.frames[i]; //f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream); //f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream); @@ -99,6 +103,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda d1 = f1.getPose() * d1; for (size_t j=i+1; j<in.frames.size(); ++j) { + if (!in.hasFrame(j)) continue; if (i == j) continue; //LOG(INFO) << "Running phase1"; @@ -339,6 +344,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Redo normals for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f = in.frames[i]; ftl::cuda::normals( f.getTexture<half4>(Channel::Normals), @@ -403,6 +409,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Do the horizontal and vertical MLS aggregations for each source // But don't do the final move step. 
for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; + auto &f = in.frames[i]; //auto *s = in.sources[i]; @@ -456,6 +464,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // For each camera combination if (do_aggr) { for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f1 = in.frames[i]; //f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream); //f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream); @@ -464,6 +473,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda d1 = f1.getPose() * d1; for (size_t j=0; j<in.frames.size(); ++j) { + if (!in.hasFrame(j)) continue; if (i == j) continue; //LOG(INFO) << "Running phase1"; @@ -509,6 +519,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Step 3: // Normalise aggregations and move the points for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f = in.frames[i]; //auto *s = in.sources[i]; auto size = f.get<GpuMat>(Channel::Depth).size(); diff --git a/components/operators/src/operator.cpp b/components/operators/src/operator.cpp index 89a4c34c6f377524ba6e8577585dd66951e0df5a..1f000e9a7c57f35da967287f258fd73f78a44be7 100644 --- a/components/operators/src/operator.cpp +++ b/components/operators/src/operator.cpp @@ -64,6 +64,8 @@ bool Graph::apply(FrameSet &in, FrameSet &out, cudaStream_t stream) { } for (size_t j=0; j<in.frames.size(); ++j) { + if (!in.hasFrame(j)) continue; + auto *instance = i.instances[j&0x1]; if (instance->enabled()) { diff --git a/components/renderers/cpp/src/CUDARender.cpp b/components/renderers/cpp/src/CUDARender.cpp index 6d0deb848509996105a550abc5197805b4939d8c..73b078b140f3f0db140729017b66288fa77ab623 100644 --- a/components/renderers/cpp/src/CUDARender.cpp +++ b/components/renderers/cpp/src/CUDARender.cpp @@ -120,6 +120,7 @@ void CUDARender::_renderChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel i if (in == Channel::None) return; for (size_t i=0; i < scene_->frames.size(); ++i) { + if (!scene_->hasFrame(i)) continue; auto &f = scene_->frames[i]; if (!f.hasChannel(in)) { @@ -168,6 +169,7 @@ void CUDARender::_dibr(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream); for (size_t i=0; i < scene_->frames.size(); ++i) { + if (!scene_->hasFrame(i)) continue; auto &f = scene_->frames[i]; //auto *s = scene_->sources[i]; @@ -236,6 +238,7 @@ void CUDARender::_mesh(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre // For each source depth map for (size_t i=0; i < scene_->frames.size(); ++i) { + if (!scene_->hasFrame(i)) continue; auto &f = scene_->frames[i]; //auto *s = scene_->sources[i]; @@ -453,7 +456,7 @@ void CUDARender::_renderPass2(Channels<0> chans, const Eigen::Matrix4d &t) { for (auto chan : chans) { ftl::codecs::Channel mapped = chan; - if (chan == Channel::Colour && scene_->frames[0].hasChannel(Channel::ColourHighRes)) mapped = Channel::ColourHighRes; + if (chan == Channel::Colour && scene_->firstFrame().hasChannel(Channel::ColourHighRes)) mapped = Channel::ColourHighRes; _renderChannel(*out_, mapped, t, stream_); } diff --git a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp index 0276a2e8aa389009450702fba4bd8e12c860ccff..eb646b4b4c70d2dd4140abea9bf85c94ca8df931 100644 --- a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp +++ 
b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp @@ -127,8 +127,12 @@ class Builder : public Generator { */ void completed(int64_t ts, size_t ix); + void markPartial(int64_t ts); + void setName(const std::string &name); + void setBufferSize(size_t n) { bufferSize_ = n; } + private: std::list<FrameSet*> framesets_; // Active framesets std::list<FrameSet*> allocated_; // Keep memory allocations @@ -147,6 +151,7 @@ class Builder : public Generator { volatile bool skip_; ftl::timer::TimerHandle main_id_; size_t size_; + size_t bufferSize_; std::vector<ftl::rgbd::FrameState*> states_; std::string name_; diff --git a/components/rgbd-sources/src/frameset.cpp b/components/rgbd-sources/src/frameset.cpp index 4217cd3071abce6397bcf9630378e4e5aaa30281..4c9fa2eada0880949decfa8dfb8a99ff9127bc20 100644 --- a/components/rgbd-sources/src/frameset.cpp +++ b/components/rgbd-sources/src/frameset.cpp @@ -62,7 +62,7 @@ void FrameSet::resetFull() { Builder::Builder() : head_(0), id_(0) { jobs_ = 0; skip_ = false; - //setFPS(20); + bufferSize_ = 1; size_ = 0; last_frame_ = 0; @@ -111,9 +111,11 @@ ftl::rgbd::Frame &Builder::get(int64_t timestamp, size_t ix) { // Add new frameset fs = _addFrameset(timestamp); if (!fs) throw FTL_Error("Could not add frameset"); + + _schedule(); } - if (fs->stale) { + if (fs->test(ftl::data::FSFlag::STALE)) { throw FTL_Error("Frameset already completed"); } @@ -152,8 +154,8 @@ void Builder::completed(int64_t ts, size_t ix) { ++fs->count; } - if (!fs->stale && static_cast<unsigned int>(fs->count) >= size_) { - //LOG(INFO) << "Frameset ready... " << fs->timestamp; + // No buffering, so do a schedule here for immediate effect + if (bufferSize_ == 0 && !fs->test(ftl::data::FSFlag::STALE) && static_cast<unsigned int>(fs->count) >= size_) { UNIQUE_LOCK(mutex_, lk); _schedule(); } @@ -162,38 +164,51 @@ void Builder::completed(int64_t ts, size_t ix) { } } +void Builder::markPartial(int64_t ts) { + ftl::rgbd::FrameSet *fs = nullptr; + + { + UNIQUE_LOCK(mutex_, lk); + fs = _findFrameset(ts); + } + + fs->set(ftl::data::FSFlag::PARTIAL); +} + void Builder::_schedule() { if (size_ == 0) return; ftl::rgbd::FrameSet *fs = nullptr; - //UNIQUE_LOCK(mutex_, lk); + // Still working on a previously scheduled frame if (jobs_ > 0) return; - fs = _getFrameset(); - //LOG(INFO) << "Latency for " << name_ << " = " << (latency_*ftl::timer::getInterval()) << "ms"; + // Find a valid / completed frameset to process + fs = _getFrameset(); + // We have a frameset so create a thread job to call the onFrameset callback if (fs) { - //UNIQUE_LOCK(fs->mtx, lk2); - // The buffers are invalid after callback so mark stale - //fs->stale = true; jobs_++; - //lk.unlock(); ftl::pool.push([this,fs](int) { UNIQUE_LOCK(fs->mtx, lk2); + + // Calling onFrameset but without all frames so mark as partial + if (fs->count < fs->frames.size()) fs->set(ftl::data::FSFlag::PARTIAL); + try { if (cb_) cb_(*fs); - //LOG(INFO) << "Frameset processed (" << name_ << "): " << fs->timestamp; + } catch(const ftl::exception &e) { + LOG(ERROR) << "Exception in frameset builder: " << e.what(); + LOG(ERROR) << "Trace = " << e.trace(); } catch(std::exception &e) { LOG(ERROR) << "Exception in frameset builder: " << e.what(); } - //fs->resetFull(); - UNIQUE_LOCK(mutex_, lk); _freeFrameset(fs); - jobs_--; + + // Schedule another frame immediately (or try to) _schedule(); }); } @@ -205,68 +220,7 @@ size_t Builder::size() { } void Builder::onFrameSet(const std::function<bool(ftl::rgbd::FrameSet &)> &cb) { - /*if (!cb) { - main_id_.cancel(); - return; 
- }*/ - cb_ = cb; - - /*if (main_id_.id() != -1) { - main_id_.cancel(); - }*/ - - // 3. Issue IO retrieve ad compute jobs before finding a valid - // frame at required latency to pass to callback. - /*main_id_ = ftl::timer::add(ftl::timer::kTimerMain, [this,cb](int64_t ts) { - //if (jobs_ > 0) LOG(ERROR) << "SKIPPING TIMER JOB " << ts; - if (jobs_ > 0) return true; - if (size_ == 0) return true; - jobs_++; - - // Find a previous frameset and specified latency and do the sync - // callback with that frameset. - //if (latency_ > 0) { - ftl::rgbd::FrameSet *fs = nullptr; - - UNIQUE_LOCK(mutex_, lk); - fs = _getFrameset(); - - //LOG(INFO) << "Latency for " << name_ << " = " << (latency_*ftl::timer::getInterval()) << "ms"; - - if (fs) { - UNIQUE_LOCK(fs->mtx, lk2); - // The buffers are invalid after callback so mark stale - fs->stale = true; - lk.unlock(); - - //LOG(INFO) << "PROCESS FRAMESET"; - - //ftl::pool.push([this,fs,cb](int) { - try { - cb(*fs); - //LOG(INFO) << "Frameset processed (" << name_ << "): " << fs->timestamp; - } catch(std::exception &e) { - LOG(ERROR) << "Exception in group sync callback: " << e.what(); - } - - //fs->resetFull(); - - lk.lock(); - _freeFrameset(fs); - - jobs_--; - //}); - } else { - //LOG(INFO) << "NO FRAME FOUND: " << name_ << " " << size_; - //latency_++; - jobs_--; - } - //} - - //if (jobs_ == 0) LOG(INFO) << "LAST JOB = Main"; - return true; - });*/ } ftl::rgbd::FrameState &Builder::state(size_t ix) { @@ -320,23 +274,36 @@ ftl::rgbd::FrameSet *Builder::_findFrameset(int64_t ts) { */ ftl::rgbd::FrameSet *Builder::_getFrameset() { //LOG(INFO) << "BUF SIZE = " << framesets_.size(); - for (auto i=framesets_.begin(); i!=framesets_.end(); i++) { + + auto i = framesets_.begin(); + int N = bufferSize_; + + // Skip N frames to fixed buffer location + if (bufferSize_ > 0) { + while (N-- > 0 && i != framesets_.end()) ++i; + // Otherwise skip to first fully completed frame + } else { + while (i != framesets_.end() && (*i)->count < (*i)->frames.size()) ++i; + } + + if (i != framesets_.end()) { auto *f = *i; //LOG(INFO) << "GET: " << f->count << " of " << size_; - if (!f->stale && static_cast<unsigned int>(f->count) >= size_) { + //if (!f->stale && static_cast<unsigned int>(f->count) >= size_) { //LOG(INFO) << "GET FRAMESET and remove: " << f->timestamp; auto j = framesets_.erase(i); last_frame_ = f->timestamp; - f->stale = true; + //f->stale = true; + f->set(ftl::data::FSFlag::STALE); int count = 0; // Merge all previous frames + // TODO: Remove? 
while (j!=framesets_.end()) { ++count; auto *f2 = *j; - //LOG(INFO) << "MERGE: " << f2->count; j = framesets_.erase(j); mergeFrameset(*f,*f2); _freeFrameset(f2); @@ -349,7 +316,7 @@ ftl::rgbd::FrameSet *Builder::_getFrameset() { _recordStats(framerate, now - f->timestamp); last_ts_ = now; return f; - } + //} } return nullptr; @@ -378,7 +345,7 @@ ftl::rgbd::FrameSet *Builder::_addFrameset(int64_t timestamp) { newf->id = id_; newf->count = 0; newf->mask = 0; - newf->stale = false; + newf->clearFlags(); newf->frames.resize(size_); newf->pose.setIdentity(); newf->clearData(); diff --git a/components/streams/src/filestream.cpp b/components/streams/src/filestream.cpp index 705388a2eea2657af8cb53405cd960c4e6ce540a..fe7ec994d5430db0318512c21a3a199093f56d23 100644 --- a/components/streams/src/filestream.cpp +++ b/components/streams/src/filestream.cpp @@ -137,7 +137,7 @@ bool File::readPacket(std::tuple<ftl::codecs::StreamPacket,ftl::codecs::Packet> obj.convert(data); } catch (std::exception &e) { LOG(INFO) << "Corrupt message: " << buffer_in_.nonparsed_size() << " - " << e.what(); - active_ = false; + //active_ = false; return false; } @@ -249,6 +249,8 @@ bool File::tick(int64_t ts) { timestamp_ += interval_; if (data_.size() == 0 && value("looping", true)) { + buffer_in_.reset(); + buffer_in_.remove_nonparsed_buffer(); _open(); timestart_ = (ftl::timer::get_time() / ftl::timer::getInterval()) * ftl::timer::getInterval(); diff --git a/components/streams/src/receiver.cpp b/components/streams/src/receiver.cpp index b2029d56f06b0a92fb014aea5c0088f98eb1a9b8..23c4ce867828899c2bb91275cb9ef6d1b0a45088 100644 --- a/components/streams/src/receiver.cpp +++ b/components/streams/src/receiver.cpp @@ -27,9 +27,18 @@ Receiver::Receiver(nlohmann::json &config) : ftl::Configurable(config), stream_( timestamp_ = 0; second_channel_ = Channel::Depth; + size_t bsize = value("frameset_buffer_size", 3); for (int i=0; i<ftl::stream::kMaxStreams; ++i) { builder_[i].setID(i); + builder_[i].setBufferSize(bsize); } + + on("frameset_buffer_size", [this](const ftl::config::Event &e) { + size_t bsize = value("frameset_buffer_size", 3); + for (int i=0; i<ftl::stream::kMaxStreams; ++i) { + builder_[i].setBufferSize(bsize); + } + }); } Receiver::~Receiver() { @@ -160,7 +169,8 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) { fs.id = 0; fs.timestamp = frame.timestamp; fs.count = 1; - fs.stale = false; + //fs.stale = false; + fs.clear(ftl::data::FSFlag::STALE); frame.frame.swapTo(fs.frames.emplace_back()); audio_cb_(fs); @@ -217,6 +227,11 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) { bool apply_Y_filter = value("apply_Y_filter", true); + // Mark a frameset as being partial + if (pkt.flags & ftl::codecs::kFlagPartial) { + builder_[spkt.streamID].markPartial(spkt.timestamp); + } + // Now split the tiles from surface into frames, doing colour conversions // at the same time. 
// Note: Done in reverse to allocate correct number of frames first time round diff --git a/components/streams/src/sender.cpp b/components/streams/src/sender.cpp index 659e8f5151a6714d11661ca238914679d4a8ff18..742945dad99e46fd9506d8c68e52930b152c7551 100644 --- a/components/streams/src/sender.cpp +++ b/components/streams/src/sender.cpp @@ -245,13 +245,13 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) { uint32_t offset = 0; while (offset < fs.frames.size()) { Channel cc = c; - if ((cc == Channel::Colour) && fs.frames[offset].hasChannel(Channel::ColourHighRes)) { + if ((cc == Channel::Colour) && fs.firstFrame().hasChannel(Channel::ColourHighRes)) { cc = Channel::ColourHighRes; } - if ((cc == Channel::Right) && fs.frames[offset].hasChannel(Channel::RightHighRes)) { + if ((cc == Channel::Right) && fs.firstFrame().hasChannel(Channel::RightHighRes)) { cc = Channel::RightHighRes; - fs.frames[offset].upload(cc); + //fs.frames[offset].upload(cc); } StreamPacket spkt; @@ -277,6 +277,7 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) { // Upload if in host memory for (auto &f : fs.frames) { + if (!fs.hasFrame(f.id)) continue; if (f.isCPU(c)) { f.upload(Channels<0>(cc), cv::cuda::StreamAccessor::getStream(enc->stream())); } @@ -292,8 +293,6 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) { enc->stream().waitForCompletion(); if (enc) { - // FIXME: Timestamps may not always be aligned to interval. - //if (do_inject || fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc->reset(); if (reset) enc->reset(); try { @@ -308,6 +307,9 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) { else if (lossless && ftl::codecs::isFloatChannel(cc)) pkt.flags = ftl::codecs::kFlagFloat; else pkt.flags = ftl::codecs::kFlagFlipRGB; + // In the event of partial frames, add a flag to indicate that + if (fs.count < fs.frames.size()) pkt.flags |= ftl::codecs::kFlagPartial; + // Choose correct region of interest into the surface. 
cv::Rect roi = _generateROI(fs, cc, offset); cv::cuda::GpuMat sroi = tile.surface(roi); @@ -336,9 +338,9 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) { } cv::Rect Sender::_generateROI(const ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, int offset) { - const ftl::rgbd::Frame *cframe = &fs.frames[offset]; - int rwidth = cframe->get<cv::cuda::GpuMat>(c).cols; - int rheight = cframe->get<cv::cuda::GpuMat>(c).rows; + const ftl::rgbd::Frame &cframe = fs.firstFrame(); + int rwidth = cframe.get<cv::cuda::GpuMat>(c).cols; + int rheight = cframe.get<cv::cuda::GpuMat>(c).rows; auto [tx,ty] = ftl::codecs::chooseTileConfig(fs.frames.size()-offset); return cv::Rect(0, 0, tx*rwidth, ty*rheight); } @@ -374,13 +376,14 @@ float Sender::_selectFloatMax(Channel c) { int Sender::_generateTiles(const ftl::rgbd::FrameSet &fs, int offset, Channel c, cv::cuda::Stream &stream, bool lossless) { auto &surface = _getTile(fs.id, c); - const ftl::rgbd::Frame *cframe = &fs.frames[offset]; - auto &m = cframe->get<cv::cuda::GpuMat>(c); + const ftl::rgbd::Frame *cframe = nullptr; //&fs.frames[offset]; + + const auto &m = fs.firstFrame().get<cv::cuda::GpuMat>(c); // Choose tile configuration and allocate memory auto [tx,ty] = ftl::codecs::chooseTileConfig(fs.frames.size()); - int rwidth = cframe->get<cv::cuda::GpuMat>(c).cols; - int rheight = cframe->get<cv::cuda::GpuMat>(c).rows; + int rwidth = m.cols; + int rheight = m.rows; int width = tx * rwidth; int height = ty * rheight; int tilecount = tx*ty; @@ -390,28 +393,34 @@ int Sender::_generateTiles(const ftl::rgbd::FrameSet &fs, int offset, Channel c, // Loop over tiles with ROI mats and do colour conversions. while (tilecount > 0 && count+offset < fs.frames.size()) { - auto &m = cframe->get<cv::cuda::GpuMat>(c); - cv::Rect roi((count % tx)*rwidth, (count / tx)*rheight, rwidth, rheight); - cv::cuda::GpuMat sroi = surface.surface(roi); - - if (m.type() == CV_32F) { - if (lossless) { - m.convertTo(sroi, CV_16UC1, 1000, stream); + if (fs.hasFrame(offset+count)) { + cframe = &fs.frames[offset+count]; + auto &m = cframe->get<cv::cuda::GpuMat>(c); + cv::Rect roi((count % tx)*rwidth, (count / tx)*rheight, rwidth, rheight); + cv::cuda::GpuMat sroi = surface.surface(roi); + + if (m.type() == CV_32F) { + if (lossless) { + m.convertTo(sroi, CV_16UC1, 1000, stream); + } else { + ftl::cuda::depth_to_vuya(m, sroi, _selectFloatMax(c), stream); + } + } else if (m.type() == CV_8UC4) { + cv::cuda::cvtColor(m, sroi, cv::COLOR_BGRA2RGBA, 0, stream); + } else if (m.type() == CV_8UC3) { + cv::cuda::cvtColor(m, sroi, cv::COLOR_BGR2RGBA, 0, stream); } else { - ftl::cuda::depth_to_vuya(m, sroi, _selectFloatMax(c), stream); + LOG(ERROR) << "Unsupported colour format: " << m.type(); + return 0; } - } else if (m.type() == CV_8UC4) { - cv::cuda::cvtColor(m, sroi, cv::COLOR_BGRA2RGBA, 0, stream); - } else if (m.type() == CV_8UC3) { - cv::cuda::cvtColor(m, sroi, cv::COLOR_BGR2RGBA, 0, stream); } else { - LOG(ERROR) << "Unsupported colour format: " << m.type(); - return 0; + cv::Rect roi((count % tx)*rwidth, (count / tx)*rheight, rwidth, rheight); + cv::cuda::GpuMat sroi = surface.surface(roi); + sroi.setTo(cv::Scalar(0)); } ++count; --tilecount; - cframe = &fs.frames[offset+count]; } return count; diff --git a/components/streams/test/receiver_unit.cpp b/components/streams/test/receiver_unit.cpp index 062dce960ab7daab5fec0ec6a354c9247f1a5869..2aadaa8e4cc4d51ba2598c75a7f8752d8a9aea91 100644 --- a/components/streams/test/receiver_unit.cpp +++ 
b/components/streams/test/receiver_unit.cpp @@ -63,6 +63,7 @@ TEST_CASE( "Receiver generating onFrameSet" ) { }; TestStream stream(cfg2); receiver->setStream(&stream); + receiver->set("frameset_buffer_size", 0); ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480); @@ -260,6 +261,7 @@ TEST_CASE( "Receiver sync bugs" ) { }; TestStream stream(cfg2); receiver->setStream(&stream); + receiver->set("frameset_buffer_size", 0); ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480); @@ -329,3 +331,78 @@ TEST_CASE( "Receiver sync bugs" ) { //while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10)); delete receiver; } + +TEST_CASE( "Receiver non zero buffer" ) { + json_t global = json_t{{"$id","ftl://test"}}; + ftl::config::configure(global); + + json_t cfg = json_t{ + {"$id","ftl://test/1"} + }; + auto *receiver = ftl::create<Receiver>(cfg); + + json_t cfg2 = json_t{ + {"$id","ftl://test/2"} + }; + TestStream stream(cfg2); + receiver->setStream(&stream); + receiver->set("frameset_buffer_size", 1); + + ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480); + + ftl::codecs::Packet pkt; + pkt.codec = codec_t::Any; + pkt.bitrate = 255; + pkt.definition = definition_t::Any; + pkt.flags = 0; + pkt.frame_count = 1; + + ftl::codecs::StreamPacket spkt; + spkt.version = 4; + spkt.timestamp = 10; + spkt.frame_number = 0; + spkt.channel = Channel::Colour; + spkt.streamID = 0; + + ftl::rgbd::Frame dummy; + ftl::rgbd::FrameState state; + state.getLeft().width = 1280; + state.getLeft().height = 720; + dummy.setOrigin(&state); + ftl::stream::injectCalibration(&stream, dummy, 0, 0, 0); + + ftl::timer::start(false); + + SECTION("Fixed buffer delay") { + cv::cuda::GpuMat m(cv::Size(1280,720), CV_8UC4, cv::Scalar(0)); + + bool r = encoder.encode(m, pkt); + REQUIRE( r ); + + int count = 0; + receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) { + ++count; + + REQUIRE( fs.timestamp == 10 ); + REQUIRE( fs.frames.size() == 1 ); + REQUIRE( fs.frames[0].hasChannel(Channel::Colour) ); + REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 ); + REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 ); + + return true; + }); + + stream.post(spkt, pkt); + spkt.timestamp += 10; + stream.post(spkt, pkt); + + int i=10; + while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10)); + + REQUIRE( count == 1 ); + } + + ftl::timer::stop(true); + //while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10)); + delete receiver; +} diff --git a/components/streams/test/sender_unit.cpp b/components/streams/test/sender_unit.cpp index 7de5f775fa44c0cd6cf6a4f445a0ce1c0244122c..d5a08732807a8b8f4ed0aa30dcd1fadbd0a98ac7 100644 --- a/components/streams/test/sender_unit.cpp +++ b/components/streams/test/sender_unit.cpp @@ -88,6 +88,7 @@ TEST_CASE( "Sender::post() video frames" ) { stream.select(0, Channels(Channel::Colour), true); fs.count = 1; + fs.mask = 1; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4); fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0)); @@ -110,6 +111,7 @@ TEST_CASE( "Sender::post() video frames" ) { stream.select(0, Channels(Channel::Colour), true); fs.count = 2; + fs.mask = 3; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4); 
fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0)); fs.frames.emplace_back(); @@ -135,6 +137,7 @@ TEST_CASE( "Sender::post() video frames" ) { stream.select(0, Channels(Channel::Depth), true); fs.count = 2; + fs.mask = 3; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F); fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f)); fs.frames.emplace_back(); @@ -161,6 +164,7 @@ TEST_CASE( "Sender::post() video frames" ) { stream.select(0, Channels(Channel::Depth), true); fs.count = 10; + fs.mask = 0x3FF; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F); fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f)); @@ -190,6 +194,7 @@ TEST_CASE( "Sender::post() video frames" ) { stream.select(0, Channels(Channel::Depth), true); fs.count = 2; + fs.mask = 3; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F); fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f)); fs.frames.emplace_back(); @@ -217,6 +222,7 @@ TEST_CASE( "Sender::post() video frames" ) { stream.select(0, Channel::Colour + Channel::Depth, true); fs.count = 1; + fs.mask = 1; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4); fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0)); fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F); @@ -280,6 +286,7 @@ TEST_CASE( "Sender request to control encoding" ) { }); fs.count = 1; + fs.mask = 1; fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4); fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0)); diff --git a/components/structures/include/ftl/data/frameset.hpp b/components/structures/include/ftl/data/frameset.hpp index a0839983777d71f13cfaf98786606be2ee4328aa..263f86ff4db7322a0fe5be9752d036fcea9c780b 100644 --- a/components/structures/include/ftl/data/frameset.hpp +++ b/components/structures/include/ftl/data/frameset.hpp @@ -16,6 +16,11 @@ namespace data { //static const size_t kMaxFramesets = 15; static const size_t kMaxFramesInSet = 32; +enum class FSFlag : int { + STALE = 0, + PARTIAL = 1 +}; + /** * Represents a set of synchronised frames, each with two channels. This is * used to collect all frames from multiple computers that have the same @@ -30,11 +35,16 @@ class FrameSet { std::vector<FRAME> frames; std::atomic<int> count; // Number of valid frames std::atomic<unsigned int> mask; // Mask of all sources that contributed - bool stale; // True if buffers have been invalidated + //bool stale; // True if buffers have been invalidated SHARED_MUTEX mtx; Eigen::Matrix4d pose; // Set to identity by default. + void set(FSFlag f) { flags_ |= (1 << static_cast<int>(f)); } + void clear(FSFlag f) { flags_ &= ~(1 << static_cast<int>(f)); } + bool test(FSFlag f) const { return flags_ & (1 << static_cast<int>(f)); } + void clearFlags() { flags_ = 0; } + /** * Move the entire frameset to another frameset object. This will * invalidate the current frameset object as all memory buffers will be @@ -78,6 +88,19 @@ class FrameSet { return false; } + /** + * Check that a given frame is valid in this frameset. + */ + inline bool hasFrame(size_t ix) const { return (1 << ix) & mask; } + + /** + * Get the first valid frame in this frameset. No valid frames throws an + * exception. 
+ */ + FRAME &firstFrame(); + + const FRAME &firstFrame() const; + void clearData() { data_.clear(); data_channels_.clear(); @@ -86,6 +109,7 @@ class FrameSet { private: std::unordered_map<int, std::vector<unsigned char>> data_; ftl::codecs::Channels<2048> data_channels_; + std::atomic<int> flags_; }; /** @@ -137,7 +161,7 @@ void ftl::data::FrameSet<FRAME>::swapTo(ftl::data::FrameSet<FRAME> &fs) { fs.timestamp = timestamp; fs.count = static_cast<int>(count); - fs.stale = stale; + fs.flags_ = (int)flags_; fs.mask = static_cast<unsigned int>(mask); fs.id = id; fs.pose = pose; @@ -150,7 +174,7 @@ void ftl::data::FrameSet<FRAME>::swapTo(ftl::data::FrameSet<FRAME> &fs) { fs.data_channels_ = data_channels_; data_channels_.clear(); - stale = true; + set(ftl::data::FSFlag::STALE); } // Default data channel implementation @@ -196,4 +220,20 @@ void ftl::data::FrameSet<FRAME>::createRawData(ftl::codecs::Channel c, const std data_channels_ += c; } +template <typename FRAME> +FRAME &ftl::data::FrameSet<FRAME>::firstFrame() { + for (size_t i=0; i<frames.size(); ++i) { + if (hasFrame(i)) return frames[i]; + } + throw FTL_Error("No frames in frameset"); +} + +template <typename FRAME> +const FRAME &ftl::data::FrameSet<FRAME>::firstFrame() const { + for (size_t i=0; i<frames.size(); ++i) { + if (hasFrame(i)) return frames[i]; + } + throw FTL_Error("No frames in frameset"); +} + #endif // _FTL_DATA_FRAMESET_HPP_
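
Taken together, these changes replace the boolean FrameSet::stale with an FSFlag bitmask (STALE, PARTIAL), allow framesets to be dispatched with missing frames, and add hasFrame()/firstFrame() so consumers can iterate sparse framesets safely. A minimal sketch of the intended consumer-side usage follows; the Receiver instance, the colour-channel processing and the zero buffer size are illustrative assumptions rather than part of this patch, while the calls themselves (set("frameset_buffer_size"), onFrameSet, test, hasFrame, firstFrame, exception::ignore) are the ones added or exercised in this diff.

    // Sketch only: assumes a configured ftl::stream::Receiver ("receiver")
    // and the usual ftl headers; not part of this patch.
    receiver->set("frameset_buffer_size", 0);  // 0 = dispatch on completion, N > 0 = fixed delay of N framesets

    receiver->onFrameSet([](ftl::rgbd::FrameSet &fs) {
        // Incomplete framesets are now delivered with the PARTIAL flag set
        // instead of being held back indefinitely.
        if (fs.test(ftl::data::FSFlag::PARTIAL)) {
            LOG(WARNING) << "Partial frameset: " << fs.timestamp;
        }

        // fs.frames may contain unpopulated entries, so check the mask bit
        // via hasFrame() before touching a frame.
        for (size_t i = 0; i < fs.frames.size(); ++i) {
            if (!fs.hasFrame(i)) continue;
            auto &f = fs.frames[i];
            if (!f.hasChannel(ftl::codecs::Channel::Colour)) continue;
            // ... process f ...
        }

        // firstFrame() replaces frames[0] and throws if no frame is valid;
        // call ignore() on a handled ftl::exception, otherwise the new
        // destructor logs it as unreported.
        try {
            auto size = fs.firstFrame().get<cv::cuda::GpuMat>(ftl::codecs::Channel::Colour).size();
            (void)size;
        } catch (const ftl::exception &e) {
            e.ignore();
        }

        return true;
    });

The fixed-delay buffer (frameset_buffer_size, default 3 in the Receiver) trades latency for a better chance that slow sources complete a set before it is dispatched; setting it to 0, as the updated unit tests do, restores the previous dispatch-on-completion behaviour.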