diff --git a/applications/gui/src/camera.cpp b/applications/gui/src/camera.cpp index 9d19e462d4717a46e83c65a7c0e8c8d7c0f58f10..97d217d0df31e2b6d2c885a9d518e2be8b268481 100644 --- a/applications/gui/src/camera.cpp +++ b/applications/gui/src/camera.cpp @@ -13,8 +13,6 @@ #include <ftl/render/colouriser.hpp> #include <ftl/cuda/transform.hpp> -#include <ftl/codecs/faces.hpp> - #include <ftl/render/overlay.hpp> #include "statsimage.hpp" @@ -140,7 +138,10 @@ void ftl::gui::Camera::draw(std::vector<ftl::rgbd::FrameSet*> &fss) { ftl::rgbd::Frame *frame = nullptr; if ((size_t)fid_ >= fs->frames.size()) return; + if (!fs->hasFrame(fid_)) return; + frame = &fs->frames[fid_]; + if (!frame->hasChannel(channel_)) return; auto &buf = colouriser_->colourise(*frame, channel_, 0); @@ -152,6 +153,11 @@ void ftl::gui::Camera::draw(std::vector<ftl::rgbd::FrameSet*> &fss) { ftl::cuda::flip<uchar4>(dst1, 0); texture1_.unmap(0); + depth1_.make(buf.width(), buf.height()); + dst1 = depth1_.map(0); + dst1.setTo(cv::Scalar(0.5f)); + depth1_.unmap(0); + width_ = texture1_.width(); height_ = texture1_.height(); return; @@ -161,116 +167,114 @@ void ftl::gui::Camera::draw(std::vector<ftl::rgbd::FrameSet*> &fss) { //auto &fs = *fss[fsid_]; + _applyPoseEffects(fss); + UNIQUE_LOCK(mutex_, lk2); //state_.getLeft().fx = intrinsics_->value("focal", 700.0f); //state_.getLeft().fy = state_.getLeft().fx; _draw(fss); +} - if (renderer_->value("window_effect", false)) { - for (auto *fset : fss) { - for (const auto &f : fset->frames) { - if (f.hasChannel(Channel::Faces)) { - std::vector<ftl::codecs::Face> data; - f.get(Channel::Faces, data); - - if (data.size() > 0) { - auto &d = *data.rbegin(); - - cv::Mat over_depth; - over_depth.create(overlay_.size(), CV_32F); - - auto cam = ftl::rgbd::Camera::from(intrinsics_); - - float screenWidth = intrinsics_->value("screen_size", 0.6f); // In meters - float screenHeight = (9.0f/16.0f) * screenWidth; - - float screenDistance = (d.depth > cam.minDepth && d.depth < cam.maxDepth) ? 
d.depth : intrinsics_->value("screen_dist_default", 0.5f); // Face distance from screen in meters - - auto pos = f.getLeft().screenToCam(d.box.x+(d.box.width/2), d.box.y+(d.box.height/2), screenDistance); - Eigen::Vector3f eye; - eye[0] = -pos.x; - eye[1] = pos.y; - eye[2] = -pos.z; - //eye[3] = 0; - - Eigen::Translation3f trans(eye); - Eigen::Affine3f t(trans); - Eigen::Matrix4f viewPose = t.matrix(); - - // Calculate where the screen is within current camera space - Eigen::Vector4f p1 = viewPose.cast<float>() * (Eigen::Vector4f(screenWidth/2.0, screenHeight/2.0, 0, 1)); - Eigen::Vector4f p2 = viewPose.cast<float>() * (Eigen::Vector4f(screenWidth/2.0, -screenHeight/2.0, 0, 1)); - Eigen::Vector4f p3 = viewPose.cast<float>() * (Eigen::Vector4f(-screenWidth/2.0, screenHeight/2.0, 0, 1)); - Eigen::Vector4f p4 = viewPose.cast<float>() * (Eigen::Vector4f(-screenWidth/2.0, -screenHeight/2.0, 0, 1)); - p1 = p1 / p1[3]; - p2 = p2 / p2[3]; - p3 = p3 / p3[3]; - p4 = p4 / p4[3]; - float2 p1screen = cam.camToScreen<float2>(make_float3(p1[0],p1[1],p1[2])); - float2 p2screen = cam.camToScreen<float2>(make_float3(p2[0],p2[1],p2[2])); - float2 p3screen = cam.camToScreen<float2>(make_float3(p3[0],p3[1],p3[2])); - float2 p4screen = cam.camToScreen<float2>(make_float3(p4[0],p4[1],p4[2])); - - //cv::line(im1_, cv::Point2f(p1screen.x, p1screen.y), cv::Point2f(p2screen.x, p2screen.y), cv::Scalar(255,0,255,0)); - //cv::line(im1_, cv::Point2f(p4screen.x, p4screen.y), cv::Point2f(p2screen.x, p2screen.y), cv::Scalar(255,0,255,0)); - //cv::line(im1_, cv::Point2f(p1screen.x, p1screen.y), cv::Point2f(p3screen.x, p3screen.y), cv::Scalar(255,0,255,0)); - //cv::line(im1_, cv::Point2f(p3screen.x, p3screen.y), cv::Point2f(p4screen.x, p4screen.y), cv::Scalar(255,0,255,0)); - //LOG(INFO) << "DRAW LINE: " << p1screen.x << "," << p1screen.y << " -> " << p2screen.x << "," << p2screen.y; - - std::vector<cv::Point2f> quad_pts; - std::vector<cv::Point2f> squre_pts; - quad_pts.push_back(cv::Point2f(p1screen.x,p1screen.y)); - quad_pts.push_back(cv::Point2f(p2screen.x,p2screen.y)); - quad_pts.push_back(cv::Point2f(p3screen.x,p3screen.y)); - quad_pts.push_back(cv::Point2f(p4screen.x,p4screen.y)); - squre_pts.push_back(cv::Point2f(0,0)); - squre_pts.push_back(cv::Point2f(0,cam.height)); - squre_pts.push_back(cv::Point2f(cam.width,0)); - squre_pts.push_back(cv::Point2f(cam.width,cam.height)); - - cv::Mat transmtx = cv::getPerspectiveTransform(quad_pts,squre_pts); - cv::Mat transformed = cv::Mat::zeros(overlay_.rows, overlay_.cols, CV_8UC4); - //cv::warpPerspective(im1_, im1_, transmtx, im1_.size()); - - ftl::render::ViewPort vp; - vp.x = std::min(p4screen.x, std::min(p3screen.x, std::min(p1screen.x,p2screen.x))); - vp.y = std::min(p4screen.y, std::min(p3screen.y, std::min(p1screen.y,p2screen.y))); - vp.width = std::max(p4screen.x, std::max(p3screen.x, std::max(p1screen.x,p2screen.x))) - vp.x; - vp.height = std::max(p4screen.y, std::max(p3screen.y, std::max(p1screen.y,p2screen.y))) - vp.y; - renderer_->setViewPort(ftl::render::ViewPortMode::Warping, vp); - - //Eigen::Matrix4d windowPose; - //windowPose.setIdentity(); - - //Eigen::Matrix4d fakepose = (viewPose.cast<double>()).inverse() * state_.getPose(); - //ftl::rgbd::Camera fakecam = cam; - - //eye[1] = -eye[1]; - //eye[2] = -eye[2]; - //Eigen::Translation3f trans2(eye); - //Eigen::Affine3f t2(trans2); - //Eigen::Matrix4f viewPose2 = t2.matrix(); - - // Use face tracked window pose for virtual camera - //state_.getLeft() = fakecam; - transform_ix_ = fset->id; // Disable keyboard/mouse 
pose setting - - state_.setPose(transforms_[transform_ix_] * viewPose.cast<double>()); - - //Eigen::Vector4d pt1 = state_.getPose().inverse() * Eigen::Vector4d(eye[0],eye[1],eye[2],1); - //pt1 /= pt1[3]; - //Eigen::Vector4d pt2 = state_.getPose().inverse() * Eigen::Vector4d(0,0,0,1); - //pt2 /= pt2[3]; - - - //ftl::overlay::draw3DLine(state_.getLeft(), im1_, over_depth, pt1, pt2, cv::Scalar(0,0,255,0)); - //ftl::overlay::drawRectangle(state_.getLeft(), im1_, over_depth, windowPose.inverse() * state_.getPose(), cv::Scalar(255,0,0,0), screenWidth, screenHeight); - //ftl::overlay::drawCamera(state_.getLeft(), im1_, over_depth, fakecam, fakepose, cv::Scalar(255,0,255,255), 1.0,screen_->root()->value("show_frustrum", false)); - } +std::pair<const ftl::rgbd::Frame *, const ftl::codecs::Face *> ftl::gui::Camera::_selectFace(std::vector<ftl::rgbd::FrameSet*> &fss) { + for (auto *fset : fss) { + for (const auto &f : fset->frames) { + if (f.hasChannel(Channel::Faces)) { + std::vector<ftl::codecs::Face> data; + f.get(Channel::Faces, data); + + if (data.size() > 0) { + return {&f,&(*data.rbegin())}; } } } } + return {nullptr, nullptr}; +} + +void ftl::gui::Camera::_generateWindow(const ftl::rgbd::Frame &f, const ftl::codecs::Face &face, Eigen::Matrix4d &pose_adjust, ftl::render::ViewPort &vp) { + auto cam = ftl::rgbd::Camera::from(intrinsics_); + auto d = face; + + float screenWidth = intrinsics_->value("screen_size", 0.6f); // In meters + float screenHeight = (9.0f/16.0f) * screenWidth; + + float screenDistance = (d.depth > cam.minDepth && d.depth < cam.maxDepth) ? d.depth : intrinsics_->value("screen_dist_default", 0.5f); // Face distance from screen in meters + + auto pos = f.getLeft().screenToCam(float(d.box.x+(d.box.width/2)), float(d.box.y+(d.box.height/2)), screenDistance); + Eigen::Vector3f eye; + eye[0] = -pos.x; + eye[1] = pos.y; + eye[2] = -pos.z; + //eye[3] = 0; + + Eigen::Translation3f trans(eye); + Eigen::Affine3f t(trans); + Eigen::Matrix4f viewPose = t.matrix(); + + // Calculate where the screen is within current camera space + Eigen::Vector4f p1 = viewPose.cast<float>() * (Eigen::Vector4f(screenWidth/2.0, screenHeight/2.0, 0, 1)); + Eigen::Vector4f p2 = viewPose.cast<float>() * (Eigen::Vector4f(screenWidth/2.0, -screenHeight/2.0, 0, 1)); + Eigen::Vector4f p3 = viewPose.cast<float>() * (Eigen::Vector4f(-screenWidth/2.0, screenHeight/2.0, 0, 1)); + Eigen::Vector4f p4 = viewPose.cast<float>() * (Eigen::Vector4f(-screenWidth/2.0, -screenHeight/2.0, 0, 1)); + p1 = p1 / p1[3]; + p2 = p2 / p2[3]; + p3 = p3 / p3[3]; + p4 = p4 / p4[3]; + float2 p1screen = cam.camToScreen<float2>(make_float3(p1[0],p1[1],p1[2])); + float2 p2screen = cam.camToScreen<float2>(make_float3(p2[0],p2[1],p2[2])); + float2 p3screen = cam.camToScreen<float2>(make_float3(p3[0],p3[1],p3[2])); + float2 p4screen = cam.camToScreen<float2>(make_float3(p4[0],p4[1],p4[2])); + + std::vector<cv::Point2f> quad_pts; + std::vector<cv::Point2f> squre_pts; + quad_pts.push_back(cv::Point2f(p1screen.x,p1screen.y)); + quad_pts.push_back(cv::Point2f(p2screen.x,p2screen.y)); + quad_pts.push_back(cv::Point2f(p3screen.x,p3screen.y)); + quad_pts.push_back(cv::Point2f(p4screen.x,p4screen.y)); + squre_pts.push_back(cv::Point2f(0,0)); + squre_pts.push_back(cv::Point2f(0,cam.height)); + squre_pts.push_back(cv::Point2f(cam.width,0)); + squre_pts.push_back(cv::Point2f(cam.width,cam.height)); + + cv::Mat transmtx = cv::getPerspectiveTransform(quad_pts,squre_pts); + //cv::Mat transformed = cv::Mat::zeros(overlay_.rows, overlay_.cols, CV_8UC4); + 
//cv::warpPerspective(im1_, im1_, transmtx, im1_.size()); + + // TODO: Use the transmtx above for perspective distortion.. + + //ftl::render::ViewPort vp; + vp.x = std::min(p4screen.x, std::min(p3screen.x, std::min(p1screen.x,p2screen.x))); + vp.y = std::min(p4screen.y, std::min(p3screen.y, std::min(p1screen.y,p2screen.y))); + vp.width = std::max(p4screen.x, std::max(p3screen.x, std::max(p1screen.x,p2screen.x))) - vp.x; + vp.height = std::max(p4screen.y, std::max(p3screen.y, std::max(p1screen.y,p2screen.y))) - vp.y; + /*vp.warpMatrix.entries[0] = transmtx.at<float>(0,0); + vp.warpMatrix.entries[1] = transmtx.at<float>(1,0); + vp.warpMatrix.entries[2] = transmtx.at<float>(2,0); + vp.warpMatrix.entries[3] = transmtx.at<float>(0,1); + vp.warpMatrix.entries[4] = transmtx.at<float>(1,1); + vp.warpMatrix.entries[5] = transmtx.at<float>(2,1); + vp.warpMatrix.entries[6] = transmtx.at<float>(0,2); + vp.warpMatrix.entries[7] = transmtx.at<float>(1,2); + vp.warpMatrix.entries[8] = transmtx.at<float>(2,2); + vp.warpMatrix = vp.warpMatrix.getInverse(); //.getInverse();*/ + //renderer_->setViewPort(ftl::render::ViewPortMode::Warping, vp); + + pose_adjust = viewPose.cast<double>(); +} + +void ftl::gui::Camera::_applyPoseEffects(std::vector<ftl::rgbd::FrameSet*> &fss) { + if (renderer_->value("window_effect", false)) { + auto [frame,face] = _selectFace(fss); + if (face) { + Eigen::Matrix4d windowPose; + ftl::render::ViewPort windowViewPort; + _generateWindow(*frame, *face, windowPose, windowViewPort); + + // Apply the window effect + renderer_->setViewPort(ftl::render::ViewPortMode::Stretch, windowViewPort); + state_.getPose() = windowPose * state_.getPose(); + } + } } void ftl::gui::Camera::setStereo(bool v) { @@ -396,7 +400,8 @@ void ftl::gui::Camera::_draw(std::vector<ftl::rgbd::FrameSet*> &fss) { auto &f = fs2.frames.emplace_back(); fs2.count = 1; fs2.mask = 1; - fs2.stale = false; + //fs2.stale = false; + fs2.set(ftl::data::FSFlag::STALE); frame_.swapTo(Channels<0>(Channel::Colour), f); // Channel::Colour + Channel::Depth fs2.timestamp = ftl::timer::get_time(); fs2.id = 0; @@ -451,6 +456,7 @@ void ftl::gui::Camera::update(std::vector<ftl::rgbd::FrameSet*> &fss) { } else { name_ = "No name"; } + state_.getLeft() = frame->getLeftCamera(); return; } } diff --git a/applications/gui/src/camera.hpp b/applications/gui/src/camera.hpp index 94a5df4f85b9aead8b15d4e093dd21a66eebf0bd..d8c2dda708e9858b4929e4613fb779dc2510a271 100644 --- a/applications/gui/src/camera.hpp +++ b/applications/gui/src/camera.hpp @@ -9,6 +9,7 @@ #include <ftl/streams/filestream.hpp> #include <ftl/streams/sender.hpp> +#include <ftl/codecs/faces.hpp> #include <string> #include <array> @@ -189,6 +190,9 @@ class Camera { void _downloadFrames(ftl::cuda::TextureObject<uchar4> &); void _downloadFrames(); void _draw(std::vector<ftl::rgbd::FrameSet*> &fss); + void _applyPoseEffects(std::vector<ftl::rgbd::FrameSet*> &fss); + std::pair<const ftl::rgbd::Frame *, const ftl::codecs::Face *> _selectFace(std::vector<ftl::rgbd::FrameSet*> &fss); + void _generateWindow(const ftl::rgbd::Frame &, const ftl::codecs::Face &face, Eigen::Matrix4d &pose_adjust, ftl::render::ViewPort &vp); }; } diff --git a/applications/gui/src/gltexture.cpp b/applications/gui/src/gltexture.cpp index 8b54e22efb2b5d51d24d69bfcafcba2cb3201d2a..ae3c3a05d852fbff2dc8423de2533c4d2d6f2f8e 100644 --- a/applications/gui/src/gltexture.cpp +++ b/applications/gui/src/gltexture.cpp @@ -137,6 +137,7 @@ void GLTexture::unmap(cudaStream_t stream) { glTexSubImage2D( GL_TEXTURE_2D, 0, 0, 0, 
width_, height_, GL_RED, GL_FLOAT, NULL); } glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); + glBindTexture(GL_TEXTURE_2D, 0); glBindBuffer( GL_PIXEL_UNPACK_BUFFER, 0); } diff --git a/applications/gui/src/screen.cpp b/applications/gui/src/screen.cpp index 4b55bb1e28ff120a4c63d176fcbf1469b193ce92..ea32b222b76510361ce946e25ba1af9af8d4ba22 100644 --- a/applications/gui/src/screen.cpp +++ b/applications/gui/src/screen.cpp @@ -1,5 +1,8 @@ #include "screen.hpp" +#include <ftl/streams/netstream.hpp> +#include <ftl/rgbd/frameset.hpp> + #include <nanogui/opengl.h> #include <nanogui/glutil.h> #include <nanogui/screen.h> @@ -10,6 +13,8 @@ #include <nanogui/toolbutton.h> #include <nanogui/popupbutton.h> +#include <sstream> + #include <nlohmann/json.hpp> #include <loguru.hpp> @@ -62,6 +67,14 @@ namespace { })"; } +template <typename T> +std::string to_string_with_precision(const T a_value, const int n = 6) { + std::ostringstream out; + out.precision(n); + out << std::fixed << a_value; + return out.str(); +} + ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl::ctrl::Master *controller) : nanogui::Screen(Eigen::Vector2i(1024, 768), "FT-Lab Remote Presence"), status_("FT-Lab Remote Presence System") { @@ -70,6 +83,7 @@ ftl::gui::Screen::Screen(ftl::Configurable *proot, ftl::net::Universe *pnet, ftl ctrl_ = controller; root_ = proot; camera_ = nullptr; + last_stats_count_ = 0; #ifdef HAVE_OPENVR HMD_ = nullptr; @@ -589,6 +603,29 @@ void ftl::gui::Screen::draw(NVGcontext *ctx) { } nvgTextAlign(ctx, NVG_ALIGN_RIGHT); + + if (root()->value("show_information", true)) { + string msg; + + auto &stats = getStatistics(); + + msg = string("Frame rate: ") + std::to_string((int)stats.fps); + nvgText(ctx, screenSize[0]-10, 20, msg.c_str(), NULL); + msg = string("Latency: ") + std::to_string((int)stats.latency) + string("ms"); + nvgText(ctx, screenSize[0]-10, 40, msg.c_str(), NULL); + + msg = string("Bitrate: ") + to_string_with_precision(stats.bitrate, 2) + string("Mbps"); + nvgText(ctx, screenSize[0]-10, 60, msg.c_str(), NULL); + + if (camera_) { + auto intrin = camera_->getIntrinsics(); + msg = string("Resolution: ") + std::to_string(intrin.width) + string("x") + std::to_string(intrin.height); + nvgText(ctx, screenSize[0]-10, 80, msg.c_str(), NULL); + msg = string("Focal: ") + to_string_with_precision(intrin.fx, 2); + nvgText(ctx, screenSize[0]-10, 100, msg.c_str(), NULL); + } + } + nvgText(ctx, screenSize[0]-10, screenSize[1]-20, status_.c_str(), NULL); /* Draw the user interface */ @@ -596,6 +633,17 @@ void ftl::gui::Screen::draw(NVGcontext *ctx) { nanogui::Screen::draw(ctx); } +const ftl::gui::Statistics &ftl::gui::Screen::getStatistics() { + if (--last_stats_count_ <= 0) { + auto [fps,latency] = ftl::rgbd::Builder::getStatistics(); + stats_.fps = fps; + stats_.latency = latency; + stats_.bitrate = ftl::stream::Net::getRequiredBitrate(); + last_stats_count_ = 20; + } + return stats_; +} + void ftl::gui::Screen::drawFast() { if (camera_) { camera_->captureFrame(); diff --git a/applications/gui/src/screen.hpp b/applications/gui/src/screen.hpp index 9191729cf3d46ced606187c9005b0048c2bf0414..7af733bd78a4ff2a237074878c6e56fe163167e6 100644 --- a/applications/gui/src/screen.hpp +++ b/applications/gui/src/screen.hpp @@ -23,6 +23,12 @@ namespace gui { class Camera; class MediaPanel; +struct Statistics { + float fps; + float latency; + float bitrate; +}; + class Screen : public nanogui::Screen { public: explicit Screen(ftl::Configurable *root, ftl::net::Universe *net, ftl::ctrl::Master *controller); 
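Note: the statistics HUD introduced in screen.cpp above deliberately does not query the pipeline on every draw call; getStatistics() decrements last_stats_count_ and only refreshes the cached values (from ftl::rgbd::Builder::getStatistics() and ftl::stream::Net::getRequiredBitrate()) once every 20 frames. A minimal standalone sketch of that throttling pattern is given below; fetchFps/fetchLatency/fetchBitrate are hypothetical stand-ins for those calls and are not part of this patch.

#include <sstream>
#include <string>

struct Statistics { float fps; float latency; float bitrate; };

// Fixed-precision float formatting, as used for the bitrate and focal length text.
template <typename T>
std::string to_string_with_precision(T value, int n = 6) {
    std::ostringstream out;
    out.precision(n);
    out << std::fixed << value;
    return out.str();
}

class StatsCache {
public:
    // Refresh the cached statistics at most once every interval_ calls.
    const Statistics &get() {
        if (--countdown_ <= 0) {
            stats_.fps = fetchFps();          // hypothetical source (Builder statistics in the real code)
            stats_.latency = fetchLatency();  // hypothetical source (Builder statistics in the real code)
            stats_.bitrate = fetchBitrate();  // hypothetical source (Net::getRequiredBitrate in the real code)
            countdown_ = interval_;
        }
        return stats_;
    }
private:
    float fetchFps() { return 30.0f; }
    float fetchLatency() { return 50.0f; }
    float fetchBitrate() { return 12.5f; }

    Statistics stats_ = {0.0f, 0.0f, 0.0f};
    int countdown_ = 0;
    int interval_ = 20;   // matches last_stats_count_ reset value in the patch
};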
@@ -46,6 +52,8 @@ class Screen : public nanogui::Screen { void setActiveCamera(ftl::gui::Camera*); ftl::gui::Camera *activeCamera() { return camera_; } + const ftl::gui::Statistics &getStatistics(); + #ifdef HAVE_OPENVR // initialize OpenVR bool initVR(); @@ -110,6 +118,9 @@ class Screen : public nanogui::Screen { #ifdef HAVE_OPENVR vr::IVRSystem *HMD_; #endif + + ftl::gui::Statistics stats_; + int last_stats_count_; }; } diff --git a/applications/gui/src/src_window.cpp b/applications/gui/src/src_window.cpp index 15e9e96f75f5b51c9e9a10f9004d3d597dd9974e..105a37d546153ec7b405f6bc5fcfd43cff9bd077 100644 --- a/applications/gui/src/src_window.cpp +++ b/applications/gui/src/src_window.cpp @@ -155,6 +155,11 @@ SourceWindow::SourceWindow(ftl::gui::Screen *screen) } else if (path.rfind("device:", 0) == 0) { ftl::URI uri(path); uri.to_json(screen->root()->getConfig()["sources"].emplace_back()); + } else { + ftl::URI uri(path); + if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) { + screen->net()->connect(path); + } } } @@ -190,20 +195,25 @@ bool SourceWindow::_processFrameset(ftl::rgbd::FrameSet &fs, bool fromstream) { _checkFrameSets(fs.id); if (!paused_) { - // Enforce interpolated colour and GPU upload - for (int i=0; i<fs.frames.size(); ++i) { - fs.frames[i].createTexture<uchar4>(Channel::Colour, true); + if (!fs.test(ftl::data::FSFlag::PARTIAL) || !screen_->root()->value("drop_partial_framesets", false)) { + // Enforce interpolated colour and GPU upload + for (int i=0; i<fs.frames.size(); ++i) { + if (!fs.hasFrame(i)) continue; + fs.frames[i].createTexture<uchar4>(Channel::Colour, true); + + // TODO: Do all channels. This is a fix for screen capture sources. + if (!fs.frames[i].isGPU(Channel::Colour)) fs.frames[i].upload(Channels<0>(Channel::Colour), pre_pipelines_[fs.id]->getStream()); + } - // TODO: Do all channels. This is a fix for screen capture sources. - if (!fs.frames[i].isGPU(Channel::Colour)) fs.frames[i].upload(Channels<0>(Channel::Colour), pre_pipelines_[fs.id]->getStream()); - } + { + FTL_Profile("Prepipe",0.020); + pre_pipelines_[fs.id]->apply(fs, fs, 0); + } - { - FTL_Profile("Prepipe",0.020); - pre_pipelines_[fs.id]->apply(fs, fs, 0); + fs.swapTo(*framesets_[fs.id]); + } else { + LOG(WARNING) << "Dropping frameset: " << fs.timestamp; } - - fs.swapTo(*framesets_[fs.id]); } const auto *cstream = interceptor_; @@ -241,6 +251,7 @@ void SourceWindow::_checkFrameSets(int id) { p->append<ftl::operators::CullWeight>("remove_weights")->value("enabled", false); p->append<ftl::operators::DegradeWeight>("degrade"); p->append<ftl::operators::VisCrossSupport>("viscross")->set("enabled", false); + p->append<ftl::operators::BorderMask>("border_mask"); p->append<ftl::operators::CullDiscontinuity>("remove_discontinuity"); p->append<ftl::operators::MultiViewMLS>("mvmls")->value("enabled", false); @@ -319,19 +330,35 @@ void SourceWindow::_updateCameras(const vector<string> &netcams) { if (netcams.size() == 0) return; for (auto s : netcams) { + LOG(INFO) << "ADDING CAMERA: " << s; + + if (available_.count(s) == 0) { + auto *stream = ftl::create<ftl::stream::Net>(screen_->root(), string("netstream")+std::to_string(available_.size()-1), screen_->net()); + available_[s] = stream; + stream->set("uri", s); + bool isspecial = (stream->get<std::string>("uri") == screen_->root()->value("data_stream",std::string(""))); + stream_->add(stream, (isspecial) ? 
1 : 0); + + LOG(INFO) << "Add Stream: " << stream->value("uri", std::string("NONE")); + } + // FIXME: Check for already existing... //if (streams_.find(s) == cameras_.end()) { - available_.push_back(s); - json_t srcjson; - srcjson["uri"] = s; - screen_->root()->getConfig()["streams"].push_back(srcjson); + //available_.push_back(s); + //json_t srcjson; + //srcjson["uri"] = s; + //screen_->root()->getConfig()["streams"].push_back(srcjson); + //screen_->root()->getConfig()["receivers"].push_back(json_t{}); //} } - std::vector<ftl::stream::Net*> strms = ftl::createArray<ftl::stream::Net>(screen_->root(), "streams", screen_->net()); + //stream_->reset(); + stream_->begin(); - for (int i=0; i<strms.size(); ++i) { + //std::vector<ftl::stream::Net*> strms = ftl::createArray<ftl::stream::Net>(screen_->root(), "streams", screen_->net()); + + /*for (int i=0; i<strms.size(); ++i) { auto *stream = strms[i]; bool isspecial = (stream->get<std::string>("uri") == screen_->root()->value("data_stream",std::string(""))); if (isspecial) LOG(INFO) << "Adding special stream"; @@ -342,18 +369,8 @@ void SourceWindow::_updateCameras(const vector<string> &netcams) { //Scene *scene = new Scene(receiver); //scenes_.push_back(scene); - /*if (.find(src->getURI()) == cameras_.end()) { - LOG(INFO) << "Making camera: " << src->getURI(); - - // TODO: Need to have GUI wrapper for an entire stream... which - // manages a set of cameras. - auto *cam = new ftl::gui::Camera(screen_, src); - cameras_[src->getURI()] = cam; - } else { - //LOG(INFO) << "Camera already exists: " << s; - }*/ - } + }*/ //refresh_thumbs_ = true; //if (thumbs_.size() != available_.size()) { diff --git a/applications/gui/src/src_window.hpp b/applications/gui/src/src_window.hpp index 165143c14d7ce2f9f52614aa798f3c09ea4be488..c420c0e7cbafde19e5670ed99bc75d6cf8ccae8b 100644 --- a/applications/gui/src/src_window.hpp +++ b/applications/gui/src/src_window.hpp @@ -9,6 +9,7 @@ #include <ftl/threads.hpp> #include <vector> #include <map> +#include <unordered_map> #include <string> #include "gltexture.hpp" @@ -66,7 +67,7 @@ class SourceWindow : public nanogui::Window { ftl::stream::Intercept *interceptor_; ftl::stream::File *recorder_; ftl::stream::Receiver *receiver_; - std::vector<std::string> available_; + std::unordered_map<std::string, ftl::stream::Stream*> available_; std::vector<GLTexture> thumbs_; bool refresh_thumbs_; nanogui::Widget *ipanel_; diff --git a/applications/reconstruct/src/main.cpp b/applications/reconstruct/src/main.cpp index 3e35b5ad96eec52c81a26dd50d6e66e8ac52405c..c713a56a8ca6e35896af803da53f9ecaf0a9887a 100644 --- a/applications/reconstruct/src/main.cpp +++ b/applications/reconstruct/src/main.cpp @@ -191,6 +191,11 @@ static void run(ftl::Configurable *root) { sender->post(fs); return true; }); + } else { + ftl::URI uri(path); + if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) { + net->connect(path)->waitConnection(); + } } } diff --git a/applications/vision/src/main.cpp b/applications/vision/src/main.cpp index 3f8dca5abd7fa299d903f0c2cf4969b1c5dda4f6..07e6b4ae547bf7ec960d0d01ac74285f840d4595 100644 --- a/applications/vision/src/main.cpp +++ b/applications/vision/src/main.cpp @@ -75,9 +75,17 @@ static void run(ftl::Configurable *root) { source->setChannel(Channel::Depth); grp->addSource(source); - grp->onFrameSet([sender](ftl::rgbd::FrameSet &fs) { + int stats_count = 0; + + grp->onFrameSet([sender,&stats_count](ftl::rgbd::FrameSet &fs) { fs.id = 0; sender->post(fs); + + if (--stats_count <= 0) { + auto 
[fps,latency] = ftl::rgbd::Builder::getStatistics(); + LOG(INFO) << "Frame rate: " << fps << ", Latency: " << latency; + stats_count = 20; + } return true; }); diff --git a/components/audio/src/source.cpp b/components/audio/src/source.cpp index d30d2d045048b70928a6d4624bfe9396d451b2a2..2c976b20721b29dd35bd6a486e7ab55b06ba6569 100644 --- a/components/audio/src/source.cpp +++ b/components/audio/src/source.cpp @@ -153,7 +153,8 @@ Source::Source(nlohmann::json &config) : ftl::Configurable(config), buffer_(null frameset_.id = 0; frameset_.count = 1; - frameset_.stale = false; + //frameset_.stale = false; + frameset_.clear(ftl::data::FSFlag::STALE); if (to_read_ < 1 || !buffer_) return true; diff --git a/components/codecs/include/ftl/codecs/codecs.hpp b/components/codecs/include/ftl/codecs/codecs.hpp index 6ab79574fc22615d1622b56d14531a950b2d51f1..5e594429e5dea928ebf1a92d2fc5c0a50f8fcc41 100644 --- a/components/codecs/include/ftl/codecs/codecs.hpp +++ b/components/codecs/include/ftl/codecs/codecs.hpp @@ -18,6 +18,7 @@ enum struct format_t { static constexpr uint8_t kFlagFlipRGB = 0x01; // Swap blue and red channels [deprecated] static constexpr uint8_t kFlagMappedDepth = 0x02; // Use Yuv mapping for float [deprecated] static constexpr uint8_t kFlagFloat = 0x04; // Floating point output +static constexpr uint8_t kFlagPartial = 0x10; // This frameset is not complete static constexpr uint8_t kFlagMultiple = 0x80; // Multiple video frames in single packet /** diff --git a/components/common/cpp/include/ftl/exception.hpp b/components/common/cpp/include/ftl/exception.hpp index d86f9017a45df07de1961ad468712911694161c1..07f7a366adf65cb7baea83624d8f1ff53a3cf848 100644 --- a/components/common/cpp/include/ftl/exception.hpp +++ b/components/common/cpp/include/ftl/exception.hpp @@ -37,8 +37,10 @@ class exception : public std::exception public: explicit exception(const char *msg); explicit exception(const Formatter &msg); + ~exception(); const char * what () const throw () { + processed_ = true; return msg_.c_str(); } @@ -46,9 +48,12 @@ class exception : public std::exception return trace_.c_str(); } + void ignore() const { processed_ = true; } + private: std::string msg_; std::string trace_; + mutable bool processed_; }; } diff --git a/components/common/cpp/src/exception.cpp b/components/common/cpp/src/exception.cpp index 1291adcc5cfba7921cf6acd774a002e28640f3c8..cf74b297ab3218a0786b66127c96ec4cc7d64ea1 100644 --- a/components/common/cpp/src/exception.cpp +++ b/components/common/cpp/src/exception.cpp @@ -1,5 +1,8 @@ #include <ftl/exception.hpp> +#define LOGURU_REPLACE_GLOG 1 +#include <loguru.hpp> + #ifndef WIN32 #include <execinfo.h> #endif @@ -7,26 +10,42 @@ using ftl::exception; using std::string; -exception::exception(const char *msg) : msg_(msg) { - #ifndef WIN32 +static std::string getBackTrace() { +#ifndef WIN32 + + string result; void *trace[16]; - char **messages = (char **)NULL; int trace_size = 0; trace_size = backtrace(trace, 16); - trace_ = "[bt] Trace:\n"; + result = "[bt] Trace:\n"; - messages = backtrace_symbols(trace, trace_size); + char **messages = backtrace_symbols(trace, trace_size); /* skip first stack frame (points here) */ - for (int i=1; i<trace_size; ++i) { - printf("[bt] #%d %s\n", i, messages[i]); - trace_ += string("[bt] #") + std::to_string(i) + string(" ") + messages[i]; + for (int i=2; i<trace_size; ++i) { + //printf("[bt] #%d %s\n", i, messages[i]); + result += string("[bt] #") + std::to_string(i-1) + string(" ") + messages[i] + string("\n"); } - #endif + return result; + 
+#else + return ""; +#endif +} + +exception::exception(const char *msg) : msg_(msg), processed_(false) { + trace_ = std::move(getBackTrace()); } -exception::exception(const ftl::Formatter &msg) : msg_(msg.str()) { +exception::exception(const ftl::Formatter &msg) : msg_(msg.str()), processed_(false) { + trace_ = std::move(getBackTrace()); +} +exception::~exception() { + if (!processed_) { + LOG(ERROR) << "Unreported exception: " << what(); + LOG(ERROR) << trace_; + } } \ No newline at end of file diff --git a/components/net/cpp/include/ftl/net/peer.hpp b/components/net/cpp/include/ftl/net/peer.hpp index a8b12288eff07d15c592ff381bb5a6261cb2130c..6ac41b2f7477fde4014f49a7e05ba06ba5724844 100644 --- a/components/net/cpp/include/ftl/net/peer.hpp +++ b/components/net/cpp/include/ftl/net/peer.hpp @@ -16,6 +16,7 @@ #include <ftl/uri.hpp> #include <ftl/uuid.hpp> #include <ftl/threads.hpp> +#include <ftl/timer.hpp> #include <iostream> #include <sstream> @@ -228,6 +229,8 @@ class Peer { void _updateURI(); int _send(); + + void _waitCall(int id, std::condition_variable &cv, bool &hasreturned, const std::string &name); template<typename... ARGS> void _trigger(const std::vector<std::function<void(Peer &, ARGS...)>> &hs, ARGS... args) { @@ -319,35 +322,17 @@ void Peer::bind(const std::string &name, F func) { template <typename R, typename... ARGS> R Peer::call(const std::string &name, ARGS... args) { bool hasreturned = false; - std::mutex m; + //std::mutex m; std::condition_variable cv; R result; int id = asyncCall<R>(name, [&](const R &r) { - //UNIQUE_LOCK(m,lk); - std::unique_lock<std::mutex> lk(m); - hasreturned = true; result = r; - lk.unlock(); + hasreturned = true; cv.notify_one(); }, std::forward<ARGS>(args)...); - // While waiting, do some other thread jobs... - /*std::function<void(int)> j; - while (!hasreturned && (bool)(j=ftl::pool.pop())) { - LOG(INFO) << "Doing job whilst waiting..."; - j(-1); - }*/ - - { // Block thread until async callback notifies us - std::unique_lock<std::mutex> lk(m); - cv.wait_for(lk, std::chrono::seconds(1), [&hasreturned]{return hasreturned;}); - } - - if (!hasreturned) { - cancelCall(id); - throw FTL_Error("RPC failed with timeout: " << name); - } + _waitCall(id, cv, hasreturned, name); return result; } diff --git a/components/net/cpp/include/ftl/net/universe.hpp b/components/net/cpp/include/ftl/net/universe.hpp index 90c12419b83da749178bd1d4f80790a68bd45fb3..f5af2edef3d8a90cf4d2f9a0b53f25538b30e051 100644 --- a/components/net/cpp/include/ftl/net/universe.hpp +++ b/components/net/cpp/include/ftl/net/universe.hpp @@ -142,6 +142,21 @@ class Universe : public ftl::Configurable { template <typename R, typename... ARGS> R call(const UUID &pid, const std::string &name, ARGS... args); + + /** + * Non-blocking Remote Procedure Call using a callback function. + * + * @param pid Peer GUID + * @param name RPC Function name. + * @param cb Completion callback. + * @param args A variable number of arguments for RPC function. + * + * @return A call id for use with cancelCall() if needed. + */ + template <typename R, typename... ARGS> + int asyncCall(const UUID &pid, const std::string &name, + std::function<void(const R&)> cb, + ARGS... args); template <typename... ARGS> bool send(const UUID &pid, const std::string &name, ARGS... args); @@ -284,7 +299,8 @@ template <typename... ARGS> void Universe::broadcast(const std::string &name, ARGS... 
args) { SHARED_LOCK(net_mutex_,lk); for (auto p : peers_) { - if (p->isConnected()) p->send(name, args...); + if (!p->waitConnection()) continue; + p->send(name, args...); } } @@ -310,8 +326,9 @@ std::optional<R> Universe::findOne(const std::string &name, ARGS... args) { SHARED_LOCK(net_mutex_,lk); for (auto p : peers_) { + if (!p->waitConnection()) continue; count++; - if (p->isConnected()) record[p] = p->asyncCall<std::optional<R>>(name, handler, args...); + record[p] = p->asyncCall<std::optional<R>>(name, handler, args...); } lk.unlock(); @@ -356,9 +373,7 @@ std::vector<R> Universe::findAll(const std::string &name, ARGS... args) { std::map<Peer*, int> record; SHARED_LOCK(net_mutex_,lk); for (auto p : peers_) { - if (!p->isConnected()) { - continue; - } + if (!p->waitConnection()) continue; sentcount++; record[p] = p->asyncCall<std::vector<R>>(name, handler, args...); } @@ -392,6 +407,16 @@ R Universe::call(const ftl::UUID &pid, const std::string &name, ARGS... args) { return p->call<R>(name, args...); } +template <typename R, typename... ARGS> +int Universe::asyncCall(const ftl::UUID &pid, const std::string &name, std::function<void(const R&)> cb, ARGS... args) { + Peer *p = getPeer(pid); + if (p == nullptr || !p->isConnected()) { + if (p == nullptr) throw FTL_Error("Attempting to call an unknown peer : " << pid.to_string()); + else throw FTL_Error("Attempting to call an disconnected peer : " << pid.to_string()); + } + return p->asyncCall(name, cb, args...); +} + template <typename... ARGS> bool Universe::send(const ftl::UUID &pid, const std::string &name, ARGS... args) { Peer *p = getPeer(pid); diff --git a/components/net/cpp/src/peer.cpp b/components/net/cpp/src/peer.cpp index 24aac7bb3eb6076d8f662fc4ed610e421d357028..b86b3553b6213920b890794308380511d751294b 100644 --- a/components/net/cpp/src/peer.cpp +++ b/components/net/cpp/src/peer.cpp @@ -519,9 +519,16 @@ bool Peer::_data() { obj.convert(hs); if (get<1>(hs) != "__handshake__") { - _badClose(false); - LOG(ERROR) << "Missing handshake - got '" << get<1>(hs) << "'"; - return false; + LOG(WARNING) << "Missing handshake - got '" << get<1>(hs) << "'"; + + // Allow a small delay in case another thread is doing the handshake + lk.unlock(); + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + if (status_ == kConnecting) { + LOG(ERROR) << "Failed to get handshake"; + _badClose(false); + return false; + } } else { // Must handle immediately with no other thread able // to read next message before completion. @@ -530,9 +537,16 @@ bool Peer::_data() { return true; } } catch(...) { - _badClose(false); - LOG(ERROR) << "Bad first message format"; - return false; + LOG(WARNING) << "Bad first message format... 
waiting"; + + // Allow a small delay in case another thread is doing the handshake + lk.unlock(); + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + if (status_ == kConnecting) { + LOG(ERROR) << "Failed to get handshake"; + _badClose(false); + return false; + } } } } @@ -580,8 +594,33 @@ void Peer::_sendResponse(uint32_t id, const msgpack::object &res) { _send(); } +void Peer::_waitCall(int id, std::condition_variable &cv, bool &hasreturned, const std::string &name) { + std::mutex m; + + int64_t beginat = ftl::timer::get_time(); + std::function<void(int)> j; + while (!hasreturned) { + // Attempt to do a thread pool job if available + if ((bool)(j=ftl::pool.pop())) { + j(-1); + } else { + // Block for a little otherwise + std::unique_lock<std::mutex> lk(m); + cv.wait_for(lk, std::chrono::milliseconds(2), [&hasreturned]{return hasreturned;}); + } + + if (ftl::timer::get_time() - beginat > 1000) break; + } + + if (!hasreturned) { + cancelCall(id); + throw FTL_Error("RPC failed with timeout: " << name); + } +} + bool Peer::waitConnection() { if (status_ == kConnected) return true; + else if (status_ != kConnecting) return false; std::mutex m; //UNIQUE_LOCK(m,lk); @@ -594,7 +633,7 @@ bool Peer::waitConnection() { } }); - cv.wait_for(lk, seconds(5)); + cv.wait_for(lk, seconds(1), [this](){return status_ == kConnected;}); universe_->removeCallback(h); return status_ == kConnected; } diff --git a/components/net/cpp/test/net_integration.cpp b/components/net/cpp/test/net_integration.cpp index 7cca78a481a33f57e037765b206d39bb77745867..200e3c5f999e1428128dc2f382d92dee425b88ba 100644 --- a/components/net/cpp/test/net_integration.cpp +++ b/components/net/cpp/test/net_integration.cpp @@ -1,5 +1,6 @@ #include "catch.hpp" #include <ftl/net.hpp> +#include <ftl/timer.hpp> #include <thread> #include <chrono> @@ -102,7 +103,7 @@ TEST_CASE("Universe::onConnect()", "[net]") { }); b.connect("tcp://localhost:7077")->waitConnection(); - sleep_for(milliseconds(100)); + //sleep_for(milliseconds(100)); REQUIRE( done ); } } @@ -124,7 +125,7 @@ TEST_CASE("Universe::onDisconnect()", "[net]") { p->waitConnection(); sleep_for(milliseconds(100)); p->close(); - sleep_for(milliseconds(1100)); + sleep_for(milliseconds(100)); REQUIRE( done ); } @@ -139,7 +140,7 @@ TEST_CASE("Universe::onDisconnect()", "[net]") { p->waitConnection(); sleep_for(milliseconds(100)); p->close(); - sleep_for(milliseconds(1100)); + sleep_for(milliseconds(100)); REQUIRE( done ); } } @@ -158,7 +159,7 @@ TEST_CASE("Universe::broadcast()", "[net]") { b.broadcast("done"); - sleep_for(milliseconds(100)); + sleep_for(milliseconds(50)); REQUIRE( !done ); } @@ -172,7 +173,7 @@ TEST_CASE("Universe::broadcast()", "[net]") { b.broadcast("hello"); - sleep_for(milliseconds(100)); + while (!done) sleep_for(milliseconds(5)); REQUIRE( done ); } @@ -187,7 +188,7 @@ TEST_CASE("Universe::broadcast()", "[net]") { b.broadcast("hello", 676); - sleep_for(milliseconds(100)); + while (done == 0) sleep_for(milliseconds(5)); REQUIRE( done == 676 ); } @@ -209,7 +210,7 @@ TEST_CASE("Universe::broadcast()", "[net]") { }); REQUIRE( a.numberOfPeers() == 2 ); - sleep_for(milliseconds(100)); // NOTE: Binding might not be ready + //sleep_for(milliseconds(100)); // NOTE: Binding might not be ready a.broadcast("hello", 676); @@ -251,10 +252,54 @@ TEST_CASE("Universe::findAll()", "") { return {6,7,8}; }); - sleep_for(milliseconds(100)); // NOTE: Binding might not be ready + //sleep_for(milliseconds(100)); // NOTE: Binding might not be ready auto res = 
a.findAll<int>("test_all"); REQUIRE( (res.size() == 6) ); REQUIRE( (res[0] == 3 || res[0] == 6) ); } } + +TEST_CASE("Peer::call() __ping__", "") { + Universe a; + Universe b; + Universe c; + + a.listen("tcp://localhost:7077"); + auto *p = b.connect("tcp://localhost:7077"); + p->waitConnection(); + + SECTION("single ping") { + int64_t res = p->call<int64_t>("__ping__"); + REQUIRE((res <= ftl::timer::get_time() && res > 0)); + } + + SECTION("large number of pings") { + for (int i=0; i<100; ++i) { + int64_t res = p->call<int64_t>("__ping__"); + REQUIRE(res > 0); + } + } + + SECTION("large number of parallel pings") { + std::atomic<int> count = 0; + for (int i=0; i<100; ++i) { + ftl::pool.push([&count, p](int id) { + int64_t res = p->call<int64_t>("__ping__"); + count++; + }); + } + + while (count < 100) std::this_thread::sleep_for(milliseconds(5)); + } + + SECTION("single invalid rpc") { + bool errored = false; + try { + int64_t res = p->call<int64_t>("__ping2__"); + } catch (const ftl::exception &e) { + errored = true; + } + REQUIRE(errored); + } +} diff --git a/components/operators/include/ftl/operators/mask.hpp b/components/operators/include/ftl/operators/mask.hpp index 5c542207dbf7216658b2de0d85cf446808db76d3..294caf54f31d12fce9674dfcf51e204f3b2ade5d 100644 --- a/components/operators/include/ftl/operators/mask.hpp +++ b/components/operators/include/ftl/operators/mask.hpp @@ -23,6 +23,20 @@ class DiscontinuityMask : public ftl::operators::Operator { }; +/** + * Generate a depth border mask. + */ +class BorderMask : public ftl::operators::Operator { + public: + explicit BorderMask(ftl::Configurable*); + ~BorderMask(); + + inline Operator::Type type() const override { return Operator::Type::OneToOne; } + + bool apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) override; + +}; + /** * Remove depth values marked with the discontinuity mask. 
*/ diff --git a/components/operators/include/ftl/operators/mask_cuda.hpp b/components/operators/include/ftl/operators/mask_cuda.hpp index f780d5178d80a0be6691648e618d48e4ce2031e5..aadab5e5a640c7200bc87f76b4b80d496b6dbe59 100644 --- a/components/operators/include/ftl/operators/mask_cuda.hpp +++ b/components/operators/include/ftl/operators/mask_cuda.hpp @@ -69,6 +69,11 @@ void discontinuity( float area_max, cudaStream_t stream); +void border_mask( + ftl::cuda::TextureObject<ftl::cuda::Mask::type> &mask, + int left, int right, int top, int bottom, + cudaStream_t stream); + void cull_mask( ftl::cuda::TextureObject<ftl::cuda::Mask::type> &mask, ftl::cuda::TextureObject<float> &depth, diff --git a/components/operators/src/aruco.cpp b/components/operators/src/aruco.cpp index 18beb6638d3772f7aae44682f60b549ec386d0dc..b8f54eb4c254fcca166b2c2f48cc46f360a1adf5 100644 --- a/components/operators/src/aruco.cpp +++ b/components/operators/src/aruco.cpp @@ -1,6 +1,9 @@ #include "ftl/operators/detectandtrack.hpp" #include "ftl/codecs/transformation.hpp" +#define LOGURU_REPLACE_GLOG 1 +#include <loguru.hpp> + using ftl::operators::ArUco; using ftl::rgbd::Frame; @@ -18,18 +21,22 @@ ArUco::ArUco(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) { params_ = cv::aruco::DetectorParameters::create(); debug_ = cfg->value("debug", false); - estimate_pose_ = cfg->value("estimate_pose", false); - auto marker_size = cfg->get<float>("marker_size"); - if (!marker_size || (*marker_size < 0.0f)) { - marker_size_ = 0.0; - estimate_pose_ = false; - } - else { - marker_size_ = *marker_size; - } + //estimate_pose_ = cfg->value("estimate_pose", false); + //auto marker_size = cfg->get<float>("marker_size"); + //if (!marker_size || (*marker_size <= 0.0f)) { + // marker_size_ = 0.1f; + // estimate_pose_ = false; + //} + //else { + // marker_size_ = *marker_size; + //} channel_in_ = Channel::Colour; channel_out_ = Channel::Transforms; + + cfg->on("dictionary", [this,cfg](const ftl::config::Event &e) { + dictionary_ = cv::aruco::getPredefinedDictionary(cfg->value("dictionary", 0)); + }); } bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) { @@ -38,6 +45,10 @@ bool ArUco::apply(Frame &in, Frame &out, cudaStream_t stream) { Frame *inptr = ∈ Frame *outptr = &out; + estimate_pose_ = config()->value("estimate_pose", false); + debug_ = config()->value("debug", false); + marker_size_ = config()->value("marker_size",0.1f); + job_ = std::move(ftl::pool.push([this,inptr,outptr,stream](int id) { Frame &in = *inptr; Frame &out = *outptr; diff --git a/components/operators/src/clipping.cpp b/components/operators/src/clipping.cpp index e6380fffcfa737ab430f18ee7ee681afa9c7fc07..97a28296ed06a0b520886d2a3c78fba627b6ffbf 100644 --- a/components/operators/src/clipping.cpp +++ b/components/operators/src/clipping.cpp @@ -61,14 +61,17 @@ bool ClipScene::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cudaStr in.create(Channel::Shapes3D, shapes); for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f = in.frames[i]; //auto *s = in.sources[i]; - auto pose = MatrixConversion::toCUDA(f.getPose().cast<float>()); + if (f.hasChannel(Channel::Depth)) { + auto pose = MatrixConversion::toCUDA(f.getPose().cast<float>()); - auto sclip = clip; - sclip.origin = sclip.origin.getInverse() * pose; - if (!no_clip) ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), f.getLeftCamera(), sclip, stream); + auto sclip = clip; + sclip.origin = sclip.origin.getInverse() * pose; + if (!no_clip) 
ftl::cuda::clipping(f.createTexture<float>(Channel::Depth), f.getLeftCamera(), sclip, stream); + } } return true; diff --git a/components/operators/src/fusion/mvmls.cpp b/components/operators/src/fusion/mvmls.cpp index 55b992957805ec2cb6fe9d00baad4b7f5193202f..200a9d9a50b0358ddc9bd3491dc44a844aa1b924 100644 --- a/components/operators/src/fusion/mvmls.cpp +++ b/components/operators/src/fusion/mvmls.cpp @@ -48,7 +48,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda bool show_adjustment = config()->value("show_adjustment", false); if (in.frames.size() < 1) return false; - auto size = in.frames[0].get<GpuMat>(Channel::Depth).size(); + auto size = in.firstFrame().get<GpuMat>(Channel::Depth).size(); // Make sure we have enough buffers while (normals_horiz_.size() < in.frames.size()) { @@ -60,6 +60,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Make sure all buffers are at correct resolution and are allocated for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; + auto &f = in.frames[i]; auto size = f.get<GpuMat>(Channel::Depth).size(); centroid_horiz_[i]->create(size.height, size.width); @@ -91,6 +93,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // For each camera combination if (do_corr) { for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; + auto &f1 = in.frames[i]; //f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream); //f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream); @@ -99,6 +103,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda d1 = f1.getPose() * d1; for (size_t j=i+1; j<in.frames.size(); ++j) { + if (!in.hasFrame(j)) continue; if (i == j) continue; //LOG(INFO) << "Running phase1"; @@ -339,6 +344,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Redo normals for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f = in.frames[i]; ftl::cuda::normals( f.getTexture<half4>(Channel::Normals), @@ -403,6 +409,8 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Do the horizontal and vertical MLS aggregations for each source // But don't do the final move step. 
for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; + auto &f = in.frames[i]; //auto *s = in.sources[i]; @@ -456,6 +464,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // For each camera combination if (do_aggr) { for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f1 = in.frames[i]; //f1.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0.0f), cvstream); //f1.get<GpuMat>(Channel::Confidence).setTo(cv::Scalar(0.0f), cvstream); @@ -464,6 +473,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda d1 = f1.getPose() * d1; for (size_t j=0; j<in.frames.size(); ++j) { + if (!in.hasFrame(j)) continue; if (i == j) continue; //LOG(INFO) << "Running phase1"; @@ -509,6 +519,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda // Step 3: // Normalise aggregations and move the points for (size_t i=0; i<in.frames.size(); ++i) { + if (!in.hasFrame(i)) continue; auto &f = in.frames[i]; //auto *s = in.sources[i]; auto size = f.get<GpuMat>(Channel::Depth).size(); @@ -526,6 +537,7 @@ bool MultiViewMLS::apply(ftl::rgbd::FrameSet &in, ftl::rgbd::FrameSet &out, cuda f.getTexture<half4>(Channel::Normals), *centroid_vert_[i], f.createTexture<float>(Channel::Depth2, ftl::rgbd::Format<float>(size)), + f.getTexture<float>(Channel::Depth), f.getLeftCamera(), stream ); diff --git a/components/operators/src/mask.cpp b/components/operators/src/mask.cpp index beec2bd285f496a2d91f2fbbe6fbbbd39ffce090..1c53630cf7b43112d2fe7ceba19110e9b89024b8 100644 --- a/components/operators/src/mask.cpp +++ b/components/operators/src/mask.cpp @@ -2,6 +2,7 @@ #include <ftl/operators/mask_cuda.hpp> using ftl::operators::DiscontinuityMask; +using ftl::operators::BorderMask; using ftl::operators::CullDiscontinuity; using ftl::codecs::Channel; using ftl::rgbd::Format; @@ -49,6 +50,34 @@ bool DiscontinuityMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaS +BorderMask::BorderMask(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) { + +} + +BorderMask::~BorderMask() { + +} + +bool BorderMask::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) { + int leftm = config()->value("left", 100); + int rightm = config()->value("right", 5); + int topm = config()->value("top",5); + int bottomm = config()->value("bottom",5); + + if (!in.hasChannel(Channel::Depth)) { + return true; + } + + ftl::cuda::border_mask( + out.createTexture<uint8_t>(Channel::Mask, ftl::rgbd::Format<uint8_t>(in.get<cv::cuda::GpuMat>(Channel::Depth).size())), + leftm, rightm, topm, bottomm, stream + ); + + return true; +} + + + CullDiscontinuity::CullDiscontinuity(ftl::Configurable *cfg) : ftl::operators::Operator(cfg) { } @@ -60,8 +89,8 @@ CullDiscontinuity::~CullDiscontinuity() { bool CullDiscontinuity::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t stream) { if (!in.hasChannel(Channel::Depth) || !in.hasChannel(Channel::Mask)) return false; - uint8_t maskID = config()->value("mask_id", (unsigned int)ftl::cuda::Mask::kMask_Discontinuity); - unsigned int radius = config()->value("radius", 2); + uint8_t maskID = config()->value("mask_id", (unsigned int)(ftl::cuda::Mask::kMask_Discontinuity | ftl::cuda::Mask::kMask_Bad)); + unsigned int radius = config()->value("radius", 0); bool inverted = config()->value("invert", false); out.clearPackets(Channel::Depth); // Force reset diff --git a/components/operators/src/mask.cu b/components/operators/src/mask.cu index 
7c7ce32ad2775433dcd239bf0024ee36185b4d3a..85fde5190a7c1e4fd69432b61b515a0c868501de 100644 --- a/components/operators/src/mask.cu +++ b/components/operators/src/mask.cu @@ -68,12 +68,48 @@ void ftl::cuda::discontinuity( ftl::cuda::TextureObject<uint8_t> &mask_out, ftl: // ============================================================================= +__global__ void border_mask_kernel(uint8_t* __restrict__ mask_out, + int pitch, int width, int height, + int left, int right, int top, int bottom) { + + const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; + const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; + + if (x < width && y < height) { + Mask mask(mask_out[x+y*pitch]); + if (x < left || x >= width-right || y < top || y >= height-bottom) { + mask.isBad(true); + mask_out[x+y*pitch] = (int)mask; + } + } +} + +void ftl::cuda::border_mask(ftl::cuda::TextureObject<uint8_t> &mask_out, + int left, int right, int top, int bottom, cudaStream_t stream) { + + static constexpr int THREADS_X = 128; + static constexpr int THREADS_Y = 4; + + const dim3 gridSize((mask_out.width() + THREADS_X - 1)/THREADS_X, (mask_out.height() + THREADS_Y - 1)/THREADS_Y); + const dim3 blockSize(THREADS_X, THREADS_Y); + + border_mask_kernel<<<gridSize, blockSize, 0, stream>>>(mask_out.devicePtr(), mask_out.pixelPitch(), + mask_out.width(), mask_out.height(), left, right, top, bottom); + cudaSafeCall( cudaGetLastError() ); + + #ifdef _DEBUG + cudaSafeCall(cudaDeviceSynchronize()); + #endif +} + +// ============================================================================= + template <int RADIUS, bool INVERT> __global__ void cull_mask_kernel(ftl::cuda::TextureObject<uint8_t> mask, ftl::cuda::TextureObject<float> depth, uint8_t id) { const unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; - if (x < depth.width()-RADIUS && y < depth.height()-RADIUS) { + if (x < depth.width() && y < depth.height()) { bool isdiscon = false; #pragma unroll diff --git a/components/operators/src/mls.cu b/components/operators/src/mls.cu index afe15d5592bc396da1abc5703c9f8c0a96b00d49..ee3ce41255e1f24d3f6066e648f26e7caa501f0e 100644 --- a/components/operators/src/mls.cu +++ b/components/operators/src/mls.cu @@ -559,6 +559,7 @@ __global__ void mls_adjust_depth_kernel( TextureObject<half4> normals_in, TextureObject<float4> centroid_in, // Virtual depth map TextureObject<float> depth_out, + TextureObject<float> depth_in, ftl::rgbd::Camera camera) { const int x = blockIdx.x*blockDim.x + threadIdx.x; @@ -568,8 +569,8 @@ __global__ void mls_adjust_depth_kernel( const float3 aX = make_float3(centroid_in(x,y)); const float3 nX = make_float3(normals_in(x,y)); - //float d0 = depth_in.tex2D(x, y); - depth_out(x,y) = aX.z; + float d0 = depth_in.tex2D(x, y); + depth_out(x,y) = d0; if (aX.z > camera.minDepth && aX.z < camera.maxDepth) { float3 X = camera.screenToCam((int)(x),(int)(y),aX.z); @@ -663,6 +664,7 @@ void ftl::cuda::mls_adjust_depth( ftl::cuda::TextureObject<half4> &normals_in, ftl::cuda::TextureObject<float4> ¢roid_in, ftl::cuda::TextureObject<float> &depth_out, + ftl::cuda::TextureObject<float> &depth_in, const ftl::rgbd::Camera &camera, cudaStream_t stream) { @@ -672,7 +674,7 @@ void ftl::cuda::mls_adjust_depth( const dim3 gridSize((depth_out.width() + THREADS_X - 1)/THREADS_X, (depth_out.height() + THREADS_Y - 1)/THREADS_Y); const dim3 blockSize(THREADS_X, THREADS_Y); - mls_adjust_depth_kernel<<<gridSize, blockSize, 0, stream>>>(normals_in, centroid_in, depth_out, 
camera); + mls_adjust_depth_kernel<<<gridSize, blockSize, 0, stream>>>(normals_in, centroid_in, depth_out, depth_in, camera); cudaSafeCall( cudaGetLastError() ); diff --git a/components/operators/src/operator.cpp b/components/operators/src/operator.cpp index 89a4c34c6f377524ba6e8577585dd66951e0df5a..1f000e9a7c57f35da967287f258fd73f78a44be7 100644 --- a/components/operators/src/operator.cpp +++ b/components/operators/src/operator.cpp @@ -64,6 +64,8 @@ bool Graph::apply(FrameSet &in, FrameSet &out, cudaStream_t stream) { } for (size_t j=0; j<in.frames.size(); ++j) { + if (!in.hasFrame(j)) continue; + auto *instance = i.instances[j&0x1]; if (instance->enabled()) { diff --git a/components/operators/src/smoothing.cpp b/components/operators/src/smoothing.cpp index 49ca3f4b9e76b8f7bebbd28b6b4dfa8fa1b64bb6..7391626224673e90e481750905f13191acd4755c 100644 --- a/components/operators/src/smoothing.cpp +++ b/components/operators/src/smoothing.cpp @@ -307,6 +307,7 @@ bool AggreMLS::apply(ftl::rgbd::Frame &in, ftl::rgbd::Frame &out, cudaStream_t s in.createTexture<half4>(Channel::Normals), centroid_vert_, temp_.createTexture<float>(Channel::Depth, ftl::rgbd::Format<float>(size)), + in.getTexture<float>(Channel::Depth), in.getLeftCamera(), stream ); diff --git a/components/operators/src/smoothing_cuda.hpp b/components/operators/src/smoothing_cuda.hpp index 06c6d713f81ab41e14695bcad21943be7d71e3d4..5454d91e2aea43c49ae806524a98bc0cc85ef086 100644 --- a/components/operators/src/smoothing_cuda.hpp +++ b/components/operators/src/smoothing_cuda.hpp @@ -46,6 +46,7 @@ void mls_adjust_depth( ftl::cuda::TextureObject<half4> &normals_in, ftl::cuda::TextureObject<float4> ¢roid_in, ftl::cuda::TextureObject<float> &depth_out, + ftl::cuda::TextureObject<float> &depth_in, const ftl::rgbd::Camera &camera, cudaStream_t stream); diff --git a/components/renderers/cpp/include/ftl/render/render_params.hpp b/components/renderers/cpp/include/ftl/render/render_params.hpp index c109d7c1e035f12c961f025787e377746b28e8e1..b1bcce98b29a04d88cb2f29e84ca95d907240a64 100644 --- a/components/renderers/cpp/include/ftl/render/render_params.hpp +++ b/components/renderers/cpp/include/ftl/render/render_params.hpp @@ -25,19 +25,21 @@ struct ViewPort { return px >= x && px <= x2() && py >= y && py <= y2(); } - __device__ __host__ inline uint2 map(const ftl::rgbd::Camera &cam, const float2 &pt) const { - return make_uint2( + __device__ __host__ inline float2 map(const ftl::rgbd::Camera &cam, const float2 &pt) const { + return make_float2( (pt.x - static_cast<float>(x)) * (static_cast<float>(cam.width) / static_cast<float>(width)), (pt.y - static_cast<float>(y)) * (static_cast<float>(cam.height) / static_cast<float>(height)) ); } - __device__ __host__ inline uint2 reverseMap(const ftl::rgbd::Camera &cam, const float2 &pt) const { - return make_uint2( + __device__ __host__ inline float2 reverseMap(const ftl::rgbd::Camera &cam, const float2 &pt) const { + return make_float2( (pt.x * (static_cast<float>(width) / static_cast<float>(cam.width))) + static_cast<float>(x), (pt.y * (static_cast<float>(height) / static_cast<float>(cam.height))) + static_cast<float>(y) ); } + + //float3x3 warpMatrix; }; /** @@ -46,7 +48,8 @@ struct ViewPort { enum class ViewPortMode : uint8_t { Disabled = 0, // Do not use the viewport data Clipping = 1, // Clip the rendering to within the viewport for stencil like effect - Warping = 2 // Stretch viewport region over entire frame + Warping = 2, // Stretch and perspective warp the viewport (requires warp matrix) + Stretch = 3 
diff --git a/components/renderers/cpp/src/CUDARender.cpp b/components/renderers/cpp/src/CUDARender.cpp
index 6d0deb848509996105a550abc5197805b4939d8c..73b078b140f3f0db140729017b66288fa77ab623 100644
--- a/components/renderers/cpp/src/CUDARender.cpp
+++ b/components/renderers/cpp/src/CUDARender.cpp
@@ -120,6 +120,7 @@ void CUDARender::_renderChannel(ftl::rgbd::Frame &output, ftl::codecs::Channel i
 	if (in == Channel::None) return;
 
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
+		if (!scene_->hasFrame(i)) continue;
 		auto &f = scene_->frames[i];
 
 		if (!f.hasChannel(in)) {
@@ -168,6 +169,7 @@ void CUDARender::_dibr(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre
 	temp_.get<GpuMat>(Channel::Depth2).setTo(cv::Scalar(0x7FFFFFFF), cvstream);
 
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
+		if (!scene_->hasFrame(i)) continue;
 		auto &f = scene_->frames[i];
 		//auto *s = scene_->sources[i];
 
@@ -236,6 +238,7 @@ void CUDARender::_mesh(ftl::rgbd::Frame &out, const Eigen::Matrix4d &t, cudaStre
 
 	// For each source depth map
 	for (size_t i=0; i < scene_->frames.size(); ++i) {
+		if (!scene_->hasFrame(i)) continue;
 		auto &f = scene_->frames[i];
 		//auto *s = scene_->sources[i];
 
@@ -453,7 +456,7 @@ void CUDARender::_renderPass2(Channels<0> chans, const Eigen::Matrix4d &t) {
 	for (auto chan : chans) {
 		ftl::codecs::Channel mapped = chan;
 
-		if (chan == Channel::Colour && scene_->frames[0].hasChannel(Channel::ColourHighRes)) mapped = Channel::ColourHighRes;
+		if (chan == Channel::Colour && scene_->firstFrame().hasChannel(Channel::ColourHighRes)) mapped = Channel::ColourHighRes;
 
 		_renderChannel(*out_, mapped, t, stream_);
 	}
diff --git a/components/renderers/cpp/src/colouriser.cpp b/components/renderers/cpp/src/colouriser.cpp
index bcaaf28d9e0bc21d51947e6e1f6317d4865bb598..69ad566a20ea391c32fea468ab5e7cb38ef63027 100644
--- a/components/renderers/cpp/src/colouriser.cpp
+++ b/components/renderers/cpp/src/colouriser.cpp
@@ -195,10 +195,10 @@ TextureObject<uchar4> &Colouriser::_processColour(ftl::rgbd::Frame &f, Channel c
 		buf.to_gpumat().setTo(colour, cvstream);
 	}
 
-	// FIXME: This doesn't work with the buffer yet.
 	if (f.hasChannel(Channel::Mask)) {
 		if (show_mask > 0) {
-			ftl::cuda::show_mask(f.getTexture<uchar4>(c), f.getTexture<uint8_t>(Channel::Mask), show_mask, make_uchar4(255,0,255,255), stream);
+			f.get<cv::cuda::GpuMat>(c).copyTo(buf.to_gpumat());
+			ftl::cuda::show_mask(buf, f.getTexture<uint8_t>(Channel::Mask), show_mask, make_uchar4(255,0,255,255), stream);
 		}
 	}
diff --git a/components/renderers/cpp/src/overlay.cpp b/components/renderers/cpp/src/overlay.cpp
index 78415f5634f9ad90f695503adf1a5a3057f36856..9b3328b336a410c29758f3cdec54e23323fde8ef 100644
--- a/components/renderers/cpp/src/overlay.cpp
+++ b/components/renderers/cpp/src/overlay.cpp
@@ -237,6 +237,8 @@ void Overlay::_drawAxis(const Eigen::Matrix4d &pose, const Eigen::Vector3f &scal
 }
 
 void Overlay::draw(ftl::rgbd::FrameSet &fs, ftl::rgbd::FrameState &state, const Eigen::Vector2f &screenSize) {
+	if (!value("enabled", false)) return;
+
 	double zfar = 8.0f;
 	auto intrin = state.getLeft();
 	intrin = intrin.scaled(screenSize[0], screenSize[1]);
diff --git a/components/renderers/cpp/src/reprojection.cu b/components/renderers/cpp/src/reprojection.cu
index ce0745003c5415cdf07e3941c0e092b60f4b65cc..f34759fcfdcd57c39cc1b0078a5e56a829db9edc 100644
--- a/components/renderers/cpp/src/reprojection.cu
+++ b/components/renderers/cpp/src/reprojection.cu
@@ -114,12 +114,21 @@ __device__ inline void accumulateOutput(TextureObject<T> &out, TextureObject<int
 }
 
 template <ViewPortMode VPMODE>
-__device__ inline uint2 convertScreen(const Parameters &params, int x, int y) {
-	return make_uint2(x,y);
+__device__ inline float2 convertScreen(const Parameters &params, int x, int y) {
+	return make_float2(x,y);
 }
 
+/*template <>
+__device__ inline float2 convertScreen<ViewPortMode::Warping>(const Parameters &params, int x, int y) {
+	const float coeff = 1.0f / (params.viewport.warpMatrix.entries[6] * x + params.viewport.warpMatrix.entries[7] * y + params.viewport.warpMatrix.entries[8]);
+	const float xcoo = coeff * (params.viewport.warpMatrix.entries[0] * x + params.viewport.warpMatrix.entries[1] * y + params.viewport.warpMatrix.entries[2]);
+	const float ycoo = coeff * (params.viewport.warpMatrix.entries[3] * x + params.viewport.warpMatrix.entries[4] * y + params.viewport.warpMatrix.entries[5]);
+	float2 pt = params.viewport.reverseMap(params.camera, make_float2(xcoo,ycoo));
+	return pt;
+}*/
+
 template <>
-__device__ inline uint2 convertScreen<ViewPortMode::Warping>(const Parameters &params, int x, int y) {
+__device__ inline float2 convertScreen<ViewPortMode::Stretch>(const Parameters &params, int x, int y) {
 	return params.viewport.reverseMap(params.camera, make_float2(x,y));
 }
 
@@ -167,7 +176,7 @@ __global__ void reprojection_kernel(
 	const float d = depth_in.tex2D((int)x, (int)y);
 
 	if (d > params.camera.minDepth && d < params.camera.maxDepth) {
-		const uint2 rpt = convertScreen<VPMODE>(params, x, y);
+		const float2 rpt = convertScreen<VPMODE>(params, x, y);
 		const float3 camPos = transform * params.camera.screenToCam(rpt.x, rpt.y, d);
 		if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
 			const float2 screenPos = camera.camToScreen<float2>(camPos);
@@ -227,7 +236,7 @@ __global__ void reprojection_kernel(
 	const float d = depth_in.tex2D((int)x, (int)y);
 
 	if (d > params.camera.minDepth && d < params.camera.maxDepth) {
-		const uint2 rpt = convertScreen<VPMODE>(params, x, y);
+		const float2 rpt = convertScreen<VPMODE>(params, x, y);
 		const float3 camPos = transform * params.camera.screenToCam(rpt.x, rpt.y, d);
 		if (camPos.z > camera.minDepth && camPos.z < camera.maxDepth) {
 			const float2 screenPos = camera.camToScreen<float2>(camPos);
@@ -272,31 +281,31 @@ void ftl::cuda::reproject(
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::Simple) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, *normals, out, contrib, params, camera, transform, transformR); break;
 		}
 	}
 } else {
@@ -304,31 +313,31 @@
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::CloseWeights><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::BestWeight) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::BestWeight><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::Simple) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::Simple><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::ColourDiscard) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscard><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		}
 	} else if (params.accumulationMode == AccumulationFunction::ColourDiscardSmooth) {
 		switch (params.viewPortMode) {
 		case ViewPortMode::Disabled: reprojection_kernel<A,B,ViewPortMode::Disabled,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		case ViewPortMode::Clipping: reprojection_kernel<A,B,ViewPortMode::Clipping,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
-		case ViewPortMode::Warping: reprojection_kernel<A,B,ViewPortMode::Warping,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
+		case ViewPortMode::Stretch: reprojection_kernel<A,B,ViewPortMode::Stretch,AccumulationFunction::ColourDiscardSmooth><<<gridSize, blockSize, 0, stream>>>(in, depth_src, depth_in, weights, out, contrib, params, camera, transform, transformR); break;
 		}
 	}
 }
diff --git a/components/renderers/cpp/src/screen.cu b/components/renderers/cpp/src/screen.cu
index 1197f875fa7b0ca5a1649ceaecc70f0670d608f9..cb041ac0bddb2eee2518314ea7b703775e374f08 100644
--- a/components/renderers/cpp/src/screen.cu
+++ b/components/renderers/cpp/src/screen.cu
@@ -10,6 +10,10 @@ using ftl::render::ViewPortMode;
 
 #define T_PER_BLOCK 8
 
+__device__ inline uint2 make_uint2(const float2 &f) {
+	return {static_cast<uint>(f.x), static_cast<uint>(f.y)};
+}
+
 template <ViewPortMode VPMODE>
 __device__ inline uint2 convertToScreen(const Parameters &params, const float3 &camPos);
 
@@ -25,10 +29,19 @@ __device__ inline uint2 convertToScreen<ViewPortMode::Clipping>(const Parameters
 }
 
 template <>
-__device__ inline uint2 convertToScreen<ViewPortMode::Warping>(const Parameters &params, const float3 &camPos) {
-	return params.viewport.map(params.camera, params.camera.camToScreen<float2>(camPos));
+__device__ inline uint2 convertToScreen<ViewPortMode::Stretch>(const Parameters &params, const float3 &camPos) {
+	return make_uint2(params.viewport.map(params.camera, params.camera.camToScreen<float2>(camPos)));
 }
 
+/*template <>
+__device__ inline uint2 convertToScreen<ViewPortMode::Warping>(const Parameters &params, const float3 &camPos) {
+	float2 pt = params.camera.camToScreen<float2>(camPos); //params.viewport.map(params.camera, params.camera.camToScreen<float2>(camPos));
+	const float coeff = 1.0f / (params.viewport.warpMatrix.entries[6] * pt.x + params.viewport.warpMatrix.entries[7] * pt.y + params.viewport.warpMatrix.entries[8]);
+	const float xcoo = coeff * (params.viewport.warpMatrix.entries[0] * pt.x + params.viewport.warpMatrix.entries[1] * pt.y + params.viewport.warpMatrix.entries[2]);
+	const float ycoo = coeff * (params.viewport.warpMatrix.entries[3] * pt.x + params.viewport.warpMatrix.entries[4] * pt.y + params.viewport.warpMatrix.entries[5]);
+	return make_uint2(xcoo, ycoo);
+}*/
+
 /*
  * Convert source screen position to output screen coordinates.
 */
@@ -68,7 +81,7 @@ void ftl::cuda::screen_coord(TextureObject<float> &depth, TextureObject<float> &
 	switch (params.viewPortMode) {
 	case ViewPortMode::Disabled: screen_coord_kernel<ViewPortMode::Disabled><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
 	case ViewPortMode::Clipping: screen_coord_kernel<ViewPortMode::Clipping><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
-	case ViewPortMode::Warping: screen_coord_kernel<ViewPortMode::Warping><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
+	case ViewPortMode::Stretch: screen_coord_kernel<ViewPortMode::Stretch><<<gridSize, blockSize, 0, stream>>>(depth, depth_out, screen_out, params, pose, camera); break;
 	}
 	cudaSafeCall( cudaGetLastError() );
 }
diff --git a/components/rgbd-sources/include/ftl/rgbd/camera.hpp b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
index e34c88f4ccecb6fcd5545de5aec0c0793354ffa3..2abcf8f8755d096d4e099e4911204e3600158367 100644
--- a/components/rgbd-sources/include/ftl/rgbd/camera.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/camera.hpp
@@ -40,7 +40,17 @@ struct __align__(16) Camera {
 	/**
 	 * Convert screen plus depth into camera coordinates.
 	 */
-	__device__ __host__ float3 screenToCam(uint ux, uint uy, float depth) const;
+	__device__ float3 screenToCam(int ux, int uy, float depth) const;
+
+	/**
+	 * Convert screen plus depth into camera coordinates.
+	 */
+	__device__ float3 screenToCam(uint ux, uint uy, float depth) const;
+
+	/**
+	 * Convert screen plus depth into camera coordinates.
+	 */
+	__device__ float3 screenToCam(float ux, float uy, float depth) const;
 
 	#ifndef __CUDACC__
 
@@ -87,4 +97,18 @@ inline float3 ftl::rgbd::Camera::screenToCam(uint ux, uint uy, float depth) cons
 	return make_float3(depth*x, depth*y, depth);
 }
 
+__device__
+inline float3 ftl::rgbd::Camera::screenToCam(int ux, int uy, float depth) const {
+	const float x = static_cast<float>(((float)ux+cx) / fx);
+	const float y = static_cast<float>(((float)uy+cy) / fy);
+	return make_float3(depth*x, depth*y, depth);
+}
+
+__device__
+inline float3 ftl::rgbd::Camera::screenToCam(float ux, float uy, float depth) const {
+	const float x = static_cast<float>((ux+cx) / fx);
+	const float y = static_cast<float>((uy+cy) / fy);
+	return make_float3(depth*x, depth*y, depth);
+}
+
 #endif
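[Reviewer note, not part of the patch] The new float overload of screenToCam pairs with convertScreen()/reverseMap() now returning float2, so reprojection can back-project at sub-pixel positions instead of rounding to the nearest source pixel. A device-side sketch of the intended call pattern (the helper name is invented):

    // Illustrative sketch only.
    __device__ float3 backproject(const ftl::rgbd::Camera &cam, const float2 &rpt, float d) {
        // rpt typically comes from convertScreen<ViewPortMode::Stretch>(...), e.g. (640.5f, 360.25f)
        return cam.screenToCam(rpt.x, rpt.y, d);   // float overload keeps the sub-pixel offset
    }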
diff --git a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
index 0276a2e8aa389009450702fba4bd8e12c860ccff..1c4f7da20ddf8a392405a054c1b808bb865e70ae 100644
--- a/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
+++ b/components/rgbd-sources/include/ftl/rgbd/frameset.hpp
@@ -122,13 +122,28 @@ class Builder : public Generator {
 	 */
 	ftl::rgbd::Frame &get(int64_t timestamp, size_t ix);
 
+	/**
+	 * Get the entire frameset for a given timestamp.
+	 */
+	ftl::rgbd::FrameSet *get(int64_t timestamp);
+
 	/**
 	 * Mark a frame as completed.
 	 */
 	void completed(int64_t ts, size_t ix);
 
+	void markPartial(int64_t ts);
+
 	void setName(const std::string &name);
 
+	void setBufferSize(size_t n) { bufferSize_ = n; }
+
+	/**
+	 * Retrieve an fps + latency pair, averaged since last call to this
+	 * function.
+	 */
+	static std::pair<float,float> getStatistics();
+
 	private:
 	std::list<FrameSet*> framesets_;	// Active framesets
 	std::list<FrameSet*> allocated_;	// Keep memory allocations
@@ -137,9 +152,6 @@ class Builder : public Generator {
 	ftl::rgbd::VideoCallback cb_;
 	MUTEX mutex_;
 	int mspf_;
-	float latency_;
-	float fps_;
-	int stats_count_;
 	int64_t last_ts_;
 	int64_t last_frame_;
 	int id_;
@@ -147,10 +159,16 @@ class Builder : public Generator {
 	volatile bool skip_;
 	ftl::timer::TimerHandle main_id_;
 	size_t size_;
+	size_t bufferSize_;
 	std::vector<ftl::rgbd::FrameState*> states_;
 	std::string name_;
 
+	static MUTEX msg_mutex__;
+	static float latency__;
+	static float fps__;
+	static int stats_count__;
+
 	/* Insert a new frameset into the buffer, along with all intermediate
 	 * framesets between the last in buffer and the new one. */
@@ -158,6 +176,7 @@ class Builder : public Generator {
 	/* Find a frameset with given latency in frames. */
 	ftl::rgbd::FrameSet *_getFrameset();
+	ftl::rgbd::FrameSet *_get(int64_t timestamp);
 
 	/* Search for a matching frameset. */
 	ftl::rgbd::FrameSet *_findFrameset(int64_t ts);
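[Reviewer note, not part of the patch] Builder now has a configurable frameset buffer depth, and the fps/latency counters become statics aggregated across all builders. A possible call-site sketch (surrounding code assumed, latency units taken to be milliseconds of frame timestamp):

    // Illustrative sketch only.
    ftl::rgbd::Builder builder;
    builder.setBufferSize(0);   // dispatch a frameset as soon as it completes
    builder.setBufferSize(3);   // or always dispatch with a fixed three-frameset delay

    // Averages since the previous call to getStatistics().
    auto [fps, latency] = ftl::rgbd::Builder::getStatistics();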
diff --git a/components/rgbd-sources/src/frameset.cpp b/components/rgbd-sources/src/frameset.cpp
index 4217cd3071abce6397bcf9630378e4e5aaa30281..0ed610f7909a68b8ee84d397ea44c11023831734 100644
--- a/components/rgbd-sources/src/frameset.cpp
+++ b/components/rgbd-sources/src/frameset.cpp
@@ -16,6 +16,11 @@ using ftl::codecs::Channel;
 using ftl::rgbd::FrameSet;
 using ftl::codecs::Channels;
 
+float Builder::latency__ = 0.0f;
+float Builder::fps__ = 0.0f;
+int Builder::stats_count__ = 0;
+MUTEX Builder::msg_mutex__;
+
 /*void FrameSet::upload(ftl::codecs::Channels<0> c, cudaStream_t stream) {
 	for (auto &f : frames) {
 		f.upload(c, stream);
@@ -62,17 +67,13 @@ void FrameSet::resetFull() {
 Builder::Builder() : head_(0), id_(0) {
 	jobs_ = 0;
 	skip_ = false;
-	//setFPS(20);
+	bufferSize_ = 1;
 	size_ = 0;
 	last_frame_ = 0;
 	mspf_ = ftl::timer::getInterval();
 	name_ = "NoName";
 
-	latency_ = 0.0f;;
-	stats_count_ = 0;
-	fps_ = 0.0f;
-
 	if (size_ > 0) states_.resize(size_);
 }
 
@@ -86,21 +87,15 @@ Builder::~Builder() {
 	}
 }
 
-ftl::rgbd::Frame &Builder::get(int64_t timestamp, size_t ix) {
-	if (timestamp <= 0 || ix >= kMaxFramesInSet) throw FTL_Error("Invalid frame timestamp or index");
+ftl::rgbd::FrameSet *Builder::get(int64_t timestamp) {
+	if (timestamp <= 0) throw FTL_Error("Invalid frame timestamp");
 
 	UNIQUE_LOCK(mutex_, lk);
 
-	//LOG(INFO) << "BUILDER PUSH: " << timestamp << ", " << ix << ", " << size_;
-
-	// Size is determined by largest frame index received... note that size
-	// cannot therefore reduce.
-	if (ix >= size_) {
-		size_ = ix+1;
-		states_.resize(size_);
-	}
-	//states_[ix] = frame.origin();
+	return _get(timestamp);
+}
 
+ftl::rgbd::FrameSet *Builder::_get(int64_t timestamp) {
 	if (timestamp <= last_frame_) {
 		throw FTL_Error("Frameset already completed: " << timestamp);
 	}
@@ -111,11 +106,32 @@ ftl::rgbd::Frame &Builder::get(int64_t timestamp, size_t ix) {
 		// Add new frameset
 		fs = _addFrameset(timestamp);
 		if (!fs) throw FTL_Error("Could not add frameset");
+
+		_schedule();
 	}
 
-	if (fs->stale) {
+	if (fs->test(ftl::data::FSFlag::STALE)) {
 		throw FTL_Error("Frameset already completed");
 	}
 
+	return fs;
+}
+
+ftl::rgbd::Frame &Builder::get(int64_t timestamp, size_t ix) {
+	if (timestamp <= 0 || ix >= kMaxFramesInSet) throw FTL_Error("Invalid frame timestamp or index");
+
+	UNIQUE_LOCK(mutex_, lk);
+
+	//LOG(INFO) << "BUILDER PUSH: " << timestamp << ", " << ix << ", " << size_;
+
+	// Size is determined by largest frame index received... note that size
+	// cannot therefore reduce.
+	if (ix >= size_) {
+		size_ = ix+1;
+		states_.resize(size_);
+	}
+	//states_[ix] = frame.origin();
+
+	auto *fs = _get(timestamp);
 
 	if (ix >= fs->frames.size()) {
 		throw FTL_Error("Frame index out-of-bounds");
@@ -152,8 +168,8 @@ void Builder::completed(int64_t ts, size_t ix) {
 			++fs->count;
 		}
 
-		if (!fs->stale && static_cast<unsigned int>(fs->count) >= size_) {
-			//LOG(INFO) << "Frameset ready... " << fs->timestamp;
+		// No buffering, so do a schedule here for immediate effect
+		if (bufferSize_ == 0 && !fs->test(ftl::data::FSFlag::STALE) && static_cast<unsigned int>(fs->count) >= size_) {
 			UNIQUE_LOCK(mutex_, lk);
 			_schedule();
 		}
@@ -162,38 +178,50 @@ void Builder::completed(int64_t ts, size_t ix) {
 	}
 }
 
+void Builder::markPartial(int64_t ts) {
+	ftl::rgbd::FrameSet *fs = nullptr;
+
+	{
+		UNIQUE_LOCK(mutex_, lk);
+		fs = _findFrameset(ts);
+		if (fs) fs->set(ftl::data::FSFlag::PARTIAL);
+	}
+}
+
 void Builder::_schedule() {
 	if (size_ == 0) return;
 	ftl::rgbd::FrameSet *fs = nullptr;
 
-	//UNIQUE_LOCK(mutex_, lk);
+	// Still working on a previously scheduled frame
 	if (jobs_ > 0) return;
 
-	fs = _getFrameset();
-	//LOG(INFO) << "Latency for " << name_ << " = " << (latency_*ftl::timer::getInterval()) << "ms";
+	// Find a valid / completed frameset to process
+	fs = _getFrameset();
 
+	// We have a frameset so create a thread job to call the onFrameset callback
 	if (fs) {
-		//UNIQUE_LOCK(fs->mtx, lk2);
-		// The buffers are invalid after callback so mark stale
-		//fs->stale = true;
 		jobs_++;
-		//lk.unlock();
 
 		ftl::pool.push([this,fs](int) {
 			UNIQUE_LOCK(fs->mtx, lk2);
+
+			// Calling onFrameset but without all frames so mark as partial
+			if (fs->count < fs->frames.size()) fs->set(ftl::data::FSFlag::PARTIAL);
+
 			try {
 				if (cb_) cb_(*fs);
-				//LOG(INFO) << "Frameset processed (" << name_ << "): " << fs->timestamp;
+			} catch(const ftl::exception &e) {
+				LOG(ERROR) << "Exception in frameset builder: " << e.what();
+				LOG(ERROR) << "Trace = " << e.trace();
 			} catch(std::exception &e) {
 				LOG(ERROR) << "Exception in frameset builder: " << e.what();
 			}
 
-			//fs->resetFull();
-
 			UNIQUE_LOCK(mutex_, lk);
 			_freeFrameset(fs);
-
 			jobs_--;
+
+			// Schedule another frame immediately (or try to)
 			_schedule();
 		});
 	}
@@ -205,68 +233,7 @@ size_t Builder::size() {
 }
 
 void Builder::onFrameSet(const std::function<bool(ftl::rgbd::FrameSet &)> &cb) {
-	/*if (!cb) {
-		main_id_.cancel();
-		return;
-	}*/
-
 	cb_ = cb;
-
-	/*if (main_id_.id() != -1) {
-		main_id_.cancel();
-	}*/
-
-	// 3. Issue IO retrieve ad compute jobs before finding a valid
-	// frame at required latency to pass to callback.
-	/*main_id_ = ftl::timer::add(ftl::timer::kTimerMain, [this,cb](int64_t ts) {
-		//if (jobs_ > 0) LOG(ERROR) << "SKIPPING TIMER JOB " << ts;
-		if (jobs_ > 0) return true;
-		if (size_ == 0) return true;
-		jobs_++;
-
-		// Find a previous frameset and specified latency and do the sync
-		// callback with that frameset.
-		//if (latency_ > 0) {
-			ftl::rgbd::FrameSet *fs = nullptr;
-
-			UNIQUE_LOCK(mutex_, lk);
-			fs = _getFrameset();
-
-			//LOG(INFO) << "Latency for " << name_ << " = " << (latency_*ftl::timer::getInterval()) << "ms";
-
-			if (fs) {
-				UNIQUE_LOCK(fs->mtx, lk2);
-				// The buffers are invalid after callback so mark stale
-				fs->stale = true;
-				lk.unlock();
-
-				//LOG(INFO) << "PROCESS FRAMESET";
-
-				//ftl::pool.push([this,fs,cb](int) {
-					try {
-						cb(*fs);
-						//LOG(INFO) << "Frameset processed (" << name_ << "): " << fs->timestamp;
-					} catch(std::exception &e) {
-						LOG(ERROR) << "Exception in group sync callback: " << e.what();
-					}
-
-					//fs->resetFull();
-
-					lk.lock();
-					_freeFrameset(fs);
-
-					jobs_--;
-				//});
-			} else {
-				//LOG(INFO) << "NO FRAME FOUND: " << name_ << " " << size_;
-				//latency_++;
-				jobs_--;
-			}
-		//}
-
-		//if (jobs_ == 0) LOG(INFO) << "LAST JOB = Main";
-		return true;
-	});*/
 }
 
 ftl::rgbd::FrameState &Builder::state(size_t ix) {
@@ -287,18 +254,33 @@ static void mergeFrameset(ftl::rgbd::FrameSet &f1, ftl::rgbd::FrameSet &f2) {
 }
 
 void Builder::_recordStats(float fps, float latency) {
-	latency_ += latency;
-	fps_ += fps;
-	++stats_count_;
+	UNIQUE_LOCK(msg_mutex__, lk);
+	latency__ += latency;
+	fps__ += fps;
+	++stats_count__;
 
-	if (fps_/float(stats_count_) <= float(stats_count_)) {
+	/*if (fps_/float(stats_count_) <= float(stats_count_)) {
 		fps_ /= float(stats_count_);
 		latency_ /= float(stats_count_);
 		LOG(INFO) << name_ << ": fps = " << fps_ << ", latency = " << latency_;
 		fps_ = 0.0f;
 		latency_ = 0.0f;
 		stats_count_ = 0;
-	}
+	}*/
+}
+
+std::pair<float,float> Builder::getStatistics() {
+	UNIQUE_LOCK(msg_mutex__, lk);
+	if (stats_count__ == 0.0f) return {0.0f,0.0f};
+	fps__ /= float(stats_count__);
+	latency__ /= float(stats_count__);
+	float fps = fps__;
+	float latency = latency__;
+	//LOG(INFO) << name_ << ": fps = " << fps_ << ", latency = " << latency_;
+	fps__ = 0.0f;
+	latency__ = 0.0f;
+	stats_count__ = 0;
+	return {fps,latency};
 }
 
 ftl::rgbd::FrameSet *Builder::_findFrameset(int64_t ts) {
@@ -320,23 +302,36 @@ ftl::rgbd::FrameSet *Builder::_findFrameset(int64_t ts) {
 */
 ftl::rgbd::FrameSet *Builder::_getFrameset() {
 	//LOG(INFO) << "BUF SIZE = " << framesets_.size();
-	for (auto i=framesets_.begin(); i!=framesets_.end(); i++) {
+
+	auto i = framesets_.begin();
+	int N = bufferSize_;
+
+	// Skip N frames to fixed buffer location
+	if (bufferSize_ > 0) {
+		while (N-- > 0 && i != framesets_.end()) ++i;
+	// Otherwise skip to first fully completed frame
+	} else {
+		while (i != framesets_.end() && (*i)->count < (*i)->frames.size()) ++i;
+	}
+
+	if (i != framesets_.end()) {
 		auto *f = *i;
 		//LOG(INFO) << "GET: " << f->count << " of " << size_;
-		if (!f->stale && static_cast<unsigned int>(f->count) >= size_) {
+		//if (!f->stale && static_cast<unsigned int>(f->count) >= size_) {
 			//LOG(INFO) << "GET FRAMESET and remove: " << f->timestamp;
 			auto j = framesets_.erase(i);
 
 			last_frame_ = f->timestamp;
-			f->stale = true;
+			//f->stale = true;
+			f->set(ftl::data::FSFlag::STALE);
 
 			int count = 0;
 			// Merge all previous frames
+			// TODO: Remove?
 			while (j!=framesets_.end()) {
 				++count;
 
 				auto *f2 = *j;
-				//LOG(INFO) << "MERGE: " << f2->count;
 				j = framesets_.erase(j);
 				mergeFrameset(*f,*f2);
 				_freeFrameset(f2);
@@ -349,7 +344,7 @@ ftl::rgbd::FrameSet *Builder::_getFrameset() {
 			_recordStats(framerate, now - f->timestamp);
 			last_ts_ = now;
 			return f;
-		}
+		//}
 	}
 
 	return nullptr;
@@ -378,7 +373,7 @@ ftl::rgbd::FrameSet *Builder::_addFrameset(int64_t timestamp) {
 	newf->id = id_;
 	newf->count = 0;
 	newf->mask = 0;
-	newf->stale = false;
+	newf->clearFlags();
 	newf->frames.resize(size_);
 	newf->pose.setIdentity();
 	newf->clearData();
diff --git a/components/streams/include/ftl/streams/netstream.hpp b/components/streams/include/ftl/streams/netstream.hpp
index 3c7aef3c64e917e4daac0722340414b2123dffd8..0db0013196f0fe8aa03adc74480223a004863c95 100644
--- a/components/streams/include/ftl/streams/netstream.hpp
+++ b/components/streams/include/ftl/streams/netstream.hpp
@@ -60,6 +60,12 @@ class Net : public Stream {
 
 	inline const ftl::UUID &getPeer() const { return peer_; }
 
+	/**
+	 * Return the average bitrate of all streams since the last call to this
+	 * function. Units are Mbps.
+	 */
+	static float getRequiredBitrate();
+
 	private:
 	SHARED_MUTEX mutex_;
 	bool active_;
@@ -76,10 +82,10 @@ class Net : public Stream {
 	std::array<std::atomic<int>,32> reqtally_;
 	ftl::codecs::Channels<0> last_selected_;
 
-	float req_bitrate_;
-	float sample_count_;
-	int64_t last_msg_;
-	MUTEX msg_mtx_;
+	static float req_bitrate__;
+	static float sample_count__;
+	static int64_t last_msg__;
+	static MUTEX msg_mtx__;
 
 	std::list<detail::StreamClient> clients_;
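[Reviewer note, not part of the patch] Because the bitrate counters are now statics, getRequiredBitrate() reports an aggregate over every Net stream since the previous call, e.g. for a periodic status line (assuming the ftl::stream namespace and the existing LOG macros):

    // Illustrative sketch only.
    float mbps = ftl::stream::Net::getRequiredBitrate();   // Mbps since the last call
    LOG(INFO) << "Streams currently require " << mbps << " Mbps";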
diff --git a/components/streams/src/filestream.cpp b/components/streams/src/filestream.cpp
index 705388a2eea2657af8cb53405cd960c4e6ce540a..fe7ec994d5430db0318512c21a3a199093f56d23 100644
--- a/components/streams/src/filestream.cpp
+++ b/components/streams/src/filestream.cpp
@@ -137,7 +137,7 @@ bool File::readPacket(std::tuple<ftl::codecs::StreamPacket,ftl::codecs::Packet>
 			obj.convert(data);
 		} catch (std::exception &e) {
 			LOG(INFO) << "Corrupt message: " << buffer_in_.nonparsed_size() << " - " << e.what();
-			active_ = false;
+			//active_ = false;
 			return false;
 		}
 
@@ -249,6 +249,8 @@ bool File::tick(int64_t ts) {
 		timestamp_ += interval_;
 
 		if (data_.size() == 0 && value("looping", true)) {
+			buffer_in_.reset();
+			buffer_in_.remove_nonparsed_buffer();
 			_open();
 
 			timestart_ = (ftl::timer::get_time() / ftl::timer::getInterval()) * ftl::timer::getInterval();
diff --git a/components/streams/src/netstream.cpp b/components/streams/src/netstream.cpp
index c91ca80aa03e9f4fa1990e05d9ddf1a2931f4d88..e50b494d24928991c430665d7ac09ce245d46ac2 100644
--- a/components/streams/src/netstream.cpp
+++ b/components/streams/src/netstream.cpp
@@ -17,6 +17,11 @@ using std::optional;
 
 static constexpr int kTallyScale = 10;
 
+float Net::req_bitrate__ = 0.0f;
+float Net::sample_count__ = 0.0f;
+int64_t Net::last_msg__ = 0;
+MUTEX Net::msg_mtx__;
+
 Net::Net(nlohmann::json &config, ftl::net::Universe *net) : Stream(config), active_(false), net_(net), clock_adjust_(0) {
 	// TODO: Install "find_stream" binding if not installed...
 	if (!net_->isBound("find_stream")) {
@@ -40,7 +45,6 @@ Net::Net(nlohmann::json &config, ftl::net::Universe *net) : Stream(config), acti
 	}
 
 	last_frame_ = 0;
-	last_msg_ = 0;
 	time_peer_ = ftl::UUID(0);
 }
 
@@ -129,6 +133,7 @@ bool Net::post(const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet
 }
 
 bool Net::begin() {
+	if (active_) return true;
 	if (!get<string>("uri")) return false;
 
 	active_ = true;
@@ -271,26 +276,27 @@ bool Net::_sendRequest(Channel c, uint8_t frameset, uint8_t frames, uint8_t coun
 
 	net_->send(peer_, uri_, (short)0, spkt, pkt);
 
-	if (true) {  // TODO: Not every time
+	// FIXME: Find a way to use this for correct stream latency info
+	if (false) {  // TODO: Not every time
 		auto start = std::chrono::high_resolution_clock::now();
-		int64_t mastertime;
+		//int64_t mastertime;
 
 		try {
-			mastertime = net_->call<int64_t>(peer_, "__ping__");
+			net_->asyncCall<int64_t>(peer_, "__ping__", [this, start](const int64_t &mastertime) {
+				auto elapsed = std::chrono::high_resolution_clock::now() - start;
+				int64_t latency = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
+				clock_adjust_ = mastertime - (ftl::timer::get_time() + (latency/2));
+
+				if (clock_adjust_ > 0) {
+					LOG(INFO) << "Clock adjustment: " << clock_adjust_;
+				}
+			});
 		} catch (...) {
 			LOG(ERROR) << "Ping failed";
 			// Reset time peer and remove timer
 			time_peer_ = ftl::UUID(0);
 			return false;
 		}
-
-		auto elapsed = std::chrono::high_resolution_clock::now() - start;
-		int64_t latency = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
-		clock_adjust_ = mastertime - (ftl::timer::get_time() + (latency/2));
-
-		if (clock_adjust_ > 0) {
-			LOG(INFO) << "Clock adjustment: " << clock_adjust_;
-		}
 	}
 
 	return true;
}
@@ -347,27 +353,26 @@ bool Net::_processRequest(ftl::net::Peer &p, const ftl::codecs::Packet &pkt) {
 	// Sync clocks!
 	if (ftl::timer::isClockSlave() && p.id() == time_peer_) {
 		auto start = std::chrono::high_resolution_clock::now();
-		int64_t mastertime;
 
 		try {
-			mastertime = net_->call<int64_t>(time_peer_, "__ping__");
+			net_->asyncCall<int64_t>(time_peer_, "__ping__", [this, start](const int64_t &mastertime) {
+				auto elapsed = std::chrono::high_resolution_clock::now() - start;
+				int64_t latency = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
+				auto clock_adjust = mastertime - (ftl::timer::get_time() + (latency/2));
+
+				if (clock_adjust > 0) {
+					LOG(INFO) << "Clock adjustment: " << clock_adjust;
+					//LOG(INFO) << "Latency: " << (latency / 2);
+					//LOG(INFO) << "Local: " << std::chrono::time_point_cast<std::chrono::milliseconds>(start).time_since_epoch().count() << ", master: " << mastertime;
+					ftl::timer::setClockAdjustment(clock_adjust);
+				}
+			});
 		} catch (...) {
 			LOG(ERROR) << "Ping failed";
 			// Reset time peer and remove timer
 			time_peer_ = ftl::UUID(0);
 			return false;
 		}
-
-		auto elapsed = std::chrono::high_resolution_clock::now() - start;
-		int64_t latency = std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
-		auto clock_adjust = mastertime - (ftl::timer::get_time() + (latency/2));
-
-		if (clock_adjust > 0) {
-			LOG(INFO) << "Clock adjustment: " << clock_adjust;
-			//LOG(INFO) << "Latency: " << (latency / 2);
-			//LOG(INFO) << "Local: " << std::chrono::time_point_cast<std::chrono::milliseconds>(start).time_since_epoch().count() << ", master: " << mastertime;
-			ftl::timer::setClockAdjustment(clock_adjust);
-		}
 	}
 
 	return false;
@@ -378,16 +383,26 @@ void Net::_checkDataRate(size_t tx_size, int64_t tx_latency, int64_t ts) {
 	float min_mbps = (float(tx_size) * 8.0f * (1000.0f / float(ftl::timer::getInterval()))) / 1048576.0f;
 	//if (actual_mbps > 0.0f && actual_mbps < min_mbps) LOG(WARNING) << "Bitrate = " << actual_mbps << "Mbps, min required = " << min_mbps << "Mbps";
 
-	UNIQUE_LOCK(msg_mtx_,lk);
-	req_bitrate_ += float(tx_size) * 8.0f;
-	sample_count_ += 1.0f;
+	UNIQUE_LOCK(msg_mtx__,lk);
+	req_bitrate__ += float(tx_size) * 8.0f;
+	sample_count__ += 1.0f;
 
-	if (ts - last_msg_ >= 1000) {
+	/*if (ts - last_msg_ >= 1000) {
 		DLOG(INFO) << "Required Bitrate = " << (req_bitrate_ / float(ts - last_msg_) * 1000.0f / 1048576.0f) << "Mbps";
 		last_msg_ = ts;
 		req_bitrate_ = 0.0f;
 		sample_count_ = 0.0f;
-	}
+	}*/
+}
+
+float Net::getRequiredBitrate() {
+	int64_t ts = ftl::timer::get_time();
+	UNIQUE_LOCK(msg_mtx__,lk);
+	float r = (req_bitrate__ / float(ts - last_msg__) * 1000.0f / 1048576.0f);
+	last_msg__ = ts;
+	req_bitrate__ = 0.0f;
+	sample_count__ = 0.0f;
+	return r;
 }
 
 bool Net::end() {
diff --git a/components/streams/src/parsers.cpp b/components/streams/src/parsers.cpp
index 9a5f1ee29dd6b3fc8193b5a1bc33dc98f0198e38..95c521da670eaead6ac6723e4636e9b1440d8b2e 100644
--- a/components/streams/src/parsers.cpp
+++ b/components/streams/src/parsers.cpp
@@ -10,11 +10,11 @@ ftl::rgbd::Camera ftl::stream::parseCalibration(const ftl::codecs::Packet &pkt)
 	auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
 	unpacked.get().convert(params);
 
-	LOG(INFO) << "Got Calibration: "
-			  << std::get<0>(params).width << "x" << std::get<0>(params).height
-			  << ", fx: " << std::get<0>(params).fx
-			  << ", cx: " << std::get<0>(params).cx
-			  << ", cy: " << std::get<0>(params).cy;
+	//LOG(INFO) << "Got Calibration: "
+	//		  << std::get<0>(params).width << "x" << std::get<0>(params).height
+	//		  << ", fx: " << std::get<0>(params).fx
+	//		  << ", cx: " << std::get<0>(params).cx
+	//		  << ", cy: " << std::get<0>(params).cy;
 
 	return std::get<0>(params);
 }
@@ -40,6 +40,6 @@ std::string ftl::stream::parseConfig(const ftl::codecs::Packet &pkt) {
 	auto unpacked = msgpack::unpack((const char*)pkt.data.data(), pkt.data.size());
 	unpacked.get().convert(cfg);
 
-	LOG(INFO) << "Config Received: " << cfg;
+	//LOG(INFO) << "Config Received: " << cfg;
 	return cfg;
 }
diff --git a/components/streams/src/receiver.cpp b/components/streams/src/receiver.cpp
index d09405b1dfb8ddb37718b969b8e3ac0a61957b2a..b98b65534a7c0e8cd391e26da98d897adbc230ae 100644
--- a/components/streams/src/receiver.cpp
+++ b/components/streams/src/receiver.cpp
@@ -27,9 +27,18 @@ Receiver::Receiver(nlohmann::json &config) : ftl::Configurable(config), stream_(
 	timestamp_ = 0;
 	second_channel_ = Channel::Depth;
 
+	size_t bsize = value("frameset_buffer_size", 3);
 	for (int i=0; i<ftl::stream::kMaxStreams; ++i) {
 		builder_[i].setID(i);
+		builder_[i].setBufferSize(bsize);
 	}
+
+	on("frameset_buffer_size", [this](const ftl::config::Event &e) {
+		size_t bsize = value("frameset_buffer_size", 3);
+		for (int i=0; i<ftl::stream::kMaxStreams; ++i) {
+			builder_[i].setBufferSize(bsize);
+		}
+	});
 }
 
 Receiver::~Receiver() {
@@ -125,8 +134,15 @@ void Receiver::_processState(const StreamPacket &spkt, const Packet &pkt) {
 
 void Receiver::_processData(const StreamPacket &spkt, const Packet &pkt) {
 	//InternalVideoStates &frame = _getVideoFrame(spkt);
-	auto &frame = builder_[spkt.streamID].get(spkt.timestamp, spkt.frame_number);
-	frame.createRawData(spkt.channel, pkt.data);
+	if (spkt.frameNumber() == 255) {
+		auto *fs = builder_[spkt.streamID].get(spkt.timestamp);
+		if (fs) {
+			fs->createRawData(spkt.channel, pkt.data);
+		}
+	} else {
+		auto &frame = builder_[spkt.streamID].get(spkt.timestamp, spkt.frame_number);
+		frame.createRawData(spkt.channel, pkt.data);
+	}
 }
 
 void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
@@ -161,7 +177,8 @@ void Receiver::_processAudio(const StreamPacket &spkt, const Packet &pkt) {
 		fs.id = 0;
 		fs.timestamp = frame.timestamp;
 		fs.count = 1;
-		fs.stale = false;
+		//fs.stale = false;
+		fs.clear(ftl::data::FSFlag::STALE);
 		frame.frame.swapTo(fs.frames.emplace_back());
 
 		audio_cb_(fs);
@@ -218,6 +235,11 @@ void Receiver::_processVideo(const StreamPacket &spkt, const Packet &pkt) {
 
 	bool apply_Y_filter = value("apply_Y_filter", true);
 
+	// Mark a frameset as being partial
+	if (pkt.flags & ftl::codecs::kFlagPartial) {
+		builder_[spkt.streamID].markPartial(spkt.timestamp);
+	}
+
 	// Now split the tiles from surface into frames, doing colour conversions
 	// at the same time.
 	// Note: Done in reverse to allocate correct number of frames first time round
@@ -340,6 +362,12 @@ void Receiver::setStream(ftl::stream::Stream *s) {
 		//if (spkt.frameSetID() > 0) LOG(INFO) << "Frameset " << spkt.frameSetID() << " received: " << (int)spkt.channel;
 		if (spkt.frameSetID() >= ftl::stream::kMaxStreams) return;
 
+		// Frameset level data channels
+		if (spkt.frameNumber() == 255 && pkt.data.size() > 0) {
+			_processData(spkt,pkt);
+			return;
+		}
+
 		// Too many frames, so ignore.
 		if (spkt.frameNumber() >= value("max_frames",32)) return;
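[Reviewer note, not part of the patch] The receiver's frameset_buffer_size option is pushed into every Builder and can be changed at runtime through the on() handler added above, for example:

    // Illustrative sketch only.
    receiver->set("frameset_buffer_size", 0);   // lowest latency: dispatch as soon as a set completes
    receiver->set("frameset_buffer_size", 3);   // default-style fixed buffering of three framesets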
diff --git a/components/streams/src/sender.cpp b/components/streams/src/sender.cpp
index 659e8f5151a6714d11661ca238914679d4a8ff18..b0a5684b68f2248d766854186055ed2d11b4a693 100644
--- a/components/streams/src/sender.cpp
+++ b/components/streams/src/sender.cpp
@@ -139,6 +139,25 @@ void Sender::post(ftl::rgbd::FrameSet &fs) {
 
 	FTL_Profile("SenderPost", 0.02);
 
+	// Send any frameset data channels
+	for (auto c : fs.getDataChannels()) {
+		StreamPacket spkt;
+		spkt.version = 4;
+		spkt.timestamp = fs.timestamp;
+		spkt.streamID = 0; //fs.id;
+		spkt.frame_number = 255;
+		spkt.channel = c;
+
+		ftl::codecs::Packet pkt;
+		pkt.codec = ftl::codecs::codec_t::MSGPACK;
+		pkt.definition = ftl::codecs::definition_t::Any;
+		pkt.frame_count = 1;
+		pkt.flags = 0;
+		pkt.bitrate = 0;
+		pkt.data = fs.getRawData(c);
+		stream_->post(spkt, pkt);
+	}
+
 	for (size_t i=0; i<fs.frames.size(); ++i) {
 		const auto &frame = fs.frames[i];
 
@@ -245,13 +264,13 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 	uint32_t offset = 0;
 	while (offset < fs.frames.size()) {
 		Channel cc = c;
-		if ((cc == Channel::Colour) && fs.frames[offset].hasChannel(Channel::ColourHighRes)) {
+		if ((cc == Channel::Colour) && fs.firstFrame().hasChannel(Channel::ColourHighRes)) {
 			cc = Channel::ColourHighRes;
 		}
 
-		if ((cc == Channel::Right) && fs.frames[offset].hasChannel(Channel::RightHighRes)) {
+		if ((cc == Channel::Right) && fs.firstFrame().hasChannel(Channel::RightHighRes)) {
 			cc = Channel::RightHighRes;
-			fs.frames[offset].upload(cc);
+			//fs.frames[offset].upload(cc);
 		}
 
 		StreamPacket spkt;
@@ -277,6 +296,7 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 
 			// Upload if in host memory
 			for (auto &f : fs.frames) {
+				if (!fs.hasFrame(f.id)) continue;
 				if (f.isCPU(c)) {
 					f.upload(Channels<0>(cc), cv::cuda::StreamAccessor::getStream(enc->stream()));
 				}
@@ -292,8 +312,6 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 			enc->stream().waitForCompletion();
 
 			if (enc) {
-				// FIXME: Timestamps may not always be aligned to interval.
-				//if (do_inject || fs.timestamp % (10*ftl::timer::getInterval()) == 0) enc->reset();
 				if (reset) enc->reset();
 				try {
 
@@ -308,6 +326,9 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 					else if (lossless && ftl::codecs::isFloatChannel(cc)) pkt.flags = ftl::codecs::kFlagFloat;
 					else pkt.flags = ftl::codecs::kFlagFlipRGB;
 
+					// In the event of partial frames, add a flag to indicate that
+					if (fs.count < fs.frames.size()) pkt.flags |= ftl::codecs::kFlagPartial;
+
 					// Choose correct region of interest into the surface.
 					cv::Rect roi = _generateROI(fs, cc, offset);
 					cv::cuda::GpuMat sroi = tile.surface(roi);
@@ -336,9 +357,9 @@ void Sender::_encodeChannel(ftl::rgbd::FrameSet &fs, Channel c, bool reset) {
 }
 
 cv::Rect Sender::_generateROI(const ftl::rgbd::FrameSet &fs, ftl::codecs::Channel c, int offset) {
-	const ftl::rgbd::Frame *cframe = &fs.frames[offset];
-	int rwidth = cframe->get<cv::cuda::GpuMat>(c).cols;
-	int rheight = cframe->get<cv::cuda::GpuMat>(c).rows;
+	const ftl::rgbd::Frame &cframe = fs.firstFrame();
+	int rwidth = cframe.get<cv::cuda::GpuMat>(c).cols;
+	int rheight = cframe.get<cv::cuda::GpuMat>(c).rows;
 	auto [tx,ty] = ftl::codecs::chooseTileConfig(fs.frames.size()-offset);
 	return cv::Rect(0, 0, tx*rwidth, ty*rheight);
 }
@@ -374,13 +395,14 @@ float Sender::_selectFloatMax(Channel c) {
 int Sender::_generateTiles(const ftl::rgbd::FrameSet &fs, int offset, Channel c, cv::cuda::Stream &stream, bool lossless) {
 	auto &surface = _getTile(fs.id, c);
 
-	const ftl::rgbd::Frame *cframe = &fs.frames[offset];
-	auto &m = cframe->get<cv::cuda::GpuMat>(c);
+	const ftl::rgbd::Frame *cframe = nullptr; //&fs.frames[offset];
+
+	const auto &m = fs.firstFrame().get<cv::cuda::GpuMat>(c);
 
 	// Choose tile configuration and allocate memory
 	auto [tx,ty] = ftl::codecs::chooseTileConfig(fs.frames.size());
-	int rwidth = cframe->get<cv::cuda::GpuMat>(c).cols;
-	int rheight = cframe->get<cv::cuda::GpuMat>(c).rows;
+	int rwidth = m.cols;
+	int rheight = m.rows;
 	int width = tx * rwidth;
 	int height = ty * rheight;
 	int tilecount = tx*ty;
@@ -390,28 +412,34 @@ int Sender::_generateTiles(const ftl::rgbd::FrameSet &fs, int offset, Channel c,
 
 	// Loop over tiles with ROI mats and do colour conversions.
 	while (tilecount > 0 && count+offset < fs.frames.size()) {
-		auto &m = cframe->get<cv::cuda::GpuMat>(c);
-		cv::Rect roi((count % tx)*rwidth, (count / tx)*rheight, rwidth, rheight);
-		cv::cuda::GpuMat sroi = surface.surface(roi);
-
-		if (m.type() == CV_32F) {
-			if (lossless) {
-				m.convertTo(sroi, CV_16UC1, 1000, stream);
+		if (fs.hasFrame(offset+count)) {
+			cframe = &fs.frames[offset+count];
+			auto &m = cframe->get<cv::cuda::GpuMat>(c);
+			cv::Rect roi((count % tx)*rwidth, (count / tx)*rheight, rwidth, rheight);
+			cv::cuda::GpuMat sroi = surface.surface(roi);
+
+			if (m.type() == CV_32F) {
+				if (lossless) {
+					m.convertTo(sroi, CV_16UC1, 1000, stream);
+				} else {
+					ftl::cuda::depth_to_vuya(m, sroi, _selectFloatMax(c), stream);
+				}
+			} else if (m.type() == CV_8UC4) {
+				cv::cuda::cvtColor(m, sroi, cv::COLOR_BGRA2RGBA, 0, stream);
+			} else if (m.type() == CV_8UC3) {
+				cv::cuda::cvtColor(m, sroi, cv::COLOR_BGR2RGBA, 0, stream);
 			} else {
-				ftl::cuda::depth_to_vuya(m, sroi, _selectFloatMax(c), stream);
+				LOG(ERROR) << "Unsupported colour format: " << m.type();
+				return 0;
 			}
-		} else if (m.type() == CV_8UC4) {
-			cv::cuda::cvtColor(m, sroi, cv::COLOR_BGRA2RGBA, 0, stream);
-		} else if (m.type() == CV_8UC3) {
-			cv::cuda::cvtColor(m, sroi, cv::COLOR_BGR2RGBA, 0, stream);
 		} else {
-			LOG(ERROR) << "Unsupported colour format: " << m.type();
-			return 0;
+			cv::Rect roi((count % tx)*rwidth, (count / tx)*rheight, rwidth, rheight);
+			cv::cuda::GpuMat sroi = surface.surface(roi);
+			sroi.setTo(cv::Scalar(0));
 		}
 
 		++count;
 		--tilecount;
-		cframe = &fs.frames[offset+count];
 	}
 
 	return count;
diff --git a/components/streams/test/receiver_unit.cpp b/components/streams/test/receiver_unit.cpp
index 062dce960ab7daab5fec0ec6a354c9247f1a5869..2aadaa8e4cc4d51ba2598c75a7f8752d8a9aea91 100644
--- a/components/streams/test/receiver_unit.cpp
+++ b/components/streams/test/receiver_unit.cpp
@@ -63,6 +63,7 @@ TEST_CASE( "Receiver generating onFrameSet" ) {
 	};
 	TestStream stream(cfg2);
 	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 0);
 
 	ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480);
 
@@ -260,6 +261,7 @@ TEST_CASE( "Receiver sync bugs" ) {
 	};
 	TestStream stream(cfg2);
 	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 0);
 
 	ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480);
 
@@ -329,3 +331,78 @@ TEST_CASE( "Receiver sync bugs" ) {
 	//while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
 	delete receiver;
 }
+
+TEST_CASE( "Receiver non zero buffer" ) {
+	json_t global = json_t{{"$id","ftl://test"}};
+	ftl::config::configure(global);
+
+	json_t cfg = json_t{
+		{"$id","ftl://test/1"}
+	};
+	auto *receiver = ftl::create<Receiver>(cfg);
+
+	json_t cfg2 = json_t{
+		{"$id","ftl://test/2"}
+	};
+	TestStream stream(cfg2);
+	receiver->setStream(&stream);
+	receiver->set("frameset_buffer_size", 1);
+
+	ftl::codecs::NvPipeEncoder encoder(definition_t::HD1080, definition_t::SD480);
+
+	ftl::codecs::Packet pkt;
+	pkt.codec = codec_t::Any;
+	pkt.bitrate = 255;
+	pkt.definition = definition_t::Any;
+	pkt.flags = 0;
+	pkt.frame_count = 1;
+
+	ftl::codecs::StreamPacket spkt;
+	spkt.version = 4;
+	spkt.timestamp = 10;
+	spkt.frame_number = 0;
+	spkt.channel = Channel::Colour;
+	spkt.streamID = 0;
+
+	ftl::rgbd::Frame dummy;
+	ftl::rgbd::FrameState state;
+	state.getLeft().width = 1280;
+	state.getLeft().height = 720;
+	dummy.setOrigin(&state);
+	ftl::stream::injectCalibration(&stream, dummy, 0, 0, 0);
+
+	ftl::timer::start(false);
+
+	SECTION("Fixed buffer delay") {
+		cv::cuda::GpuMat m(cv::Size(1280,720), CV_8UC4, cv::Scalar(0));
+
+		bool r = encoder.encode(m, pkt);
+		REQUIRE( r );
+
+		int count = 0;
+		receiver->onFrameSet([&count](ftl::rgbd::FrameSet &fs) {
+			++count;
+
+			REQUIRE( fs.timestamp == 10 );
+			REQUIRE( fs.frames.size() == 1 );
+			REQUIRE( fs.frames[0].hasChannel(Channel::Colour) );
+			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).rows == 720 );
+			REQUIRE( fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).type() == CV_8UC4 );
+
+			return true;
+		});
+
+		stream.post(spkt, pkt);
+		spkt.timestamp += 10;
+		stream.post(spkt, pkt);
+
+		int i=10;
+		while (i-- > 0 && count < 1) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+
+		REQUIRE( count == 1 );
+	}
+
+	ftl::timer::stop(true);
+	//while (ftl::pool.n_idle() != ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
+	delete receiver;
+}
diff --git a/components/streams/test/sender_unit.cpp b/components/streams/test/sender_unit.cpp
index 7de5f775fa44c0cd6cf6a4f445a0ce1c0244122c..d5a08732807a8b8f4ed0aa30dcd1fadbd0a98ac7 100644
--- a/components/streams/test/sender_unit.cpp
+++ b/components/streams/test/sender_unit.cpp
@@ -88,6 +88,7 @@ TEST_CASE( "Sender::post() video frames" ) {
 		stream.select(0, Channels(Channel::Colour), true);
 
 		fs.count = 1;
+		fs.mask = 1;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
 		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 
@@ -110,6 +111,7 @@ TEST_CASE( "Sender::post() video frames" ) {
 		stream.select(0, Channels(Channel::Colour), true);
 
 		fs.count = 2;
+		fs.mask = 3;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
 		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 		fs.frames.emplace_back();
@@ -135,6 +137,7 @@ TEST_CASE( "Sender::post() video frames" ) {
 		stream.select(0, Channels(Channel::Depth), true);
 
 		fs.count = 2;
+		fs.mask = 3;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
 		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 		fs.frames.emplace_back();
@@ -161,6 +164,7 @@ TEST_CASE( "Sender::post() video frames" ) {
 		stream.select(0, Channels(Channel::Depth), true);
 
 		fs.count = 10;
+		fs.mask = 0x3FF;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
 		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 
@@ -190,6 +194,7 @@ TEST_CASE( "Sender::post() video frames" ) {
 		stream.select(0, Channels(Channel::Depth), true);
 
 		fs.count = 2;
+		fs.mask = 3;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
 		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Depth).setTo(cv::Scalar(0.0f));
 		fs.frames.emplace_back();
@@ -217,6 +222,7 @@ TEST_CASE( "Sender::post() video frames" ) {
 		stream.select(0, Channel::Colour + Channel::Depth, true);
 
 		fs.count = 1;
+		fs.mask = 1;
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
 		fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 		fs.frames[0].create<cv::cuda::GpuMat>(Channel::Depth).create(cv::Size(1280,720), CV_32F);
@@ -280,6 +286,7 @@ TEST_CASE( "Sender request to control encoding" ) {
 	});
 
 	fs.count = 1;
+	fs.mask = 1;
 	fs.frames[0].create<cv::cuda::GpuMat>(Channel::Colour).create(cv::Size(1280,720), CV_8UC4);
 	fs.frames[0].get<cv::cuda::GpuMat>(Channel::Colour).setTo(cv::Scalar(0));
 
diff --git a/components/structures/include/ftl/data/frameset.hpp b/components/structures/include/ftl/data/frameset.hpp
index a0839983777d71f13cfaf98786606be2ee4328aa..de5d6f68fdf57d09b309b24777a9ef9a2f96ca5f 100644
--- a/components/structures/include/ftl/data/frameset.hpp
+++ b/components/structures/include/ftl/data/frameset.hpp
@@ -16,6 +16,11 @@ namespace data {
 //static const size_t kMaxFramesets = 15;
 static const size_t kMaxFramesInSet = 32;
 
+enum class FSFlag : int {
+	STALE = 0,
+	PARTIAL = 1
+};
+
 /**
  * Represents a set of synchronised frames, each with two channels. This is
  * used to collect all frames from multiple computers that have the same
@@ -30,11 +35,16 @@ class FrameSet {
 	std::vector<FRAME> frames;
 	std::atomic<int> count;				// Number of valid frames
 	std::atomic<unsigned int> mask;		// Mask of all sources that contributed
-	bool stale;							// True if buffers have been invalidated
+	//bool stale;						// True if buffers have been invalidated
 	SHARED_MUTEX mtx;
 
 	Eigen::Matrix4d pose;  // Set to identity by default.
 
+	void set(FSFlag f) { flags_ |= (1 << static_cast<int>(f)); }
+	void clear(FSFlag f) { flags_ &= ~(1 << static_cast<int>(f)); }
+	bool test(FSFlag f) const { return flags_ & (1 << static_cast<int>(f)); }
+	void clearFlags() { flags_ = 0; }
+
 	/**
 	 * Move the entire frameset to another frameset object. This will
 	 * invalidate the current frameset object as all memory buffers will be
@@ -78,14 +88,30 @@ class FrameSet {
 		return false;
 	}
 
+	/**
+	 * Check that a given frame is valid in this frameset.
+	 */
+	inline bool hasFrame(size_t ix) const { return (1 << ix) & mask; }
+
+	/**
+	 * Get the first valid frame in this frameset. No valid frames throws an
+	 * exception.
+	 */
+	FRAME &firstFrame();
+
+	const FRAME &firstFrame() const;
+
 	void clearData() {
 		data_.clear();
 		data_channels_.clear();
 	}
 
+	ftl::codecs::Channels<2048> getDataChannels() const { return data_channels_; }
+
 	private:
 	std::unordered_map<int, std::vector<unsigned char>> data_;
 	ftl::codecs::Channels<2048> data_channels_;
+	std::atomic<int> flags_;
 };
 
 /**
@@ -137,7 +163,7 @@ void ftl::data::FrameSet<FRAME>::swapTo(ftl::data::FrameSet<FRAME> &fs) {
 	fs.timestamp = timestamp;
 	fs.count = static_cast<int>(count);
-	fs.stale = stale;
+	fs.flags_ = (int)flags_;
 	fs.mask = static_cast<unsigned int>(mask);
 	fs.id = id;
 	fs.pose = pose;
@@ -150,7 +176,7 @@ void ftl::data::FrameSet<FRAME>::swapTo(ftl::data::FrameSet<FRAME> &fs) {
 	fs.data_channels_ = data_channels_;
 	data_channels_.clear();
 
-	stale = true;
+	set(ftl::data::FSFlag::STALE);
 }
 
 // Default data channel implementation
@@ -196,4 +222,20 @@ void ftl::data::FrameSet<FRAME>::createRawData(ftl::codecs::Channel c, const std
 	data_channels_ += c;
 }
 
+template <typename FRAME>
+FRAME &ftl::data::FrameSet<FRAME>::firstFrame() {
+	for (size_t i=0; i<frames.size(); ++i) {
+		if (hasFrame(i)) return frames[i];
+	}
+	throw FTL_Error("No frames in frameset");
+}
+
+template <typename FRAME>
+const FRAME &ftl::data::FrameSet<FRAME>::firstFrame() const {
+	for (size_t i=0; i<frames.size(); ++i) {
+		if (hasFrame(i)) return frames[i];
+	}
+	throw FTL_Error("No frames in frameset");
+}
+
 #endif  // _FTL_DATA_FRAMESET_HPP_
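[Reviewer note, not part of the patch] With the boolean stale flag replaced by FSFlag and framesets now possibly partial, consumers are expected to test hasFrame()/firstFrame() rather than indexing frames[0] directly. A sketch of the intended consumer pattern (callback body invented):

    // Illustrative sketch only.
    receiver->onFrameSet([](ftl::rgbd::FrameSet &fs) {
        if (fs.test(ftl::data::FSFlag::PARTIAL)) {
            LOG(WARNING) << "Frameset " << fs.timestamp << " is missing frames";
        }
        for (size_t i = 0; i < fs.frames.size(); ++i) {
            if (!fs.hasFrame(i)) continue;   // skip sources that never arrived
            auto &f = fs.frames[i];
            // ... process f ...
        }
        return true;   // firstFrame() would throw here if no frame at all were valid
    });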