Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision
  • bug/335/nettimeadjust
  • bug/feedrecord
  • bug/mlsdestruct2
  • bug/optflow-disparity
  • calibration
  • censuseval
  • cherry-pick-75b9f6b3
  • chromatest
  • enhancement/235
  • exp/327/colourcor
  • exp/candidatemask
  • exp/labcolours
  • exp/multires-sgm
  • exp/triangleperf-2
  • exp/triangleperf3
  • feature/134/usegroup-nb
  • feature/231/segment
  • feature/274/lossyfilter
  • feature/329/dualoptflow-doffs
  • feature/330/audiocompress
  • feature/375/fullres-fstream
  • feature/SKR
  • feature/aruco
  • feature/autocalibration
  • feature/ceres
  • feature/compiletime
  • feature/corr_smooth
  • feature/depth-touch
  • feature/disconflow
  • feature/fixedres
  • feature/gui2-nanogui-mitsuba
  • feature/gui2-pch
  • feature/multiplexer-pose
  • feature/poses
  • feature/python
  • feature/sdk-python
  • feature/sgm-experimental
  • feature/stereocalib
  • feature/use-new-frame
  • feature/use10bit
  • feature/vr
  • feature/warpcorr-costing
  • feature/web-service/camMover
  • feature/web-service/configurations
  • feature/web-service/vanillaClient
  • feature/websocket-pose
  • guirefactor
  • master
  • v0.0.1
  • v0.0.2
  • v0.0.3
  • v0.0.4
  • v0.0.5
  • v0.0.6
54 results

Target

Select target project
  • nicolaspope/ftl
1 result
Select Git revision
  • bug/335/nettimeadjust
  • bug/feedrecord
  • bug/mlsdestruct2
  • bug/optflow-disparity
  • calibration
  • censuseval
  • cherry-pick-75b9f6b3
  • chromatest
  • enhancement/235
  • exp/327/colourcor
  • exp/candidatemask
  • exp/labcolours
  • exp/multires-sgm
  • exp/triangleperf-2
  • exp/triangleperf3
  • feature/134/usegroup-nb
  • feature/231/segment
  • feature/274/lossyfilter
  • feature/329/dualoptflow-doffs
  • feature/330/audiocompress
  • feature/375/fullres-fstream
  • feature/SKR
  • feature/aruco
  • feature/autocalibration
  • feature/ceres
  • feature/compiletime
  • feature/corr_smooth
  • feature/depth-touch
  • feature/disconflow
  • feature/fixedres
  • feature/gui2-nanogui-mitsuba
  • feature/gui2-pch
  • feature/multiplexer-pose
  • feature/poses
  • feature/python
  • feature/sdk-python
  • feature/sgm-experimental
  • feature/stereocalib
  • feature/use-new-frame
  • feature/use10bit
  • feature/vr
  • feature/warpcorr-costing
  • feature/web-service/camMover
  • feature/web-service/configurations
  • feature/web-service/vanillaClient
  • feature/websocket-pose
  • guirefactor
  • master
  • v0.0.1
  • v0.0.2
  • v0.0.3
  • v0.0.4
  • v0.0.5
  • v0.0.6
54 results
Show changes
Showing
with 871 additions and 468 deletions
#pragma once

#include <functional>  // FIX: std::function used below but was never included

#include <nanogui/entypo.h>
#include <ftl/audio/mixer.hpp>

#include "popupbutton.hpp"

namespace ftl {
namespace gui2 {

/**
 * Toolbar button that opens a popup volume slider for a stereo audio mixer.
 * The button icon reflects the current volume level and mute state.
 */
class VolumeButton : public ftl::gui2::PopupButton {
public:
	VolumeButton(nanogui::Widget *parent, ftl::audio::StereoMixerF<100> *mixer);
	virtual ~VolumeButton();

	// callback, new value passed in argument
	void setCallback(std::function<void(float)> cb);
	// set value (updates slider value and highlight and changes icon)
	void setValue(float v);
	float value();

	// get/set mute status (changes volume highlight color and icon)
	void setMuted(bool v);
	bool muted();

	virtual bool mouseButtonEvent(const nanogui::Vector2i &p, int button, bool down, int modifiers) override;
	virtual bool scrollEvent(const nanogui::Vector2i &p, const nanogui::Vector2f &rel) override;

	// icons: 3 levels and muted (value ranges each icon covers are noted)
	int ICON_VOLUME_3 = ENTYPO_ICON_SOUND; // [67, 100]
	int ICON_VOLUME_2 = ENTYPO_ICON_SOUND; // [33,67)
	int ICON_VOLUME_1 = ENTYPO_ICON_SOUND; // [0,33)
	int ICON_MUTED = ENTYPO_ICON_SOUND_MUTE;

private:
	// Refresh icon/highlight from value_ and muted_.
	void update();

	nanogui::Slider* slider_;
	std::function<void(float)> cb_;

	ftl::audio::StereoMixerF<100> *mixer_;

	float scroll_step_ = 0.02f;  // volume change per scroll tick
	float value_;
	bool muted_;

public:
	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

}
}
#pragma once

#include <string>  // FIX: std::string used below but was never included

#include <nanogui/window.h>

namespace ftl {
namespace gui2 {

/**
 * Non-movable Window widget: drag events are swallowed so the user cannot
 * reposition the window.
 */
class FixedWindow : public nanogui::Window {
public:
	// FIX: take the title by const reference to avoid a needless copy;
	// also removed a stray ';' after the constructor body.
	FixedWindow(nanogui::Widget *parent, const std::string &name="") :
		nanogui::Window(parent, name) {}

	// Ignore all drags so the window stays fixed in place.
	virtual bool mouseDragEvent(const nanogui::Vector2i&, const nanogui::Vector2i&, int, int) override { return false; }

	virtual ~FixedWindow() {}

public:
	EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};

}
}
......@@ -19,7 +19,7 @@ namespace rgbd {
* calculating disparity, before converting to depth. Calibration of the images
* is also performed.
*/
class VirtualSource : public ftl::rgbd::detail::Source {
class VirtualSource : public ftl::rgbd::BaseSourceImpl {
public:
VirtualSource(ftl::rgbd::Source*);
~VirtualSource();
......
......@@ -10,7 +10,6 @@
#include <ftl/configuration.hpp>
#include <ftl/depth_camera.hpp>
#include <ftl/rgbd.hpp>
#include <ftl/rgbd/virtual.hpp>
#include <ftl/master.hpp>
#include <ftl/rgbd/group.hpp>
#include <ftl/threads.hpp>
......
......@@ -28,8 +28,6 @@ Reconstruction::Reconstruction(nlohmann::json &config, const std::string name) :
pipeline_->append<ftl::operators::DisparityToDepth>("calculate_depth")->value("enabled", false);
pipeline_->append<ftl::operators::ColourChannels>("colour"); // Convert BGR to BGRA
pipeline_->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
pipeline_->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
pipeline_->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
//pipeline_->append<ftl::operators::HFSmoother>("hfnoise"); // Remove high-frequency noise
pipeline_->append<ftl::operators::Normals>("normals"); // Estimate surface normals
//pipeline_->append<ftl::operators::SmoothChannel>("smoothing"); // Generate a smoothing channel
......@@ -43,7 +41,8 @@ Reconstruction::Reconstruction(nlohmann::json &config, const std::string name) :
pipeline_->append<ftl::operators::VisCrossSupport>("viscross")->value("enabled", false);
pipeline_->append<ftl::operators::MultiViewMLS>("mvmls");
pipeline_->append<ftl::operators::Poser>("poser")->value("enabled", false);
pipeline_->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
pipeline_->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
//pipeline_->set("enabled", false);
}
......
# Build rules for the second-generation reconstruction service executable.

# Need to include staged files and libs
#include_directories(${PROJECT_SOURCE_DIR}/reconstruct/include)
#include_directories(${PROJECT_BINARY_DIR})

# Source list for the tool.
set(REPSRC
src/main.cpp
)

add_executable(ftl-reconstruct2 ${REPSRC})

#target_include_directories(ftl-reconstruct PUBLIC
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
# $<INSTALL_INTERFACE:include>
# PRIVATE src)

# Separable compilation lets CUDA device code link across object files.
if (CUDA_FOUND)
set_property(TARGET ftl-reconstruct2 PROPERTY CUDA_SEPARABLE_COMPILATION ON)
endif()

#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
target_link_libraries(ftl-reconstruct2 ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlctrl ftlnet ftlrender ftloperators ftlstreams ftlaudio)
#include <ftl/configuration.hpp>
#include <ftl/net.hpp>
#include <ftl/streams/feed.hpp>
#include <ftl/master.hpp>
#include <nlohmann/json.hpp>
#include <loguru.hpp>
#include "ftl/operators/smoothing.hpp"
#include "ftl/operators/colours.hpp"
#include "ftl/operators/normals.hpp"
#include "ftl/operators/filling.hpp"
#include "ftl/operators/segmentation.hpp"
#include "ftl/operators/mask.hpp"
#include "ftl/operators/antialiasing.hpp"
#include "ftl/operators/mvmls.hpp"
#include "ftl/operators/clipping.hpp"
#include <ftl/operators/disparity.hpp>
#include <ftl/operators/poser.hpp>
#include <ftl/operators/detectandtrack.hpp>
using ftl::net::Universe;
using ftl::stream::Feed;
using ftl::codecs::Channel;
using std::vector;
using std::string;
/**
 * Bind every thread-pool worker to the currently selected CUDA device.
 *
 * One job is pushed per pool thread. Each job sets the device and then
 * spins until all jobs have incremented the shared counter — this blocks
 * every worker simultaneously, so no single thread can pick up two of
 * these jobs (a crude one-shot barrier across the pool).
 */
static void threadSetCUDADevice() {
	// Ensure all threads have correct cuda device
	std::atomic<int> ijobs = 0;
	for (int i=0; i<ftl::pool.size(); ++i) {
		ftl::pool.push([&ijobs](int id) {
			ftl::cuda::setDevice();
			++ijobs;
			// Hold this worker until every worker has set the device.
			while (ijobs < ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
		});
	}
	// Wait here as well: ijobs lives on this stack frame and must outlive
	// every job that captured it by reference.
	while (ijobs < ftl::pool.size()) std::this_thread::sleep_for(std::chrono::milliseconds(10));
}
/**
 * Reconstruction service main loop.
 *
 * Selects a GPU, starts networking, builds the depth/fusion pipeline via the
 * feed's pipeline creator, registers sources from config and command line,
 * then streams until shutdown. Owns and finally deletes feed, net and root.
 */
static void run(ftl::Configurable *root) {
	// Use other GPU if available.
	ftl::cuda::setDevice(ftl::cuda::deviceCount()-1);
	threadSetCUDADevice();

	// This node acts as clock master for connected peers.
	ftl::timer::setClockSlave(false);
	ftl::timer::setHighPrecision(true);

	Universe *net = ftl::create<Universe>(root, "net");
	ftl::ctrl::Master ctrl(root, net);

	net->start();
	net->waitConnections();

	Feed *feed = ftl::create<Feed>(root, "feed", net);
	std::string group_name = root->value("group", std::string("Reconstruction"));
	feed->set("uri", root->value("uri", std::string("ftl://ftlab.utu.fi/reconstruction")));

	feed->setPipelineCreator([](ftl::operators::Graph *pipeline) {
		LOG(INFO) << "Using reconstruction pipeline creator";

		pipeline->restore("reconstruction_pipeline", {
			"clipping"
		});

		pipeline->append<ftl::operators::DepthChannel>("depth")->value("enabled", false);  // Ensure there is a depth channel
		pipeline->append<ftl::operators::DisparityBilateralFilter>("bilateral_filter")->value("enabled", false);
		pipeline->append<ftl::operators::DisparityToDepth>("calculate_depth")->value("enabled", false);
		pipeline->append<ftl::operators::ColourChannels>("colour");  // Convert BGR to BGRA
		pipeline->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
		//pipeline_->append<ftl::operators::HFSmoother>("hfnoise");  // Remove high-frequency noise
		pipeline->append<ftl::operators::Normals>("normals");  // Estimate surface normals
		//pipeline_->append<ftl::operators::SmoothChannel>("smoothing");  // Generate a smoothing channel
		//pipeline_->append<ftl::operators::ScanFieldFill>("filling");  // Generate a smoothing channel
		pipeline->append<ftl::operators::CrossSupport>("cross");
		pipeline->append<ftl::operators::DiscontinuityMask>("discontinuity");
		pipeline->append<ftl::operators::CrossSupport>("cross2")->value("discon_support", true);
		pipeline->append<ftl::operators::BorderMask>("border_mask")->value("enabled", false);
		pipeline->append<ftl::operators::CullDiscontinuity>("remove_discontinuity")->set("enabled", false);
		//pipeline_->append<ftl::operators::AggreMLS>("mls");  // Perform MLS (using smoothing channel)
		pipeline->append<ftl::operators::VisCrossSupport>("viscross")->value("enabled", false);
		pipeline->append<ftl::operators::MultiViewMLS>("mvmls");
		pipeline->append<ftl::operators::Poser>("poser")->value("enabled", false);
		pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
		pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
	});

	bool has_file = false;

	// Add sources here
	if (root->getConfig().contains("sources")) {
		for (const auto &s : root->getConfig()["sources"]) {
			ftl::URI uri(s);
			if (uri.getScheme() == ftl::URI::scheme_t::SCHEME_FILE) has_file = true;
			uri.setAttribute("group", group_name);
			feed->add(uri);
		}
	}

	// Add sources from command line as well.
	// FIX: guard the optional before dereferencing — previously *paths was
	// read unconditionally and crashed when no "paths" entry existed.
	// (Also removed an unused local `string file`.)
	auto paths = root->get<vector<string>>("paths");
	if (paths) {
		for (auto &x : *paths) {
			if (x != "") {
				ftl::URI uri(x);
				if (uri.getScheme() == ftl::URI::scheme_t::SCHEME_FILE) has_file = true;
				uri.setAttribute("group", group_name);
				feed->add(uri);
			}
		}
	}

	// Automatically add any new sources
	/*auto nsrc_handle = feed->onNewSources([feed,group_name](const vector<string> &srcs) {
		for (const auto &s : srcs) {
			ftl::URI uri(s);
			if (uri.hasAttribute("group")) {
				if (uri.getAttribute<std::string>("group") == group_name) {
					//uri.setAttribute("group", group_name);
					feed->add(uri);
				}
			}
		return true;
	});*/

	auto *filter = feed->filter({Channel::Colour, Channel::Depth, Channel::AudioStereo});
	//feed->lowLatencyMode();
	feed->startStreaming(filter);

	// Just do whatever jobs are available: file playback drives the timer,
	// live sources drain the thread pool until shutdown is requested.
	if (has_file) {
		ftl::timer::start(true);
	} else {
		while (ftl::running) {
			auto f = ftl::pool.pop();
			if (f) {
				f(-1);
			} else {
				std::this_thread::sleep_for(std::chrono::milliseconds(10));
			}
		}
	}

	//nsrc_handle.cancel();

	feed->stopRecording();
	feed->removeFilter(filter);

	ftl::config::save();

	net->shutdown();
	LOG(INFO) << "Stopping...";
	ftl::timer::stop(true);
	LOG(INFO) << "Timer stopped...";
	ftl::pool.stop(true);
	LOG(INFO) << "All threads stopped.";

	delete feed;
	delete net;
	delete root;
}
int main(int argc, char **argv) {
run(ftl::configure(argc, argv, "reconstruction_default"));
// Save config changes and delete final objects
ftl::config::cleanup();
return ftl::exit_code;
}
# Tool subdirectories.
# NOTE(review): the active add_subdirectory(codec_eval) is immediately
# followed by a commented-out duplicate — likely a merge/paste artifact;
# confirm whether codec_eval is meant to be built.
add_subdirectory(codec_eval)
#add_subdirectory(codec_eval)
#if (HAVE_ASSIMP)
# add_subdirectory(model_truth)
#endif()
add_subdirectory(middlebury_gen)
add_subdirectory(simple_viewer)
add_subdirectory(recorder)
......@@ -87,7 +87,6 @@ static void run(ftl::Configurable *root) {
ftl::codecs::Packet pkt;
pkt.codec = codec_t::HEVC;
pkt.bitrate = 255;
pkt.definition = definition_t::Any;
pkt.flags = ftl::codecs::kFlagFloat | ftl::codecs::kFlagMappedDepth;
pkt.frame_count = 1;
......@@ -154,7 +153,6 @@ static void run(ftl::Configurable *root) {
ftl::codecs::Packet pkt;
pkt.codec = codec_t::HEVC;
pkt.bitrate = 255;
pkt.definition = definition_t::Any;
pkt.flags = ftl::codecs::kFlagFloat | ftl::codecs::kFlagMappedDepth;
pkt.frame_count = 1;
......
......@@ -15,3 +15,4 @@ endif()
#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
target_link_libraries(middlebury-gen ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlrender ftloperators ftlstreams)
set_property(TARGET middlebury-gen PROPERTY CUDA_ARCHITECTURES OFF)
......@@ -4,6 +4,8 @@
#include <ftl/codecs/opencv_encoder.hpp>
#include <ftl/streams/injectors.hpp>
#include <ftl/data/framepool.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
......@@ -212,10 +214,12 @@ int main(int argc, char **argv) {
// For each middlebury test folder
auto paths = (*root->get<nlohmann::json>("paths"));
ftl::rgbd::Frame frame;
ftl::rgbd::FrameState state;
ftl::data::Pool pool(1,1);
ftl::data::Frame dframe = pool.allocate(ftl::data::FrameID(0,0), 10);
ftl::rgbd::Frame &frame = dframe.cast<ftl::rgbd::Frame>();
frame.store();
ftl::operators::DisparityToDepth disp2depth(ftl::create<ftl::Configurable>(root, "disparity"));
ftl::operators::DisparityToDepth disp2depth(nullptr, ftl::create<ftl::Configurable>(root, "disparity"));
ftl::codecs::OpenCVEncoder encoder(ftl::codecs::definition_t::Any, ftl::codecs::definition_t::Any);
......@@ -249,7 +253,7 @@ int main(int argc, char **argv) {
// Load the ground truth
//frame.create<cv::Mat>(Channel::Disparity) = cv::imread(path+"/disp0.pfm", cv::IMREAD_UNCHANGED);
readFilePFM(frame.create<cv::Mat>(Channel::Disparity), path+"/disp0.pfm");
cv::Mat &disp = frame.get<cv::Mat>(Channel::Disparity);
cv::Mat &disp = frame.set<cv::Mat>(Channel::Disparity);
float aspect = float(disp.cols) / float(disp.rows);
float scaling = float(height) / float(disp.rows);
cv::resize(disp, disp, cv::Size(int(aspect*float(height)),height), 0.0, 0.0, cv::INTER_NEAREST);
......@@ -277,14 +281,16 @@ int main(int argc, char **argv) {
intrin1.width = c1.cols;
intrin2.width = c2.cols;
state.setLeft(intrin1);
state.setRight(intrin2);
frame.setOrigin(&state);
ftl::stream::injectCalibration(out, frame, 0, 0, i, false);
ftl::stream::injectCalibration(out, frame, 0, 0, i, true);
frame.setLeft() = intrin1;
frame.setRight() = intrin2;
//ftl::stream::injectCalibration(out, frame, 0, 0, i, false);
//ftl::stream::injectCalibration(out, frame, 0, 0, i, true);
// Convert disparity to depth
frame.upload(Channel::Disparity + Channel::Colour + Channel::Colour2);
frame.upload(Channel::Disparity);
frame.upload(Channel::Colour);
frame.upload(Channel::Colour2);
disp2depth.apply(frame, frame, 0);
......@@ -297,7 +303,6 @@ int main(int argc, char **argv) {
spkt.streamID = 0;
spkt.version = 4;
pkt.codec = codec_t::Any;
pkt.definition = definition_t::Any;
pkt.bitrate = 0;
pkt.flags = 0;
pkt.frame_count = 1;
......@@ -309,7 +314,6 @@ int main(int argc, char **argv) {
out->post(spkt, pkt);
pkt.codec = codec_t::Any;
pkt.definition = definition_t::Any;
spkt.channel = Channel::Colour2;
if (!encoder.encode(frame.get<cv::cuda::GpuMat>(Channel::Colour2), pkt)) {
LOG(ERROR) << "Encode failed for colour2";
......@@ -319,7 +323,6 @@ int main(int argc, char **argv) {
spkt.channel = Channel::GroundTruth;
pkt.flags = ftl::codecs::kFlagFloat;
pkt.codec = codec_t::Any;
pkt.definition = definition_t::Any;
if (!encoder.encode(frame.get<cv::cuda::GpuMat>(Channel::Depth), pkt)) {
LOG(ERROR) << "Encode failed for depth";
}
......
# Build rules for the stream recorder executable.

# Need to include staged files and libs
#include_directories(${PROJECT_SOURCE_DIR}/reconstruct/include)
#include_directories(${PROJECT_BINARY_DIR})

# Source list for the tool.
set(RECSRC
src/main.cpp
)

add_executable(ftl-recorder ${RECSRC})

#target_include_directories(ftl-reconstruct PUBLIC
# $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
# $<INSTALL_INTERFACE:include>
# PRIVATE src)

# Separable compilation lets CUDA device code link across object files.
if (CUDA_FOUND)
set_property(TARGET ftl-recorder PROPERTY CUDA_SEPARABLE_COMPILATION ON)
endif()

#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
target_link_libraries(ftl-recorder ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlctrl ftlnet ftlrender ftloperators ftlstreams ftlaudio)
#include <ftl/configuration.hpp>
#include <ftl/net.hpp>
#include <ftl/master.hpp>
#include <nlohmann/json.hpp>
#include <loguru.hpp>
#include <ftl/streams/filestream.hpp>
#include <ftl/streams/netstream.hpp>
#include <unordered_set>
using ftl::net::Universe;
using ftl::codecs::Channel;
using std::vector;
using std::string;

// Monotonic frameset-id allocator shared by the connect handler and the
// "add_stream" RPC; atomic because both may run on pool/network threads.
static std::atomic_int src_count = 0;
/**
 * Recorder main loop.
 *
 * Muxes every discovered network stream into a single FTL output file.
 * Streams are added when peers connect (via their "list_streams" RPC) or
 * when a peer calls the "add_stream" RPC. Runs until ftl::running clears.
 */
static void run(ftl::Configurable *root) {
	Universe *net = ftl::create<Universe>(root, "net");
	ftl::ctrl::Master ctrl(root, net);

	ftl::stream::Muxer *mux_in = ftl::create<ftl::stream::Muxer>(root, "muxer");
	ftl::stream::File *file_out = ftl::create<ftl::stream::File>(root, "output");

	// Channels to record; colour always, others opt-in via config.
	std::unordered_set<ftl::codecs::Channel> channels;
	channels.insert(Channel::Colour);
	if (root->value("depth", false)) channels.insert(Channel::Depth);
	if (root->value("right", false)) channels.insert(Channel::Right);
	if (root->value("audio", false)) channels.insert(Channel::Audio);

	file_out->set("filename", root->value("filename", std::string("out.ftl")));
	file_out->setMode(ftl::stream::File::Mode::Write);
	file_out->begin();

	// Forward every muxed packet straight into the output file. The handle
	// keeps the subscription alive for the duration of run().
	auto h1 = mux_in->onPacket([file_out](const ftl::codecs::StreamPacket &spkt, const ftl::codecs::Packet &pkt) {
		file_out->post(spkt, pkt);
		return true;
	});

	mux_in->begin();

	// When a peer connects, ask it for its streams and record them all.
	net->onConnect([mux_in,net,root,&channels](ftl::net::Peer *p) {
		ftl::pool.push([mux_in,root,net,p,&channels](int id) {
			try {
				auto peerstreams = p->call<std::vector<std::string>>("list_streams");

				for (const auto &s : peerstreams) {
					int fsid = src_count++;

					auto *ns = ftl::create<ftl::stream::Net>(root, std::string("input") + std::to_string(fsid), net);
					ns->set("uri", s);
					mux_in->add(ns, fsid);
					mux_in->begin();
					mux_in->select(fsid, channels, true);
					LOG(INFO) << "Recording: " << s;
				}
			} catch (...) {
				// Best effort: peer may not implement "list_streams".
			}
		});
	});

	// RPC allowing peers to push an additional stream URI for recording.
	if (net->isBound("add_stream")) net->unbind("add_stream");
	net->bind("add_stream", [mux_in,root,net,&channels](ftl::net::Peer &p, std::string uri){
		int fsid = src_count++;

		auto *ns = ftl::create<ftl::stream::Net>(root, std::string("input") + std::to_string(fsid), net);
		ns->set("uri", uri);
		mux_in->add(ns, fsid);
		mux_in->begin();
		mux_in->select(fsid, channels, true);
		LOG(INFO) << "Recording: " << uri;
	});

	net->start();
	net->waitConnections();

	// Add sources here
	if (root->getConfig().contains("sources")) {
		for (const auto &s : root->getConfig()["sources"]) {
			ftl::URI uri(s);
			auto scheme = uri.getScheme();
			if (scheme == ftl::URI::scheme_t::SCHEME_TCP || scheme == ftl::URI::scheme_t::SCHEME_WS) {
				net->connect(s);
			} else {
				LOG(ERROR) << "Unsupported URI: " << s;
			}
		}
	}

	// Add sources from command line as well.
	// FIX: guard the optional before dereferencing — previously *paths was
	// read unconditionally and crashed when no "paths" entry existed.
	auto paths = root->get<vector<string>>("paths");
	if (paths) {
		for (auto &x : *paths) {
			if (x != "") {
				ftl::URI uri(x);
				auto scheme = uri.getScheme();
				if (scheme == ftl::URI::scheme_t::SCHEME_TCP || scheme == ftl::URI::scheme_t::SCHEME_WS) {
					net->connect(x);
				} else {
					LOG(ERROR) << "Unsupported URI: " << x;
				}
			}
		}
	}

	// Just do whatever jobs are available
	while (ftl::running) {
		auto f = ftl::pool.pop();
		if (f) {
			f(-1);
		} else {
			std::this_thread::sleep_for(std::chrono::milliseconds(10));
		}
	}

	mux_in->end();
	file_out->end();

	delete mux_in;
	delete file_out;

	ftl::config::save();

	net->shutdown();

	LOG(INFO) << "Stopping...";
	//ftl::timer::stop(true);
	//LOG(INFO) << "Timer stopped...";
	ftl::pool.stop(true);
	LOG(INFO) << "All threads stopped.";

	delete net;
}
int main(int argc, char **argv) {
	// Build the root configurable from the CLI and run the recorder loop.
	auto *root = ftl::configure(argc, argv, "recorder_default");
	run(root);

	// Persist configuration changes and tear down remaining objects.
	ftl::config::cleanup();
	return ftl::exit_code;
}
# Build rules for the simple viewer tool.

# Need to include staged files and libs
#include_directories(${PROJECT_SOURCE_DIR}/reconstruct/include)
#include_directories(${PROJECT_BINARY_DIR})

# Source list for the tool.
set(SIMPVIEWSRC
main.cpp
)

add_executable(simple-viewer ${SIMPVIEWSRC})

#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
target_link_libraries(simple-viewer ftlcommon ftlrgbd Threads::Threads ${OpenCV_LIBS} ftlctrl ftlnet ftlrender ftloperators ftlstreams ftlaudio)
/*
* Copyright 2019 Nicolas Pope. All rights reserved.
*
* See LICENSE.
*/
#define LOGURU_WITH_STREAMS 1
#include <loguru.hpp>
#include <ftl/config.h>
#include <ftl/configuration.hpp>
#include <ftl/master.hpp>
#include <ftl/threads.hpp>
#include <ftl/codecs/channels.hpp>
#include <ftl/codecs/depth_convert_cuda.hpp>
#include <ftl/data/framepool.hpp>
#include <ftl/audio/speaker.hpp>
#include <nlohmann/json.hpp>
#include <fstream>
#include <string>
#include <vector>
#include <thread>
#include <chrono>
#include <opencv2/opencv.hpp>
#include <opencv2/quality/qualitypsnr.hpp>
#include <ftl/net/universe.hpp>
#include <ftl/streams/filestream.hpp>
#include <ftl/streams/receiver.hpp>
#include <ftl/streams/sender.hpp>
#include <ftl/streams/netstream.hpp>
#include <ftl/operators/colours.hpp>
#include <ftl/operators/mask.hpp>
#include <ftl/operators/segmentation.hpp>
#include <ftl/operators/depth.hpp>
#ifdef WIN32
#pragma comment(lib, "Rpcrt4.lib")
#endif
using ftl::net::Universe;
using std::string;
using std::vector;
using ftl::config::json_t;
using ftl::codecs::Channel;
using ftl::codecs::codec_t;
using ftl::codecs::definition_t;
using json = nlohmann::json;
using std::this_thread::sleep_for;
using std::chrono::milliseconds;
/**
 * Build a frameset generator that replays a recorded FTL file.
 *
 * A file stream is created under the "player" config entry, wired to a new
 * receiver, started immediately, and colour + depth are selected on it.
 * Returns the receiver as the generator; ownership stays with the config
 * system.
 */
static ftl::data::Generator *createFileGenerator(ftl::Configurable *root, ftl::data::Pool *pool, const std::string &filename) {
	auto *file_stream = ftl::create<ftl::stream::File>(root, "player");
	file_stream->set("filename", filename);

	auto *receiver = ftl::create<ftl::stream::Receiver>(root, "receiver", pool);
	receiver->setStream(file_stream);

	file_stream->begin();
	file_stream->select(0, Channel::Colour + Channel::Depth); // TODO: Choose these elsewhere
	return receiver;
}
/**
 * Render a metric depth image as a false-colour visualisation.
 *
 * Depth in [0, max_depth] is scaled to 8-bit, inverted so nearer surfaces
 * appear brighter, then mapped through the JET colour map into `out`.
 */
static void visualizeDepthMap( const cv::Mat &depth, cv::Mat &out,
					const float max_depth)
{
	DCHECK(max_depth > 0.0);

	const double scale = 255.0f / max_depth;
	depth.convertTo(out, CV_8U, scale);
	cv::subtract(255, out, out);  // invert ramp: closer == larger value
	//cv::Mat mask = (depth >= max_depth); // TODO (mask for invalid pixels)
	cv::applyColorMap(out, out, cv::COLORMAP_JET);
	//out.setTo(cv::Scalar(0), mask);
}
/**
 * Viewer main loop: starts networking, builds generators from FTL files
 * given on the command line and from streams discovered on connected peers,
 * then shows colour/depth images and queues audio for each frameset until
 * the timer loop ends.
 */
static void run(ftl::Configurable *root) {
	Universe *net = ftl::create<Universe>(root, "net");
	ftl::ctrl::Master ctrl(root, net);

	net->start();
	net->waitConnections();

	std::list<ftl::Handle> handles;             // keeps all callbacks subscribed for the whole run
	ftl::data::Pool pool(2,10);                 // frame memory pool
	std::list<ftl::data::Generator*> generators;

	// Check paths for FTL files to load.
	// NOTE(review): this dereferences the result of get<nlohmann::json>("paths")
	// unconditionally — verify behaviour when no "paths" entry exists.
	auto paths = (*root->get<nlohmann::json>("paths"));

	// NOTE(review): `i` counts file generators but is otherwise unused.
	int i = 0; //groups.size();
	for (auto &x : paths.items()) {
		std::string path = x.value().get<std::string>();
		auto eix = path.find_last_of('.');
		auto ext = path.substr(eix+1);

		// Command line path is ftl file
		if (ext == "ftl") {
			auto *gen = createFileGenerator(root, &pool, path);
			generators.push_back(gen);
			++i;
		} else {
			// Otherwise treat the path as a peer address to connect to.
			ftl::URI uri(path);
			if (uri.getScheme() == ftl::URI::SCHEME_TCP || uri.getScheme() == ftl::URI::SCHEME_WS) {
				net->connect(path)->waitConnection();
			}
		}
	}

	// Ask every connected peer which streams it offers.
	auto stream_uris = net->findAll<std::string>("list_streams");

	if (stream_uris.size() > 0) {
		// Mux all discovered net streams into one receiver; the sender is
		// used only for per-frame response channels (see onFlush below).
		ftl::stream::Muxer *stream = ftl::create<ftl::stream::Muxer>(root, "muxstream");
		ftl::stream::Receiver *gen = ftl::create<ftl::stream::Receiver>(root, "receiver", &pool);
		ftl::stream::Sender *sender = ftl::create<ftl::stream::Sender>(root, "sender");
		gen->setStream(stream);
		sender->setStream(stream);

		int count = 0;
		for (auto &s : stream_uris) {
			LOG(INFO) << " --- found stream: " << s;
			auto *nstream = ftl::create<ftl::stream::Net>(stream, std::string("netstream")+std::to_string(count), net);
			nstream->set("uri", s);
			//nstream->select(0, {Channel::Colour}, true);
			stream->add(nstream);
			++count;
		}

		generators.push_back(gen);
		stream->begin();
		stream->select(0, Channel::Colour + Channel::Depth + Channel::AudioStereo, true);

		handles.push_back(std::move(pool.onFlush([sender](ftl::data::Frame &f, ftl::codecs::Channel c) {
			// Send only reponse channels on a per frame basis
			if (f.mode() == ftl::data::FrameMode::RESPONSE) {
				sender->post(f, c);
			}
			return true;
		})));
	}

	ftl::audio::Speaker *speaker = ftl::create<ftl::audio::Speaker>(root, "speaker");

	// Display/playback callback for every generator (file or network).
	for (auto *g : generators) {
		handles.push_back(std::move(g->onFrameSet([&](std::shared_ptr<ftl::data::FrameSet> fs) {
			LOG(INFO) << "Got frameset: " << fs->timestamp();
			for (auto &f : fs->frames) {
				if (f.has(Channel::Colour)) {
					cv::Mat tmp;
					f.get<cv::cuda::GpuMat>(Channel::Colour).download(tmp);
					cv::imshow(std::string("Frame")+std::to_string(f.id().id), tmp);
				}
				if (f.has(Channel::Depth)) {
					cv::Mat tmp;
					f.get<cv::cuda::GpuMat>(Channel::Depth).download(tmp);
					visualizeDepthMap(tmp,tmp,8.0f);
					cv::imshow(std::string("Depth")+std::to_string(f.id().id), tmp);
				}
				if (f.has(Channel::AudioStereo)) {
					const auto &audio = f.get<std::list<ftl::audio::Audio>>(Channel::AudioStereo).front();
					LOG(INFO) << "Got stereo: " << audio.data().size();
					// Only play audio from source 0.
					if (f.source() == 0) {
						speaker->queue(f.timestamp(), f);
					}
				}
			}

			int k = cv::waitKey(10);
			// Send the key back to vision node (TESTING)
			if (k >= 0) {
				auto rf = fs->firstFrame().response();
				rf.create<int>(Channel::Control) = k;
			}

			return true;
		})));
	}

	LOG(INFO) << "Start timer";
	// Presumably blocks until the timer is stopped — confirm semantics of start(true).
	ftl::timer::start(true);

	LOG(INFO) << "Shutting down...";
	ftl::timer::stop();
	ftl::pool.stop(true);
	ctrl.stop();
	net->shutdown();

	//cudaProfilerStop();

	LOG(INFO) << "Deleting...";
	delete net;

	ftl::config::cleanup();  // Remove any last configurable objects.

	LOG(INFO) << "Done.";
}
int main(int argc, char **argv) {
run(ftl::configure(argc, argv, "tools_default"));
}
......@@ -10,6 +10,7 @@ set(CVNODESRC
)
add_executable(ftl-vision ${CVNODESRC})
install(TARGETS ftl-vision DESTINATION bin COMPONENT vision)
target_include_directories(ftl-vision PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/include>
......@@ -21,6 +22,6 @@ set_property(TARGET ftl-vision PROPERTY CUDA_SEPARABLE_COMPILATION OFF)
endif()
#target_include_directories(cv-node PUBLIC ${PROJECT_SOURCE_DIR}/include)
target_link_libraries(ftl-vision ftlrgbd ftlcommon ftlstreams ftlctrl ${OpenCV_LIBS} ${LIBSGM_LIBRARIES} ${CUDA_LIBRARIES} ftlnet ftlaudio)
target_link_libraries(ftl-vision ftlrgbd ftlcommon ftlstreams ftlctrl ${OpenCV_LIBS} ${CUDA_LIBRARIES} ftlnet ftlaudio)
target_precompile_headers(ftl-vision REUSE_FROM ftldata)
......@@ -14,18 +14,23 @@
#include <vector>
#include <fstream>
#include <thread>
#include <set>
#include <opencv2/opencv.hpp>
#include <ftl/rgbd.hpp>
#include <ftl/data/framepool.hpp>
#include <ftl/streams/builder.hpp>
//#include <ftl/middlebury.hpp>
#include <ftl/net/universe.hpp>
#include <ftl/master.hpp>
#include <nlohmann/json.hpp>
#include <ftl/operators/disparity.hpp>
#include <ftl/operators/detectandtrack.hpp>
#include <ftl/operators/clipping.hpp>
#include <ftl/streams/netstream.hpp>
#include <ftl/streams/sender.hpp>
#include <ftl/streams/receiver.hpp>
#include <ftl/audio/source.hpp>
......@@ -34,6 +39,10 @@
#include "opencv2/highgui.hpp"
#include "opencv2/core/utility.hpp"
#ifdef HAVE_PYLON
#include <pylon/PylonIncludes.h>
#endif
#ifdef WIN32
#pragma comment(lib, "Rpcrt4.lib")
#endif
......@@ -51,8 +60,11 @@ using std::chrono::milliseconds;
using cv::Mat;
using json = nlohmann::json;
static bool quiet = false;
static void run(ftl::Configurable *root) {
Universe *net = ftl::create<Universe>(root, "net");
ftl::ctrl::Master ctrl(root, net);
ftl::timer::setHighPrecision(true);
......@@ -79,10 +91,8 @@ static void run(ftl::Configurable *root) {
}
int sync_counter = 0;
ftl::ctrl::Master ctrl(root, net);
// Sync clocks!
ftl::timer::add(ftl::timer::kTimerMain, [&time_peer,&sync_counter,net](int64_t ts) {
auto timer = ftl::timer::add(ftl::timer::kTimerMain, [&time_peer,&sync_counter,net](int64_t ts) {
if (sync_counter-- <= 0 && time_peer != ftl::UUID(0) ) {
sync_counter = 20;
auto start = std::chrono::high_resolution_clock::now();
......@@ -96,7 +106,7 @@ static void run(ftl::Configurable *root) {
//LOG(INFO) << "LATENCY: " << float(latency)/1000.0f << "ms";
if (clock_adjust != 0) {
LOG(INFO) << "Clock adjustment: " << clock_adjust << ", latency=" << float(latency)/1000.0f << "ms";
LOG(INFO) << "Clock adjustment: " << clock_adjust << ", latency=" << float(latency)/2000.0f << "ms";
ftl::timer::setClockAdjustment(clock_adjust);
}
});
......@@ -110,86 +120,220 @@ static void run(ftl::Configurable *root) {
auto paths = root->get<vector<string>>("paths");
string file = "";
if (paths && (*paths).size() > 0) file = (*paths)[(*paths).size()-1];
Source *source = nullptr;
source = ftl::create<Source>(root, "source", net);
for (auto &x : *paths) {
if (x != "") {
ftl::URI uri(x);
if (uri.isValid()) {
switch (uri.getScheme()) {
case ftl::URI::SCHEME_WS :
case ftl::URI::SCHEME_TCP : net->connect(x)->waitConnection(); break;
case ftl::URI::SCHEME_DEVICE :
case ftl::URI::SCHEME_FILE : file = x; break;
default: break;
}
}
}
}
if (file != "") {
//source->set("uri", file);
ftl::URI uri(file);
uri.to_json(source->getConfig());
source->set("uri", uri.getBaseURI());
uri.to_json(root->getConfig()["source"]);
}
Source *source = nullptr;
source = ftl::create<Source>(root, "source");
ftl::stream::Sender *sender = ftl::create<ftl::stream::Sender>(root, "sender");
ftl::stream::Net *outstream = ftl::create<ftl::stream::Net>(root, "stream", net);
outstream->set("uri", outstream->getID());
outstream->set("uri", root->value("uri", outstream->getID()));
outstream->begin();
sender->setStream(outstream);
auto *grp = new ftl::rgbd::Group();
source->setChannel(Channel::Depth);
grp->addSource(source);
ftl::audio::Source *audioSrc = ftl::create<ftl::audio::Source>(root, "audio");
ftl::data::Pool pool(root->value("mempool_min", 2),root->value("mempool_max", 5));
auto *creator = new ftl::streams::IntervalSourceBuilder(&pool, 0, {source, audioSrc});
std::shared_ptr<ftl::streams::BaseBuilder> creatorptr(creator);
ftl::stream::Receiver *receiver = ftl::create<ftl::stream::Receiver>(root, "receiver", &pool);
receiver->setStream(outstream);
receiver->registerBuilder(creatorptr);
// Which channels should be encoded
std::set<Channel> encodable;
std::set<Channel> previous_encodable;
// Send channels on flush
auto flushhandle = pool.onFlushSet([sender,&encodable](ftl::data::FrameSet &fs, ftl::codecs::Channel c) {
//if (c != Channel::EndFrame && !fs.test(ftl::data::FSFlag::AUTO_SEND)) return true;
// Always send data channels
if ((int)c >= 32) sender->post(fs, c);
else {
// Only encode some of the video channels
if (encodable.count(c)) {
sender->post(fs, c);
} else {
sender->post(fs, c, true);
}
}
return true;
});
int stats_count = 0;
int frames = 0;
float latency = 0.0f;
int64_t stats_time = 0;
root->on("quiet", quiet, false);
auto *pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline");
pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
pipeline->append<ftl::operators::DepthChannel>("depth"); // Ensure there is a depth channel
pipeline->append<ftl::operators::ClipScene>("clipping")->value("enabled", false);
pipeline->restore("vision_pipeline", { "clipping" });
auto h = creator->onFrameSet([sender,outstream,&stats_count,&latency,&frames,&stats_time,pipeline,&encodable,&previous_encodable](const ftl::data::FrameSetPtr &fs) {
// Decide what to encode here, based upon what remote users select
const auto sel = outstream->selectedNoExcept(fs->frameset());
encodable.clear();
encodable.insert(sel.begin(), sel.end());
// Only allow the two encoders to exist, remove the rest
int max_encodeable = sender->value("max_encodeable", 2);
if (encodable.size() > max_encodeable) {
auto enciter = encodable.begin();
std::advance(enciter, max_encodeable);
encodable.erase(enciter, encodable.end());
}
// This ensures we cleanup other encoders
if (encodable != previous_encodable) sender->resetEncoders(fs->frameset());
previous_encodable = encodable;
fs->set(ftl::data::FSFlag::AUTO_SEND);
bool did_pipe = pipeline->queue(fs, [fs,&frames,&latency]() {
if (fs->hasAnyChanged(Channel::Depth)) fs->flush(Channel::Depth);
++frames;
latency += float(ftl::timer::get_time() - fs->timestamp());
const_cast<ftl::data::FrameSetPtr&>(fs).reset();
});
if (!did_pipe) {
LOG(WARNING) << "Depth pipeline drop: " << fs->timestamp();
fs->firstFrame().message(ftl::data::Message::Warning_PIPELINE_DROP, "Depth pipeline drop");
}
grp->onFrameSet([sender,&stats_count](ftl::rgbd::FrameSet &fs) {
fs.id = 0;
sender->post(fs);
// Do some encoding (eg. colour) whilst pipeline runs
ftl::pool.push([fs,&stats_count,&latency,&frames,&stats_time](int id){
if (fs->hasAnyChanged(Channel::Audio)) {
fs->flush(ftl::codecs::Channel::Audio);
}
// Make sure upload has completed.
cudaSafeCall(cudaEventSynchronize(fs->frames[0].uploadEvent()));
// TODO: Try depth pipeline again here if failed first time.
fs->flush(ftl::codecs::Channel::Colour);
if (--stats_count <= 0) {
auto [fps,latency] = ftl::rgbd::Builder::getStatistics();
const_cast<ftl::data::FrameSetPtr&>(fs).reset();
if (!quiet && --stats_count <= 0) {
latency /= float(frames);
int64_t nowtime = ftl::timer::get_time();
stats_time = nowtime - stats_time;
float fps = float(frames) / (float(stats_time) / 1000.0f);
LOG(INFO) << "Frame rate: " << fps << ", Latency: " << latency;
stats_count = 20;
frames = 0;
latency = 0.0f;
stats_time = nowtime;
}
return true;
});
// TODO: TEMPORARY
ftl::audio::Source *audioSrc = ftl::create<ftl::audio::Source>(root, "audio_test");
audioSrc->onFrameSet([sender](ftl::audio::FrameSet &fs) {
sender->post(fs);
const_cast<ftl::data::FrameSetPtr&>(fs).reset();
return true;
});
auto pipeline = ftl::config::create<ftl::operators::Graph>(root, "pipeline");
pipeline->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
pipeline->append<ftl::operators::ArUco>("aruco")->value("enabled", false);
pipeline->append<ftl::operators::DepthChannel>("depth"); // Ensure there is a depth channel
grp->addPipeline(pipeline);
// Start the timed generation of frames
creator->start();
// Only now start listening for connections
net->start();
LOG(INFO) << "Running...";
ftl::timer::start(true);
ftl::timer::start(true); // Blocks
LOG(INFO) << "Stopping...";
ctrl.stop();
ftl::config::save();
net->shutdown();
ftl::pool.stop();
delete grp;
delete source;
delete receiver;
delete sender;
delete pipeline;
delete audioSrc;
delete outstream;
//delete source; // TODO(Nick) Add ftl::destroy
delete net;
}
int main(int argc, char **argv) {
#ifdef HAVE_PYLON
	// Basler Pylon SDK requires global init/terminate bracketing all camera use.
	Pylon::PylonAutoInitTerm autoInitTerm;
#endif
#ifdef WIN32
	SetPriorityClass(GetCurrentProcess(), HIGH_PRIORITY_CLASS);
#endif
	std::cout << "FTL Vision Node " << FTL_VERSION_LONG << std::endl;

	try {
		// BUGFIX: a second ftl::configure() call used to precede this one outside
		// the try block; its result was immediately shadowed here and leaked.
		auto root = ftl::configure(argc, argv, "vision_default", {
			"uri",
			"fps",
			"time_master",
			"time_peer",
			"quiet"
		});

		root->value("restart", 0);

		// Allow config controlled restart
		root->on("restart", [root]() {
			auto val = root->get<int>("restart");
			if (val) {
				ftl::exit_code = *val;
				ftl::running = false;
			}
		});

		// Use other GPU if available.
		//ftl::cuda::setDevice(ftl::cuda::deviceCount()-1);

		std::cout << "Loading..." << std::endl;
		run(root);

		delete root;
		ftl::config::cleanup();

		LOG(INFO) << "Terminating with code " << ftl::exit_code;
		LOG(INFO) << "Branch: " << ftl::branch_name;
	} catch (const std::exception &e) {
		LOG(ERROR) << "Main Exception: " << e.what();
		return -1;
	}

	return ftl::exit_code;
}
#include <ftl/middlebury.hpp>
#include <loguru.hpp>
#include <ftl/rgbd.hpp>
#include <string>
#include <algorithm>
#include <nlohmann/json.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
using cv::Mat;
using cv::Size;
using std::string;
using std::min;
using std::max;
using std::isnan;
// Skip '#' comment lines in the headers of pnm files, leaving the stream
// positioned at the first non-comment character.
static void skip_comment(FILE *fp) {
	// int, not char: getc() returns int so EOF is representable.
	int c;
	while ((c = getc(fp)) == '#') {
		// Consume to end of line; BUGFIX: also stop at EOF, otherwise an
		// unterminated comment at end-of-file spun here forever.
		int d;
		while ((d = getc(fp)) != '\n' && d != EOF) ;
	}
	if (c != EOF)
		ungetc(c, fp);
}
// Skip white space in the headers of pnm files, leaving the stream
// positioned at the first non-whitespace character.
static void skip_space(FILE *fp) {
	// int, not char: getc() returns int so EOF is representable and the
	// loop terminates cleanly at end-of-file.
	int c;
	do {
		c = getc(fp);
	} while (c == '\n' || c == ' ' || c == '\t' || c == '\r');
	if (c != EOF)
		ungetc(c, fp);
}
// Read the header of a pnm file: verify the two-character magic code, then
// parse width and height (and band count when thirdArg is non-zero).
// Aborts via LOG(FATAL) on any malformed header.
static void read_header(FILE *fp, const char *imtype, char c1, char c2,
		int *width, int *height, int *nbands, int thirdArg)
{
	// int, not char: getc() returns int so EOF is representable.
	int c;
	if (getc(fp) != c1 || getc(fp) != c2)
		LOG(FATAL) << "ReadFilePGM: wrong magic code for " << imtype << " file";
	skip_space(fp);
	skip_comment(fp);
	skip_space(fp);
	// BUGFIX: fscanf results were previously ignored, leaving *width/*height
	// uninitialized on malformed input.
	if (fscanf(fp, "%d", width) != 1)
		LOG(FATAL) << "ReadFilePGM: could not read image width in " << imtype << " file";
	skip_space(fp);
	if (fscanf(fp, "%d", height) != 1)
		LOG(FATAL) << "ReadFilePGM: could not read image height in " << imtype << " file";
	if (thirdArg) {
		skip_space(fp);
		if (fscanf(fp, "%d", nbands) != 1)
			LOG(FATAL) << "ReadFilePGM: could not read band count in " << imtype << " file";
	}
	// skip SINGLE newline character after reading image height (or third arg)
	c = getc(fp);
	if (c == '\r')	// <cr> in some files before newline
		c = getc(fp);
	if (c != '\n') {
		if (c == ' ' || c == '\t' || c == '\r')
			LOG(FATAL) << "newline expected in file after image height";
		else
			LOG(FATAL) << "whitespace expected in file after image height";
	}
}
// Returns 1 when the host stores the least-significant byte of an int first
// (little endian), 0 otherwise.
static int littleendian() {
	const int probe = 1;
	const unsigned char *bytes = reinterpret_cast<const unsigned char *>(&probe);
	return bytes[0] == 1;
}
// 1-band PFM image, see http://netpbm.sourceforge.net/doc/pfm.html
// 3-band not yet supported
// Read a 1-band PFM image into a CV_32FC1 Mat, byte-swapping when the file's
// endianness (sign of the scale factor) differs from the machine's.
// Aborts via LOG(FATAL) on any I/O or format error.
void ftl::middlebury::readFilePFM(Mat &img, const string &filename)
{
	// Open the file and read the header
	FILE *fp = fopen(filename.c_str(), "rb");
	if (fp == 0)
		LOG(FATAL) << "ReadFilePFM: could not open \"" << filename << "\"";

	int width, height, nBands;
	read_header(fp, "PFM", 'P', 'f', &width, &height, &nBands, 0);

	skip_space(fp);

	// Scale factor; its sign encodes endianness (negative == little endian).
	// BUGFIX: fscanf result was previously ignored.
	float scalef;
	if (fscanf(fp, "%f", &scalef) != 1)
		LOG(FATAL) << "ReadFilePFM(" << filename << "): could not read scale factor";

	// skip SINGLE newline character after reading third arg
	// int, not char: getc() returns int so EOF is representable.
	int c = getc(fp);
	if (c == '\r')	// <cr> in some files before newline
		c = getc(fp);
	if (c != '\n') {
		if (c == ' ' || c == '\t' || c == '\r')
			LOG(FATAL) << "newline expected in file after scale factor";
		else
			LOG(FATAL) << "whitespace expected in file after scale factor";
	}

	// Allocate the image
	img = Mat(height, width, CV_32FC1);

	int littleEndianFile = (scalef < 0);
	int littleEndianMachine = littleendian();
	int needSwap = (littleEndianFile != littleEndianMachine);

	for (int y = height-1; y >= 0; y--) { // PFM stores rows in inverse order
		int n = width;
		float* ptr = &img.at<float>(y, 0);
		if ((int)fread(ptr, sizeof(float), n, fp) != n)
			LOG(FATAL) << "ReadFilePFM(" << filename << "): file is too short";

		if (needSwap) { // if endianness doesn't agree, swap bytes of each float
			// BUGFIX: previously took the row address via img.at<uchar>() on a
			// CV_32F Mat (wrong element type; asserts in debug builds). Reuse
			// the float row pointer instead.
			uchar* bp = (uchar *)ptr;
			uchar tmp = 0;
			for (int x = 0; x < n; x++) {
				tmp = bp[0]; bp[0] = bp[3]; bp[3] = tmp;
				tmp = bp[1]; bp[1] = bp[2]; bp[2] = tmp;
				bp += 4;
			}
		}
	}
	if (fclose(fp))
		LOG(FATAL) << "ReadFilePFM(" << filename << "): error closing file"; // was mislabelled "ReadFilePGM"
}
// 1-band PFM image, see http://netpbm.sourceforge.net/doc/pfm.html
// 3-band not yet supported
// Write a 1-band float Mat as a PFM file. The sign of the scale factor
// encodes the machine's endianness (negative == little endian).
// Aborts via LOG(FATAL) on any I/O or format error.
void ftl::middlebury::writeFilePFM(const Mat &img, const char* filename, float scalefactor)
{
	Size sh = img.size();
	int nBands = img.channels();
	if (nBands != 1)
		LOG(FATAL) << "WriteFilePFM(" << filename << "): can only write 1-band image as pfm for now";

	// Open the file
	FILE *stream = fopen(filename, "wb");
	if (stream == 0)
		LOG(FATAL) << "WriteFilePFM: could not open " << filename;

	// sign of scalefact indicates endianness, see pfm specs
	if (littleendian())
		scalefactor = -scalefactor;

	// write the header: 3 lines: Pf, dimensions, scale factor (negative val == little endian)
	fprintf(stream, "Pf\n%d %d\n%f\n", sh.width, sh.height, scalefactor);

	int n = sh.width;
	// write rows -- pfm stores rows in inverse order!
	for (int y = sh.height-1; y >= 0; y--) {
		// BUGFIX: was &img.at<float>(0, y, 0) — row 0, column y — which wrote
		// garbage for any image taller than one row. Take the start of row y,
		// matching readFilePFM.
		const float* ptr = &img.at<float>(y, 0);
		if ((int)fwrite(ptr, sizeof(float), n, stream) != n)
			LOG(FATAL) << "WriteFilePFM(" << filename << "): file is too short";
	}

	// close file
	if (fclose(stream))
		LOG(FATAL) << "WriteFilePFM(" << filename << "): error closing file";
}
// Evaluate a computed disparity map against ground truth (Middlebury-style)
// and print summary statistics to stdout: pixel coverage, bad-pixel %,
// invalid %, total-bad %, and average error over valid evaluated pixels.
//
// disp      computed disparity (CV_32F); may be 1x, 1/2x or 1/4x the GT size
// gtdisp    ground-truth disparity (CV_32F); INFINITY marks unknown pixels
// mask      optional evaluation mask (empty == evaluate everywhere); pixels
//           whose mask value != 255 are skipped
// badthresh error threshold above which a valid pixel counts as "bad"
// maxdisp   maximum disparity (in disp's own scale) used to clip values
// rounddisp when non-zero, round disparities before computing the error
void ftl::middlebury::evaldisp(const Mat &disp, const Mat &gtdisp, const Mat &mask, float badthresh, int maxdisp, int rounddisp)
{
Size sh = gtdisp.size();
Size sh2 = disp.size();
Size msh = mask.size();
int width = sh.width, height = sh.height;
int width2 = sh2.width, height2 = sh2.height;
// GT may be an integer multiple (1, 2 or 4) of the computed disparity's size.
int scale = width / width2;
// NOTE(review): on size mismatch this only logs an error and carries on with
// the (possibly wrong) scale — confirm whether it should abort instead.
if ((!(scale == 1 || scale == 2 || scale == 4))
|| (scale * width2 != width)
|| (scale * height2 != height)) {
printf("   disp size = %4d x %4d\n", width2, height2);
printf("GT disp size = %4d x %4d\n", width,  height);
LOG(ERROR) << "GT disp size must be exactly 1, 2, or 4 * disp size";
}
int usemask = (msh.width > 0 && msh.height > 0);
if (usemask && (msh != sh))
LOG(ERROR) << "mask image must have same size as GT";
int n = 0;        // evaluated pixels
int bad = 0;      // valid pixels with error > badthresh
int invalid = 0;  // holes in the (possibly sparse) disparity map
float serr = 0;   // sum of errors over valid pixels
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
float gt = gtdisp.at<float>(y, x, 0);
if (gt == INFINITY) // unknown
continue;
// Sample disp at the reduced resolution and rescale to GT units.
float d = scale * disp.at<float>(y / scale, x / scale, 0);
int valid = (!isnan(d) && d < 256.0f); // NOTE: Is meant to be infinity in middlebury
if (valid) {
float maxd = scale * maxdisp; // max disp range
d = max(0.0f, min(maxd, d)); // clip disps to max disp range
}
if (valid && rounddisp)
d = round(d);
float err = fabs(d - gt);
if (usemask && mask.at<float>(y, x, 0) != 255) { // don't evaluate pixel
} else {
n++;
if (valid) {
serr += err;
if (err > badthresh) {
bad++;
}
} else {// invalid (i.e. hole in sparse disp map)
invalid++;
}
}
}
}
// NOTE(review): if no pixels are evaluated (n == 0) these divisions yield
// inf/nan in the printed output — confirm whether a guard is wanted.
float badpercent =  100.0*bad/n;
float invalidpercent =  100.0*invalid/n;
float totalbadpercent =  100.0*(bad+invalid)/n;
float avgErr = serr / (n - invalid); // CHANGED 10/14/2014 -- was: serr / n
printf("mask  bad%.1f  invalid  totbad   avgErr\n", badthresh);
printf("%4.1f  %6.2f  %6.2f   %6.2f  %6.2f\n",   100.0*n/(width * height),
badpercent, invalidpercent, totalbadpercent, avgErr);
}
// Run a Middlebury evaluation pass: load a stereo pair and its ground-truth
// disparity from config["middlebury"]["dataset"], (optionally) rescale the
// inputs, evaluate the computed disparity with evaldisp(), and display the
// ground truth / disparity / difference images until ESC is pressed.
// NOTE(review): the disparity computation itself is currently disabled
// (see commented-out ftl::Disparity code below), so `disp` is empty here.
void ftl::middlebury::test(nlohmann::json &config) {
// Load dataset images
Mat l = cv::imread((string)config["middlebury"]["dataset"] + "/im0.png");
Mat r = cv::imread((string)config["middlebury"]["dataset"] + "/im1.png");
// Load ground truth
Mat gt;
readFilePFM(gt, (string)config["middlebury"]["dataset"] + "/disp0.pfm");
// Optionally scale the input pair (ground truth is resized later instead).
if ((float)config["middlebury"]["scale"] != 1.0f) {
float scale = (float)config["middlebury"]["scale"];
//cv::resize(gt, gt, cv::Size(gt.cols * scale,gt.rows * scale), 0, 0, cv::INTER_LINEAR);
cv::resize(l, l, cv::Size(l.cols * scale,l.rows * scale), 0, 0, cv::INTER_LINEAR);
cv::resize(r, r, cv::Size(r.cols * scale,r.rows * scale), 0, 0, cv::INTER_LINEAR);
}
// TODO(Nick) Update to use an RGBD Image source
// Run algorithm
//auto disparity = ftl::Disparity::create(config["disparity"]);
Mat disp;
// disparity->compute(l,r,disp);
//disp.convertTo(disp, CV_32F);
// Display results
evaldisp(disp, gt, Mat(), (float)config["middlebury"]["threshold"], (int)config["disparity"]["maximum"], 0);
/*if (gt.cols > 1600) {
cv::resize(gt, gt, cv::Size(gt.cols * 0.25,gt.rows * 0.25), 0, 0, cv::INTER_LINEAR);
}*/
// Shrink very large disparity maps for on-screen display.
if (disp.cols > 1600) {
cv::resize(disp, disp, cv::Size(disp.cols * 0.25,disp.rows * 0.25), 0, 0, cv::INTER_LINEAR);
}
cv::resize(gt, gt, cv::Size(disp.cols,disp.rows), 0, 0, cv::INTER_LINEAR);
double mindisp, mindisp_gt;
double maxdisp, maxdisp_gt;
Mat mask;
// Mask out pixels at/above 255 (treated as invalid), then normalise the
// remaining range for display.
threshold(disp,mask,255.0, 255, cv::THRESH_BINARY_INV);
normalize(mask, mask, 0, 255, cv::NORM_MINMAX, CV_8U);
cv::minMaxLoc(disp, &mindisp, &maxdisp, 0, 0, mask);
cv::minMaxLoc(gt, &mindisp_gt, &maxdisp_gt, 0, 0);
//disp = (disp < 256.0f);
//disp = disp + (mindisp_gt - mindisp);
// Map disparity to 8-bit using the ground-truth maximum so both images share
// a comparable intensity scale.
disp.convertTo(disp, CV_8U, 255.0f / (maxdisp_gt*(float)config["middlebury"]["scale"]));
disp = disp & mask;
gt = gt / maxdisp_gt; // TODO Read from calib.txt
gt.convertTo(gt, CV_8U, 255.0f);
//disp = disp / maxdisp;
imshow("Ground Truth", gt);
imshow("Disparity", disp);
imshow("Diff", gt - disp);
// Block until the user presses ESC (27).
while (cv::waitKey(10) != 27);
/*cv::putText(yourImageMat,
"Here is some text",
cv::Point(5,5), // Coordinates
cv::FONT_HERSHEY_COMPLEX_SMALL, // Font
1.0, // Scale. 2.0 = 2x bigger
cv::Scalar(255,255,255), // BGR Color
1, // Line Thickness (Optional)
cv::CV_AA); // Anti-alias (Optional)*/
}
#include <loguru.hpp>
#include <ftl/streamer.hpp>
#include <vector>
// #include <zlib.h>
// #include <lz4.h>
using ftl::Streamer;
using ftl::net::Universe;
using cv::Mat;
using nlohmann::json;
using std::string;
using std::vector;
// Construct a streamer for the named source and register its RGB-D resource
// URI on the network.
Streamer::Streamer(Universe &net, json &config) : net_(net), config_(config) {
	uri_ = "ftl://utu.fi/" + (string)config["name"] + "/rgb-d";
	net.createResource(uri_);
}
// No resources owned directly; members clean themselves up.
Streamer::~Streamer() = default;
void Streamer::send(const Mat &rgb, const Mat &depth) {
// Compress the rgb as jpeg.
vector<unsigned char> rgb_buf;
cv::imencode(".jpg", rgb, rgb_buf);
Mat d2;
depth.convertTo(d2, CV_16UC1, 16*100);
vector<unsigned char> d_buf;
/*d_buf.resize(d2.step*d2.rows);
z_stream defstream;
defstream.zalloc = Z_NULL;
defstream.zfree = Z_NULL;
defstream.opaque = Z_NULL;
defstream.avail_in = d2.step*d2.rows;
defstream.next_in = (Bytef *)d2.data; // input char array
defstream.avail_out = (uInt)d2.step*d2.rows; // size of output
defstream.next_out = (Bytef *)d_buf.data(); // output char array
deflateInit(&defstream, Z_DEFAULT_COMPRESSION);
deflate(&defstream, Z_FINISH);
deflateEnd(&defstream);
d2.copyTo(last);
d_buf.resize(defstream.total_out);*/
// LZ4 Version
// d_buf.resize(LZ4_compressBound(depth.step*depth.rows));
// int s = LZ4_compress_default((char*)depth.data, (char*)d_buf.data(), depth.step*depth.rows, d_buf.size());
// d_buf.resize(s);
cv::imencode(".png", d2, d_buf);
//LOG(INFO) << "Depth Size = " << ((float)d_buf.size() / (1024.0f*1024.0f));
try {
net_.publish(uri_, rgb_buf, d_buf);
} catch (...) {
LOG(ERROR) << "Exception on net publish to " << uri_;
}
}
/*
* Copyright 2019 Nicolas Pope
*/
#include <ftl/synched.hpp>
using ftl::SyncSource;
using cv::Mat;
// Start with two empty channel slots (default-constructed Mats).
SyncSource::SyncSource() {
	channels_.resize(2);
}
// TODO: not implemented — named channels are currently ignored; only the two
// default slots created in the constructor exist.
void SyncSource::addChannel(const std::string &c) {
}
// Store frame `m` into the given channel slot. Out-of-range channels are
// silently ignored. `ts` (timestamp) is currently unused.
void SyncSource::feed(int channel, cv::Mat &m, double ts) {
	// BUGFIX: bound check was `channel > size()`, letting channel == size()
	// through and indexing one past the end; negative channels were also
	// unchecked.
	if (channel < 0 || channel >= static_cast<int>(channels_.size())) return;
	channels_[channel] = m;
}
// Fetch the current frame for the given channel into `m`.
// Returns false for out-of-range channels, true otherwise.
bool SyncSource::get(int channel, cv::Mat &m) {
	// BUGFIX: bound check was `channel > size()`, letting channel == size()
	// through and indexing one past the end; negative channels were also
	// unchecked.
	if (channel < 0 || channel >= static_cast<int>(channels_.size())) return false;
	m = channels_[channel];
	return true;
}
// Placeholder: latency tracking is not implemented yet, so always reports 0.
double SyncSource::latency() const {
return 0.0;
}