Skip to content
Snippets Groups Projects
Commit b29e7a08 authored by Nicolas Pope's avatar Nicolas Pope
Browse files

WIP Refactor and alloc all before integrate

parent 3bc7f01b
No related branches found
No related tags found
1 merge request!55Resolves #103 with integration refactor
...@@ -14,6 +14,12 @@ ...@@ -14,6 +14,12 @@
namespace ftl { namespace ftl {
namespace voxhash { namespace voxhash {
// Per-input camera state: pairs an RGB-D source with its GPU-side
// buffers and the intrinsic parameters used during voxel integration.
struct Cameras {
	ftl::rgbd::Source *source;   // non-owning pointer to the RGB-D input
	DepthCameraData gpu;         // GPU buffers for depth/colour frame data
	DepthCameraParams params;    // intrinsics; m_imageWidth == 0 marks "not yet allocated"
};
class SceneRep : public ftl::Configurable { class SceneRep : public ftl::Configurable {
public: public:
SceneRep(nlohmann::json &config); SceneRep(nlohmann::json &config);
...@@ -24,7 +30,7 @@ class SceneRep : public ftl::Configurable { ...@@ -24,7 +30,7 @@ class SceneRep : public ftl::Configurable {
/** /**
* Send all camera frames to GPU and allocate required voxels. * Send all camera frames to GPU and allocate required voxels.
*/ */
void upload(); int upload();
/** /**
* Merge all camera frames into the voxel hash datastructure. * Merge all camera frames into the voxel hash datastructure.
...@@ -86,7 +92,9 @@ class SceneRep : public ftl::Configurable { ...@@ -86,7 +92,9 @@ class SceneRep : public ftl::Configurable {
//CUDAScan m_cudaScan; //CUDAScan m_cudaScan;
unsigned int m_numIntegratedFrames; //used for garbage collect unsigned int m_numIntegratedFrames; //used for garbage collect
unsigned int m_frameCount;
bool do_reset_; bool do_reset_;
std::vector<Cameras> cameras_;
}; };
}; // namespace voxhash }; // namespace voxhash
......
...@@ -88,24 +88,17 @@ static void run(ftl::Configurable *root) { ...@@ -88,24 +88,17 @@ static void run(ftl::Configurable *root) {
return; return;
} }
for (int i=0; i<sources.size(); i++) {
Source *in = sources[i];
auto &cam = inputs.emplace_back();
cam.source = in;
cam.params.m_imageWidth = 0;
}
// TODO move this block in its own method and add automated tests // TODO move this block in its own method and add automated tests
// for error cases // for error cases
std::optional<json_t> merge = root->get<json_t>("merge"); std::optional<json_t> merge = root->get<json_t>("merge");
if (!merge) { if (!merge) {
LOG(WARNING) << "Input merging not configured, using only first input in configuration"; LOG(WARNING) << "Input merging not configured, using only first input in configuration";
inputs = { inputs[0] }; sources = { sources[0] };
inputs[0].source->setPose(Eigen::Matrix4d::Identity()); sources[0]->setPose(Eigen::Matrix4d::Identity());
} }
if (inputs.size() > 1) { if (sources.size() > 1) {
std::map<std::string, Eigen::Matrix4d> transformations; std::map<std::string, Eigen::Matrix4d> transformations;
/*if ((*merge)["register"]) { /*if ((*merge)["register"]) {
...@@ -138,25 +131,25 @@ static void run(ftl::Configurable *root) { ...@@ -138,25 +131,25 @@ static void run(ftl::Configurable *root) {
} }
//} //}
for (auto &input : inputs) { for (auto &input : sources) {
string uri = input.source->getURI(); string uri = input->getURI();
auto T = transformations.find(uri); auto T = transformations.find(uri);
if (T == transformations.end()) { if (T == transformations.end()) {
LOG(ERROR) << "Camera pose for " + uri + " not found in transformations"; LOG(ERROR) << "Camera pose for " + uri + " not found in transformations";
LOG(WARNING) << "Using only first configured source"; LOG(WARNING) << "Using only first configured source";
// TODO: use target source if configured and found // TODO: use target source if configured and found
inputs = { inputs[0] }; sources = { sources[0] };
inputs[0].source->setPose(Eigen::Matrix4d::Identity()); sources[0]->setPose(Eigen::Matrix4d::Identity());
break; break;
} }
input.source->setPose(T->second); input->setPose(T->second);
} }
} }
// Displays and Inputs configured // Displays and Inputs configured
LOG(INFO) << "Using sources:"; //LOG(INFO) << "Using sources:";
for (auto &input : inputs) { LOG(INFO) << " " + (string) input.source->getURI(); } //for (auto &input : inputs) { LOG(INFO) << " " + (string) input.source->getURI(); }
//ftl::rgbd::Display *display = ftl::create<ftl::rgbd::Display>(root, "display"); //ftl::rgbd::Display *display = ftl::create<ftl::rgbd::Display>(root, "display");
ftl::rgbd::Source *virt = ftl::create<ftl::rgbd::Source>(root, "virtual", net); ftl::rgbd::Source *virt = ftl::create<ftl::rgbd::Source>(root, "virtual", net);
...@@ -171,8 +164,10 @@ static void run(ftl::Configurable *root) { ...@@ -171,8 +164,10 @@ static void run(ftl::Configurable *root) {
ftl::rgbd::Streamer *stream = ftl::create<ftl::rgbd::Streamer>(root, "stream", net); ftl::rgbd::Streamer *stream = ftl::create<ftl::rgbd::Streamer>(root, "stream", net);
stream->add(virt); stream->add(virt);
// Also proxy all inputs // Also proxy all inputs
for (auto &in : inputs) { for (int i=0; i<sources.size(); i++) {
stream->add(in.source); Source *in = sources[i];
stream->add(in);
scene->addSource(in);
} }
unsigned char frameCount = 0; unsigned char frameCount = 0;
...@@ -183,7 +178,7 @@ static void run(ftl::Configurable *root) { ...@@ -183,7 +178,7 @@ static void run(ftl::Configurable *root) {
// if (key == 32) paused = !paused; // if (key == 32) paused = !paused;
//}); //});
int active = inputs.size(); int active = sources.size();
while (ftl::running) { while (ftl::running) {
if (active == 0) { if (active == 0) {
LOG(INFO) << "Waiting for sources..."; LOG(INFO) << "Waiting for sources...";
...@@ -196,63 +191,16 @@ static void run(ftl::Configurable *root) { ...@@ -196,63 +191,16 @@ static void run(ftl::Configurable *root) {
//net.broadcast("grab"); // To sync cameras //net.broadcast("grab"); // To sync cameras
scene->nextFrame(); scene->nextFrame();
// TODO(Nick) Improve sync further... active = scene->upload();
for (size_t i = 0; i < inputs.size(); i++) {
inputs[i].source->grab();
}
stream->wait(); stream->wait();
for (size_t i = 0; i < inputs.size(); i++) { scene->integrate();
if (!inputs[i].source->isReady()) {
inputs[i].params.m_imageWidth = 0;
// TODO(Nick) : Free gpu allocs if was ready before
continue;
} else {
auto &cam = inputs[i];
auto in = inputs[i].source;
if (cam.params.m_imageWidth == 0) {
cam.params.fx = in->parameters().fx;
cam.params.fy = in->parameters().fy;
cam.params.mx = -in->parameters().cx;
cam.params.my = -in->parameters().cy;
cam.params.m_imageWidth = in->parameters().width;
cam.params.m_imageHeight = in->parameters().height;
cam.params.m_sensorDepthWorldMax = in->parameters().maxDepth;
cam.params.m_sensorDepthWorldMin = in->parameters().minDepth;
cam.gpu.alloc(cam.params);
}
//LOG(INFO) << in->getURI() << " loaded " << cam.params.fx;
}
// Get the RGB-Depth frame from input
Source *input = inputs[i].source;
Mat rgb, depth;
input->getFrames(rgb,depth);
active += 1;
if (depth.cols == 0) continue; // scene->garbage();
// Must be in RGBA for GPU
Mat rgba;
cv::cvtColor(rgb,rgba, cv::COLOR_BGR2BGRA);
inputs[i].params.flags = frameCount;
// Send to GPU and merge view into scene
inputs[i].gpu.updateParams(inputs[i].params);
inputs[i].gpu.updateData(depth, rgba);
// TODO(Nick): Use double pose
scene->integrate(inputs[i].source->getPose().cast<float>(), inputs[i].gpu, inputs[i].params, nullptr);
}
} else { } else {
active = 1; active = 1;
} }
frameCount++;
stream->poll(); stream->poll();
//display->update(); //display->update();
//sleep_for(milliseconds(10)); //sleep_for(milliseconds(10));
......
#include <ftl/voxel_scene.hpp> #include <ftl/voxel_scene.hpp>
using namespace ftl::voxhash; using namespace ftl::voxhash;
using ftl::rgbd::Source;
using ftl::Configurable; using ftl::Configurable;
using cv::Mat;
#define SAFE_DELETE_ARRAY(a) { delete [] (a); (a) = NULL; } #define SAFE_DELETE_ARRAY(a) { delete [] (a); (a) = NULL; }
...@@ -18,18 +20,8 @@ extern "C" void starveVoxelsKernelCUDA(ftl::voxhash::HashData& hashData, const f ...@@ -18,18 +20,8 @@ extern "C" void starveVoxelsKernelCUDA(ftl::voxhash::HashData& hashData, const f
extern "C" void garbageCollectIdentifyCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams); extern "C" void garbageCollectIdentifyCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams);
extern "C" void garbageCollectFreeCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams); extern "C" void garbageCollectFreeCUDA(ftl::voxhash::HashData& hashData, const ftl::voxhash::HashParams& hashParams);
SceneRep::SceneRep(nlohmann::json &config) : Configurable(config), do_reset_(false) { SceneRep::SceneRep(nlohmann::json &config) : Configurable(config), do_reset_(false), m_frameCount(0) {
REQUIRED({ // Allocates voxel structure on GPU
{"hashNumBuckets", "Desired hash entries divide bucket size", "number"},
{"hashMaxCollisionLinkedListSize", "", "number"},
{"hashNumSDFBlocks", "", "number"},
{"SDFVoxelSize", "Size in meters of one voxel", "number"},
{"SDFMaxIntegrationDistance", "", "number"},
{"SDFTruncation", "Base error size", "number"},
{"SDFTruncationScale", "Error size scale with depth", "number"},
{"SDFIntegrationWeightSample", "", "number"},
{"SDFIntegrationWeightMax", "", "number"}
});
_create(_parametersFromConfig()); _create(_parametersFromConfig());
on("SDFVoxelSize", [this](const ftl::config::Event &e) { on("SDFVoxelSize", [this](const ftl::config::Event &e) {
...@@ -59,6 +51,94 @@ SceneRep::~SceneRep() { ...@@ -59,6 +51,94 @@ SceneRep::~SceneRep() {
_destroy(); _destroy();
} }
// Register an RGB-D source with the scene. No GPU resources are
// allocated here; upload() performs lazy allocation once the source
// reports that it is ready (intrinsics are unknown until then).
void SceneRep::addSource(ftl::rgbd::Source *src) {
	cameras_.emplace_back();
	Cameras &cam = cameras_.back();
	cam.source = src;
	cam.params.m_imageWidth = 0;  // sentinel: intrinsics/GPU alloc still pending
}
int SceneRep::upload() {
int active = 0;
for (size_t i=0; i<cameras_.size(); ++i) {
cameras_[i].source->grab();
}
for (size_t i=0; i<cameras_.size(); ++i) {
auto &cam = cameras_[i];
if (!cam.source->isReady()) {
cam.params.m_imageWidth = 0;
// TODO(Nick) : Free gpu allocs if was ready before
continue;
} else {
auto in = cam.source;
// Only now do we have camera parameters for allocations...
if (cam.params.m_imageWidth == 0) {
cam.params.fx = in->parameters().fx;
cam.params.fy = in->parameters().fy;
cam.params.mx = -in->parameters().cx;
cam.params.my = -in->parameters().cy;
cam.params.m_imageWidth = in->parameters().width;
cam.params.m_imageHeight = in->parameters().height;
cam.params.m_sensorDepthWorldMax = in->parameters().maxDepth;
cam.params.m_sensorDepthWorldMin = in->parameters().minDepth;
cam.gpu.alloc(cam.params);
}
}
// Get the RGB-Depth frame from input
Source *input = cam.source;
Mat rgb, depth;
// TODO(Nick) Direct GPU upload to save copy
input->getFrames(rgb,depth);
active += 1;
if (depth.cols == 0) continue;
// Must be in RGBA for GPU
Mat rgba;
cv::cvtColor(rgb,rgba, cv::COLOR_BGR2BGRA);
cam.params.flags = m_frameCount;
// Send to GPU and merge view into scene
cam.gpu.updateParams(cam.params);
cam.gpu.updateData(depth, rgba);
setLastRigidTransform(input->getPose().cast<float>());
//make the rigid transform available on the GPU
m_hashData.updateParams(m_hashParams);
//allocate all hash blocks which are corresponding to depth map entries
_alloc(cam.gpu, cam.params, nullptr);
}
return active;
}
// Fuse every camera's uploaded depth frame into the voxel SDF structure.
// NOTE(review): iterates all registered cameras, including ones that
// failed upload() this cycle — confirm the kernels tolerate that.
void SceneRep::integrate() {
	for (auto &cam : cameras_) {
		// Publish this camera's pose so kernels use the correct transform.
		setLastRigidTransform(cam.source->getPose().cast<float>());
		m_hashData.updateParams(m_hashParams);

		// Build a linear array containing only the occupied hash entries...
		_compactifyHashEntries();

		// ...then volumetrically integrate the depth data into the SDF blocks.
		_integrateDepthMap(cam.gpu, cam.params);

		_garbageCollect(cam.gpu);

		++m_numIntegratedFrames;  // used for garbage-collection bookkeeping
	}
}
void SceneRep::integrate(const Eigen::Matrix4f& lastRigidTransform, const DepthCameraData& depthCameraData, const DepthCameraParams& depthCameraParams, unsigned int* d_bitMask) { void SceneRep::integrate(const Eigen::Matrix4f& lastRigidTransform, const DepthCameraData& depthCameraData, const DepthCameraParams& depthCameraParams, unsigned int* d_bitMask) {
setLastRigidTransform(lastRigidTransform); setLastRigidTransform(lastRigidTransform);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment