Skip to content
Snippets Groups Projects
Commit 02fb3fee authored by Nicolas Pope's avatar Nicolas Pope
Browse files

Remove all ref to bucket size

parent 11394738
No related branches found
No related tags found
1 merge request: !60 "Resolves #63 bucket removal"
Pipeline #11960 passed
...@@ -36,9 +36,7 @@ typedef signed char schar; ...@@ -36,9 +36,7 @@ typedef signed char schar;
#include <ftl/depth_camera.hpp> #include <ftl/depth_camera.hpp>
#define HANDLE_COLLISIONS
#define SDF_BLOCK_SIZE 8 #define SDF_BLOCK_SIZE 8
#define HASH_BUCKET_SIZE 1
#ifndef MINF #ifndef MINF
#define MINF __int_as_float(0xff800000) #define MINF __int_as_float(0xff800000)
......
...@@ -21,8 +21,8 @@ struct __align__(16) HashParams { ...@@ -21,8 +21,8 @@ struct __align__(16) HashParams {
float4x4 m_rigidTransformInverse; float4x4 m_rigidTransformInverse;
unsigned int m_hashNumBuckets; unsigned int m_hashNumBuckets;
unsigned int m_hashBucketSize; unsigned int m_deprecated1;
unsigned int m_hashMaxCollisionLinkedListSize; unsigned int m_deprecated2; //m_hashMaxCollisionLinkedListSize;
unsigned int m_numSDFBlocks; unsigned int m_numSDFBlocks;
int m_SDFBlockSize; int m_SDFBlockSize;
......
...@@ -68,7 +68,7 @@ __global__ void compactifyVisibleKernel(HashData hashData, HashParams hashParams ...@@ -68,7 +68,7 @@ __global__ void compactifyVisibleKernel(HashData hashData, HashParams hashParams
//const HashParams& hashParams = c_hashParams; //const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
#ifdef COMPACTIFY_HASH_SIMPLE #ifdef COMPACTIFY_HASH_SIMPLE
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) { if (idx < hashParams.m_hashNumBuckets) {
if (hashData.d_hash[idx].ptr != FREE_ENTRY) { if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
if (hashData.isSDFBlockInCameraFrustumApprox(hashParams, camera, hashData.d_hash[idx].pos)) if (hashData.isSDFBlockInCameraFrustumApprox(hashParams, camera, hashData.d_hash[idx].pos))
{ {
...@@ -83,7 +83,7 @@ __global__ void compactifyVisibleKernel(HashData hashData, HashParams hashParams ...@@ -83,7 +83,7 @@ __global__ void compactifyVisibleKernel(HashData hashData, HashParams hashParams
__syncthreads(); __syncthreads();
int addrLocal = -1; int addrLocal = -1;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) { if (idx < hashParams.m_hashNumBuckets) {
if (hashData.d_hash[idx].ptr != FREE_ENTRY) { if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
if (hashData.isSDFBlockInCameraFrustumApprox(hashParams, camera, hashData.d_hash[idx].pos)) if (hashData.isSDFBlockInCameraFrustumApprox(hashParams, camera, hashData.d_hash[idx].pos))
{ {
...@@ -109,7 +109,7 @@ __global__ void compactifyVisibleKernel(HashData hashData, HashParams hashParams ...@@ -109,7 +109,7 @@ __global__ void compactifyVisibleKernel(HashData hashData, HashParams hashParams
void ftl::cuda::compactifyVisible(HashData& hashData, const HashParams& hashParams, const DepthCameraParams &camera, cudaStream_t stream) { void ftl::cuda::compactifyVisible(HashData& hashData, const HashParams& hashParams, const DepthCameraParams &camera, cudaStream_t stream) {
const unsigned int threadsPerBlock = COMPACTIFY_HASH_THREADS_PER_BLOCK; const unsigned int threadsPerBlock = COMPACTIFY_HASH_THREADS_PER_BLOCK;
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + threadsPerBlock - 1) / threadsPerBlock, 1); const dim3 gridSize((hashParams.m_hashNumBuckets + threadsPerBlock - 1) / threadsPerBlock, 1);
const dim3 blockSize(threadsPerBlock, 1); const dim3 blockSize(threadsPerBlock, 1);
cudaSafeCall(cudaMemsetAsync(hashData.d_hashCompactifiedCounter, 0, sizeof(int),stream)); cudaSafeCall(cudaMemsetAsync(hashData.d_hashCompactifiedCounter, 0, sizeof(int),stream));
...@@ -129,7 +129,7 @@ __global__ void compactifyAllocatedKernel(HashData hashData) ...@@ -129,7 +129,7 @@ __global__ void compactifyAllocatedKernel(HashData hashData)
const HashParams& hashParams = c_hashParams; const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
#ifdef COMPACTIFY_HASH_SIMPLE #ifdef COMPACTIFY_HASH_SIMPLE
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) { if (idx < hashParams.m_hashNumBuckets) {
if (hashData.d_hash[idx].ptr != FREE_ENTRY) { if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
int addr = atomicAdd(hashData.d_hashCompactifiedCounter, 1); int addr = atomicAdd(hashData.d_hashCompactifiedCounter, 1);
hashData.d_hashCompactified[addr] = hashData.d_hash[idx]; hashData.d_hashCompactified[addr] = hashData.d_hash[idx];
...@@ -141,7 +141,7 @@ __global__ void compactifyAllocatedKernel(HashData hashData) ...@@ -141,7 +141,7 @@ __global__ void compactifyAllocatedKernel(HashData hashData)
__syncthreads(); __syncthreads();
int addrLocal = -1; int addrLocal = -1;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) { if (idx < hashParams.m_hashNumBuckets) {
if (hashData.d_hash[idx].ptr != FREE_ENTRY) { if (hashData.d_hash[idx].ptr != FREE_ENTRY) {
addrLocal = atomicAdd(&localCounter, 1); addrLocal = atomicAdd(&localCounter, 1);
} }
...@@ -164,7 +164,7 @@ __global__ void compactifyAllocatedKernel(HashData hashData) ...@@ -164,7 +164,7 @@ __global__ void compactifyAllocatedKernel(HashData hashData)
void ftl::cuda::compactifyAllocated(HashData& hashData, const HashParams& hashParams, cudaStream_t stream) { void ftl::cuda::compactifyAllocated(HashData& hashData, const HashParams& hashParams, cudaStream_t stream) {
const unsigned int threadsPerBlock = COMPACTIFY_HASH_THREADS_PER_BLOCK; const unsigned int threadsPerBlock = COMPACTIFY_HASH_THREADS_PER_BLOCK;
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + threadsPerBlock - 1) / threadsPerBlock, 1); const dim3 gridSize((hashParams.m_hashNumBuckets + threadsPerBlock - 1) / threadsPerBlock, 1);
const dim3 blockSize(threadsPerBlock, 1); const dim3 blockSize(threadsPerBlock, 1);
cudaSafeCall(cudaMemsetAsync(hashData.d_hashCompactifiedCounter, 0, sizeof(int), stream)); cudaSafeCall(cudaMemsetAsync(hashData.d_hashCompactifiedCounter, 0, sizeof(int), stream));
......
...@@ -98,7 +98,7 @@ __global__ void resetHashKernel(HashData hashData) ...@@ -98,7 +98,7 @@ __global__ void resetHashKernel(HashData hashData)
{ {
const HashParams& hashParams = c_hashParams; const HashParams& hashParams = c_hashParams;
const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x; const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < hashParams.m_hashNumBuckets * HASH_BUCKET_SIZE) { if (idx < hashParams.m_hashNumBuckets) {
hashData.deleteHashEntry(hashData.d_hash[idx]); hashData.deleteHashEntry(hashData.d_hash[idx]);
hashData.deleteHashEntry(hashData.d_hashCompactified[idx]); hashData.deleteHashEntry(hashData.d_hashCompactified[idx]);
} }
...@@ -133,7 +133,7 @@ extern "C" void resetCUDA(HashData& hashData, const HashParams& hashParams) ...@@ -133,7 +133,7 @@ extern "C" void resetCUDA(HashData& hashData, const HashParams& hashParams)
{ {
//resetting the hash //resetting the hash
const dim3 gridSize((HASH_BUCKET_SIZE * hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1); const dim3 gridSize((hashParams.m_hashNumBuckets + (T_PER_BLOCK*T_PER_BLOCK) - 1)/(T_PER_BLOCK*T_PER_BLOCK), 1);
const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1); const dim3 blockSize((T_PER_BLOCK*T_PER_BLOCK), 1);
resetHashKernel<<<gridSize, blockSize>>>(hashData); resetHashKernel<<<gridSize, blockSize>>>(hashData);
......
...@@ -8,20 +8,20 @@ void HashData::allocate(const HashParams& params, bool dataOnGPU) { ...@@ -8,20 +8,20 @@ void HashData::allocate(const HashParams& params, bool dataOnGPU) {
if (m_bIsOnGPU) { if (m_bIsOnGPU) {
cudaSafeCall(cudaMalloc(&d_heap, sizeof(unsigned int) * params.m_numSDFBlocks)); cudaSafeCall(cudaMalloc(&d_heap, sizeof(unsigned int) * params.m_numSDFBlocks));
cudaSafeCall(cudaMalloc(&d_heapCounter, sizeof(unsigned int))); cudaSafeCall(cudaMalloc(&d_heapCounter, sizeof(unsigned int)));
cudaSafeCall(cudaMalloc(&d_hash, sizeof(HashEntry)* params.m_hashNumBuckets * params.m_hashBucketSize)); cudaSafeCall(cudaMalloc(&d_hash, sizeof(HashEntry)* params.m_hashNumBuckets));
cudaSafeCall(cudaMalloc(&d_hashDecision, sizeof(int)* params.m_hashNumBuckets * params.m_hashBucketSize)); cudaSafeCall(cudaMalloc(&d_hashDecision, sizeof(int)* params.m_hashNumBuckets));
cudaSafeCall(cudaMalloc(&d_hashDecisionPrefix, sizeof(int)* params.m_hashNumBuckets * params.m_hashBucketSize)); cudaSafeCall(cudaMalloc(&d_hashDecisionPrefix, sizeof(int)* params.m_hashNumBuckets));
cudaSafeCall(cudaMalloc(&d_hashCompactified, sizeof(HashEntry)* params.m_hashNumBuckets * params.m_hashBucketSize)); cudaSafeCall(cudaMalloc(&d_hashCompactified, sizeof(HashEntry)* params.m_hashNumBuckets));
cudaSafeCall(cudaMalloc(&d_hashCompactifiedCounter, sizeof(int))); cudaSafeCall(cudaMalloc(&d_hashCompactifiedCounter, sizeof(int)));
cudaSafeCall(cudaMalloc(&d_SDFBlocks, sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize)); cudaSafeCall(cudaMalloc(&d_SDFBlocks, sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize));
cudaSafeCall(cudaMalloc(&d_hashBucketMutex, sizeof(int)* params.m_hashNumBuckets)); cudaSafeCall(cudaMalloc(&d_hashBucketMutex, sizeof(int)* params.m_hashNumBuckets));
} else { } else {
d_heap = new unsigned int[params.m_numSDFBlocks]; d_heap = new unsigned int[params.m_numSDFBlocks];
d_heapCounter = new unsigned int[1]; d_heapCounter = new unsigned int[1];
d_hash = new HashEntry[params.m_hashNumBuckets * params.m_hashBucketSize]; d_hash = new HashEntry[params.m_hashNumBuckets];
d_hashDecision = new int[params.m_hashNumBuckets * params.m_hashBucketSize]; d_hashDecision = new int[params.m_hashNumBuckets];
d_hashDecisionPrefix = new int[params.m_hashNumBuckets * params.m_hashBucketSize]; d_hashDecisionPrefix = new int[params.m_hashNumBuckets];
d_hashCompactified = new HashEntry[params.m_hashNumBuckets * params.m_hashBucketSize]; d_hashCompactified = new HashEntry[params.m_hashNumBuckets];
d_hashCompactifiedCounter = new int[1]; d_hashCompactifiedCounter = new int[1];
d_SDFBlocks = new Voxel[params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize]; d_SDFBlocks = new Voxel[params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize];
d_hashBucketMutex = new int[params.m_hashNumBuckets]; d_hashBucketMutex = new int[params.m_hashNumBuckets];
...@@ -78,10 +78,10 @@ HashData HashData::download() const { ...@@ -78,10 +78,10 @@ HashData HashData::download() const {
hashData.allocate(params, false); //allocate the data on the CPU hashData.allocate(params, false); //allocate the data on the CPU
cudaSafeCall(cudaMemcpy(hashData.d_heap, d_heap, sizeof(unsigned int) * params.m_numSDFBlocks, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_heap, d_heap, sizeof(unsigned int) * params.m_numSDFBlocks, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_heapCounter, d_heapCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_heapCounter, d_heapCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_hash, d_hash, sizeof(HashEntry)* params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_hash, d_hash, sizeof(HashEntry)* params.m_hashNumBuckets, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_hashDecision, d_hashDecision, sizeof(int)*params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_hashDecision, d_hashDecision, sizeof(int)*params.m_hashNumBuckets, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_hashDecisionPrefix, d_hashDecisionPrefix, sizeof(int)*params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_hashDecisionPrefix, d_hashDecisionPrefix, sizeof(int)*params.m_hashNumBuckets, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_hashCompactified, d_hashCompactified, sizeof(HashEntry)* params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_hashCompactified, d_hashCompactified, sizeof(HashEntry)* params.m_hashNumBuckets, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_hashCompactifiedCounter, d_hashCompactifiedCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_hashCompactifiedCounter, d_hashCompactifiedCounter, sizeof(unsigned int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_SDFBlocks, d_SDFBlocks, sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_SDFBlocks, d_SDFBlocks, sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashData.d_hashBucketMutex, d_hashBucketMutex, sizeof(int)* params.m_hashNumBuckets, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashData.d_hashBucketMutex, d_hashBucketMutex, sizeof(int)* params.m_hashNumBuckets, cudaMemcpyDeviceToHost));
...@@ -97,10 +97,10 @@ HashData HashData::upload() const { ...@@ -97,10 +97,10 @@ HashData HashData::upload() const {
hashData.allocate(params, false); //allocate the data on the CPU hashData.allocate(params, false); //allocate the data on the CPU
cudaSafeCall(cudaMemcpy(hashData.d_heap, d_heap, sizeof(unsigned int) * params.m_numSDFBlocks, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_heap, d_heap, sizeof(unsigned int) * params.m_numSDFBlocks, cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_heapCounter, d_heapCounter, sizeof(unsigned int), cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_heapCounter, d_heapCounter, sizeof(unsigned int), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_hash, d_hash, sizeof(HashEntry)* params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_hash, d_hash, sizeof(HashEntry)* params.m_hashNumBuckets, cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_hashDecision, d_hashDecision, sizeof(int)*params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_hashDecision, d_hashDecision, sizeof(int)*params.m_hashNumBuckets, cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_hashDecisionPrefix, d_hashDecisionPrefix, sizeof(int)*params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_hashDecisionPrefix, d_hashDecisionPrefix, sizeof(int)*params.m_hashNumBuckets, cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_hashCompactified, d_hashCompactified, sizeof(HashEntry)* params.m_hashNumBuckets * params.m_hashBucketSize, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_hashCompactified, d_hashCompactified, sizeof(HashEntry)* params.m_hashNumBuckets, cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_hashCompactifiedCounter, d_hashCompactifiedCounter, sizeof(unsigned int), cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_hashCompactifiedCounter, d_hashCompactifiedCounter, sizeof(unsigned int), cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_SDFBlocks, d_SDFBlocks, sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_SDFBlocks, d_SDFBlocks, sizeof(Voxel) * params.m_numSDFBlocks * params.m_SDFBlockSize*params.m_SDFBlockSize*params.m_SDFBlockSize, cudaMemcpyHostToDevice));
cudaSafeCall(cudaMemcpy(hashData.d_hashBucketMutex, d_hashBucketMutex, sizeof(int)* params.m_hashNumBuckets, cudaMemcpyHostToDevice)); cudaSafeCall(cudaMemcpy(hashData.d_hashBucketMutex, d_hashBucketMutex, sizeof(int)* params.m_hashNumBuckets, cudaMemcpyHostToDevice));
......
...@@ -23,7 +23,7 @@ HashEntry HashData::getHashEntryForSDFBlockPos(const int3& sdfBlock) const { ...@@ -23,7 +23,7 @@ HashEntry HashData::getHashEntryForSDFBlockPos(const int3& sdfBlock) const {
if (curr.offset == 0) break; if (curr.offset == 0) break;
i += curr.offset; //go to next element in the list i += curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets); //check for overflow i %= (params().m_hashNumBuckets); //check for overflow
++maxIter; ++maxIter;
} }
...@@ -50,7 +50,7 @@ unsigned int HashData::getNumHashLinkedList(unsigned int bucketID) { ...@@ -50,7 +50,7 @@ unsigned int HashData::getNumHashLinkedList(unsigned int bucketID) {
if (curr.offset == 0) break; if (curr.offset == 0) break;
i += curr.offset; //go to next element in the list i += curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets); //check for overflow i %= (params().m_hashNumBuckets); //check for overflow
++listLen; ++listLen;
++maxIter; ++maxIter;
} }
...@@ -74,7 +74,7 @@ void HashData::allocBlock(const int3& pos, const uchar frame) { ...@@ -74,7 +74,7 @@ void HashData::allocBlock(const int3& pos, const uchar frame) {
if (curr.offset == 0) break; if (curr.offset == 0) break;
i += curr.offset; //go to next element in the list i += curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets); //check for overflow i %= (params().m_hashNumBuckets); //check for overflow
++maxIter; ++maxIter;
} }
...@@ -104,7 +104,7 @@ void HashData::allocBlock(const int3& pos, const uchar frame) { ...@@ -104,7 +104,7 @@ void HashData::allocBlock(const int3& pos, const uchar frame) {
} }
++j; ++j;
j %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets); //check for overflow j %= (params().m_hashNumBuckets); //check for overflow
++maxIter; ++maxIter;
} }
} }
...@@ -231,7 +231,7 @@ bool HashData::deleteHashEntryElement(const int3& sdfBlock) { ...@@ -231,7 +231,7 @@ bool HashData::deleteHashEntryElement(const int3& sdfBlock) {
} }
prev = i; prev = i;
i += curr.offset; //go to next element in the list i += curr.offset; //go to next element in the list
i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets); //check for overflow i %= (params().m_hashNumBuckets); //check for overflow
++maxIter; ++maxIter;
} }
......
...@@ -263,7 +263,7 @@ unsigned int SceneRep::getHeapFreeCount() { ...@@ -263,7 +263,7 @@ unsigned int SceneRep::getHeapFreeCount() {
//! debug only! //! debug only!
void SceneRep::debugHash() { void SceneRep::debugHash() {
HashEntry* hashCPU = new HashEntry[m_hashParams.m_hashBucketSize*m_hashParams.m_hashNumBuckets]; HashEntry* hashCPU = new HashEntry[m_hashParams.m_hashNumBuckets];
unsigned int* heapCPU = new unsigned int[m_hashParams.m_numSDFBlocks]; unsigned int* heapCPU = new unsigned int[m_hashParams.m_numSDFBlocks];
unsigned int heapCounterCPU; unsigned int heapCounterCPU;
...@@ -271,7 +271,7 @@ void SceneRep::debugHash() { ...@@ -271,7 +271,7 @@ void SceneRep::debugHash() {
heapCounterCPU++; //points to the first free entry: number of blocks is one more heapCounterCPU++; //points to the first free entry: number of blocks is one more
cudaSafeCall(cudaMemcpy(heapCPU, m_hashData.d_heap, sizeof(unsigned int)*m_hashParams.m_numSDFBlocks, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(heapCPU, m_hashData.d_heap, sizeof(unsigned int)*m_hashParams.m_numSDFBlocks, cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(hashCPU, m_hashData.d_hash, sizeof(HashEntry)*m_hashParams.m_hashBucketSize*m_hashParams.m_hashNumBuckets, cudaMemcpyDeviceToHost)); cudaSafeCall(cudaMemcpy(hashCPU, m_hashData.d_hash, sizeof(HashEntry)*m_hashParams.m_hashNumBuckets, cudaMemcpyDeviceToHost));
//Check for duplicates //Check for duplicates
class myint3Voxel { class myint3Voxel {
...@@ -316,7 +316,7 @@ void SceneRep::debugHash() { ...@@ -316,7 +316,7 @@ void SceneRep::debugHash() {
std::list<myint3Voxel> l; std::list<myint3Voxel> l;
//std::vector<myint3Voxel> v; //std::vector<myint3Voxel> v;
for (unsigned int i = 0; i < m_hashParams.m_hashBucketSize*m_hashParams.m_hashNumBuckets; i++) { for (unsigned int i = 0; i < m_hashParams.m_hashNumBuckets; i++) {
if (hashCPU[i].ptr == -1) { if (hashCPU[i].ptr == -1) {
numMinusOne++; numMinusOne++;
} }
...@@ -376,8 +376,6 @@ HashParams SceneRep::_parametersFromConfig() { ...@@ -376,8 +376,6 @@ HashParams SceneRep::_parametersFromConfig() {
params.m_rigidTransform.setIdentity(); params.m_rigidTransform.setIdentity();
params.m_rigidTransformInverse.setIdentity(); params.m_rigidTransformInverse.setIdentity();
params.m_hashNumBuckets = value("hashNumBuckets", 100000); params.m_hashNumBuckets = value("hashNumBuckets", 100000);
params.m_hashBucketSize = HASH_BUCKET_SIZE;
params.m_hashMaxCollisionLinkedListSize = value("hashMaxCollisionLinkedListSize", 7);
params.m_SDFBlockSize = SDF_BLOCK_SIZE; params.m_SDFBlockSize = SDF_BLOCK_SIZE;
params.m_numSDFBlocks = value("hashNumSDFBlocks",500000); params.m_numSDFBlocks = value("hashNumSDFBlocks",500000);
params.m_virtualVoxelSize = value("SDFVoxelSize", 0.006f); params.m_virtualVoxelSize = value("SDFVoxelSize", 0.006f);
......
Loading…
You are about to add 0 people to the discussion. Proceed with caution.
Please register or sign in to comment