diff --git a/applications/reconstruct/include/ftl/cuda_operators.hpp b/applications/reconstruct/include/ftl/cuda_operators.hpp
index eeb6f26c239cea0ab3673b8d6a8795aa09d92f06..21e109b89bdc98f6c504648eee0536875270c806 100644
--- a/applications/reconstruct/include/ftl/cuda_operators.hpp
+++ b/applications/reconstruct/include/ftl/cuda_operators.hpp
@@ -662,6 +662,11 @@ inline __host__ __device__ void operator/=(int3 &a, int s)
     a.x /= s; a.y /= s; a.z /= s;
 }
 
+// Comparison
+inline __host__ __device__ bool operator==(const int3 &a, const int3 &b) {
+	return a.x == b.x && a.y == b.y && a.z == b.z;
+}
+
 // clamp
 inline __device__ __host__ int clamp(int f, int a, int b)
 {
diff --git a/applications/reconstruct/include/ftl/voxel_hash.hpp b/applications/reconstruct/include/ftl/voxel_hash.hpp
index 3b388b4b6fd8d3b604193f9420e736a9eedb2fb0..b2c355b37424394ecf22cd7420c6663c66c27e74 100644
--- a/applications/reconstruct/include/ftl/voxel_hash.hpp
+++ b/applications/reconstruct/include/ftl/voxel_hash.hpp
@@ -38,7 +38,7 @@ typedef signed char schar;
 
 #define HANDLE_COLLISIONS
 #define SDF_BLOCK_SIZE 8
-#define HASH_BUCKET_SIZE 2
+#define HASH_BUCKET_SIZE 1
 
 #ifndef MINF
 #define MINF __int_as_float(0xff800000)
diff --git a/applications/reconstruct/src/voxel_hash.cu b/applications/reconstruct/src/voxel_hash.cu
index 3c1dee256d4d99437d38304f73b5b4f83f7cf160..d80534393970484ea1300feafd20db08777e959f 100644
--- a/applications/reconstruct/src/voxel_hash.cu
+++ b/applications/reconstruct/src/voxel_hash.cu
@@ -2,64 +2,35 @@
 
 using namespace ftl::voxhash;
 
+#define COLLISION_LIST_SIZE 5
+
 //! returns the hash entry for a given sdf block id; if there was no hash entry the returned entry will have a ptr with FREE_ENTRY set
 __device__ 
-HashEntry HashData::getHashEntryForSDFBlockPos(const int3& sdfBlock) const
-{
-	uint h = computeHashPos(sdfBlock);			//hash bucket
-	uint hp = h * HASH_BUCKET_SIZE;	//hash position
-
-	HashEntry entry;
-	entry.pos = sdfBlock;
-	entry.offset = 0;
-	entry.ptr = FREE_ENTRY;
-
-	for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
-		uint i = j + hp;
-		HashEntry curr = d_hash[i];
-		if (curr.pos.x == entry.pos.x && curr.pos.y == entry.pos.y && curr.pos.z == entry.pos.z && curr.ptr != FREE_ENTRY) {
-			return curr;
-		}
-	}
+HashEntry HashData::getHashEntryForSDFBlockPos(const int3& sdfBlock) const {
+	uint h = computeHashPos(sdfBlock); //hash bucket index (one entry per bucket)
+	int3 pos = sdfBlock;
 
-#ifdef HANDLE_COLLISIONS
-	const uint idxLastEntryInBucket = (h+1)*HASH_BUCKET_SIZE - 1;
-	int i = idxLastEntryInBucket;	//start with the last entry of the current bucket
 	HashEntry curr;
-	//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
 
+	int i = h;
 	unsigned int maxIter = 0;
-	uint g_MaxLoopIterCount = params().m_hashMaxCollisionLinkedListSize;
-	#pragma unroll 1 
-	while (maxIter < g_MaxLoopIterCount) {
+
+	#pragma unroll 2
+	while (maxIter < COLLISION_LIST_SIZE) {
 		curr = d_hash[i];
 
-		if (curr.pos.x == entry.pos.x && curr.pos.y == entry.pos.y && curr.pos.z == entry.pos.z && curr.ptr != FREE_ENTRY) {
-			return curr;
-		}
+		if (curr.pos == pos && curr.ptr != FREE_ENTRY) return curr;
+		if (curr.offset == 0) break;
 
-		if (curr.offset == 0) {	//we have found the end of the list
-			break;
-		}
-		i = idxLastEntryInBucket + curr.offset;						//go to next element in the list
-		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
-
-		maxIter++;
+		i +=  curr.offset;  //go to next element in the list
+		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);  //check for overflow
+		++maxIter;
 	}
-#endif
-	return entry;
-}
 
-//for histogram (no collision traversal)
-__device__ 
-unsigned int HashData::getNumHashEntriesPerBucket(unsigned int bucketID) {
-	unsigned int h = 0;
-	for (uint i = 0; i < HASH_BUCKET_SIZE; i++) {
-		if (d_hash[bucketID*HASH_BUCKET_SIZE+i].ptr != FREE_ENTRY) {
-			h++;
-		}
-	} 
-	return h;
+	// Not found: return a dummy entry with ptr == FREE_ENTRY so callers can detect the miss
+	curr.pos = pos;
+	curr.ptr = FREE_ENTRY;
+	return curr;
 }
 
 //for histogram (collisions traversal only)
@@ -67,31 +38,22 @@ __device__
 unsigned int HashData::getNumHashLinkedList(unsigned int bucketID) {
 	unsigned int listLen = 0;
 
-#ifdef HANDLE_COLLISIONS
-	const uint idxLastEntryInBucket = (bucketID+1)*HASH_BUCKET_SIZE - 1;
-	unsigned int i = idxLastEntryInBucket;	//start with the last entry of the current bucket
-	//int offset = 0;
+	unsigned int i = bucketID;	//start at this bucket's single entry (HASH_BUCKET_SIZE == 1)
 	HashEntry curr;	curr.offset = 0;
-	//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
 
 	unsigned int maxIter = 0;
-	uint g_MaxLoopIterCount = params().m_hashMaxCollisionLinkedListSize;
-	#pragma unroll 1 
-	while (maxIter < g_MaxLoopIterCount) {
-		//offset = curr.offset;
-		//curr = getHashEntry(g_Hash, i);
+
+	#pragma unroll 2 
+	while (maxIter < COLLISION_LIST_SIZE) {
 		curr = d_hash[i];
 
-		if (curr.offset == 0) {	//we have found the end of the list
-			break;
-		}
-		i = idxLastEntryInBucket + curr.offset;		//go to next element in the list
-		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
-		listLen++;
+		if (curr.offset == 0) break;
 
-		maxIter++;
+		i += curr.offset;		//go to next element in the list
+		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
+		++listLen;
+		++maxIter;
 	}
-#endif
 	
 	return listLen;
 }
@@ -99,119 +61,57 @@ unsigned int HashData::getNumHashLinkedList(unsigned int bucketID) {
 //pos in SDF block coordinates
 __device__
 void HashData::allocBlock(const int3& pos, const uchar frame) {
-
-
 	uint h = computeHashPos(pos);				//hash bucket
-	uint hp = h * HASH_BUCKET_SIZE;	//hash position
-
-	int firstEmpty = -1;
-	for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
-		uint i = j + hp;		
-		HashEntry& curr = d_hash[i];
-
-		//in that case the SDF-block is already allocated and corresponds to the current position -> exit thread
-		if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
-			//curr.flags = frame;  // Flag block as valid in this frame (Nick)
-			return;
-		}
-
-		//store the first FREE_ENTRY hash entry
-		if (firstEmpty == -1 && curr.ptr == FREE_ENTRY) {
-			firstEmpty = i;
-		}
-	}
-
-
-#ifdef HANDLE_COLLISIONS
-	//updated variables as after the loop
-	const uint idxLastEntryInBucket = (h+1)*HASH_BUCKET_SIZE - 1;	//get last index of bucket
-	uint i = idxLastEntryInBucket;											//start with the last entry of the current bucket
-	//int offset = 0;
+	uint i = h;
 	HashEntry curr;	curr.offset = 0;
-	//traverse list until end: memorize idx at list end and memorize offset from last element of bucket to list end
-	//int k = 0;
 
 	unsigned int maxIter = 0;
-	uint g_MaxLoopIterCount = c_hashParams.m_hashMaxCollisionLinkedListSize;
-	#pragma  unroll 1 
-	while (maxIter < g_MaxLoopIterCount) {
+	#pragma  unroll 2
+	while (maxIter < COLLISION_LIST_SIZE) {
 		//offset = curr.offset;
 		curr = d_hash[i];	//TODO MATTHIAS do by reference
-		if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
-			//curr.flags = frame;  // Flag block as valid in this frame (Nick)
-			return;
-		}
-		if (curr.offset == 0) {	//we have found the end of the list
-			break;
-		}
-		i = idxLastEntryInBucket + curr.offset;		//go to next element in the list
-		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
+		if (curr.pos == pos && curr.ptr != FREE_ENTRY) return;
+		if (curr.offset == 0) break;
 
-		maxIter++;
-	}
-#endif
-
-	if (firstEmpty != -1) {	//if there is an empty entry and we haven't allocated the current entry before
-		//int prevValue = 0;
-		//InterlockedExchange(d_hashBucketMutex[h], LOCK_ENTRY, prevValue);	//lock the hash bucket
-		int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
-		if (prevValue != LOCK_ENTRY) {	//only proceed if the bucket has been locked
-			HashEntry& entry = d_hash[firstEmpty];
-			entry.pos = pos;
-			entry.offset = NO_OFFSET;	
-			entry.flags = 0;  // Flag block as valid in this frame (Nick)	
-			entry.ptr = consumeHeap() * SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;	//memory alloc
-		}
-		return;
+		i += curr.offset;		//go to next element in the list
+		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
+		++maxIter;
 	}
 
-#ifdef HANDLE_COLLISIONS
-	//if (i != idxLastEntryInBucket) return;
-	int offset = 0;
-	//linear search for free entry
+	// Limit reached: list continues past COLLISION_LIST_SIZE, so bail out rather than append mid-chain. NOTE(review): maxIter is not reset before the free-slot probe loop below, and j = i+1 is read before the wrap-around %= — confirm both are intended.
+	if (curr.offset != 0) return;
 
-	maxIter = 0;
-	#pragma  unroll 1 
-	while (maxIter < g_MaxLoopIterCount) {
-		offset++;
-		i = (idxLastEntryInBucket + offset) % (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//go to next hash element
-		if ((offset % HASH_BUCKET_SIZE) == 0) continue;			//cannot insert into a last bucket element (would conflict with other linked lists)
-		curr = d_hash[i];
-		//if (curr.pos.x == pos.x && curr.pos.y == pos.y && curr.pos.z == pos.z && curr.ptr != FREE_ENTRY) {
-		//	return;
-		//} 
-		if (curr.ptr == FREE_ENTRY) {	//this is the first free entry
-			//int prevValue = 0;
-			//InterlockedExchange(g_HashBucketMutex[h], LOCK_ENTRY, prevValue);	//lock the original hash bucket
-			int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
+	int j = i+1;
+	while (maxIter < COLLISION_LIST_SIZE) {
+		//offset = curr.offset;
+		curr = d_hash[j];	//TODO MATTHIAS do by reference
+		if (curr.ptr == FREE_ENTRY) {
+			int prevValue = atomicExch(&d_hashBucketMutex[i], LOCK_ENTRY);
 			if (prevValue != LOCK_ENTRY) {
-				HashEntry lastEntryInBucket = d_hash[idxLastEntryInBucket];
-				h = i / HASH_BUCKET_SIZE;
 				//InterlockedExchange(g_HashBucketMutex[h], LOCK_ENTRY, prevValue);	//lock the hash bucket where we have found a free entry
-				prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
+				prevValue = atomicExch(&d_hashBucketMutex[j], LOCK_ENTRY);
 				if (prevValue != LOCK_ENTRY) {	//only proceed if the bucket has been locked
-					HashEntry& entry = d_hash[i];
+					HashEntry& entry = d_hash[j];
 					entry.pos = pos;
-					entry.offset = lastEntryInBucket.offset;
+					entry.offset = 0;
 					entry.flags = 0;  // Flag block as valid in this frame (Nick)		
 					entry.ptr = consumeHeap() * SDF_BLOCK_SIZE*SDF_BLOCK_SIZE*SDF_BLOCK_SIZE;	//memory alloc
-
-					lastEntryInBucket.offset = offset;
-					d_hash[idxLastEntryInBucket] = lastEntryInBucket;
+					d_hash[i].offset = j-i;
 					//setHashEntry(g_Hash, idxLastEntryInBucket, lastEntryInBucket);
 				}
 			} 
 			return;	//bucket was already locked
 		}
 
-		maxIter++;
-	} 
-#endif
+		++j;
+		j %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
+		++maxIter;
+	}
 }
 
 
 //!inserts a hash entry without allocating any memory: used by streaming: TODO MATTHIAS check the atomics in this function
-__device__
+/*__device__
 bool HashData::insertHashEntry(HashEntry entry)
 {
 	uint h = computeHashPos(entry.pos);
@@ -286,7 +186,7 @@ bool HashData::insertHashEntry(HashEntry entry)
 #endif
 
 	return false;
-}
+}*/
 
 
 
@@ -294,88 +194,47 @@ bool HashData::insertHashEntry(HashEntry entry)
 __device__
 bool HashData::deleteHashEntryElement(const int3& sdfBlock) {
 	uint h = computeHashPos(sdfBlock);	//hash bucket
-	uint hp = h * HASH_BUCKET_SIZE;		//hash position
 
-	for (uint j = 0; j < HASH_BUCKET_SIZE; j++) {
-		uint i = j + hp;
-		const HashEntry& curr = d_hash[i];
-		if (curr.pos.x == sdfBlock.x && curr.pos.y == sdfBlock.y && curr.pos.z == sdfBlock.z && curr.ptr != FREE_ENTRY) {
-#ifndef HANDLE_COLLISIONS
-			const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
-			appendHeap(curr.ptr / linBlockSize);
-			//heapAppend.Append(curr.ptr / linBlockSize);
-			deleteHashEntry(i);
-			return true;
-#endif
-#ifdef HANDLE_COLLISIONS
-			if (curr.offset != 0) {	//if there was a pointer set it to the next list element
-				//int prevValue = 0;
-				//InterlockedExchange(bucketMutex[h], LOCK_ENTRY, prevValue);	//lock the hash bucket
-				int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
-				if (prevValue == LOCK_ENTRY)	return false;
-				if (prevValue != LOCK_ENTRY) {
-					const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
-					appendHeap(curr.ptr / linBlockSize);
-					//heapAppend.Append(curr.ptr / linBlockSize);
-					int nextIdx = (i + curr.offset) % (HASH_BUCKET_SIZE*params().m_hashNumBuckets);
-					//setHashEntry(hash, i, getHashEntry(hash, nextIdx));
-					d_hash[i] = d_hash[nextIdx];
-					deleteHashEntry(nextIdx);
-					return true;
-				}
-			} else {
-				const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
-				appendHeap(curr.ptr / linBlockSize);
-				//heapAppend.Append(curr.ptr / linBlockSize);
-				deleteHashEntry(i);
-				return true;
-			}
-#endif	//HANDLE_COLLSISION
-		}
-	}	
-#ifdef HANDLE_COLLISIONS
-	const uint idxLastEntryInBucket = (h+1)*HASH_BUCKET_SIZE - 1;
-	int i = idxLastEntryInBucket;
+	int i = h;
+	int prev = -1;
 	HashEntry curr;
-	curr = d_hash[i];
-	int prevIdx = i;
-	i = idxLastEntryInBucket + curr.offset;							//go to next element in the list
-	i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
-
 	unsigned int maxIter = 0;
-	uint g_MaxLoopIterCount = params().m_hashMaxCollisionLinkedListSize;
 
-	#pragma  unroll 1 
-	while (maxIter < g_MaxLoopIterCount) {
+	#pragma  unroll 2 
+	while (maxIter < COLLISION_LIST_SIZE) {
 		curr = d_hash[i];
+	
 		//found that dude that we need/want to delete
-		if (curr.pos.x == sdfBlock.x && curr.pos.y == sdfBlock.y && curr.pos.z == sdfBlock.z && curr.ptr != FREE_ENTRY) {
+		if (curr.pos == sdfBlock && curr.ptr != FREE_ENTRY) {
 			//int prevValue = 0;
 			//InterlockedExchange(bucketMutex[h], LOCK_ENTRY, prevValue);	//lock the hash bucket
-			int prevValue = atomicExch(&d_hashBucketMutex[h], LOCK_ENTRY);
+			int prevValue = atomicExch(&d_hashBucketMutex[i], LOCK_ENTRY);
 			if (prevValue == LOCK_ENTRY)	return false;
 			if (prevValue != LOCK_ENTRY) {
-				const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
-				appendHeap(curr.ptr / linBlockSize);
-				//heapAppend.Append(curr.ptr / linBlockSize);
-				deleteHashEntry(i);
-				HashEntry prev = d_hash[prevIdx];				
-				prev.offset = curr.offset;
-				//setHashEntry(hash, prevIdx, prev);
-				d_hash[prevIdx] = prev;
-				return true;
+				prevValue = (prev >= 0) ? atomicExch(&d_hashBucketMutex[prev], LOCK_ENTRY) : 0;
+				if (prevValue == LOCK_ENTRY)	return false;
+				if (prevValue != LOCK_ENTRY) {
+					const uint linBlockSize = SDF_BLOCK_SIZE * SDF_BLOCK_SIZE * SDF_BLOCK_SIZE;
+					appendHeap(curr.ptr / linBlockSize);
+					deleteHashEntry(i);
+
+					if (prev >= 0) {
+						d_hash[prev].offset = curr.offset;
+					}
+					return true;
+				}
 			}
 		}
 
 		if (curr.offset == 0) {	//we have found the end of the list
 			return false;	//should actually never happen because we need to find that guy before
 		}
-		prevIdx = i;
-		i = idxLastEntryInBucket + curr.offset;		//go to next element in the list
+		prev = i;
+		i += curr.offset;		//go to next element in the list
 		i %= (HASH_BUCKET_SIZE * params().m_hashNumBuckets);	//check for overflow
 
-		maxIter++;
+		++maxIter;
 	}
-#endif	// HANDLE_COLLSISION
+
 	return false;
 }
\ No newline at end of file