#ifndef NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED
#define NANOVDB_GRIDBUILDER_H_HAS_BEEN_INCLUDED
    AbsDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
        return Abs(exact - approx) <= mTolerance;

    RelDiff(float tolerance = -1.0f) : mTolerance(tolerance) {}
        return Abs(exact - approx) / Max(Abs(exact), Abs(approx)) <= mTolerance;
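/// A usage sketch (not from the original header; the values below are only
/// illustrative assumptions based on the AbsDiff/RelDiff interfaces above): an
/// oracle is a functor handed the exact and quantized value that decides whether
/// the approximation error is acceptable.
/// @code
///     AbsDiff absOracle(0.01f);                    // absolute tolerance of 0.01
///     RelDiff relOracle(0.05f);                    // relative tolerance of 5%
///     const float exact = 1.0f, approx = 0.995f;
///     const bool okAbs = absOracle(exact, approx); // true: |1.0 - 0.995| = 0.005 <= 0.01
///     const bool okRel = relOracle(exact, approx); // true: 0.005 / 1.0 = 0.005 <= 0.05
/// @endcode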
template<typename ValueT, typename BuildT = ValueT, typename StatsT = Stats<ValueT>>
    template<typename ChildT>
    template<typename ChildT>

    struct Codec { float min, max; uint16_t log2, size; };

    using SrcNode1 = BuildNode<SrcNode0>;
    using SrcNode2 = BuildNode<SrcNode1>;
    using SrcRootT = BuildRoot<SrcNode2>;

    uint64_t mBufferOffsets[9];
    uint64_t mBlindDataSize;
    std::vector<SrcNode0*> mArray0;
    std::vector<SrcNode1*> mArray1;
    std::vector<SrcNode2*> mArray2;
    std::unique_ptr<Codec[]> mCodec;

    template<typename OracleT, typename BufferT>
    template<typename T, typename OracleT>
    compression(uint64_t&, OracleT) {}
    template<typename T, typename OracleT>
    compression(uint64_t& offset, OracleT oracle);
    processLeafs(std::vector<T*>&);
    processLeafs(std::vector<T*>&);
    processLeafs(std::vector<T*>&);

    template<typename SrcNodeT>
    void processNodes(std::vector<SrcNodeT*>&);

    DstGridT* processGrid(const Map&, const std::string&);

    template<typename T, typename FlagT>
    setFlag(const T&, const T&, FlagT& flag) const { flag &= ~FlagT(1); }

    template<typename T, typename FlagT>
    setFlag(const T& min, const T& max, FlagT& flag) const;

    uint64_t blindDataSize = 0);
    template<typename OracleT = AbsDiff, typename BufferT = HostBuffer>
    GridHandle<BufferT> getHandle(double             voxelSize = 1.0,
                                  const Vec3d&       gridOrigin = Vec3d(0),
                                  const std::string& name = "",
                                  const OracleT&     oracle = OracleT(),
                                  const BufferT&     buffer = BufferT());

    template<typename OracleT = AbsDiff, typename BufferT = HostBuffer>
    GridHandle<BufferT> getHandle(const Map&         map,
                                  const std::string& name = "",
                                  const OracleT&     oracle = OracleT(),
                                  const BufferT&     buffer = BufferT());
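/// A usage sketch (not part of the original header; it assumes only the accessor
/// and getHandle interfaces declared above, plus GridHandle::grid from GridHandle.h):
/// @code
///     GridBuilder<float> builder(0.0f /*background*/);
///     auto acc = builder.getAccessor();
///     for (int i = 0; i < 8; ++i)
///         acc.setValue(Coord(i, 0, 0), 1.0f);      // activate a short run of voxels
///     auto handle = builder.getHandle(0.1, Vec3d(0), "example");
///     auto* grid = handle.template grid<float>();  // NanoGrid<float> ready for use
/// @endcode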
    template<typename Func>

template<typename ValueT, typename BuildT, typename StatsT>
    , mBlindDataSize(blindDataSize)

template<typename ValueT, typename BuildT, typename StatsT>
template<typename Func>
    static_assert(is_same<ValueT, typename std::result_of<Func(const Coord&)>::type>::value,
                  "GridBuilder: mismatched ValueType");
    const CoordBBox leafBBox(voxelBBox[0] >> LeafT::TOTAL, voxelBBox[1] >> LeafT::TOTAL);
        LeafT* leaf = nullptr;
        for (auto it = b.begin(); it; ++it) {
                           max.minComponent(voxelBBox.max()));
            if (leaf == nullptr) {
                leaf = new LeafT(bbox[0], mRoot.mBackground, false);
                leaf->mOrigin = bbox[0] & ~LeafT::MASK;
            leaf->mDstOffset = 0;
            for (auto ijk = bbox.begin(); ijk; ++ijk) {
                const auto v = func(*ijk);
                if (v == mRoot.mBackground) {
                leaf->setValue(*ijk, v);
            if (!leaf->mValueMask.isOff()) {
                if (leaf->mValueMask.isOn()) {
                    const auto first = leaf->getFirstValue();
                        if (leaf->mValues[n++] != first)
                            break;
                    if (n == 512) leaf->mDstOffset = 1;
                std::lock_guard<std::mutex> guard(mutex);

    for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
        if (auto* upper = it2->second.child) {
            for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
                auto* lower = upper->mTable[*it1].child;
                for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
                    auto* leaf = lower->mTable[*it0].child;
                    if (leaf->mDstOffset) {
                        lower->mTable[*it0].value = leaf->getFirstValue();
                        lower->mChildMask.setOff(*it0);
                        lower->mValueMask.setOn(*it0);
                if (lower->mChildMask.isOff()) {
                    const auto first = lower->getFirstValue();
                        if (lower->mTable[n++].value != first)
                            break;
                    upper->mTable[*it1].value = first;
                    upper->mChildMask.setOff(*it1);
                    upper->mValueMask.setOn(*it1);
            if (upper->mChildMask.isOff()) {
                const auto first = upper->getFirstValue();
                    if (upper->mTable[n++].value != first)
                        break;
                it2->second.value = first;
                it2->second.state = upper->mValueMask.isOn();
                it2->second.child = nullptr;
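/// A usage sketch (not from the original header) of the functor form of operator()
/// declared above: fill a bounding box with a clamped sphere SDF. The radius, bbox
/// and clamping below are illustrative assumptions.
/// @code
///     GridBuilder<float> builder(3.0f /*background*/, GridClass::LevelSet);
///     const CoordBBox bbox(Coord(-32), Coord(32));
///     builder([](const Coord& ijk) -> float {
///         const float d = sqrtf(float(ijk[0]*ijk[0] + ijk[1]*ijk[1] + ijk[2]*ijk[2])) - 20.0f;
///         return d < -3.0f ? -3.0f : (d > 3.0f ? 3.0f : d); // clamp to the +/- background band
///     }, bbox);
///     auto handle = builder.getHandle(0.1);
/// @endcode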
template<typename ValueT, typename BuildT, typename StatsT>
template<typename OracleT, typename BufferT>
initHandle(const OracleT& oracle, const BufferT& buffer)
    mArray0.reserve(mRoot.template nodeCount<SrcNode0>());
    mArray1.reserve(mRoot.template nodeCount<SrcNode1>());
    mArray2.reserve(mRoot.template nodeCount<SrcNode2>());

    uint64_t offset[3] = {0};
    for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
        if (SrcNode2* upper = it2->second.child) {
            upper->mDstOffset = offset[2];
            mArray2.emplace_back(upper);
            for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
                SrcNode1* lower = upper->mTable[*it1].child;
                lower->mDstOffset = offset[1];
                mArray1.emplace_back(lower);
                for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
                    SrcNode0* leaf = lower->mTable[*it0].child;
                    leaf->mDstOffset = offset[0];
                    mArray0.emplace_back(leaf);

    this->template compression<BuildT, OracleT>(offset[0], oracle);

    mBufferOffsets[0] = 0;
    mBufferOffsets[4] = offset[2];
    mBufferOffsets[5] = offset[1];
    mBufferOffsets[6] = offset[0];
    mBufferOffsets[8] = mBlindDataSize;

    for (int i = 2; i < 9; ++i) {
        mBufferOffsets[i] += mBufferOffsets[i - 1];

    GridHandle<BufferT> handle(BufferT::create(mBufferOffsets[8], &buffer));
    mBufferPtr = handle.data();
template<typename ValueT, typename BuildT, typename StatsT>
template<typename T, typename OracleT>
GridBuilder<ValueT, BuildT, StatsT>::compression(uint64_t& offset, OracleT oracle)
        static const float halfWidth = 3.0f;
        oracle.setTolerance(0.1f * mRoot.mBackground / halfWidth);
        oracle.setTolerance(0.01f);
        oracle.setTolerance(0.0f);

    const size_t size = mArray0.size();
    mCodec.reset(new Codec[size]);

    DitherLUT lut(mDitherOn);
    auto kernel = [&](const Range1D& r) {
        for (auto i = r.begin(); i != r.end(); ++i) {
            const float* data = mArray0[i]->mValues;
            for (int j = 0; j < 512; ++j) {
            const float range = max - min;
            uint16_t logBitWidth = 0;
            while (range > 0.0f && logBitWidth < 4u) {
                const uint32_t mask = (uint32_t(1) << (uint32_t(1) << logBitWidth)) - 1u;
                const float encode = mask / range;
                const float decode = range / mask;
                    const float exact = data[j];
                    const uint32_t code = uint32_t(encode * (exact - min) + lut(j));
                    const float approx = code * decode + min;
                    j += oracle(exact, approx) ? 1 : 513;
            mCodec[i].log2 = logBitWidth;

    uint32_t counters[5 + 1] = {0};
    ++counters[mCodec[0].log2];
    for (size_t i = 1; i < size; ++i) {
        ++counters[mCodec[i].log2];
        mArray0[i]->mDstOffset = mArray0[i - 1]->mDstOffset + mCodec[i - 1].size;
    std::cout << "\n" << oracle << std::endl;
    std::cout << "Dithering: " << (mDitherOn ? "enabled" : "disabled") << std::endl;
    for (uint32_t i = 0; i <= 5; ++i) {
        if (uint32_t n = counters[i]) {
            avg += n * float(1 << i);
            printf("%2i bits: %6u leaf nodes, i.e. %4.1f%%\n", 1 << i, n, 100.0f * n / float(size));
    printf("%4.1f bits per value on average\n", avg / float(size));
    for (size_t i = 1; i < size; ++i) {
        mArray0[i]->mDstOffset = mArray0[i - 1]->mDstOffset + mCodec[i - 1].size;
    offset = mArray0[size - 1]->mDstOffset + mCodec[size - 1].size;
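/// A minimal worked sketch (not from the original header) of the round-trip test the
/// oracle performs above for a single voxel, assuming a 4-bit code (logBitWidth == 2)
/// and ignoring dithering:
/// @code
///     const float    min = 0.0f, max = 3.0f, exact = 1.0f;
///     const uint32_t mask   = (1u << (1u << 2)) - 1u;           // 15
///     const float    encode = mask / (max - min);               // 5.0
///     const float    decode = (max - min) / mask;               // 0.2
///     const uint32_t code   = uint32_t(encode * (exact - min)); // 5
///     const float    approx = code * decode + min;              // 1.0
///     AbsDiff oracle(0.01f);
///     const bool accepted = oracle(exact, approx);              // true, so 4 bits suffice here
/// @endcode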
template<typename ValueT, typename BuildT, typename StatsT>
    mArray0.reserve(mRoot.template nodeCount<SrcNode0>());
    mArray1.reserve(mRoot.template nodeCount<SrcNode1>());
    mArray2.reserve(mRoot.template nodeCount<SrcNode2>());

    for (auto it2 = mRoot.mTable.begin(); it2 != mRoot.mTable.end(); ++it2) {
        if (SrcNode2* upper = it2->second.child) {
            mArray2.emplace_back(upper);
            for (auto it1 = upper->mChildMask.beginOn(); it1; ++it1) {
                SrcNode1* lower = upper->mTable[*it1].child;
                mArray1.emplace_back(lower);
                for (auto it0 = lower->mChildMask.beginOn(); it0; ++it0) {
                    mArray0.emplace_back(lower->mTable[*it0].child);

    const ValueT outside = mRoot.mBackground;
        for (auto i = r.begin(); i != r.end(); ++i)
            mArray0[i]->signedFloodFill(outside);
        for (auto i = r.begin(); i != r.end(); ++i)
            mArray1[i]->signedFloodFill(outside);
        for (auto i = r.begin(); i != r.end(); ++i)
            mArray2[i]->signedFloodFill(outside);
    mRoot.signedFloodFill(outside);
template<typename ValueT, typename BuildT, typename StatsT>
template<typename OracleT, typename BufferT>
    const std::string& name,
    const OracleT&     oracle,
    const BufferT&     buffer)
        throw std::runtime_error("GridBuilder: voxel size is zero or negative");

    const double Tx = p0[0], Ty = p0[1], Tz = p0[2];
    const double mat[4][4] = {
    const double invMat[4][4] = {
        {1 / dx, 0.0, 0.0, 0.0},
        {0.0, 1 / dx, 0.0, 0.0},
        {0.0, 0.0, 1 / dx, 0.0},
        {-Tx, -Ty, -Tz, 1.0},
    map.set(mat, invMat, 1.0);
    return this->getHandle(map, name, oracle, buffer);

template<typename ValueT, typename BuildT, typename StatsT>
template<typename OracleT, typename BufferT>
    const std::string& name,
    const OracleT&     oracle,
    const BufferT&     buffer)
        throw std::runtime_error("Level sets are expected to be floating point types");
        throw std::runtime_error("Fog volumes are expected to be floating point types");

    auto handle = this->template initHandle<OracleT, BufferT>(oracle, buffer);
    this->processLeafs(mArray0);
    this->processNodes(mArray1);
    this->processNodes(mArray2);
    auto* grid = this->processGrid(map, name);
template<typename ValueT, typename BuildT, typename StatsT>
template<typename T, typename FlagT>
    if (mDelta > 0 && (min > mDelta || max < -mDelta)) {

template<typename ValueT, typename BuildT, typename StatsT>
    this->sdfToLevelSet();
    const ValueT d = -mRoot.mBackground, w = 1.0f / d;
    auto op = [&](ValueT& v) -> bool {
        v = v > d ? v * w : ValueT(1);

    auto kernel0 = [&](const Range1D& r) {
        for (auto i = r.begin(); i != r.end(); ++i) {
            for (uint32_t i = 0; i < SrcNode0::SIZE; ++i)

    auto kernel1 = [&](const Range1D& r) {
        for (auto i = r.begin(); i != r.end(); ++i) {
            SrcNode1* node = mArray1[i];
            for (uint32_t i = 0; i < SrcNode1::SIZE; ++i) {
                if (node->mChildMask.isOn(i)) {
                    SrcNode0* leaf = node->mTable[i].child;
                        node->mChildMask.setOff(i);
                    node->mValueMask.set(i, op(node->mTable[i].value));

    auto kernel2 = [&](const Range1D& r) {
        for (auto i = r.begin(); i != r.end(); ++i) {
            SrcNode2* node = mArray2[i];
            for (uint32_t i = 0; i < SrcNode2::SIZE; ++i) {
                if (node->mChildMask.isOn(i)) {
                    SrcNode1* child = node->mTable[i].child;
                    if (child->mChildMask.isOff() && child->mValueMask.isOff()) {
                        node->mTable[i].value = child->getFirstValue();
                        node->mChildMask.setOff(i);
                    node->mValueMask.set(i, op(node->mTable[i].value));

    for (auto it = mRoot.mTable.begin(); it != mRoot.mTable.end(); ++it) {
        SrcNode2* child = it->second.child;
        if (child == nullptr) {
            it->second.state = op(it->second.value);
        } else if (child->mChildMask.isOff() && child->mValueMask.isOff()) {
            it->second.value = child->getFirstValue();
            it->second.state = false;
            it->second.child = nullptr;
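/// A minimal sketch (not from the original header) of the level-set to fog remapping
/// applied by op above, assuming a background of 3:
/// @code
///     const float background = 3.0f;
///     const float d = -background, w = 1.0f / d;  // d = -3, w = -1/3
///     float v = -1.5f;                            // interior signed distance
///     v = v > d ? v * w : 1.0f;                   // becomes 0.5; values at or below -3 saturate at 1
/// @endcode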
template<typename ValueT, typename BuildT, typename StatsT>
    auto kernel = [&](const Range1D& r) {
        auto* ptr = mBufferPtr + mBufferOffsets[5];
        for (auto i = r.begin(); i != r.end(); ++i) {
            auto* srcLeaf = srcLeafs[i];
            auto* dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
            auto* data = dstLeaf->data();
            srcLeaf->mDstNode = dstLeaf;
            data->mBBoxMin = srcLeaf->mOrigin;
            data->mBBoxDif[0] = 0u;
            data->mBBoxDif[1] = 0u;
            data->mBBoxDif[2] = 0u;
            data->mValueMask = srcLeaf->mValueMask;
            const ValueT* src = srcLeaf->mValues;
            for (ValueT *dst = data->mValues, *end = dst + SrcNode0::SIZE; dst != end; dst += 4, src += 4) {
template<typename ValueT, typename BuildT, typename StatsT>
GridBuilder<ValueT, BuildT, StatsT>::
    processLeafs(std::vector<T*>& srcLeafs)
    using ArrayT = typename DstNode0::DataType::ArrayType;
    using FloatT = typename std::conditional<DstNode0::DataType::bitWidth()>=16, double, float>::type;
    static constexpr FloatT UNITS = FloatT((1 << DstNode0::DataType::bitWidth()) - 1);
    DitherLUT lut(mDitherOn);

    auto kernel = [&](const Range1D& r) {
        uint8_t* ptr = mBufferPtr + mBufferOffsets[5];
        for (auto i = r.begin(); i != r.end(); ++i) {
            auto* srcLeaf = srcLeafs[i];
            auto* dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
            srcLeaf->mDstNode = dstLeaf;
            auto* data = dstLeaf->data();
            data->mBBoxMin = srcLeaf->mOrigin;
            data->mBBoxDif[0] = 0u;
            data->mBBoxDif[1] = 0u;
            data->mBBoxDif[2] = 0u;
            data->mValueMask = srcLeaf->mValueMask;
            const float* src = srcLeaf->mValues;
            for (int i = 0; i < 512; ++i) {
                const float v = src[i];
            data->init(min, max, DstNode0::DataType::bitWidth());
            const FloatT encode = UNITS / (max - min);
            auto* code = reinterpret_cast<ArrayT*>(data->mCode);
                for (int j = 0; j < 128; ++j) {
                    auto tmp = ArrayT(encode * (*src++ - min) + lut(offset++));
                    *code++ = ArrayT(encode * (*src++ - min) + lut(offset++)) << 4 | tmp;
                    tmp = ArrayT(encode * (*src++ - min) + lut(offset++));
                    *code++ = ArrayT(encode * (*src++ - min) + lut(offset++)) << 4 | tmp;
                for (int j = 0; j < 128; ++j) {
                    *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
                    *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
                    *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
                    *code++ = ArrayT(encode * (*src++ - min) + lut(offset++));
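/// A minimal sketch (not from the original header) of the 4-bit packing used above:
/// two consecutive quantized codes share one byte, the earlier code in the low
/// nibble and the later one in the high nibble.
/// @code
///     const uint8_t lo = 0x5, hi = 0xA;             // two 4-bit codes
///     const uint8_t packed = uint8_t(hi << 4 | lo); // 0xA5
///     const uint8_t lo2 = packed & 0xF;             // 0x5 recovered
///     const uint8_t hi2 = packed >> 4;              // 0xA recovered
/// @endcode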
template<typename ValueT, typename BuildT, typename StatsT>
GridBuilder<ValueT, BuildT, StatsT>::
    processLeafs(std::vector<T*>& srcLeafs)
    DitherLUT lut(mDitherOn);
    auto kernel = [&](const Range1D& r) {
        uint8_t* ptr = mBufferPtr + mBufferOffsets[5];
        for (auto i = r.begin(); i != r.end(); ++i) {
            auto* srcLeaf = srcLeafs[i];
            auto* dstLeaf = PtrAdd<DstNode0>(ptr, srcLeaf->mDstOffset);
            auto* data = dstLeaf->data();
            data->mBBoxMin = srcLeaf->mOrigin;
            data->mBBoxDif[0] = 0u;
            data->mBBoxDif[1] = 0u;
            data->mBBoxDif[2] = 0u;
            srcLeaf->mDstNode = dstLeaf;
            const uint8_t logBitWidth = uint8_t(mCodec[i].log2);
            data->mFlags = logBitWidth << 5;
            data->mValueMask = srcLeaf->mValueMask;
            const float* src = srcLeaf->mValues;
            const float min = mCodec[i].min, max = mCodec[i].max;
            data->init(min, max, uint8_t(1) << logBitWidth);
            switch (logBitWidth) {
                auto* dst = reinterpret_cast<uint8_t*>(data + 1);
                const float encode = 1.0f / (max - min);
                for (int j = 0; j < 64; ++j) {
                    for (int k = 0; k < 8; ++k) {
                        a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << k;

                auto* dst = reinterpret_cast<uint8_t*>(data + 1);
                const float encode = 3.0f / (max - min);
                for (int j = 0; j < 128; ++j) {
                    auto a = uint8_t(encode * (*src++ - min) + lut(offset++));
                    a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << 2;
                    a |= uint8_t(encode * (*src++ - min) + lut(offset++)) << 4;
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 6 | a;

                auto* dst = reinterpret_cast<uint8_t*>(data + 1);
                const float encode = 15.0f / (max - min);
                for (int j = 0; j < 128; ++j) {
                    auto a = uint8_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 4 | a;
                    a = uint8_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++)) << 4 | a;

                auto* dst = reinterpret_cast<uint8_t*>(data + 1);
                const float encode = 255.0f / (max - min);
                for (int j = 0; j < 128; ++j) {
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint8_t(encode * (*src++ - min) + lut(offset++));

                auto* dst = reinterpret_cast<uint16_t*>(data + 1);
                const double encode = 65535.0 / (max - min);
                for (int j = 0; j < 128; ++j) {
                    *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
                    *dst++ = uint16_t(encode * (*src++ - min) + lut(offset++));
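/// A minimal sketch (not from the original header) of how the bit width encoded above
/// in the top three bits of mFlags can be recovered:
/// @code
///     const uint8_t  flags    = uint8_t(3) << 5;    // written for logBitWidth == 3
///     const uint32_t bitWidth = 1u << (flags >> 5); // 8 bits per quantized value
/// @endcode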
template<typename ValueT, typename BuildT, typename StatsT>
template<typename SrcNodeT>
void GridBuilder<ValueT, BuildT, StatsT>::
    processNodes(std::vector<SrcNodeT*>& srcNodes)
    using DstNodeT = typename SrcNodeT::NanoNodeT;
    static_assert(DstNodeT::LEVEL == 1 || DstNodeT::LEVEL == 2, "Expected internal node");
    auto kernel = [&](const Range1D& r) {
        uint8_t* ptr = mBufferPtr + mBufferOffsets[5 - DstNodeT::LEVEL];
        for (auto i = r.begin(); i != r.end(); ++i) {
            SrcNodeT* srcNode = srcNodes[i];
            DstNodeT* dstNode = PtrAdd<DstNodeT>(ptr, srcNode->mDstOffset);
            auto* data = dstNode->data();
            srcNode->mDstNode = dstNode;
            data->mBBox[0] = srcNode->mOrigin;
            data->mValueMask = srcNode->mValueMask;
            data->mChildMask = srcNode->mChildMask;
            for (uint32_t j = 0; j != SrcNodeT::SIZE; ++j) {
                if (data->mChildMask.isOn(j)) {
                    data->setChild(j, srcNode->mTable[j].child->mDstNode);
                data->setValue(j, srcNode->mTable[j].value);
template<typename ValueT, typename BuildT, typename StatsT>
NanoRoot<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::processRoot()
    auto* dstRoot = reinterpret_cast<DstRootT*>(mBufferPtr + mBufferOffsets[2]);
    auto* data = dstRoot->data();
    data->mTableSize = uint32_t(mRoot.mTable.size());
    data->mMinimum = data->mMaximum = data->mBackground = mRoot.mBackground;

    for (auto iter = mRoot.mTable.begin(); iter != mRoot.mTable.end(); ++iter) {
        auto* dstTile = data->tile(tileID++);
        if (auto* srcChild = iter->second.child) {
            dstTile->setChild(srcChild->mOrigin, srcChild->mDstNode, data);
            dstTile->setValue(iter->first, iter->second.state, iter->second.value);
template<typename ValueT, typename BuildT, typename StatsT>
NanoTree<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::processTree()
    auto* dstTree = reinterpret_cast<DstTreeT*>(mBufferPtr + mBufferOffsets[1]);
    auto* data = dstTree->data();
    data->setRoot( this->processRoot() );

    DstNode2* node2 = mArray2.empty() ? nullptr : reinterpret_cast<DstNode2*>(mBufferPtr + mBufferOffsets[3]);
    data->setFirstNode(node2);

    DstNode1* node1 = mArray1.empty() ? nullptr : reinterpret_cast<DstNode1*>(mBufferPtr + mBufferOffsets[4]);
    data->setFirstNode(node1);

    DstNode0* node0 = mArray0.empty() ? nullptr : reinterpret_cast<DstNode0*>(mBufferPtr + mBufferOffsets[5]);
    data->setFirstNode(node0);

    data->mNodeCount[0] = mArray0.size();
    data->mNodeCount[1] = mArray1.size();
    data->mNodeCount[2] = mArray2.size();

    data->mTileCount[0] = reduce(mArray1, uint32_t(0), [&](Range1D& r, uint32_t sum) {
        for (auto i = r.begin(); i != r.end(); ++i) sum += mArray1[i]->mValueMask.countOn();
        return sum; }, std::plus<uint32_t>());

    data->mTileCount[1] = reduce(mArray2, uint32_t(0), [&](Range1D& r, uint32_t sum) {
        for (auto i = r.begin(); i != r.end(); ++i) sum += mArray2[i]->mValueMask.countOn();
        return sum; }, std::plus<uint32_t>());

    for (auto& tile : mRoot.mTable) {
        if (tile.second.child == nullptr && tile.second.state) ++sum;
    data->mTileCount[2] = sum;

    data->mVoxelCount = reduce(mArray0, uint64_t(0), [&](Range1D& r, uint64_t sum) {
        for (auto i = r.begin(); i != r.end(); ++i) sum += mArray0[i]->mValueMask.countOn();
        return sum; }, std::plus<uint64_t>());

    data->mVoxelCount += data->mTileCount[0] * DstNode0::NUM_VALUES;
    data->mVoxelCount += data->mTileCount[1] * DstNode1::NUM_VALUES;
    data->mVoxelCount += data->mTileCount[2] * DstNode2::NUM_VALUES;
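/// A worked sketch (not from the original header) of the tile-to-voxel scaling above,
/// assuming the default 8^3 leaf and 16^3 lower-node branching:
/// @code
///     uint64_t voxels = 1000;          // active voxels counted from the leaf masks
///     voxels += uint64_t(2) * 512;     // two active lower-node tiles, 8^3 voxels each
///     voxels += uint64_t(1) * 2097152; // one active upper-node tile, (8*16)^3 voxels
/// @endcode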
template<typename ValueT, typename BuildT, typename StatsT>
NanoGrid<BuildT>* GridBuilder<ValueT, BuildT, StatsT>::
    processGrid(const Map& map, const std::string& name)
    auto* dstGrid = reinterpret_cast<DstGridT*>(mBufferPtr + mBufferOffsets[0]);
    auto* data = dstGrid->data();
    data->mChecksum = 0u;
    data->mVersion = Version();
    data->mGridIndex = 0;
    data->mGridCount = 1;
    data->mGridSize = mBufferOffsets[8];
    data->mWorldBBox = BBox<Vec3R>();
    data->mBlindMetadataOffset = 0;
    data->mBlindMetadataCount = 0;
    data->mGridClass = mGridClass;
    data->mGridType = mapToGridType<BuildT>();

    if (!isValid(data->mGridType, data->mGridClass)) {
        std::stringstream ss;
        ss << "Invalid combination of GridType(" << int(data->mGridType)
           << ") and GridClass(" << int(data->mGridClass) << "). See NanoVDB.h for details!";
        throw std::runtime_error(ss.str());

        std::stringstream ss;
        throw std::runtime_error(ss.str());

    data->mVoxelSize = map.applyMap(Vec3d(1)) - map.applyMap(Vec3d(0));

    if (mBlindDataSize > 0) {
        auto* metaData = reinterpret_cast<GridBlindMetaData*>(mBufferPtr + mBufferOffsets[6]);
        data->mBlindMetadataOffset = PtrDiff(metaData, dstGrid);
        data->mBlindMetadataCount = 1u;
        auto* blindData = reinterpret_cast<char*>(mBufferPtr + mBufferOffsets[7]);
        metaData->setBlindData(blindData);
template<typename ValueT, typename BuildT, typename StatsT>
template<typename ChildT>
struct GridBuilder<ValueT, BuildT, StatsT>::BuildRoot
    using ValueType = typename ChildT::ValueType;
    using ChildType = ChildT;
    static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL;
    using MapT = std::map<Coord, Tile>;

    BuildRoot(const ValueT& background)
        : mBackground(background)
    BuildRoot(const BuildRoot&) = delete;
    BuildRoot(BuildRoot&&) = default;
    BuildRoot& operator=(const BuildRoot&) = delete;
    BuildRoot& operator=(BuildRoot&&) = default;
    ~BuildRoot() { this->clear(); }

    bool empty() const { return mTable.empty(); }
        for (auto iter = mTable.begin(); iter != mTable.end(); ++iter)
            delete iter->second.child;

    static Coord CoordToKey(const Coord& ijk) { return ijk & ~ChildT::MASK; }

    template<typename AccT>
    bool isActiveAndCache(const Coord& ijk, AccT& acc) const
        auto iter = mTable.find(CoordToKey(ijk));
        if (iter == mTable.end())
        if (iter->second.child) {
            acc.insert(ijk, iter->second.child);
            return iter->second.child->isActiveAndCache(ijk, acc);
        return iter->second.state;

    const ValueT& getValue(const Coord& ijk) const
        auto iter = mTable.find(CoordToKey(ijk));
        if (iter == mTable.end()) {
        } else if (iter->second.child) {
            return iter->second.child->getValue(ijk);
        return iter->second.value;

    template<typename AccT>
    const ValueT& getValueAndCache(const Coord& ijk, AccT& acc) const
        auto iter = mTable.find(CoordToKey(ijk));
        if (iter == mTable.end())
        if (iter->second.child) {
            acc.insert(ijk, iter->second.child);
            return iter->second.child->getValueAndCache(ijk, acc);
        return iter->second.value;

    template<typename AccT>
    void setValueAndCache(const Coord& ijk, const ValueT& value, AccT& acc)
        ChildT* child = nullptr;
        const Coord key = CoordToKey(ijk);
        auto iter = mTable.find(key);
        if (iter == mTable.end()) {
            child = new ChildT(ijk, mBackground, false);
            mTable[key] = Tile(child);
        } else if (iter->second.child != nullptr) {
            child = iter->second.child;
            child = new ChildT(ijk, iter->second.value, iter->second.state);
            iter->second.child = child;
        acc.insert(ijk, child);
    template<typename NodeT>
    uint32_t nodeCount() const
        static_assert(NodeT::LEVEL < LEVEL, "Root::getNodes: LEVEL error");
        for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
            if (iter->second.child == nullptr)
                sum += iter->second.child->template nodeCount<NodeT>();

    template<typename NodeT>
    void getNodes(std::vector<NodeT*>& array)
        static_assert(NodeT::LEVEL < LEVEL, "Root::getNodes: LEVEL error");
        for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
            if (iter->second.child == nullptr)
                array.push_back(reinterpret_cast<NodeT*>(iter->second.child));
                iter->second.child->getNodes(array);

    void addChild(ChildT*& child)
        const Coord key = CoordToKey(child->mOrigin);
        auto iter = mTable.find(key);
        if (iter != mTable.end() && iter->second.child != nullptr) {
            delete iter->second.child;
            iter->second.child = child;
            mTable[key] = Tile(child);

    template<typename NodeT>
    void addNode(NodeT*& node)
            this->addChild(reinterpret_cast<ChildT*&>(node));
            ChildT* child = nullptr;
            const Coord key = CoordToKey(node->mOrigin);
            auto iter = mTable.find(key);
            if (iter == mTable.end()) {
                child = new ChildT(node->mOrigin, mBackground, false);
                mTable[key] = Tile(child);
            } else if (iter->second.child != nullptr) {
                child = iter->second.child;
                child = new ChildT(node->mOrigin, iter->second.value, iter->second.state);
                iter->second.child = child;
            child->addNode(node);
    template<typename T>
    template<typename T>

template<typename ValueT, typename BuildT, typename StatsT>
template<typename ChildT>
    std::map<Coord, ChildT*> nodeKeys;
    for (auto iter = mTable.begin(); iter != mTable.end(); ++iter) {
        if (iter->second.child == nullptr)
        nodeKeys.insert(std::pair<Coord, ChildT*>(iter->first, iter->second.child));

    auto b = nodeKeys.begin(), e = nodeKeys.end();
    for (auto a = b++; b != e; ++a, ++b) {
        Coord d = b->first - a->first;
        if (d[0] != 0 || d[1] != 0 || d[2] == int(ChildT::DIM))
        const ValueT fill[] = {a->second->getLastValue(), b->second->getFirstValue()};
        if (!(fill[0] < 0) || !(fill[1] < 0))
        Coord c = a->first + Coord(0u, 0u, ChildT::DIM);
        for (; c[2] != b->first[2]; c[2] += ChildT::DIM) {
            const Coord key = SrcRootT::CoordToKey(c);
            mTable[key] = typename SrcRootT::Tile(-outside, false);
template<typename ValueT, typename BuildT, typename StatsT>
template<typename ChildT>
struct GridBuilder<ValueT, BuildT, StatsT>::BuildNode
    using ValueType = ValueT;
    using BuildType = BuildT;
    using ChildType = ChildT;
    static constexpr uint32_t LOG2DIM = ChildT::LOG2DIM + 1;
    static constexpr uint32_t TOTAL = LOG2DIM + ChildT::TOTAL;
    static constexpr uint32_t DIM = 1u << TOTAL;
    static constexpr uint32_t SIZE = 1u << (3 * LOG2DIM);
    static constexpr int32_t  MASK = DIM - 1;
    static constexpr uint32_t LEVEL = 1 + ChildT::LEVEL;
    static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL);
    using MaskT = Mask<LOG2DIM>;
    using NanoNodeT = typename NanoNode<BuildT, LEVEL>::Type;
    BuildNode(const Coord& origin, const ValueT& value, bool state)
        : mOrigin(origin & ~MASK)
        for (uint32_t i = 0; i < SIZE; ++i) {
            mTable[i].value = value;
    BuildNode(const BuildNode&) = delete;
    BuildNode(BuildNode&&) = delete;
    BuildNode& operator=(const BuildNode&) = delete;
    BuildNode& operator=(BuildNode&&) = delete;
        for (auto iter = mChildMask.beginOn(); iter; ++iter) {
            delete mTable[*iter].child;

    static uint32_t CoordToOffset(const Coord& ijk)
        return (((ijk[0] & MASK) >> ChildT::TOTAL) << (2 * LOG2DIM)) +
               (((ijk[1] & MASK) >> ChildT::TOTAL) << (LOG2DIM)) +
                ((ijk[2] & MASK) >> ChildT::TOTAL);

    static Coord OffsetToLocalCoord(uint32_t n)
        const uint32_t m = n & ((1 << 2 * LOG2DIM) - 1);
        return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & ((1 << LOG2DIM) - 1));

    void localToGlobalCoord(Coord& ijk) const
        ijk <<= ChildT::TOTAL;

    Coord offsetToGlobalCoord(uint32_t n) const
        Coord ijk = BuildNode::OffsetToLocalCoord(n);
        this->localToGlobalCoord(ijk);
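    /// A worked sketch (not from the original header) of the offset arithmetic above for a
    /// node with LOG2DIM == 4 and 8^3 children (ChildT::TOTAL == 3, MASK == 127):
    /// @code
    ///     const Coord ijk(130, 10, 66);
    ///     const uint32_t x = (130 & 127) >> 3;        // 0
    ///     const uint32_t y = ( 10 & 127) >> 3;        // 1
    ///     const uint32_t z = ( 66 & 127) >> 3;        // 8
    ///     const uint32_t n = (x << 8) + (y << 4) + z; // 24
    ///     // OffsetToLocalCoord(24) recovers (0, 1, 8), and shifting by ChildT::TOTAL
    ///     // gives the child origin (0, 8, 64) relative to this node.
    /// @endcode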
    template<typename AccT>
    bool isActiveAndCache(const Coord& ijk, AccT& acc) const
        const uint32_t n = CoordToOffset(ijk);
        if (mChildMask.isOn(n)) {
            acc.insert(ijk, const_cast<ChildT*>(mTable[n].child));
            return mTable[n].child->isActiveAndCache(ijk, acc);
        return mValueMask.isOn(n);

    ValueT getFirstValue() const { return mChildMask.isOn(0) ? mTable[0].child->getFirstValue() : mTable[0].value; }
    ValueT getLastValue() const { return mChildMask.isOn(SIZE - 1) ? mTable[SIZE - 1].child->getLastValue() : mTable[SIZE - 1].value; }

    const ValueT& getValue(const Coord& ijk) const
        const uint32_t n = CoordToOffset(ijk);
        if (mChildMask.isOn(n)) {
            return mTable[n].child->getValue(ijk);
        return mTable[n].value;

    template<typename AccT>
    const ValueT& getValueAndCache(const Coord& ijk, AccT& acc) const
        const uint32_t n = CoordToOffset(ijk);
        if (mChildMask.isOn(n)) {
            acc.insert(ijk, const_cast<ChildT*>(mTable[n].child));
            return mTable[n].child->getValueAndCache(ijk, acc);
        return mTable[n].value;
    void setValue(const Coord& ijk, const ValueT& value)
        const uint32_t n = CoordToOffset(ijk);
        ChildT* child = nullptr;
        if (mChildMask.isOn(n)) {
            child = mTable[n].child;
            child = new ChildT(ijk, mTable[n].value, mValueMask.isOn(n));
            mTable[n].child = child;
            mChildMask.setOn(n);

    template<typename AccT>
    void setValueAndCache(const Coord& ijk, const ValueT& value, AccT& acc)
        const uint32_t n = CoordToOffset(ijk);
        ChildT* child = nullptr;
        if (mChildMask.isOn(n)) {
            child = mTable[n].child;
            child = new ChildT(ijk, mTable[n].value, mValueMask.isOn(n));
            mTable[n].child = child;
            mChildMask.setOn(n);
        acc.insert(ijk, child);
    template<typename NodeT>
    uint32_t nodeCount() const
            sum += mChildMask.countOn();
            for (auto iter = mChildMask.beginOn(); iter; ++iter) {
                sum += mTable[*iter].child->template nodeCount<NodeT>();

    template<typename NodeT>
    void getNodes(std::vector<NodeT*>& array)
            for (auto iter = mChildMask.beginOn(); iter; ++iter) {
                array.push_back(reinterpret_cast<NodeT*>(mTable[*iter].child));
                mTable[*iter].child->getNodes(array);
    void addChild(ChildT*& child)
        const uint32_t n = CoordToOffset(child->mOrigin);
        if (mChildMask.isOn(n)) {
            delete mTable[n].child;
            mChildMask.setOn(n);
        mTable[n].child = child;

    template<typename NodeT>
    void addNode(NodeT*& node)
            this->addChild(reinterpret_cast<ChildT*&>(node));
            const uint32_t n = CoordToOffset(node->mOrigin);
            ChildT* child = nullptr;
            if (mChildMask.isOn(n)) {
                child = mTable[n].child;
                child = new ChildT(node->mOrigin, mTable[n].value, mValueMask.isOn(n));
                mTable[n].child = child;
                mChildMask.setOn(n);
            child->addNode(node);

    template<typename T>
    template<typename T>
template<typename ValueT, typename BuildT, typename StatsT>
template<typename ChildT>
    const uint32_t first = *mChildMask.beginOn();
    if (first < NUM_VALUES) {
        bool xInside = mTable[first].child->getFirstValue() < 0;
        bool yInside = xInside, zInside = xInside;
        for (uint32_t x = 0; x != (1 << LOG2DIM); ++x) {
            const uint32_t x00 = x << (2 * LOG2DIM);
            if (mChildMask.isOn(x00)) {
                xInside = mTable[x00].child->getLastValue() < 0;
            for (uint32_t y = 0; y != (1u << LOG2DIM); ++y) {
                const uint32_t xy0 = x00 + (y << LOG2DIM);
                if (mChildMask.isOn(xy0))
                    yInside = mTable[xy0].child->getLastValue() < 0;
                for (uint32_t z = 0; z != (1 << LOG2DIM); ++z) {
                    const uint32_t xyz = xy0 + z;
                    if (mChildMask.isOn(xyz)) {
                        zInside = mTable[xyz].child->getLastValue() < 0;
                    mTable[xyz].value = zInside ? -outside : outside;
template<typename ValueT, typename BuildT, typename StatsT>
    static constexpr uint32_t LOG2DIM = 3;
    static constexpr uint32_t TOTAL = LOG2DIM;
    static constexpr uint32_t DIM = 1u << TOTAL;
    static constexpr uint32_t SIZE = 1u << 3 * LOG2DIM;
    static constexpr int32_t  MASK = DIM - 1;
    static constexpr uint32_t LEVEL = 0;
    static constexpr uint64_t NUM_VALUES = uint64_t(1) << (3 * TOTAL);

    ValueT mValues[SIZE];

        : mOrigin(ijk & ~MASK)
        ValueT* target = mValues;

        return ((ijk[0] & MASK) << (2 * LOG2DIM)) + ((ijk[1] & MASK) << LOG2DIM) + (ijk[2] & MASK);

        const int32_t m = n & ((1 << 2 * LOG2DIM) - 1);
        return Coord(n >> 2 * LOG2DIM, m >> LOG2DIM, m & MASK);

        this->localToGlobalCoord(ijk);
    template<typename AccT>
        return mValueMask.isOn(CoordToOffset(ijk));

        return mValues[CoordToOffset(ijk)];

    template<typename AccT>
        return mValues[CoordToOffset(ijk)];

    template<typename AccT>
        const uint32_t n = CoordToOffset(ijk);
        mValueMask.setOn(n);

        const uint32_t n = CoordToOffset(ijk);
        mValueMask.setOn(n);

    template<typename NodeT>
    template<typename NodeT>
    template<typename NodeT>

    template<typename T>
    template<typename T>
template<typename ValueT, typename BuildT, typename StatsT>
    const uint32_t first = *mValueMask.beginOn();
    bool xInside = mValues[first] < 0, yInside = xInside, zInside = xInside;
    for (uint32_t x = 0; x != DIM; ++x) {
        const uint32_t x00 = x << (2 * LOG2DIM);
        for (uint32_t y = 0; y != DIM; ++y) {
            const uint32_t xy0 = x00 + (y << LOG2DIM);
            for (uint32_t z = 0; z != (1 << LOG2DIM); ++z) {
                const uint32_t xyz = xy0 + z;
                mValues[xyz] = zInside ? -outside : outside;
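/// A minimal sketch (not from the original header) of the 1D rule the flood fill above
/// applies along each z-scanline: every inactive voxel inherits the sign of the closest
/// preceding active value, scaled to +/- the background.
/// @code
///     const float outside = 3.0f;
///     float values[5] = {0.0f, -0.4f, 0.0f, 0.6f, 0.0f}; // 0 marks inactive entries
///     bool  active[5] = {false, true, false, true, false};
///     bool  inside = false;                               // sign state before the first voxel
///     for (int z = 0; z < 5; ++z) {
///         if (active[z]) inside = values[z] < 0;          // update the sign at active voxels
///         else           values[z] = inside ? -outside : outside;
///     }
///     // values becomes {3, -0.4, -3, 0.6, 3}
/// @endcode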
template<typename ValueT, typename BuildT, typename StatsT>
    , mNode{nullptr, nullptr, nullptr, &root}

    template<typename NodeT>
        return (ijk[0] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][0] &&
               (ijk[1] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][1] &&
               (ijk[2] & ~NodeT::MASK) == mKeys[NodeT::LEVEL][2];

        if (this->isCached<SrcNode0>(ijk)) {
            return ((SrcNode0*)mNode[0])->getValueAndCache(ijk, *this);
        } else if (this->isCached<SrcNode1>(ijk)) {
            return ((SrcNode1*)mNode[1])->getValueAndCache(ijk, *this);
        } else if (this->isCached<SrcNode2>(ijk)) {
            return ((SrcNode2*)mNode[2])->getValueAndCache(ijk, *this);
        return ((SrcRootT*)mNode[3])->getValueAndCache(ijk, *this);

        if (this->isCached<SrcNode0>(ijk)) {
        } else if (this->isCached<SrcNode1>(ijk)) {
            ((SrcNode1*)mNode[1])->setValueAndCache(ijk, value, *this);
        } else if (this->isCached<SrcNode2>(ijk)) {
            ((SrcNode2*)mNode[2])->setValueAndCache(ijk, value, *this);
        ((SrcRootT*)mNode[3])->setValueAndCache(ijk, value, *this);

        if (this->isCached<SrcNode0>(ijk)) {
            return ((SrcNode0*)mNode[0])->isActiveAndCache(ijk, *this);
        } else if (this->isCached<SrcNode1>(ijk)) {
            return ((SrcNode1*)mNode[1])->isActiveAndCache(ijk, *this);
        } else if (this->isCached<SrcNode2>(ijk)) {
            return ((SrcNode2*)mNode[2])->isActiveAndCache(ijk, *this);
        return ((SrcRootT*)mNode[3])->isActiveAndCache(ijk, *this);

    template<typename NodeT>
        mKeys[NodeT::LEVEL] = ijk & ~NodeT::MASK;
        mNode[NodeT::LEVEL] = node;
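    /// A worked sketch (not from the original header) of the key comparison performed by
    /// isCached above for an 8^3 leaf node (MASK == 7): two coordinates hit the same cached
    /// node exactly when they agree on every bit above the node's mask.
    /// @code
    ///     const int32_t MASK = 7;                  // leaf-level mask
    ///     const Coord a(9, 2, 70), b(15, 7, 65);
    ///     const bool sameLeaf = (a[0] & ~MASK) == (b[0] & ~MASK) &&
    ///                           (a[1] & ~MASK) == (b[1] & ~MASK) &&
    ///                           (a[2] & ~MASK) == (b[2] & ~MASK); // true: both map to origin (8, 0, 64)
    /// @endcode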