Tensor NonZeroCPU(const Tensor& src) {
    // Collect the flat (row-major) indices of all non-zero elements.
    TensorIterator src_iter(src);
    const int64_t num_elements = src.NumElements();
    std::vector<int64_t> indices(static_cast<size_t>(num_elements));
    std::iota(std::begin(indices), std::end(indices), 0);
    std::vector<int64_t> non_zero_indices(num_elements);
    DISPATCH_DTYPE_TO_TEMPLATE_WITH_BOOL(src.GetDtype(), [&]() {
        // copy_if is stable, so the flat indices stay in ascending order.
        auto it = std::copy_if(
                indices.begin(), indices.end(), non_zero_indices.begin(),
                [&src_iter](int64_t index) {
                    const void* src_ptr = src_iter.GetPtr(index);
                    CLOUDVIEWER_ASSERT(src_ptr != nullptr && "Internal error.");
                    return static_cast<float>(
                                   *static_cast<const scalar_t*>(src_ptr)) != 0;
                });
        non_zero_indices.resize(std::distance(non_zero_indices.begin(), it));
    });

    // Transform the flat indices into per-dimension indices. The result has
    // shape {num_dims, num_non_zeros}: row d holds the dim-d coordinate of
    // every non-zero element.
    const SizeVector shape = src.GetShape();
    const int64_t num_dims = src.NumDims();
    const size_t num_non_zeros = non_zero_indices.size();

    SizeVector result_shape{num_dims, static_cast<int64_t>(num_non_zeros)};
    // Indices are Int64, allocated on the same device as src.
    Tensor result(result_shape, Dtype::Int64, src.GetDevice());
    TensorIterator result_iter(result);

#pragma omp parallel for schedule(static) \
        num_threads(utility::EstimateMaxThreads())
    for (int64_t i = 0; i < static_cast<int64_t>(num_non_zeros); i++) {
        int64_t non_zero_index = non_zero_indices[i];
        // Peel off one dimension at a time, innermost (fastest-varying)
        // dimension first, via repeated modulo/division by the dim size.
        for (int64_t dim = num_dims - 1; dim >= 0; dim--) {
            void* result_ptr = result_iter.GetPtr(dim * num_non_zeros + i);
            CLOUDVIEWER_ASSERT(result_ptr != nullptr && "Internal error.");
            *static_cast<int64_t*>(result_ptr) = non_zero_index % shape[dim];
            non_zero_index = non_zero_index / shape[dim];
        }
    }
    return result;
}
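// ---------------------------------------------------------------------------
// Illustration (not from the original source): a minimal, self-contained
// sketch of the same two-step technique -- std::copy_if to collect flat
// indices, then repeated modulo/division to split them into per-dimension
// coordinates -- using plain std::vector data instead of the Tensor /
// TensorIterator API. The helper name DemoNonZero and the row-major layout
// assumption are hypothetical, for illustration only.
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <numeric>
#include <vector>

// Returns per-dimension indices of non-zero elements: result[dim][i] is the
// dim-th coordinate of the i-th non-zero element, mirroring the
// {num_dims, num_non_zeros} layout produced by NonZeroCPU above.
std::vector<std::vector<int64_t>> DemoNonZero(
        const std::vector<float>& data, const std::vector<int64_t>& shape) {
    // Step 1: stable filter of flat indices (copy_if preserves order).
    std::vector<int64_t> flat(data.size());
    std::iota(flat.begin(), flat.end(), 0);
    std::vector<int64_t> non_zero(data.size());
    auto it = std::copy_if(flat.begin(), flat.end(), non_zero.begin(),
                           [&data](int64_t i) { return data[i] != 0.0f; });
    non_zero.resize(std::distance(non_zero.begin(), it));

    // Step 2: peel each flat index into coordinates, innermost dim first.
    const int64_t num_dims = static_cast<int64_t>(shape.size());
    std::vector<std::vector<int64_t>> result(
            num_dims, std::vector<int64_t>(non_zero.size()));
    for (size_t i = 0; i < non_zero.size(); i++) {
        int64_t index = non_zero[i];
        for (int64_t dim = num_dims - 1; dim >= 0; dim--) {
            result[dim][i] = index % shape[dim];
            index /= shape[dim];
        }
    }
    return result;
}

int main() {
    // 2x3 input with non-zeros at flat indices 1, 3, 5, i.e. at
    // coordinates (0,1), (1,0), (1,2).
    const std::vector<float> data = {0, 7, 0, 2, 0, 9};
    for (const auto& row : DemoNonZero(data, {2, 3})) {
        for (int64_t v : row) std::cout << v << ' ';
        std::cout << '\n';  // Prints "0 1 1" then "1 0 2".
    }
    return 0;
}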