#include <unordered_map>
// Unpack the camera parameters from double-precision tensors.
const double* intrinsic_ptr = intrinsics.GetDataPtr<double>();
const double* extrinsic_ptr = extrinsics.GetDataPtr<double>();

// Copy the row-major 3x4 extrinsic [R | t] into a plain float array.
for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 4; ++j) {
        extrinsic_[i][j] = extrinsic_ptr[i * 4 + j];
    }
}

// Pinhole intrinsics: focal lengths and principal point from the row-major 3x3 matrix.
fx_ = intrinsic_ptr[0 * 3 + 0];
fy_ = intrinsic_ptr[1 * 3 + 1];
cx_ = intrinsic_ptr[0 * 3 + 2];
cy_ = intrinsic_ptr[1 * 3 + 2];
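The index arithmetic above assumes the standard row-major pinhole intrinsic layout; the sketch below spells that layout out with purely illustrative values (K and its numbers are not taken from this file).

// Assumed layout of the row-major 3x3 intrinsic matrix, matching the indices above:
//     [ fx   0  cx ]     fx = K[0*3 + 0], cx = K[0*3 + 2]
//     [  0  fy  cy ]     fy = K[1*3 + 1], cy = K[1*3 + 2]
//     [  0   0   1 ]
const double K[9] = {525.0, 0.0, 319.5,
                     0.0, 525.0, 239.5,
                     0.0, 0.0,   1.0};  // example values only (hypothetical)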
// Rigid transform of a point: rotation plus translation from the 3x4 extrinsic.
*x_out = x_in * extrinsic_[0][0] + y_in * extrinsic_[0][1] +
         z_in * extrinsic_[0][2] + extrinsic_[0][3];
*y_out = x_in * extrinsic_[1][0] + y_in * extrinsic_[1][1] +
         z_in * extrinsic_[1][2] + extrinsic_[1][3];
*z_out = x_in * extrinsic_[2][0] + y_in * extrinsic_[2][1] +
         z_in * extrinsic_[2][2] + extrinsic_[2][3];

// Rotation only: transforms directions (e.g., normals); the translation column is ignored.
*x_out = x_in * extrinsic_[0][0] + y_in * extrinsic_[0][1] + z_in * extrinsic_[0][2];
*y_out = x_in * extrinsic_[1][0] + y_in * extrinsic_[1][1] + z_in * extrinsic_[1][2];
*z_out = x_in * extrinsic_[2][0] + y_in * extrinsic_[2][1] + z_in * extrinsic_[2][2];
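The two blocks differ only in whether the translation column extrinsic_[i][3] is added: the first maps points, the second maps directions. Below is a self-contained sketch of the same arithmetic; the names Mat3x4, TransformPoint, and TransformDirection are illustrative and do not come from this header.

#include <array>

// Stand-alone illustration of the [R | t] arithmetic above (illustrative names).
using Mat3x4 = std::array<std::array<float, 4>, 3>;  // row-major 3x4 matrix

// Point transform: rotation plus translation (first block above).
inline std::array<float, 3> TransformPoint(const Mat3x4& T, float x, float y, float z) {
    return {x * T[0][0] + y * T[0][1] + z * T[0][2] + T[0][3],
            x * T[1][0] + y * T[1][1] + z * T[1][2] + T[1][3],
            x * T[2][0] + y * T[2][1] + z * T[2][2] + T[2][3]};
}

// Direction transform: rotation only, translation ignored (second block above).
inline std::array<float, 3> TransformDirection(const Mat3x4& T, float x, float y, float z) {
    return {x * T[0][0] + y * T[0][1] + z * T[0][2],
            x * T[1][0] + y * T[1][1] + z * T[1][2],
            x * T[2][0] + y * T[2][1] + z * T[2][2]};
}

int main() {
    const Mat3x4 T = {{{1, 0, 0, 10}, {0, 1, 0, 20}, {0, 0, 1, 30}}};  // identity rotation, translation (10, 20, 30)
    const auto p = TransformPoint(T, 1.0f, 2.0f, 3.0f);      // -> {11, 22, 33}
    const auto d = TransformDirection(T, 1.0f, 2.0f, 3.0f);  // -> {1, 2, 3}
    return (p[0] == 11.0f && d[0] == 1.0f) ? 0 : 1;
}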
// Perspective projection (camera space -> pixel): u = fx * x / z + cx, v = fy * y / z + cy.
float inv_z = 1.0f / z_in;
*u_out = fx_ * x_in * inv_z + cx_;
*v_out = fy_ * y_in * inv_z + cy_;

// Unprojection (pixel u_in, v_in with depth d_in -> camera space); z_out carries the depth.
*x_out = (u_in - cx_) * d_in / fx_;
*y_out = (v_in - cy_) * d_in / fy_;
*z_out = d_in;
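Projection and unprojection are inverses for any pixel with positive depth. A small self-contained check of that round trip, using illustrative intrinsic values that are not from this file:

#include <cassert>
#include <cmath>

// Round-trip check of the pinhole equations above, written as plain host code.
int main() {
    const float fx = 525.0f, fy = 525.0f, cx = 319.5f, cy = 239.5f;  // illustrative intrinsics

    // Unproject pixel (u, v) at depth d into camera space...
    const float u = 100.0f, v = 80.0f, d = 1.5f;
    const float x = (u - cx) * d / fx;
    const float y = (v - cy) * d / fy;
    const float z = d;

    // ...then project back: up to float rounding we recover (u, v).
    const float inv_z = 1.0f / z;
    assert(std::fabs(fx * x * inv_z + cx - u) < 1e-3f);
    assert(std::fabs(fy * y * inv_z + cy - v) < 1e-3f);
    return 0;
}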
// Camera position: the translation column of the stored 3x4 matrix.
*x = extrinsic_[0][3];
*y = extrinsic_[1][3];
*z = extrinsic_[2][3];

float extrinsic_[3][4];  // Row-major 3x4 rigid transform [R | t].
// TArrayIndexer<index_t>: converts between nD coordinates, flattened "workload"
// indices, and raw element pointers of a contiguous tensor.
template <typename index_t>

TArrayIndexer(const core::Tensor& ndarray, index_t active_dims) {
    if (!ndarray.IsContiguous()) {
        utility::LogError(
                "Only support contiguous tensors for general operations.");
    }
    core::SizeVector shape = ndarray.GetShape();
    index_t n = static_cast<index_t>(shape.size());
    if (active_dims > MAX_RESOLUTION_DIMS || active_dims > n) {
        utility::LogError(
                "Tensor shape too large, only <= {} and <= {} array dim is "
                "supported, but received {}.",
                MAX_RESOLUTION_DIMS, n, active_dims);
    }
    // The leading active_dims dimensions are indexed coordinates.
    active_dims_ = active_dims;
    for (index_t i = 0; i < active_dims_; ++i) { shape_[i] = shape[i]; }
    // Trailing dimensions (e.g., channels) are folded into the element byte size.
    element_byte_size_ = ndarray.GetDtype().ByteSize();
    for (index_t i = active_dims_; i < n; ++i) { element_byte_size_ *= shape[i]; }
    ptr_ = const_cast<void*>(ndarray.GetDataPtr());
}

/// Only used for simple shapes (no data pointer is attached).
TArrayIndexer(const core::SizeVector& shape) {
    index_t n = static_cast<index_t>(shape.size());
    if (n > MAX_RESOLUTION_DIMS) {
        utility::LogError("SizeVector too large, only <= {} is supported, but "
                          "received {}.", MAX_RESOLUTION_DIMS, n);
    }
    active_dims_ = n;
    for (index_t i = 0; i < active_dims_; ++i) { shape_[i] = shape[i]; }
    ptr_ = nullptr;
    element_byte_size_ = 0;
}

CLOUDVIEWER_HOST_DEVICE index_t ElementByteSize() { return element_byte_size_; }

CLOUDVIEWER_HOST_DEVICE index_t NumElements() {
    index_t num_elems = 1;
    for (index_t i = 0; i < active_dims_; ++i) { num_elems *= shape_[i]; }
    return num_elems;
}
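As a concrete illustration (the shape and dtype here are hypothetical, not from this excerpt): a Float32 color image of shape {480, 640, 3} indexed with active_dims = 2 keeps shape_ = {480, 640} as coordinates and folds the channel dimension into the element, so element_byte_size_ = 4 * 3 = 12 bytes, NumElements() = 480 * 640, and each workload step advances the data pointer by one whole 3-channel pixel.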
// CoordToWorkload: row-major flattening, x is the fastest-varying coordinate.
*workload = y_in * shape_[1] + x_in;                                             // 2D
*workload = (z_in * shape_[1] + y_in) * shape_[2] + x_in;                        // 3D
*workload = ((t_in * shape_[1] + z_in) * shape_[2] + y_in) * shape_[3] + x_in;   // 4D
// WorkloadToCoord: the inverse mapping, workload => coordinates.
// 2D:
*x_out = workload % shape_[1];
*y_out = workload / shape_[1];
// 3D:
*x_out = workload % shape_[2];
workload = (workload - *x_out) / shape_[2];
*y_out = workload % shape_[1];
*z_out = workload / shape_[1];
// 4D:
*x_out = workload % shape_[3];
workload = (workload - *x_out) / shape_[3];
*y_out = workload % shape_[2];
workload = (workload - *y_out) / shape_[2];
*z_out = workload % shape_[1];
*t_out = workload / shape_[1];
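Flattening and unflattening are exact inverses for in-range coordinates. A small self-contained check of the 3D case, using an illustrative shape:

#include <cassert>
#include <cstdint>

// Stand-alone check of the 3D coordinate <-> workload mapping above, with an
// illustrative shape {shape_[0], shape_[1], shape_[2]} = {4, 3, 5}; x varies fastest.
int main() {
    const std::int64_t shape[3] = {4, 3, 5};
    const std::int64_t x = 2, y = 1, z = 3;

    // 3D coordinate => workload (same formula as above).
    const std::int64_t workload = (z * shape[1] + y) * shape[2] + x;

    // Workload => 3D coordinate (same inverse as above).
    std::int64_t w = workload;
    const std::int64_t x_out = w % shape[2];
    w = (w - x_out) / shape[2];
    const std::int64_t y_out = w % shape[1];
    const std::int64_t z_out = w / shape[1];

    assert(x_out == x && y_out == y && z_out == z);
    return 0;
}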
// InBoundary: true when the (floating-point) coordinate lies in [0, dim - 1] for every active dim.
CLOUDVIEWER_HOST_DEVICE bool InBoundary(float x, float y) const {
    return y >= 0 && x >= 0 && y <= shape_[0] - 1.0f && x <= shape_[1] - 1.0f;
}
CLOUDVIEWER_HOST_DEVICE bool InBoundary(float x, float y, float z) const {
    return z >= 0 && y >= 0 && x >= 0 && z <= shape_[0] - 1.0f &&
           y <= shape_[1] - 1.0f && x <= shape_[2] - 1.0f;
}
CLOUDVIEWER_HOST_DEVICE bool InBoundary(float x, float y, float z, float t) const {
    return t >= 0 && z >= 0 && y >= 0 && x >= 0 && t <= shape_[0] - 1.0f &&
           z <= shape_[1] - 1.0f && y <= shape_[2] - 1.0f && x <= shape_[3] - 1.0f;
}
// GetDataPtr<T>: element access via byte offsets, so multi-channel elements are
// addressed as a whole (the workload is scaled by element_byte_size_).
template <typename T>
CLOUDVIEWER_HOST_DEVICE T* GetDataPtr(index_t x) const {
    return static_cast<T*>(static_cast<void*>(static_cast<uint8_t*>(ptr_) +
                                              x * element_byte_size_));
}

template <typename T>
CLOUDVIEWER_HOST_DEVICE T* GetDataPtr(index_t x, index_t y) const {
    index_t workload;
    CoordToWorkload(x, y, &workload);
    return static_cast<T*>(static_cast<void*>(static_cast<uint8_t*>(ptr_) +
                                              workload * element_byte_size_));
}

template <typename T>
CLOUDVIEWER_HOST_DEVICE T* GetDataPtr(index_t x, index_t y, index_t z) const {
    index_t workload;
    CoordToWorkload(x, y, z, &workload);
    return static_cast<T*>(static_cast<void*>(static_cast<uint8_t*>(ptr_) +
                                              workload * element_byte_size_));
}

template <typename T>
CLOUDVIEWER_HOST_DEVICE T* GetDataPtr(index_t x, index_t y, index_t z, index_t t) const {
    index_t workload;
    CoordToWorkload(x, y, z, t, &workload);
    return static_cast<T*>(static_cast<void*>(static_cast<uint8_t*>(ptr_) +
                                              workload * element_byte_size_));
}
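Because offsets are computed in bytes and scaled by element_byte_size_, one pointer returned by GetDataPtr<T> addresses a whole, possibly multi-channel, element. A self-contained sketch of that addressing, outside the class and with illustrative data:

#include <cassert>
#include <cstdint>

// Stand-alone sketch of the byte-offset addressing used by GetDataPtr<T> above:
// a 2 x 2 image with 3 float channels, so one element spans 3 * sizeof(float) bytes.
int main() {
    float buffer[2 * 2 * 3] = {};
    buffer[(1 * 2 + 1) * 3 + 0] = 7.0f;  // pixel (x = 1, y = 1), channel 0

    void* ptr = buffer;
    const std::int64_t element_byte_size = 3 * sizeof(float);
    const std::int64_t workload = 1 * 2 + 1;  // y * width + x, as in CoordToWorkload

    float* pixel = static_cast<float*>(static_cast<void*>(
            static_cast<std::uint8_t*>(ptr) + workload * element_byte_size));
    assert(pixel[0] == 7.0f);  // the pointer addresses the whole 3-channel element
    return 0;
}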
Macro and member summary:

Macros:
    #define CLOUDVIEWER_HOST_DEVICE
    #define AssertTensorDevice(tensor, ...)
    #define AssertTensorDtype(tensor, ...)
    #define AssertTensorShape(tensor, ...)

Other referenced declarations:
    bool IsContiguous() const
    SizeVector GetShape() const

TArrayIndexer<index_t> members:
    TArrayIndexer(const core::Tensor &ndarray, index_t active_dims)
    TArrayIndexer(const core::SizeVector &shape)  // Only used for simple shapes.
    CLOUDVIEWER_HOST_DEVICE index_t ElementByteSize()
    CLOUDVIEWER_HOST_DEVICE index_t NumElements()
    CLOUDVIEWER_HOST_DEVICE index_t GetShape(int i) const
    CLOUDVIEWER_HOST_DEVICE void *GetDataPtr() const
    CLOUDVIEWER_HOST_DEVICE T *GetDataPtr(index_t x) const
    CLOUDVIEWER_HOST_DEVICE T *GetDataPtr(index_t x, index_t y) const
    CLOUDVIEWER_HOST_DEVICE T *GetDataPtr(index_t x, index_t y, index_t z) const
    CLOUDVIEWER_HOST_DEVICE T *GetDataPtr(index_t x, index_t y, index_t z, index_t t) const
    CLOUDVIEWER_HOST_DEVICE void CoordToWorkload(index_t x_in, index_t y_in, index_t *workload) const  // 2D coordinate => workload.
    CLOUDVIEWER_HOST_DEVICE void CoordToWorkload(index_t x_in, index_t y_in, index_t z_in, index_t *workload) const  // 3D coordinate => workload.
    CLOUDVIEWER_HOST_DEVICE void CoordToWorkload(index_t x_in, index_t y_in, index_t z_in, index_t t_in, index_t *workload) const  // 4D coordinate => workload.
    CLOUDVIEWER_HOST_DEVICE void WorkloadToCoord(index_t workload, index_t *x_out, index_t *y_out) const  // Workload => 2D coordinate.
    CLOUDVIEWER_HOST_DEVICE void WorkloadToCoord(index_t workload, index_t *x_out, index_t *y_out, index_t *z_out) const  // Workload => 3D coordinate.
    CLOUDVIEWER_HOST_DEVICE void WorkloadToCoord(index_t workload, index_t *x_out, index_t *y_out, index_t *z_out, index_t *t_out) const  // Workload => 4D coordinate.
    CLOUDVIEWER_HOST_DEVICE bool InBoundary(float x, float y) const
    CLOUDVIEWER_HOST_DEVICE bool InBoundary(float x, float y, float z) const
    CLOUDVIEWER_HOST_DEVICE bool InBoundary(float x, float y, float z, float t) const

Related constants:
    const int64_t MAX_RESOLUTION_DIMS
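Putting the pieces together, a typical traversal might look like the sketch below. It assumes the core::Tensor API matches Open3D's (core::Tensor::Zeros, core::Float32), which is not shown in this excerpt, so treat it as an illustration of the indexer calls rather than verified code.

// Illustrative only: core::Tensor::Zeros / core::Float32 are assumed to exist as
// in Open3D's tensor API; TArrayIndexer is the class summarized above.
core::Tensor depth = core::Tensor::Zeros({480, 640}, core::Float32);

// Index the two leading dims; the element is then a single 4-byte float.
TArrayIndexer<int64_t> depth_indexer(depth, 2);

for (int64_t workload = 0; workload < depth_indexer.NumElements(); ++workload) {
    int64_t x, y;
    depth_indexer.WorkloadToCoord(workload, &x, &y);
    *depth_indexer.GetDataPtr<float>(x, y) = 0.001f * static_cast<float>(workload);
}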