ACloudViewer  3.9.4
A Modern Library for 3D Data Processing
tensor.cpp
Go to the documentation of this file.
1 // ----------------------------------------------------------------------------
2 // - CloudViewer: www.cloudViewer.org -
3 // ----------------------------------------------------------------------------
4 // Copyright (c) 2018-2024 www.cloudViewer.org
5 // SPDX-License-Identifier: MIT
6 // ----------------------------------------------------------------------------
7 
8 #include "core/Tensor.h"
9 
10 #include <Optional.h>
11 
12 #include <vector>
13 
14 #include "cloudViewer/core/Blob.h"
19 #include "cloudViewer/core/Dtype.h"
24 #include "pybind/core/core.h"
27 #include "pybind/docstring.h"
28 #include "pybind/pybind_utils.h"
29 
// Passed as the `self_const` argument of the binding macros below to select
// whether the bound lambda receives `self` as `const Tensor&` or `Tensor&`.
#define CONST_ARG const
#define NON_CONST_ARG
32 
// Registers Python method `py_op` on `tensor` as a binary op: one overload
// taking another Tensor, plus one overload per C++ scalar type (float,
// double, every fixed-width signed/unsigned integer, and bool), each
// forwarding the scalar through core::Scalar. `self_qual` is CONST_ARG or
// NON_CONST_ARG and const-qualifies `self` in the Tensor-Tensor overload.
// NOTE: pybind11 tries overloads in registration order, so the order below
// is intentional and must not be changed.
#define BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(py_op, cpp_op, self_qual)  \
    tensor.def(#py_op, [](self_qual Tensor& self, const Tensor& other) { \
        return self.cpp_op(other);                                       \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, float value) {                   \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, double value) {                  \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, int8_t value) {                  \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, int16_t value) {                 \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, int32_t value) {                 \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, int64_t value) {                 \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, uint8_t value) {                 \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, uint16_t value) {                \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, uint32_t value) {                \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, uint64_t value) {                \
        return self.cpp_op(Scalar(value));                               \
    });                                                                  \
    tensor.def(#py_op, [](Tensor& self, bool value) {                    \
        return self.cpp_op(Scalar(value));                               \
    });
70 
// Registers Python method `py_op` on `tensor` with a (min_v, max_v) clipping
// overload for every C++ scalar type (float, double, all fixed-width
// integers, and bool). Unlike the binary-op macro, `self_qual`
// (CONST_ARG / NON_CONST_ARG) applies to every overload here.
// Registration order is significant for pybind11 overload resolution.
#define BIND_CLIP_SCALAR(py_op, cpp_op, self_qual)                             \
    tensor.def(#py_op, [](self_qual Tensor& self, float min_v, float max_v) {  \
        return self.cpp_op(min_v, max_v);                                      \
    });                                                                        \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, double min_v, double max_v) {        \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, int8_t min_v, int8_t max_v) {        \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, int16_t min_v, int16_t max_v) {      \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, int32_t min_v, int32_t max_v) {      \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, int64_t min_v, int64_t max_v) {      \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, uint8_t min_v, uint8_t max_v) {      \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, uint16_t min_v, uint16_t max_v) {    \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, uint32_t min_v, uint32_t max_v) {    \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op,                                                         \
               [](self_qual Tensor& self, uint64_t min_v, uint64_t max_v) {    \
                   return self.cpp_op(min_v, max_v);                           \
               });                                                             \
    tensor.def(#py_op, [](self_qual Tensor& self, bool min_v, bool max_v) {    \
        return self.cpp_op(min_v, max_v);                                      \
    });
115 
// Registers a reflected binary op (e.g. __radd__) for every C++ scalar type:
// the scalar is materialized as a 0-d Tensor with self's dtype/device, and
// the op is invoked on that scalar tensor with `self` as the right operand.
// Registration order is significant for pybind11 overload resolution.
#define BIND_BINARY_R_OP_ALL_DTYPES(py_op, cpp_op)                       \
    tensor.def(#py_op, [](const Tensor& self, float value) {             \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, double value) {            \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, int8_t value) {            \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, int16_t value) {           \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, int32_t value) {           \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, int64_t value) {           \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, uint8_t value) {           \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, uint16_t value) {          \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, uint32_t value) {          \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, uint64_t value) {          \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });                                                                  \
    tensor.def(#py_op, [](const Tensor& self, bool value) {              \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_op(self);                                           \
    });
161 
// Registers reduction method `py_op` with numpy-like semantics: `dim` may be
// a list of axes or None (reduce over all dimensions), plus a `keepdim` flag.
// The lambda parameter is named `self` (the original shadowed the outer
// `tensor` binder variable; behavior is unchanged).
#define BIND_REDUCTION_OP(py_op, cpp_op)                                      \
    tensor.def(                                                               \
            #py_op,                                                           \
            [](const Tensor& self, const utility::optional<SizeVector>& dim,  \
               bool keepdim) {                                                \
                SizeVector dims;                                              \
                if (!dim.has_value()) {                                       \
                    for (int64_t d = 0; d < self.NumDims(); ++d) {            \
                        dims.push_back(d);                                    \
                    }                                                         \
                } else {                                                      \
                    dims = dim.value();                                       \
                }                                                             \
                return self.cpp_op(dims, keepdim);                            \
            },                                                                \
            "dim"_a = py::none(), "keepdim"_a = false);
178 
// Same as BIND_REDUCTION_OP but for reductions whose C++ API takes no
// `keepdim` flag (dim=None still means "reduce over all dimensions").
// TODO (rishabh): add this behavior to the cpp implementation. Refer to the
// Tensor::Any and Tensor::All ops.
#define BIND_REDUCTION_OP_NO_KEEPDIM(py_op, cpp_op)                            \
    tensor.def(                                                                \
            #py_op,                                                            \
            [](const Tensor& self, const utility::optional<SizeVector>& dim) { \
                SizeVector dims;                                               \
                if (!dim.has_value()) {                                        \
                    for (int64_t d = 0; d < self.NumDims(); ++d) {             \
                        dims.push_back(d);                                     \
                    }                                                          \
                } else {                                                       \
                    dims = dim.value();                                        \
                }                                                              \
                return self.cpp_op(dims);                                      \
            },                                                                 \
            "dim"_a = py::none());
197 
198 namespace cloudViewer {
199 namespace core {
200 
// Shared parameter descriptions injected into the docstrings of the Tensor
// creation/conversion bindings via docstring::ClassMethodDocInject().
const std::unordered_map<std::string, std::string> argument_docs = {
        {"dtype", "Data type for the Tensor."},
        {"device", "Compute device to store and operate on the Tensor."},
        {"shape", "List of Tensor dimensions."},
        {"fill_value", "Scalar value to initialize all elements with."},
        {"scalar_value", "Initial value for the single element tensor."},
        {"copy",
         "If true, a new tensor is always created; if false, the copy is "
         "avoided when the original tensor already has the targeted dtype."}};
210 
211 template <typename T>
212 static std::vector<T> ToFlatVector(
213  py::array_t<T, py::array::c_style | py::array::forcecast> np_array) {
214  py::buffer_info info = np_array.request();
215  T* start = static_cast<T*>(info.ptr);
216  return std::vector<T>(start, start + info.size);
217 }
218 
219 template <typename func_t>
220 static void BindTensorCreation(py::module& m,
221  py::class_<Tensor>& tensor,
222  const std::string& py_name,
223  func_t cpp_func) {
224  tensor.def_static(
225  py_name.c_str(),
226  [cpp_func](const SizeVector& shape, utility::optional<Dtype> dtype,
227  utility::optional<Device> device) {
228  return cpp_func(
229  shape,
230  dtype.has_value() ? dtype.value() : core::Float32,
231  device.has_value() ? device.value() : Device("CPU:0"));
232  },
233  "Create Tensor with a given shape.", "shape"_a,
234  "dtype"_a = py::none(), "device"_a = py::none());
235 
236  docstring::ClassMethodDocInject(m, "Tensor", py_name, argument_docs);
237 }
238 
239 template <typename T>
240 static void BindTensorFullCreation(py::module& m, py::class_<Tensor>& tensor) {
241  tensor.def_static(
242  "full",
243  [](const SizeVector& shape, T fill_value,
245  utility::optional<Device> device) {
246  return Tensor::Full<T>(
247  shape, fill_value,
248  dtype.has_value() ? dtype.value() : core::Float32,
249  device.has_value() ? device.value() : Device("CPU:0"));
250  },
251  "shape"_a, "fill_value"_a, "dtype"_a = py::none(),
252  "device"_a = py::none());
253 }
254 
255 namespace {
256 Tensor maybeCopyTensor(const Tensor& data,
257  utility::optional<std::pair<DLDeviceType, int>>
258  optional_dl_device = utility::nullopt,
260  bool force_copy = copy.has_value() && copy.value();
261  bool force_move = copy.has_value() && !copy.value();
262  if (optional_dl_device.has_value()) {
263  Device to_device;
264  switch (optional_dl_device.value().first) {
266  to_device = Device("CPU:0");
267  break;
269  to_device = Device(fmt::format(
270  "CUDA:{}", optional_dl_device.value().second));
271  break;
273  to_device = Device(fmt::format(
274  "SYCL:{}", optional_dl_device.value().second));
275  break;
276  default:
277  utility::LogError("DLPack: unsupported device type {}",
278  int(optional_dl_device.value().first));
279  }
280  if (to_device != data.GetDevice()) {
281  if (!force_move)
283  " [DLPack] Cannot move (i.e. copy=False) tensor from "
284  "{} to {} without copying.",
285  data.GetDevice().ToString(), to_device.ToString());
286  return data.To(to_device);
287  }
288  }
289  if (force_copy) {
290  return data.Clone();
291  }
292  return data;
293 }
294 } // namespace
295 
296 void pybind_core_tensor(py::module& m) {
297  py::class_<Tensor> tensor(
298  m, "Tensor",
299  "A Tensor is a view of a data Blob with shape, stride, data_ptr.");
300  m.attr("capsule") = py::module_::import("typing").attr("Any");
301  // https://data-apis.org/array-api/latest/API_specification/generated/array_api.array.__dlpack_device__.html#array_api.array.__dlpack_device__
302  py::native_enum<DLDeviceType>(m, "DLDeviceType", "enum.Enum")
303  .value("CPU", DLDeviceType::kDLCPU)
304  .value("CUDA", DLDeviceType::kDLCUDA)
305  .value("CPU_PINNED", DLDeviceType::kDLCUDAHost)
306  .value("OPENCL", DLDeviceType::kDLOpenCL)
307  .value("VULKAN", DLDeviceType::kDLVulkan)
308  .value("METAL", DLDeviceType::kDLMetal)
309  .value("VPI", DLDeviceType::kDLVPI)
310  .value("ROCM", DLDeviceType::kDLROCM)
311  .value("CUDA_MANAGED", DLDeviceType::kDLCUDAManaged)
312  .value("ONE_API", DLDeviceType::kDLOneAPI)
313  .finalize();
314 
315  // o3c.Tensor(np.array([[0, 1, 2], [3, 4, 5]]), dtype=None, device=None).
316  tensor.def(py::init([](const py::array& np_array,
318  utility::optional<Device> device) {
319  Tensor t = PyArrayToTensor(np_array, /*inplace=*/false);
320  if (dtype.has_value()) {
321  t = t.To(dtype.value());
322  }
323  if (device.has_value()) {
324  t = t.To(device.value());
325  }
326  return t;
327  }),
328  "Initialize Tensor from a Numpy array.", "np_array"_a,
329  "dtype"_a = py::none(), "device"_a = py::none());
330 
331  // o3c.Tensor(True, dtype=None, device=None).
332  // Default to Bool, CPU:0.
333  tensor.def(py::init([](bool scalar_value, utility::optional<Dtype> dtype,
334  utility::optional<Device> device) {
335  return BoolToTensor(scalar_value, dtype, device);
336  }),
337  "scalar_value"_a, "dtype"_a = py::none(),
338  "device"_a = py::none());
339 
340  // o3c.Tensor(1, dtype=None, device=None).
341  // Default to Int64, CPU:0.
342  tensor.def(py::init([](int64_t scalar_value, utility::optional<Dtype> dtype,
343  utility::optional<Device> device) {
344  return IntToTensor(scalar_value, dtype, device);
345  }),
346  "scalar_value"_a, "dtype"_a = py::none(),
347  "device"_a = py::none());
348 
349  // o3c.Tensor(3.14, dtype=None, device=None).
350  // Default to Float64, CPU:0.
351  tensor.def(py::init([](double scalar_value, utility::optional<Dtype> dtype,
352  utility::optional<Device> device) {
353  return DoubleToTensor(scalar_value, dtype, device);
354  }),
355  "scalar_value"_a, "dtype"_a = py::none(),
356  "device"_a = py::none());
357 
358  // o3c.Tensor([[0, 1, 2], [3, 4, 5]], dtype=None, device=None).
359  tensor.def(
360  py::init([](const py::list& shape, utility::optional<Dtype> dtype,
361  utility::optional<Device> device) {
362  return PyListToTensor(shape, dtype, device);
363  }),
364  "Initialize Tensor from a nested list.", "shape"_a,
365  "dtype"_a = py::none(), "device"_a = py::none());
366 
367  // o3c.Tensor(((0, 1, 2), (3, 4, 5)), dtype=None, device=None).
368  tensor.def(
369  py::init([](const py::tuple& shape, utility::optional<Dtype> dtype,
370  utility::optional<Device> device) {
371  return PyTupleToTensor(shape, dtype, device);
372  }),
373  "Initialize Tensor from a nested tuple.", "shape"_a,
374  "dtype"_a = py::none(), "device"_a = py::none());
375 
376  docstring::ClassMethodDocInject(m, "Tensor", "__init__", argument_docs);
377 
379 
380  // Tensor creation API.
381  BindTensorCreation(m, tensor, "empty", Tensor::Empty);
382  BindTensorCreation(m, tensor, "zeros", Tensor::Zeros);
383  BindTensorCreation(m, tensor, "ones", Tensor::Ones);
384  BindTensorFullCreation<float>(m, tensor);
385  BindTensorFullCreation<double>(m, tensor);
386  BindTensorFullCreation<int8_t>(m, tensor);
387  BindTensorFullCreation<int16_t>(m, tensor);
388  BindTensorFullCreation<int32_t>(m, tensor);
389  BindTensorFullCreation<int64_t>(m, tensor);
390  BindTensorFullCreation<uint8_t>(m, tensor);
391  BindTensorFullCreation<uint16_t>(m, tensor);
392  BindTensorFullCreation<uint32_t>(m, tensor);
393  BindTensorFullCreation<uint64_t>(m, tensor);
394  BindTensorFullCreation<bool>(m, tensor);
395  docstring::ClassMethodDocInject(m, "Tensor", "full", argument_docs);
396 
397  // Pickling support.
398  // The tensor will be on the same device after deserialization.
399  // Non contiguous tensors will be converted to contiguous tensors after
400  // deserialization.
401  tensor.def(py::pickle(
402  [](const Tensor& t) {
403  // __getstate__
404  return py::make_tuple(t.GetDevice(),
405  TensorToPyArray(t.To(Device("CPU:0"))));
406  },
407  [](py::tuple t) {
408  // __setstate__
409  if (t.size() != 2) {
411  "Cannot unpickle Tensor! Expecting a tuple of size "
412  "2.");
413  }
414  const Device& device = t[0].cast<Device>();
415  if (!device.IsAvailable()) {
417  "Device {} is not available, tensor will be "
418  "created on CPU.",
419  device.ToString());
420  return PyArrayToTensor(t[1].cast<py::array>(), true);
421  } else {
422  return PyArrayToTensor(t[1].cast<py::array>(), true)
423  .To(device);
424  }
425  }));
426 
427  tensor.def_static(
428  "eye",
429  [](int64_t n, utility::optional<Dtype> dtype,
430  utility::optional<Device> device) {
431  return Tensor::Eye(
432  n, dtype.has_value() ? dtype.value() : core::Float32,
433  device.has_value() ? device.value() : Device("CPU:0"));
434  },
435  "Create an identity matrix of size n x n.", "n"_a,
436  "dtype"_a = py::none(), "device"_a = py::none());
437  tensor.def_static("diag", &Tensor::Diag);
438 
439  // Tensor creation from arange for int.
440  tensor.def_static(
441  "arange",
442  [](int64_t stop, utility::optional<Dtype> dtype,
443  utility::optional<Device> device) {
444  return Tensor::Arange(
445  0, stop, 1,
446  dtype.has_value() ? dtype.value() : core::Int64,
447  device.has_value() ? device.value() : Device("CPU:0"));
448  },
449  "Create a 1D tensor with evenly spaced values in the given "
450  "interval.",
451  "stop"_a, py::pos_only(), py::kw_only(), "dtype"_a = py::none(),
452  "device"_a = py::none());
453  tensor.def_static(
454  "arange",
455  [](int64_t start, int64_t stop, utility::optional<int64_t> step,
457  utility::optional<Device> device) {
458  return Tensor::Arange(
459  start, stop, step.has_value() ? step.value() : 1,
460  dtype.has_value() ? dtype.value() : core::Int64,
461  device.has_value() ? device.value() : Device("CPU:0"));
462  },
463  "Create a 1D tensor with evenly spaced values in the given "
464  "interval.",
465  "start"_a, "stop"_a, "step"_a = py::none(), "dtype"_a = py::none(),
466  py::kw_only(), "device"_a = py::none());
467 
468  // Tensor creation from arange for float.
469  tensor.def_static(
470  "arange",
471  [](double stop, utility::optional<Dtype> dtype,
472  utility::optional<Device> device) {
473  return Tensor::Arange(
474  0.0, stop, 1.0,
475  dtype.has_value() ? dtype.value() : core::Float64,
476  device.has_value() ? device.value() : Device("CPU:0"));
477  },
478  "Create a 1D tensor with evenly spaced values in the given "
479  "interval.",
480  "stop"_a, py::pos_only(), py::kw_only(), "dtype"_a = py::none(),
481  "device"_a = py::none());
482  tensor.def_static(
483  "arange",
484  [](double start, double stop, utility::optional<double> step,
486  utility::optional<Device> device) {
487  return Tensor::Arange(
488  start, stop, step.has_value() ? step.value() : 1.0,
489  dtype.has_value() ? dtype.value() : core::Float64,
490  device.has_value() ? device.value() : Device("CPU:0"));
491  },
492  "Create a 1D tensor with evenly spaced values in the given "
493  "interval.",
494  "start"_a, "stop"_a, "step"_a = py::none(), "dtype"_a = py::none(),
495  py::kw_only(), "device"_a = py::none());
496 
497  tensor.def(
498  "append",
499  [](const Tensor& tensor, const Tensor& values,
500  const utility::optional<int64_t> axis) {
501  if (axis.has_value()) {
502  return tensor.Append(values, axis);
503  }
504  return tensor.Append(values);
505  },
506  R"(Appends the `values` tensor, along the given axis and returns
507 a copy of the original tensor. Both the tensors must have same data-type
508 device, and number of dimensions. All dimensions must be the same, except the
509 dimension along the axis the tensors are to be appended.
510 
511 This is the similar to NumPy's semantics:
512 - https://numpy.org/doc/stable/reference/generated/numpy.append.html
513 
514 Returns:
515  A copy of the tensor with `values` appended to axis. Note that append
516  does not occur in-place: a new array is allocated and filled. If axis
517  is None, out is a flattened tensor.
518 
519 Example:
520  >>> a = o3d.core.Tensor([[0, 1], [2, 3]])
521  >>> b = o3d.core.Tensor([[4, 5]])
522  >>> a.append(b, axis = 0)
523  [[0 1],
524  [2 3],
525  [4 5]]
526  Tensor[shape={3, 2}, stride={2, 1}, Int64, CPU:0, 0x55555abc6b00]
527 
528  >>> a.append(b)
529  [0 1 2 3 4 5]
530  Tensor[shape={6}, stride={1}, Int64, CPU:0, 0x55555abc6b70])",
531  "values"_a, "axis"_a = py::none());
532 
533  // Device transfer.
534  tensor.def(
535  "cpu",
536  [](const Tensor& tensor) {
537  return tensor.To(core::Device("CPU:0"));
538  },
539  "Transfer the tensor to CPU. If the tensor "
540  "is already on CPU, no copy will be performed.");
541  tensor.def(
542  "cuda",
543  [](const Tensor& tensor, int device_id) {
544  return tensor.To(core::Device("CUDA", device_id));
545  },
546  "Transfer the tensor to a CUDA device. If the tensor is already "
547  "on the specified CUDA device, no copy will be performed.",
548  "device_id"_a = 0);
549 
550  // Buffer I/O for Numpy and DLPack(PyTorch).
551  tensor.def("numpy", &core::TensorToPyArray);
552 
553  tensor.def_static("from_numpy", [](py::array np_array) {
554  return core::PyArrayToTensor(np_array, /*inplace=*/true);
555  });
556 
557  auto to_dlpack = [](const Tensor& tensor,
562  dl_device = utility::nullopt,
564  bool versioned =
565  (max_version.has_value() && max_version.value().first > 0);
566  auto out_tensor = maybeCopyTensor(tensor, dl_device, copy);
567  if (versioned) {
568  auto capsule_destructor = [](PyObject* data) {
569  DLManagedTensorVersioned* dl_managed_tensor =
570  (DLManagedTensorVersioned*)PyCapsule_GetPointer(
571  data, "dltensor");
572  if (dl_managed_tensor != nullptr &&
573  dl_managed_tensor->deleter != nullptr) {
574  // the dl_managed_tensor has not been consumed,
575  // call deleter ourselves
576  dl_managed_tensor->deleter(
577  const_cast<DLManagedTensorVersioned*>(
578  dl_managed_tensor));
579  } else {
580  // The dl_managed_tensor has been consumed
581  // PyCapsule_GetPointer has set an error indicator
582  PyErr_Clear();
583  }
584  };
585  DLManagedTensorVersioned* dlmt = out_tensor.ToDLPackVersioned();
586  return py::capsule(dlmt, "dltensor", capsule_destructor);
587  } else {
588  auto capsule_destructor = [](PyObject* data) {
589  DLManagedTensor* dl_managed_tensor =
590  (DLManagedTensor*)PyCapsule_GetPointer(data,
591  "dltensor");
592  if (dl_managed_tensor != nullptr &&
593  dl_managed_tensor->deleter != nullptr) {
594  // the dl_managed_tensor has not been consumed,
595  // call deleter ourselves
596  dl_managed_tensor->deleter(
597  const_cast<DLManagedTensor*>(dl_managed_tensor));
598  } else {
599  // The dl_managed_tensor has been consumed
600  // PyCapsule_GetPointer has set an error indicator
601  PyErr_Clear();
602  }
603  };
604  DLManagedTensor* dlmt = out_tensor.ToDLPack();
605  return py::capsule(dlmt, "dltensor", capsule_destructor);
606  }
607  };
608  tensor.def(
609  "__dlpack__", to_dlpack, "stream"_a = py::none(),
610  "max_version"_a = py::none(), "dl_device"_a = py::none(),
611  "copy"_a = py::none(),
612  R"(Returns an opaque object (a "DLPack capsule") representing the tensor.
613 
614 .. note::
615 ``to_dlpack`` is a legacy DLPack interface. The capsule it returns
616 cannot be used for anything in Python other than use it as input to
617 ``from_dlpack``. The more idiomatic use of DLPack is to call
618 ``from_dlpack`` directly on the tensor object - this works when that
619 object has a ``__dlpack__`` method, which PyTorch and most other
620 libraries indeed have now.
621 
622 .. warning::
623 Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
624 Behavior when a capsule is consumed multiple times is undefined.
625 
626 Args:
627 tensor: a tensor to be exported
628 
629 The DLPack capsule shares the tensor's memory.)");
630  tensor.attr("to_dlpack") = tensor.attr("__dlpack__");
631 
632  tensor.def("__dlpack_device__", [](const Tensor& tensor) {
633  // &Open3DDLManagedTensor::getDLPackDevice
634  // Prepare dl_device_type
635  DLDeviceType dl_device_type;
636  Device device = tensor.GetDevice();
637  switch (device.GetType()) {
638  case Device::DeviceType::CPU:
639  dl_device_type = DLDeviceType::kDLCPU;
640  break;
641  case Device::DeviceType::CUDA:
642  dl_device_type = DLDeviceType::kDLCUDA;
643  break;
644  case Device::DeviceType::SYCL:
645  dl_device_type = DLDeviceType::kDLOneAPI;
646  break;
647  default:
648  utility::LogError("ToDLPack: unsupported device type {}",
649  device.ToString());
650  }
651  return std::make_pair(dl_device_type, device.GetID());
652  });
653 
654  auto from_dlpack_capsule = [](py::capsule data) {
655  auto invalid_capsule_err =
656  "from_dlpack received an invalid capsule. Note that "
657  "DLTensor capsules can be consumed only once, so you might "
658  "have already constructed a tensor from it once.";
659  Tensor t;
660  // check versioned
661  if (PyCapsule_IsValid(data, "dltensor_versioned")) {
662  DLManagedTensorVersioned* dl_managed_tensor =
663  static_cast<DLManagedTensorVersioned*>(PyCapsule_GetPointer(
664  data.ptr(), "dltensor_versioned"));
665  if (!dl_managed_tensor) {
666  utility::LogError(invalid_capsule_err);
667  }
668  if (dl_managed_tensor->version.major > DLPACK_MAJOR_VERSION) {
670  "Received DLPack capsule with major version {} > {} "
671  "(Max supported version).",
672  dl_managed_tensor->version.major, DLPACK_MAJOR_VERSION);
673  }
674  // Make sure that the PyCapsule is not used again. See:
675  // torch/csrc/Module.cpp, and
676  // https://github.com/cupy/cupy/pull/1445/files#diff-ddf01ff512087ef616db57ecab88c6ae
677  t = Tensor::FromDLPackVersioned(dl_managed_tensor);
678  PyCapsule_SetName(data.ptr(), "used_dltensor_versioned");
679  } else {
680  DLManagedTensor* dl_managed_tensor = static_cast<DLManagedTensor*>(
681  PyCapsule_GetPointer(data.ptr(), "dltensor"));
682  if (!dl_managed_tensor) {
683  utility::LogError(invalid_capsule_err);
684  }
685  t = Tensor::FromDLPack(dl_managed_tensor);
686  PyCapsule_SetName(data.ptr(), "used_dltensor");
687  }
688  PyCapsule_SetDestructor(data.ptr(), nullptr);
689  return t;
690  };
691  tensor.def_static("from_dlpack", from_dlpack_capsule, "dlpack_capsule"_a);
692  tensor.def_static(
693  "from_dlpack",
694  [from_dlpack_capsule](
695  const py::object& ext_tensor,
698  if (!hasattr(ext_tensor, "__dlpack__")) {
700  "from_dlpack: object does not define __dlpack__.");
701  }
702  py::object capsule_obj = ext_tensor.attr("__dlpack__")();
703  auto t = from_dlpack_capsule(
704  py::reinterpret_borrow<py::capsule>(capsule_obj));
705  return t;
706  },
707  "ext_tensor"_a, "device"_a = py::none(), "copy"_a = py::none(),
708  R"(Converts a tensor from an external library into an Open3D ``Tensor``.
709 
710 The returned Open3D tensor will share the memory with the input tensor
711 (which may have come from another library). Note that in-place operations
712 will therefore also affect the data of the input tensor. This may lead to
713 unexpected issues (e.g., other libraries may have read-only flags or
714 immutable data structures), so the user should only do this if they know
715 for sure that this is fine.
716 
717 Args:
718 ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
719 The tensor or DLPack capsule to convert.
720 
721 If ``ext_tensor`` is a tensor (or ndarray) object, it must support
722 the ``__dlpack__`` protocol (i.e., have a ``ext_tensor.__dlpack__``
723 method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
724 an opaque ``PyCapsule`` instance, typically produced by a
725 ``to_dlpack`` function or method.
726 
727 device (open3d.core.device or None): An optional Open3D device
728 specifying where to place the new tensor. Only ``None`` is supported
729 (same device as ``ext_tensor``).
730 
731 copy (bool or None): An optional boolean indicating whether or not to copy
732 ``self``. This is not supported yet and no copy is made.
733 
734 Examples::
735 
736 )");
737 
738  // Numpy IO.
739  tensor.def("save", &Tensor::Save, "Save tensor to Numpy's npy format.",
740  "file_name"_a);
741  tensor.def_static("load", &Tensor::Load,
742  "Load tensor from Numpy's npy format.", "file_name"_a);
743 
745  tensor.def("det", &Tensor::Det,
746  "Compute the determinant of a 2D square tensor.");
747  tensor.def(
748  "lu_ipiv", &Tensor::LUIpiv,
749  R"(Computes LU factorisation of the 2D square tensor, using A = P * L * U;
750 where P is the permutation matrix, L is the lower-triangular matrix with
751 diagonal elements as 1.0 and U is the upper-triangular matrix, and returns
752 tuple `output` tensor of shape {n,n} and `ipiv` tensor of shape {n}, where
753 {n,n} is the shape of input tensor.
754 
755 Returns:
756  ipiv: ipiv is a 1D integer pivot indices tensor. It contains the pivot
757  indices, indicating row i of the matrix was interchanged with row
758  ipiv[i]
759  output: It has L as lower triangular values and U as upper triangle
760  values including the main diagonal (diagonal elements of L to be
761  taken as unity).
762 
763 Example:
764  >>> ipiv, output = a.lu_ipiv())");
765  tensor.def("matmul", &Tensor::Matmul,
766  "Computes matrix multiplication of a"
767  " 2D tensor with another tensor of compatible shape.");
768  tensor.def("__matmul__", &Tensor::Matmul,
769  "Computes matrix multiplication of a"
770  " 2D tensor with another tensor of compatible shape.");
771  tensor.def("lstsq", &Tensor::LeastSquares,
772  "Solves the linear system AX = B with QR decomposition and "
773  "returns X. A is a (m, n) matrix with m >= n.",
774  "B"_a);
775  tensor.def("solve", &Tensor::Solve,
776  "Solves the linear system AX = B with LU decomposition and "
777  "returns X. A must be a square matrix.",
778  "B"_a);
779  tensor.def("inv", &Tensor::Inverse,
780  "Computes the matrix inverse of the square matrix self with "
781  "LU factorization and returns the result.");
782  tensor.def("svd", &Tensor::SVD,
783  "Computes the matrix SVD decomposition :math:`A = U S V^T` and "
784  "returns "
785  "the result. Note :math:`V^T` (V transpose) is returned "
786  "instead of :math:`V`.");
787  tensor.def("triu", &Tensor::Triu,
788  "Returns the upper triangular matrix of the 2D tensor, above "
789  "the given diagonal index. [The value of diagonal = col - row, "
790  "therefore 0 is the main diagonal (row = col), and it shifts "
791  "towards right for positive values (for diagonal = 1, col - row "
792  "= 1), and towards left for negative values. The value of the "
793  "diagonal parameter must be between [-m, n] for a {m,n} shaped "
794  "tensor.",
795  "diagonal"_a = 0);
796  docstring::ClassMethodDocInject(m, "Tensor", "triu",
797  {{"diagonal",
798  "Value of [col - row], above which the "
799  "elements are to be taken for"
800  " upper triangular matrix."}});
801 
802  tensor.def("tril", &Tensor::Tril,
803  "Returns the lower triangular matrix of the 2D tensor, above "
804  "the given diagonal index. [The value of diagonal = col - row, "
805  "therefore 0 is the main diagonal (row = col), and it shifts "
806  "towards right for positive values (for diagonal = 1, col - "
807  "row "
808  "= 1), and towards left for negative values. The value of the "
809  "diagonal parameter must be between [-m, n] where {m, n} is "
810  "the "
811  "shape of input tensor.",
812  "diagonal"_a = 0);
813  docstring::ClassMethodDocInject(m, "Tensor", "tril",
814  {{"diagonal",
815  "Value of [col - row], below which "
816  "the elements are to be taken "
817  "for lower triangular matrix."}});
818 
819  tensor.def("triul", &Tensor::Triul,
820  "Returns the tuple of upper and lower triangular matrix of the "
821  "2D "
822  "tensor, above and below the given diagonal index. The "
823  "diagonal "
824  "elements of lower triangular matrix are taken to be unity. "
825  "[The "
826  "value of diagonal = col - row, therefore 0 is the main "
827  "diagonal "
828  "(row = col), and it shifts towards right for positive values "
829  "(for "
830  "diagonal = 1, col - row = 1), and towards left for negative "
831  "values. The value of the diagonal parameter must be between "
832  "[-m, "
833  "n] where {m, n} is the shape of input tensor.",
834  "diagonal"_a = 0);
836  m, "Tensor", "triul",
837  {{"diagonal",
838  "Value of [col - row], above and below which the elements "
839  "are to "
840  "be taken for upper (diag. included) and lower triangular "
841  "matrix."}});
842  tensor.def(
843  "lu",
844  [](const Tensor& tensor, bool permute_l) {
845  return tensor.LU(permute_l);
846  },
847  "permute_l"_a = false,
848  R"(Computes LU factorisation of the 2D square tensor, using A = P * L * U;
849 where P is the permutation matrix, L is the lower-triangular matrix with
850 diagonal elements as 1.0 and U is the upper-triangular matrix, and returns
851 tuple (P, L, U).
852 
853 Returns:
854  Tuple (P, L, U).)");
856  m, "Tensor", "lu", {{"permute_l", "If True, returns L as P * L."}});
857 
858  // Casting and copying.
859  tensor.def(
860  "to",
861  [](const Tensor& tensor, Dtype dtype, bool copy) {
862  return tensor.To(dtype, copy);
863  },
864  "Returns a tensor with the specified ``dtype``.", "dtype"_a,
865  "copy"_a = false);
866  tensor.def(
867  "to",
868  [](const Tensor& tensor, const Device& device, bool copy) {
869  return tensor.To(device, copy);
870  },
871  "Returns a tensor with the specified ``device``.", "device"_a,
872  "copy"_a = false);
873  tensor.def(
874  "to",
875  [](const Tensor& tensor, const Device& device, Dtype dtype,
876  bool copy) { return tensor.To(device, dtype, copy); },
877  "Returns a tensor with the specified ``device`` and ``dtype``."
878  "device"_a,
879  "dtype"_a, "copy"_a = false);
881 
882  tensor.def("clone", &Tensor::Clone, "Copy Tensor to the same device.");
883  tensor.def("T", &Tensor::T,
884  "Transpose <=2-D tensor by swapping dimension 0 and 1."
885  "0-D and 1-D Tensor remains the same.");
886  tensor.def(
887  "reshape", &Tensor::Reshape,
888  R"(Returns a tensor with the same data and number of elements as input, but
889 with the specified shape. When possible, the returned tensor will be a view of
890 input. Otherwise, it will be a copy.
891 
892 Contiguous inputs and inputs with compatible strides can be reshaped
893 without copying, but you should not depend on the copying vs. viewing
894 behavior.
895 
896 Ref:
897 - https://pytorch.org/docs/stable/tensors.html
898 - aten/src/ATen/native/TensorShape.cpp
899 - aten/src/ATen/TensorUtils.cpp)",
900  "dst_shape"_a);
901  docstring::ClassMethodDocInject(m, "Tensor", "reshape",
902  {{"dst_shape",
903  "Compatible destination shape with the "
904  "same number of elements."}});
905  tensor.def("contiguous", &Tensor::Contiguous,
906  "Returns a contiguous tensor containing the same data in the "
907  "same device. If the tensor is already contiguous, the same "
908  "underlying memory will be used.");
909  tensor.def("is_contiguous", &Tensor::IsContiguous,
910  "Returns True if the underlying memory buffer is contiguous.");
911  tensor.def(
912  "flatten", &Tensor::Flatten,
913  R"(Flattens input by reshaping it into a one-dimensional tensor. If
914 start_dim or end_dim are passed, only dimensions starting with start_dim
915 and ending with end_dim are flattened. The order of elements in input is
916 unchanged.
917 
918 Unlike NumPy’s flatten, which always copies input’s data, this function
919 may return the original object, a view, or copy. If no dimensions are
920 flattened, then the original object input is returned. Otherwise, if
921 input can be viewed as the flattened shape, then that view is returned.
922 Finally, only if the input cannot be viewed as the flattened shape is
923 input’s data copied.
924 
925 Ref:
926 - https://pytorch.org/docs/stable/tensors.html
927 - aten/src/ATen/native/TensorShape.cpp
928 - aten/src/ATen/TensorUtils.cpp)",
929  "start_dim"_a = 0, "end_dim"_a = -1);
931  m, "Tensor", "flatten",
932  {{"start_dim", "The first dimension to flatten (inclusive)."},
933  {"end_dim",
934  "The last dimension to flatten, starting from start_dim "
935  "(inclusive)."}});
936 
937  // See "emulating numeric types" section for Python built-in numeric ops.
938  // https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types
939  //
940  // BinaryEW: add.
945  BIND_BINARY_R_OP_ALL_DTYPES(__radd__, Add);
946 
947  // BinaryEW: sub.
952  BIND_BINARY_R_OP_ALL_DTYPES(__rsub__, Sub);
953 
954  // BinaryEW: mul.
959  BIND_BINARY_R_OP_ALL_DTYPES(__rmul__, Mul);
960 
961  // BinaryEW: div.
966  BIND_BINARY_R_OP_ALL_DTYPES(__rdiv__, Div);
969  BIND_BINARY_R_OP_ALL_DTYPES(__rtruediv__, Div);
970  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(__floordiv__, Div,
971  CONST_ARG); // truediv only.
973  BIND_BINARY_R_OP_ALL_DTYPES(__rfloordiv__, Div);
974 
975  // BinaryEW: and.
976  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(logical_and, LogicalAnd, CONST_ARG);
977  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(logical_and_, LogicalAnd_,
978  NON_CONST_ARG);
979  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(__and__, LogicalAnd, CONST_ARG);
981  BIND_BINARY_R_OP_ALL_DTYPES(__rand__, LogicalAnd);
982 
983  // BinaryEW: or.
984  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(logical_or, LogicalOr, CONST_ARG);
985  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(logical_or_, LogicalOr_,
986  NON_CONST_ARG);
989  BIND_BINARY_R_OP_ALL_DTYPES(__ror__, LogicalOr);
990 
991  // BinaryEW: xor.
992  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(logical_xor, LogicalXor, CONST_ARG);
993  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(logical_xor_, LogicalXor_,
994  NON_CONST_ARG);
995  BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(__xor__, LogicalXor, CONST_ARG);
997  BIND_BINARY_R_OP_ALL_DTYPES(__rxor__, LogicalXor);
998 
999  // BinaryEW: comparison ops.
1018 
1019  // Getters and setters as properties.
1020  tensor.def_property_readonly(
1021  "shape", [](const Tensor& tensor) { return tensor.GetShape(); });
1022  tensor.def_property_readonly("strides", [](const Tensor& tensor) {
1023  return tensor.GetStrides();
1024  });
1025  tensor.def_property_readonly("dtype", &Tensor::GetDtype);
1026  tensor.def_property_readonly("blob", &Tensor::GetBlob);
1027  tensor.def_property_readonly("ndim", &Tensor::NumDims);
1028  tensor.def("num_elements", &Tensor::NumElements);
1029  tensor.def("__bool__", &Tensor::IsNonZero); // Python 3.X.
1030 
1031  tensor.def_property_readonly("device", &Tensor::GetDevice);
1032  tensor.def_property_readonly("is_cpu", &Tensor::IsCPU);
1033  tensor.def_property_readonly("is_cuda", &Tensor::IsCUDA);
1034 
1035  // Length and iterator.
1036  tensor.def("__len__", &Tensor::GetLength);
1037  tensor.def(
1038  "__iter__",
1039  [](Tensor& tensor) {
1040  return py::make_iterator(tensor.begin(), tensor.end());
1041  },
1042  py::keep_alive<0, 1>()); // Keep object alive while iterator exists
1043 
1044  // Unary element-wise ops.
1045  tensor.def("sqrt", &Tensor::Sqrt);
1046  tensor.def("sqrt_", &Tensor::Sqrt_);
1047  tensor.def("sin", &Tensor::Sin);
1048  tensor.def("sin_", &Tensor::Sin_);
1049  tensor.def("cos", &Tensor::Cos);
1050  tensor.def("cos_", &Tensor::Cos_);
1051  tensor.def("neg", &Tensor::Neg);
1052  tensor.def("neg_", &Tensor::Neg_);
1053  tensor.def("__neg__", &Tensor::Neg);
1054  tensor.def("exp", &Tensor::Exp);
1055  tensor.def("exp_", &Tensor::Exp_);
1056  tensor.def("abs", &Tensor::Abs);
1057  tensor.def("abs_", &Tensor::Abs_);
1058  tensor.def("isnan", &Tensor::IsNan);
1059  tensor.def("isinf", &Tensor::IsInf);
1060  tensor.def("isfinite", &Tensor::IsFinite);
1061  tensor.def("floor", &Tensor::Floor);
1062  tensor.def("ceil", &Tensor::Ceil);
1063  tensor.def("round", &Tensor::Round);
1064  tensor.def("trunc", &Tensor::Trunc);
1065  tensor.def("logical_not", &Tensor::LogicalNot);
1066  tensor.def("logical_not_", &Tensor::LogicalNot_);
1067 
1068  BIND_CLIP_SCALAR(clip, Clip, CONST_ARG);
1069  BIND_CLIP_SCALAR(clip_, Clip_, NON_CONST_ARG);
1070 
1071  // Boolean.
1072  tensor.def(
1073  "nonzero",
1074  [](const Tensor& tensor, bool as_tuple) -> py::object {
1075  if (as_tuple) {
1076  return py::cast(tensor.NonZero());
1077  } else {
1078  return py::cast(tensor.NonZeroNumpy());
1079  }
1080  },
1081  "Find the indices of the elements that are non-zero.",
1082  "as_tuple"_a = false);
1084  m, "Tensor", "nonzero",
1085  {{"as_tuple",
1086  "If ``as_tuple`` is True, returns an int64 tensor of shape "
1087  "{num_dims, num_non_zeros}, where the i-th row contains the "
1088  "indices of the non-zero elements in i-th dimension of the "
1089  "original tensor. If ``as_tuple`` is False, Returns a vector of "
1090  "int64 Tensors, each containing the indices of the non-zero "
1091  "elements in each dimension."}});
1092  tensor.def("all", &Tensor::All, py::call_guard<py::gil_scoped_release>(),
1093  py::arg("dim") = py::none(), py::arg("keepdim") = false,
1094  "Returns true if all elements in the tensor are true. Only "
1095  "works "
1096  "for boolean tensors.");
1097  tensor.def("any", &Tensor::Any, py::call_guard<py::gil_scoped_release>(),
1098  py::arg("dim") = py::none(), py::arg("keepdim") = false,
1099  "Returns true if any elements in the tensor are true. Only "
1100  "works "
1101  "for boolean tensors.");
1102 
1103  // Reduction ops.
1104  BIND_REDUCTION_OP(sum, Sum);
1105  BIND_REDUCTION_OP(mean, Mean);
1106  BIND_REDUCTION_OP(prod, Prod);
1107  BIND_REDUCTION_OP(min, Min);
1108  BIND_REDUCTION_OP(max, Max);
1109  BIND_REDUCTION_OP_NO_KEEPDIM(argmin, ArgMin);
1110  BIND_REDUCTION_OP_NO_KEEPDIM(argmax, ArgMax);
1111 
1112  // Comparison.
1113  tensor.def(
1114  "allclose", &Tensor::AllClose, "other"_a, "rtol"_a = 1e-5,
1115  "atol"_a = 1e-8,
1116  R"(Returns true if the two tensors are element-wise equal within a tolerance.
1117 
1118 - If the ``device`` is not the same: throws exception.
1119 - If the ``dtype`` is not the same: throws exception.
1120 - If the ``shape`` is not the same: returns false.
1121 - Returns true if: ``abs(self - other) <= (atol + rtol * abs(other)``).
1122 
1123 The equation is not symmetrical, i.e. ``a.allclose(b)`` might not be the same
1124 as ``b.allclose(a)``. Also see `Numpy's documentation <https://numpy.org/doc/stable/reference/generated/numpy.allclose.html>`__.
1125 
1126 TODO:
1127  Support nan.)");
1129  m, "Tensor", "allclose",
1130  {{"other", "The other tensor to compare with."},
1131  {"rtol", "Relative tolerance."},
1132  {"atol", "Absolute tolerance."}});
1133  tensor.def("isclose", &Tensor::IsClose, "other"_a, "rtol"_a = 1e-5,
1134  "atol"_a = 1e-8,
1135  R"(Element-wise version of ``tensor.allclose``.
1136 
1137 - If the ``device`` is not the same: throws exception.
1138 - If the ``dtype`` is not the same: throws exception.
1139 - If the ``shape`` is not the same: throws exception.
1140 - For each element in the returned tensor:
1141  ``abs(self - other) <= (atol + rtol * abs(other))``.
1142 
1143 The equation is not symmetrical, i.e. a.is_close(b) might not be the same
1144 as b.is_close(a). Also see `Numpy's documentation <https://numpy.org/doc/stable/reference/generated/numpy.isclose.html>`__.
1145 
1146 TODO:
1147  Support nan.
1148 
1149 Returns:
1150  A boolean tensor indicating where the tensor is close.)");
1152  m, "Tensor", "isclose",
1153  {{"other", "The other tensor to compare with."},
1154  {"rtol", "Relative tolerance."},
1155  {"atol", "Absolute tolerance."}});
1156 
1157  tensor.def("issame", &Tensor::IsSame,
1158  "Returns true iff the tensor is the other tensor. This "
1159  "means that, the two tensors have the same underlying "
1160  "memory, device, dtype, shape, strides and etc.");
1161  // Print tensor.
1162  tensor.def("__repr__",
1163  [](const Tensor& tensor) { return tensor.ToString(); });
1164  tensor.def("__str__",
1165  [](const Tensor& tensor) { return tensor.ToString(); });
1166 
1167  // Get item from Tensor of one element.
1168  tensor.def(
1169  "item",
1170  [](const Tensor& tensor) -> py::object {
1171  Dtype dtype = tensor.GetDtype();
1172  if (dtype == core::Float32)
1173  return py::float_(tensor.Item<float>());
1174  if (dtype == core::Float64)
1175  return py::float_(tensor.Item<double>());
1176  if (dtype == core::Int8) return py::int_(tensor.Item<int8_t>());
1177  if (dtype == core::Int16)
1178  return py::int_(tensor.Item<int16_t>());
1179  if (dtype == core::Int32)
1180  return py::int_(tensor.Item<int32_t>());
1181  if (dtype == core::Int64)
1182  return py::int_(tensor.Item<int64_t>());
1183  if (dtype == core::UInt8)
1184  return py::int_(tensor.Item<uint8_t>());
1185  if (dtype == core::UInt16)
1186  return py::int_(tensor.Item<uint16_t>());
1187  if (dtype == core::UInt32)
1188  return py::int_(tensor.Item<uint32_t>());
1189  if (dtype == core::UInt64)
1190  return py::int_(tensor.Item<uint64_t>());
1191  if (dtype == core::Bool) return py::bool_(tensor.Item<bool>());
1193  "Tensor.item(): unsupported dtype to convert to "
1194  "python.");
1195  return py::none();
1196  },
1197  "Helper function to return the scalar value of a scalar tensor. "
1198  "The tensor must be 0 - dimensional (i.e. have an empty shape).");
1199 }
1200 
1201 } // namespace core
1202 } // namespace cloudViewer
Common CUDA utilities.
DLDeviceType
The device type in DLDevice.
Definition: DLPack.h:92
@ kDLVulkan
Vulkan buffer for next generation graphics.
Definition: DLPack.h:105
@ kDLMetal
Metal for Apple GPU.
Definition: DLPack.h:107
@ kDLCUDAManaged
CUDA managed/unified memory allocated by cudaMallocManaged.
Definition: DLPack.h:125
@ kDLCUDA
CUDA GPU device.
Definition: DLPack.h:97
@ kDLCUDAHost
Pinned CUDA CPU memory by cudaMallocHost.
Definition: DLPack.h:101
@ kDLOneAPI
Unified shared memory allocated on a oneAPI non-partitioned device. A call to the oneAPI runtime is requi...
Definition: DLPack.h:132
@ kDLOpenCL
OpenCL devices.
Definition: DLPack.h:103
@ kDLCPU
CPU device.
Definition: DLPack.h:95
@ kDLROCM
ROCm GPUs for AMD GPUs.
Definition: DLPack.h:111
@ kDLVPI
Verilog simulator buffer.
Definition: DLPack.h:109
#define DLPACK_MAJOR_VERSION
The current major version of dlpack.
Definition: DLPack.h:37
void Add(const double in1[2], const double in2[2], double out[2])
Definition: Factor.cpp:130
filament::Texture::InternalFormat format
bool copy
Definition: VtkUtils.cpp:74
DeviceType GetType() const
Returns type of the device, e.g. DeviceType::CPU, DeviceType::CUDA.
Definition: Device.h:58
std::string ToString() const
Returns string representation of device, e.g. "CPU:0", "CUDA:0".
Definition: Device.cpp:89
int GetID() const
Returns the device index (within the same device type).
Definition: Device.h:61
bool IsAvailable() const
Returns true if the device is available.
Definition: Device.cpp:108
bool IsCUDA() const
Definition: Device.h:99
bool IsCPU() const
Definition: Device.h:95
bool AllClose(const Tensor &other, double rtol=1e-5, double atol=1e-8) const
Definition: Tensor.cpp:1895
double Det() const
Compute the determinant of a 2D square tensor.
Definition: Tensor.cpp:1092
Tensor Abs_()
Element-wise absolute value of a tensor, in-place.
Definition: Tensor.cpp:1357
Tensor Solve(const Tensor &rhs) const
Definition: Tensor.cpp:1928
Tensor Sqrt() const
Element-wise square root of a tensor, returns a new tensor.
Definition: Tensor.cpp:1296
Tensor Contiguous() const
Definition: Tensor.cpp:772
Tensor Matmul(const Tensor &rhs) const
Definition: Tensor.cpp:1919
int64_t NumDims() const
Definition: Tensor.h:1172
std::tuple< Tensor, Tensor > Triul(const int diagonal=0) const
Returns the tuple of upper and lower triangular matrix of the 2D tensor, above and below the given di...
Definition: Tensor.cpp:1976
bool IsContiguous() const
Definition: Tensor.h:1036
Tensor Neg() const
Element-wise negation of a tensor, returning a new tensor.
Definition: Tensor.cpp:1329
Tensor Triu(const int diagonal=0) const
Returns the upper triangular matrix of the 2D tensor, above the given diagonal index....
Definition: Tensor.cpp:1964
void Save(const std::string &file_name) const
Save tensor to numpy's npy format.
Definition: Tensor.cpp:1877
static Tensor Arange(const Scalar start, const Scalar stop, const Scalar step=1, const Dtype dtype=core::Int64, const Device &device=core::Device("CPU:0"))
Create a 1D tensor with evenly spaced values in the given interval.
Definition: Tensor.cpp:436
Tensor Neg_()
Element-wise negation of a tensor, in-place.
Definition: Tensor.cpp:1335
int64_t GetLength() const
Definition: Tensor.h:1125
Tensor Round() const
Element-wise round value of a tensor, returning a new tensor.
Definition: Tensor.cpp:1423
Dtype GetDtype() const
Definition: Tensor.h:1164
Tensor Trunc() const
Element-wise trunc value of a tensor, returning a new tensor.
Definition: Tensor.cpp:1429
std::tuple< Tensor, Tensor > LUIpiv() const
Computes LU factorisation of the 2D square tensor, using A = P * L * U; where P is the permutation ma...
Definition: Tensor.cpp:1956
Tensor Any(const utility::optional< SizeVector > &dims=utility::nullopt, bool keepdim=false) const
Definition: Tensor.cpp:1789
Tensor LogicalNot() const
Definition: Tensor.cpp:1442
Tensor Tril(const int diagonal=0) const
Returns the lower triangular matrix of the 2D tensor, below the given diagonal index....
Definition: Tensor.cpp:1970
static Tensor FromDLPack(const DLManagedTensor *dlmt, std::function< void(void *)> deleter=nullptr)
Convert DLManagedTensor to Tensor (DLPack v0.x).
Definition: Tensor.cpp:1868
Tensor Exp() const
Element-wise exponential of a tensor, returning a new tensor.
Definition: Tensor.cpp:1340
Tensor Cos() const
Element-wise cosine of a tensor, returning a new tensor.
Definition: Tensor.cpp:1318
Tensor Inverse() const
Definition: Tensor.cpp:1982
static Tensor Load(const std::string &file_name)
Load tensor from numpy's npy format.
Definition: Tensor.cpp:1881
bool IsNonZero() const
Definition: Tensor.cpp:1757
static Tensor Eye(int64_t n, Dtype dtype, const Device &device)
Create an identity matrix of size n x n.
Definition: Tensor.cpp:418
Tensor IsFinite() const
Definition: Tensor.cpp:1382
Tensor Abs() const
Element-wise absolute value of a tensor, returning a new tensor.
Definition: Tensor.cpp:1351
Tensor Flatten(int64_t start_dim=0, int64_t end_dim=-1) const
Definition: Tensor.cpp:685
Tensor Sin() const
Element-wise sine of a tensor, returning a new tensor.
Definition: Tensor.cpp:1307
int64_t NumElements() const
Definition: Tensor.h:1170
Tensor All(const utility::optional< SizeVector > &dims=utility::nullopt, bool keepdim=false) const
Definition: Tensor.cpp:1770
static Tensor Zeros(const SizeVector &shape, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor fill with zeros.
Definition: Tensor.cpp:406
Tensor Ceil() const
Element-wise ceil value of a tensor, returning a new tensor.
Definition: Tensor.cpp:1417
Tensor Sqrt_()
Element-wise square root of a tensor, in-place.
Definition: Tensor.cpp:1302
static Tensor Diag(const Tensor &input)
Create a square matrix with specified diagonal elements in input.
Definition: Tensor.cpp:424
Tensor IsClose(const Tensor &other, double rtol=1e-5, double atol=1e-8) const
Definition: Tensor.cpp:1900
static Tensor FromDLPackVersioned(const DLManagedTensorVersioned *dlmt, std::function< void(void *)> deleter=nullptr)
Convert DLManagedTensorVersioned to Tensor (DLPack v1.x).
Definition: Tensor.cpp:1872
Device GetDevice() const override
Definition: Tensor.cpp:1435
Tensor LeastSquares(const Tensor &rhs) const
Definition: Tensor.cpp:1938
Tensor Reshape(const SizeVector &dst_shape) const
Definition: Tensor.cpp:671
Tensor Clone() const
Copy Tensor to the same device.
Definition: Tensor.h:502
static Tensor Empty(const SizeVector &shape, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor with uninitialized values.
Definition: Tensor.cpp:400
static Tensor Ones(const SizeVector &shape, Dtype dtype, const Device &device=Device("CPU:0"))
Create a tensor fill with ones.
Definition: Tensor.cpp:412
Tensor IsInf() const
Definition: Tensor.cpp:1372
Tensor IsNan() const
Definition: Tensor.cpp:1362
bool IsSame(const Tensor &other) const
Definition: Tensor.cpp:1912
Tensor Cos_()
Element-wise cosine of a tensor, in-place.
Definition: Tensor.cpp:1324
std::tuple< Tensor, Tensor, Tensor > SVD() const
Definition: Tensor.cpp:1990
Tensor Sin_()
Element-wise sine of a tensor, in-place.
Definition: Tensor.cpp:1313
Tensor T() const
Transposes a tensor (expected to be <= 2-D) by swapping dimension 0 and 1.
Definition: Tensor.cpp:1079
Tensor To(Dtype dtype, bool copy=false) const
Definition: Tensor.cpp:739
Tensor Floor() const
Element-wise floor value of a tensor, returning a new tensor.
Definition: Tensor.cpp:1411
std::shared_ptr< Blob > GetBlob() const
Definition: Tensor.h:1168
Tensor Exp_()
Element-wise base-e exponential of a tensor, in-place.
Definition: Tensor.cpp:1346
constexpr bool has_value() const noexcept
Definition: Optional.h:440
constexpr T const & value() const &
Definition: Optional.h:465
#define LogWarning(...)
Definition: Logging.h:72
#define LogError(...)
Definition: Logging.h:60
int min(int a, int b)
Definition: cutil_math.h:53
int max(int a, int b)
Definition: cutil_math.h:48
static std::vector< T > ToFlatVector(py::array_t< T, py::array::c_style|py::array::forcecast > np_array)
Definition: tensor.cpp:212
static void BindTensorCreation(py::module &m, py::class_< Tensor > &tensor, const std::string &py_name, func_t cpp_func)
Definition: tensor.cpp:220
Tensor PyListToTensor(const py::list &list, utility::optional< Dtype > dtype, utility::optional< Device > device)
const Dtype Int8
Definition: Dtype.cpp:44
const Dtype Bool
Definition: Dtype.cpp:52
Tensor BoolToTensor(bool scalar_value, utility::optional< Dtype > dtype, utility::optional< Device > device)
const Dtype Int64
Definition: Dtype.cpp:47
const Dtype UInt64
Definition: Dtype.cpp:51
const Dtype UInt32
Definition: Dtype.cpp:50
void pybind_core_tensor_accessor(py::class_< Tensor > &t)
const Dtype UInt8
Definition: Dtype.cpp:48
CLOUDVIEWER_HOST_DEVICE Pair< First, Second > make_pair(const First &_first, const Second &_second)
Definition: SlabTraits.h:49
const Dtype Int16
Definition: Dtype.cpp:45
const std::unordered_map< std::string, std::string > argument_docs
Definition: hashmap.cpp:22
py::array TensorToPyArray(const Tensor &tensor)
Convert Tensor class to py::array (Numpy array).
Tensor DoubleToTensor(double scalar_value, utility::optional< Dtype > dtype, utility::optional< Device > device)
Tensor IntToTensor(int64_t scalar_value, utility::optional< Dtype > dtype, utility::optional< Device > device)
void pybind_core_tensor(py::module &m)
Definition: tensor.cpp:296
const Dtype Float64
Definition: Dtype.cpp:43
const Dtype UInt16
Definition: Dtype.cpp:49
Tensor PyArrayToTensor(py::array array, bool inplace)
const Dtype Int32
Definition: Dtype.cpp:46
static void BindTensorFullCreation(py::module &m, py::class_< Tensor > &tensor)
Definition: tensor.cpp:240
Tensor PyTupleToTensor(const py::tuple &tuple, utility::optional< Dtype > dtype, utility::optional< Device > device)
const Dtype Float32
Definition: Dtype.cpp:42
void ClassMethodDocInject(py::module &pybind_module, const std::string &class_name, const std::string &function_name, const std::unordered_map< std::string, std::string > &map_parameter_body_docs)
Definition: docstring.cpp:27
constexpr nullopt_t nullopt
Definition: Optional.h:136
Generic file read and write utility for python interface.
A versioned and managed C Tensor object, manage memory of DLTensor.
Definition: DLPack.h:366
DLPackVersion version
The API and ABI version of the current managed Tensor.
Definition: DLPack.h:370
void(* deleter)(struct DLManagedTensorVersioned *self)
Destructor.
Definition: DLPack.h:386
C Tensor object, manage memory of DLTensor. This data structure is intended to facilitate the borrowi...
Definition: DLPack.h:319
void(* deleter)(struct DLManagedTensor *self)
Destructor - this should be called to destruct the manager_ctx which backs the DLManagedTensor....
Definition: DLPack.h:332
uint32_t major
DLPack major version.
Definition: DLPack.h:81
#define BIND_BINARY_R_OP_ALL_DTYPES(py_name, cpp_name)
Definition: tensor.cpp:116
#define CONST_ARG
Definition: tensor.cpp:30
#define BIND_CLIP_SCALAR(py_name, cpp_name, self_const)
Definition: tensor.cpp:71
#define BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(py_name, cpp_name, self_const)
Definition: tensor.cpp:33
#define NON_CONST_ARG
Definition: tensor.cpp:31
#define BIND_REDUCTION_OP(py_name, cpp_name)
Definition: tensor.cpp:162
#define BIND_REDUCTION_OP_NO_KEEPDIM(py_name, cpp_name)
Definition: tensor.cpp:181