#define CONST_ARG const
#define BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(py_name, cpp_name, self_const) \
    tensor.def(#py_name, [](self_const Tensor& self, const Tensor& other) { \
        return self.cpp_name(other); \
    }); \
    tensor.def(#py_name, [](Tensor& self, float value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, double value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, int8_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, int16_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, int32_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, int64_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, uint8_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, uint16_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, uint32_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, uint64_t value) { \
        return self.cpp_name(Scalar(value)); \
    }); \
    tensor.def(#py_name, [](Tensor& self, bool value) { \
        return self.cpp_name(Scalar(value)); \
    });
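// The macro above registers one Python overload per supported C++ scalar
// type, so a call such as `t.add(2)` or `t.add(2.5)` from Python dispatches
// to the matching Scalar overload. A minimal sketch of how it might be
// invoked (the op names below are illustrative, not taken from this excerpt):
//
//     BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(add, Add, CONST_ARG)
//     BIND_BINARY_OP_ALL_DTYPES_WITH_SCALAR(add_, Add_, )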
#define BIND_CLIP_SCALAR(py_name, cpp_name, self_const) \
    tensor.def(#py_name, \
               [](self_const Tensor& self, float min_v, float max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, double min_v, double max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, int8_t min_v, int8_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, int16_t min_v, int16_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, int32_t min_v, int32_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, int64_t min_v, int64_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, uint8_t min_v, uint8_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, uint16_t min_v, uint16_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, uint32_t min_v, uint32_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, \
               [](self_const Tensor& self, uint64_t min_v, uint64_t max_v) { \
                   return self.cpp_name(min_v, max_v); \
               }); \
    tensor.def(#py_name, [](self_const Tensor& self, bool min_v, bool max_v) { \
        return self.cpp_name(min_v, max_v); \
    });
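// Like the binary-op macro, BIND_CLIP_SCALAR registers one (min_v, max_v)
// overload per scalar type. A hedged example invocation (op name is
// illustrative only):
//
//     BIND_CLIP_SCALAR(clip, Clip, CONST_ARG)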
#define BIND_BINARY_R_OP_ALL_DTYPES(py_name, cpp_name) \
    tensor.def(#py_name, [](const Tensor& self, float value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, double value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, int8_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, int16_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, int32_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, int64_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, uint8_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, uint16_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, uint32_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, uint64_t value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    }); \
    tensor.def(#py_name, [](const Tensor& self, bool value) { \
        return Tensor::Full({}, value, self.GetDtype(), self.GetDevice()) \
                .cpp_name(self); \
    });
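// Reverse ("r") operators promote the Python scalar to a 0-dimensional
// tensor via Tensor::Full({}, value, ...) on the same dtype/device as
// `self`, then apply the op with `self` on the right-hand side. A hedged
// sketch of a hypothetical invocation for Python's reflected addition:
//
//     BIND_BINARY_R_OP_ALL_DTYPES(__radd__, Add)   // 2 + t  ->  Full(2).Add(t)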
#define BIND_REDUCTION_OP(py_name, cpp_name) \
    tensor.def(#py_name, \
            [](const Tensor& tensor, const utility::optional<SizeVector>& dim, \
               bool keepdim) { \
                SizeVector reduction_dims; \
                if (dim.has_value()) { \
                    reduction_dims = dim.value(); \
                } else { \
                    for (int64_t i = 0; i < tensor.NumDims(); i++) { \
                        reduction_dims.push_back(i); \
                    } \
                } \
                return tensor.cpp_name(reduction_dims, keepdim); \
            }, \
            "dim"_a = py::none(), "keepdim"_a = false);
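// When `dim` is None (py::none()), the lambda above collects every
// dimension of the tensor into reduction_dims, i.e. a full reduction.
// Hypothetical invocations (names illustrative, not from this excerpt):
//
//     BIND_REDUCTION_OP(sum, Sum)
//     BIND_REDUCTION_OP(prod, Prod)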
#define BIND_REDUCTION_OP_NO_KEEPDIM(py_name, cpp_name) \
    tensor.def(#py_name, \
            [](const Tensor& tensor, \
               const utility::optional<SizeVector>& dim) { \
                SizeVector reduction_dims; \
                if (dim.has_value()) { \
                    reduction_dims = dim.value(); \
                } else { \
                    for (int64_t i = 0; i < tensor.NumDims(); i++) { \
                        reduction_dims.push_back(i); \
                    } \
                } \
                return tensor.cpp_name(reduction_dims); \
            }, \
            "dim"_a = py::none());
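// The NO_KEEPDIM variant is identical except that the bound C++ method takes
// no keepdim flag; index-returning reductions such as argmin/argmax would
// plausibly be bound through it (illustrative, not from this excerpt):
//
//     BIND_REDUCTION_OP_NO_KEEPDIM(argmin, ArgMin)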
const std::unordered_map<std::string, std::string> argument_docs = {
        {"dtype", "Data type for the Tensor."},
        {"device", "Compute device to store and operate on the Tensor."},
        {"shape", "List of Tensor dimensions."},
        {"fill_value", "Scalar value to initialize all elements with."},
        {"scalar_value", "Initial value for the single element tensor."},
        {"copy",
         "If true, a new tensor is always created; if false, the copy is "
         "avoided when the original tensor already has the targeted dtype."}};
template <typename T>
static std::vector<T> ToFlatVector(
        py::array_t<T, py::array::c_style | py::array::forcecast> np_array) {
    py::buffer_info info = np_array.request();
    T* start = static_cast<T*>(info.ptr);
    return std::vector<T>(start, start + info.size);
}
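// ToFlatVector copies the (possibly casted, C-contiguous) numpy buffer into
// a std::vector. Hedged usage sketch:
//
//     std::vector<float> values = ToFlatVector<float>(np_array);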
template <typename func_t>
static void BindTensorCreation(py::module& m,
                               py::class_<Tensor>& tensor,
                               const std::string& py_name,
                               func_t cpp_func) {
                        dtype.has_value() ? dtype.value() : core::Float32,
                        device.has_value() ? device.value() : Device("CPU:0"));
            },
            "Create Tensor with a given shape.", "shape"_a,
            "dtype"_a = py::none(), "device"_a = py::none());
template <typename T>
static void BindTensorFullCreation(py::module& m, py::class_<Tensor>& tensor) {
                return Tensor::Full<T>(
            "shape"_a, "fill_value"_a, "dtype"_a = py::none(),
            "device"_a = py::none());
Tensor maybeCopyTensor(const Tensor& data,
    bool force_copy = copy.has_value() && copy.value();
    bool force_move = copy.has_value() && !copy.value();
    if (optional_dl_device.has_value()) {
        switch (optional_dl_device.value().first) {
                to_device = Device("CPU:0");
                        "CUDA:{}", optional_dl_device.value().second));
                        "SYCL:{}", optional_dl_device.value().second));
                        int(optional_dl_device.value().first));
    if (to_device != data.GetDevice()) {
                    " [DLPack] Cannot move (i.e. copy=False) tensor from "
                    "{} to {} without copying.",
                    data.GetDevice().ToString(), to_device.ToString());
        return data.To(to_device);
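// maybeCopyTensor implements the DLPack `copy` tri-state: copy == None means
// "copy only if required", copy == True forces a copy, and copy == False
// forbids one, so requesting a different dl_device with copy=False triggers
// the "[DLPack] Cannot move ... without copying" error above. (Summary of
// the fragment above; the elided branches are not reproduced here.)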
    py::class_<Tensor> tensor(
            "A Tensor is a view of a data Blob with shape, stride, data_ptr.");
    m.attr("capsule") = py::module_::import("typing").attr("Any");
    py::native_enum<DLDeviceType>(m, "DLDeviceType", "enum.Enum")
    tensor.def(py::init([](const py::array& np_array,
               "Initialize Tensor from a Numpy array.", "np_array"_a,
               "dtype"_a = py::none(), "device"_a = py::none());
               "scalar_value"_a, "dtype"_a = py::none(),
               "device"_a = py::none());
               "scalar_value"_a, "dtype"_a = py::none(),
               "device"_a = py::none());
               "scalar_value"_a, "dtype"_a = py::none(),
               "device"_a = py::none());
               "Initialize Tensor from a nested list.", "shape"_a,
               "dtype"_a = py::none(), "device"_a = py::none());
               "Initialize Tensor from a nested tuple.", "shape"_a,
               "dtype"_a = py::none(), "device"_a = py::none());
    BindTensorFullCreation<float>(m, tensor);
    BindTensorFullCreation<double>(m, tensor);
    BindTensorFullCreation<int8_t>(m, tensor);
    BindTensorFullCreation<int16_t>(m, tensor);
    BindTensorFullCreation<int32_t>(m, tensor);
    BindTensorFullCreation<int64_t>(m, tensor);
    BindTensorFullCreation<uint8_t>(m, tensor);
    BindTensorFullCreation<uint16_t>(m, tensor);
    BindTensorFullCreation<uint32_t>(m, tensor);
    BindTensorFullCreation<uint64_t>(m, tensor);
    BindTensorFullCreation<bool>(m, tensor);
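    // Each instantiation above registers a typed overload of the full-style
    // creation binding, so pybind11 can pick Tensor::Full<T> based on the
    // Python scalar passed as fill_value (float, int, or bool).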
    tensor.def(py::pickle(
                    "Cannot unpickle Tensor! Expecting a tuple of size "
                    "Device {} is not available, tensor will be "
               "Create an identity matrix of size n x n.", "n"_a,
               "dtype"_a = py::none(), "device"_a = py::none());
            "Create a 1D tensor with evenly spaced values in the given "
            "stop"_a, py::pos_only(), py::kw_only(), "dtype"_a = py::none(),
            "device"_a = py::none());
            "Create a 1D tensor with evenly spaced values in the given "
            "start"_a, "stop"_a, "step"_a = py::none(), "dtype"_a = py::none(),
            py::kw_only(), "device"_a = py::none());
            "Create a 1D tensor with evenly spaced values in the given "
            "stop"_a, py::pos_only(), py::kw_only(), "dtype"_a = py::none(),
            "device"_a = py::none());
            "Create a 1D tensor with evenly spaced values in the given "
            "start"_a, "stop"_a, "step"_a = py::none(), "dtype"_a = py::none(),
            py::kw_only(), "device"_a = py::none());
                    return tensor.Append(values, axis);
                    return tensor.Append(values);
            R"(Appends the `values` tensor, along the given axis and returns
a copy of the original tensor. Both the tensors must have the same data-type,
device, and number of dimensions. All dimensions must be the same, except the
dimension along the axis the tensors are to be appended.

This is similar to NumPy's semantics:
- https://numpy.org/doc/stable/reference/generated/numpy.append.html

    A copy of the tensor with `values` appended to axis. Note that append
    does not occur in-place: a new array is allocated and filled. If axis
    is None, out is a flattened tensor.

    >>> a = o3d.core.Tensor([[0, 1], [2, 3]])
    >>> b = o3d.core.Tensor([[4, 5]])
    >>> a.append(b, axis = 0)
    Tensor[shape={3, 2}, stride={2, 1}, Int64, CPU:0, 0x55555abc6b00]
    Tensor[shape={6}, stride={1}, Int64, CPU:0, 0x55555abc6b70])",
            "values"_a, "axis"_a = py::none());
            [](const Tensor& tensor) {
            "Transfer the tensor to CPU. If the tensor "
            "is already on CPU, no copy will be performed.");
            [](const Tensor& tensor, int device_id) {
            "Transfer the tensor to a CUDA device. If the tensor is already "
            "on the specified CUDA device, no copy will be performed.",
    tensor.def_static("from_numpy", [](py::array np_array) {
    auto to_dlpack = [](const Tensor& tensor,
                (max_version.has_value() && max_version.value().first > 0);
        auto out_tensor = maybeCopyTensor(tensor, dl_device, copy);
            auto capsule_destructor = [](PyObject* data) {
                if (dl_managed_tensor != nullptr &&
                    dl_managed_tensor->deleter != nullptr) {
            return py::capsule(dlmt, "dltensor", capsule_destructor);
            auto capsule_destructor = [](PyObject* data) {
                if (dl_managed_tensor != nullptr &&
                    dl_managed_tensor->deleter != nullptr) {
            return py::capsule(dlmt, "dltensor", capsule_destructor);
            "__dlpack__", to_dlpack, "stream"_a = py::none(),
            "max_version"_a = py::none(), "dl_device"_a = py::none(),
            "copy"_a = py::none(),
            R"(Returns an opaque object (a "DLPack capsule") representing the tensor.

``to_dlpack`` is a legacy DLPack interface. The capsule it returns
cannot be used for anything in Python other than as input to
``from_dlpack``. The more idiomatic use of DLPack is to call
``from_dlpack`` directly on the tensor object - this works when that
object has a ``__dlpack__`` method, which PyTorch and most other
libraries indeed have now.

Only call ``from_dlpack`` once per capsule produced with ``to_dlpack``.
Behavior when a capsule is consumed multiple times is undefined.

    tensor: a tensor to be exported

    The DLPack capsule shares the tensor's memory.)");
    tensor.attr("to_dlpack") = tensor.attr("__dlpack__");

    tensor.def("__dlpack_device__", [](const Tensor& tensor) {
        Device device = tensor.GetDevice();
            case Device::DeviceType::CPU:
                dl_device_type = DLDeviceType::kDLCPU;
            case Device::DeviceType::CUDA:
                dl_device_type = DLDeviceType::kDLCUDA;
            case Device::DeviceType::SYCL:
                dl_device_type = DLDeviceType::kDLOneAPI;
                utility::LogError("ToDLPack: unsupported device type {}",
    auto from_dlpack_capsule = [](py::capsule data) {
        auto invalid_capsule_err =
                "from_dlpack received an invalid capsule. Note that "
                "DLTensor capsules can be consumed only once, so you might "
                "have already constructed a tensor from it once.";
        if (PyCapsule_IsValid(data, "dltensor_versioned")) {
                    data.ptr(), "dltensor_versioned"));
            if (!dl_managed_tensor) {
                        "Received DLPack capsule with major version {} > {} "
                        "(Max supported version).",
            PyCapsule_SetName(data.ptr(), "used_dltensor_versioned");
                    PyCapsule_GetPointer(data.ptr(), "dltensor"));
            if (!dl_managed_tensor) {
            PyCapsule_SetName(data.ptr(), "used_dltensor");
        PyCapsule_SetDestructor(data.ptr(), nullptr);
    tensor.def_static("from_dlpack", from_dlpack_capsule, "dlpack_capsule"_a);
            [from_dlpack_capsule](
                    const py::object& ext_tensor,
                if (!hasattr(ext_tensor, "__dlpack__")) {
                            "from_dlpack: object does not define __dlpack__.");
                py::object capsule_obj = ext_tensor.attr("__dlpack__")();
                auto t = from_dlpack_capsule(
                        py::reinterpret_borrow<py::capsule>(capsule_obj));
            "ext_tensor"_a, "device"_a = py::none(), "copy"_a = py::none(),
            R"(Converts a tensor from an external library into an Open3D ``Tensor``.

The returned Open3D tensor will share the memory with the input tensor
(which may have come from another library). Note that in-place operations
will therefore also affect the data of the input tensor. This may lead to
unexpected issues (e.g., other libraries may have read-only flags or
immutable data structures), so the user should only do this if they know
for sure that this is fine.

    ext_tensor (object with ``__dlpack__`` attribute, or a DLPack capsule):
        The tensor or DLPack capsule to convert.

        If ``ext_tensor`` is a tensor (or ndarray) object, it must support
        the ``__dlpack__`` protocol (i.e., have an ``ext_tensor.__dlpack__``
        method). Otherwise ``ext_tensor`` may be a DLPack capsule, which is
        an opaque ``PyCapsule`` instance, typically produced by a
        ``to_dlpack`` function or method.

    device (open3d.core.device or None): An optional Open3D device
        specifying where to place the new tensor. Only ``None`` is supported
        (same device as ``ext_tensor``).

    copy (bool or None): An optional boolean indicating whether or not to copy
        ``self``. This is not supported yet and no copy is made.
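    // DLPack capsules are single-use: from_dlpack_capsule above renames the
    // consumed capsule to "used_dltensor"/"used_dltensor_versioned" and
    // clears its destructor, so ownership of the DLManagedTensor passes to
    // the new Tensor and the same capsule cannot be imported a second time.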
    tensor.def("save", &Tensor::Save, "Save tensor to Numpy's npy format.",
               "Load tensor from Numpy's npy format.", "file_name"_a);
               "Compute the determinant of a 2D square tensor.");
               R"(Computes LU factorisation of the 2D square tensor, using A = P * L * U;
where P is the permutation matrix, L is the lower-triangular matrix with
diagonal elements as 1.0 and U is the upper-triangular matrix, and returns
a tuple of `output` tensor of shape {n,n} and `ipiv` tensor of shape {n}, where
{n,n} is the shape of the input tensor.

    ipiv: A 1D integer pivot indices tensor. It contains the pivot
        indices, indicating row i of the matrix was interchanged with row
    output: It has L as lower triangular values and U as upper triangle
        values including the main diagonal (diagonal elements of L to be

    >>> ipiv, output = a.lu_ipiv())");
               "Computes matrix multiplication of a"
               " 2D tensor with another tensor of compatible shape.");
               "Computes matrix multiplication of a"
               " 2D tensor with another tensor of compatible shape.");
               "Solves the linear system AX = B with QR decomposition and "
               "returns X. A is a (m, n) matrix with m >= n.",
               "Solves the linear system AX = B with LU decomposition and "
               "returns X. A must be a square matrix.",
               "Computes the matrix inverse of the square matrix self with "
               "LU factorization and returns the result.");
               "Computes the matrix SVD decomposition :math:`A = U S V^T` and "
               "the result. Note :math:`V^T` (V transpose) is returned "
               "instead of :math:`V`.");
               "Returns the upper triangular matrix of the 2D tensor, above "
               "the given diagonal index. The value of diagonal = col - row, "
               "therefore 0 is the main diagonal (row = col), and it shifts "
               "towards right for positive values (for diagonal = 1, col - row "
               "= 1), and towards left for negative values. The value of the "
               "diagonal parameter must be between [-m, n] for a {m,n} shaped "
                 "Value of [col - row], above which the "
                 "elements are to be taken for"
                 " upper triangular matrix."}});
               "Returns the lower triangular matrix of the 2D tensor, below "
               "the given diagonal index. The value of diagonal = col - row, "
               "therefore 0 is the main diagonal (row = col), and it shifts "
               "towards right for positive values (for diagonal = 1, col - row "
               "= 1), and towards left for negative values. The value of the "
               "diagonal parameter must be between [-m, n] where {m, n} is the "
               "shape of input tensor.",
                 "Value of [col - row], below which "
                 "the elements are to be taken "
                 "for lower triangular matrix."}});
               "Returns the tuple of upper and lower triangular matrix of the "
               "tensor, above and below the given diagonal index. The "
               "elements of lower triangular matrix are taken to be unity. The "
               "value of diagonal = col - row, therefore 0 is the main diagonal "
               "(row = col), and it shifts towards right for positive values (for "
               "diagonal = 1, col - row = 1), and towards left for negative "
               "values. The value of the diagonal parameter must be between [-m, "
               "n] where {m, n} is the shape of input tensor.",
    m, "Tensor", "triul",
                 "Value of [col - row], above and below which the elements are to "
                 "be taken for upper (diag. included) and lower triangular "
            [](const Tensor& tensor, bool permute_l) {
                return tensor.LU(permute_l);
            "permute_l"_a = false,
            R"(Computes LU factorisation of the 2D square tensor, using A = P * L * U;
where P is the permutation matrix, L is the lower-triangular matrix with
diagonal elements as 1.0 and U is the upper-triangular matrix, and returns
    m, "Tensor", "lu", {{"permute_l", "If True, returns L as P * L."}});
                return tensor.To(dtype, copy);
            "Returns a tensor with the specified ``dtype``.", "dtype"_a,
                return tensor.To(device, copy);
            "Returns a tensor with the specified ``device``.", "device"_a,
               bool copy) { return tensor.To(device, dtype, copy); },
            "Returns a tensor with the specified ``device`` and ``dtype``."
            "dtype"_a, "copy"_a = false);
    tensor.def("clone", &Tensor::Clone, "Copy Tensor to the same device.");
               "Transpose <=2-D tensor by swapping dimension 0 and 1. "
               "0-D and 1-D Tensors remain the same.");
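    // The three bindings above forward to the corresponding C++ To()
    // overloads (dtype-only, device-only, and device + dtype). A hedged
    // sketch of C++-side usage, with names following the core API but not
    // shown in this excerpt:
    //
    //     t.To(core::Float64);                         // dtype only
    //     t.To(Device("CUDA:0"));                      // device only
    //     t.To(Device("CUDA:0"), core::Float32, true); // device + dtype, force copy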
               R"(Returns a tensor with the same data and number of elements as input, but
with the specified shape. When possible, the returned tensor will be a view of
input. Otherwise, it will be a copy.

Contiguous inputs and inputs with compatible strides can be reshaped
without copying, but you should not depend on the copying vs. viewing
behavior.

- https://pytorch.org/docs/stable/tensors.html
- aten/src/ATen/native/TensorShape.cpp
- aten/src/ATen/TensorUtils.cpp)",
                  "Compatible destination shape with the "
                  "same number of elements."}});
               "Returns a contiguous tensor containing the same data on the "
               "same device. If the tensor is already contiguous, the same "
               "underlying memory will be used.");
               "Returns True if the underlying memory buffer is contiguous.");
            R"(Flattens input by reshaping it into a one-dimensional tensor. If
start_dim or end_dim are passed, only dimensions starting with start_dim
and ending with end_dim are flattened. The order of elements in input is
preserved.

Unlike NumPy's flatten, which always copies input's data, this function
may return the original object, a view, or a copy. If no dimensions are
flattened, then the original object input is returned. Otherwise, if
input can be viewed as the flattened shape, then that view is returned.
Finally, only if the input cannot be viewed as the flattened shape is
input's data copied.

- https://pytorch.org/docs/stable/tensors.html
- aten/src/ATen/native/TensorShape.cpp
- aten/src/ATen/TensorUtils.cpp)",
            "start_dim"_a = 0, "end_dim"_a = -1);
    m, "Tensor", "flatten",
            {{"start_dim", "The first dimension to flatten (inclusive)."},
             "The last dimension to flatten, starting from start_dim "
    tensor.def_property_readonly(
            "shape", [](const Tensor& tensor) { return tensor.GetShape(); });
    tensor.def_property_readonly("strides", [](const Tensor& tensor) {
        return tensor.GetStrides();
        return py::make_iterator(tensor.begin(), tensor.end());
            py::keep_alive<0, 1>());
            [](const Tensor& tensor, bool as_tuple) -> py::object {
                    return py::cast(tensor.NonZero());
                    return py::cast(tensor.NonZeroNumpy());
            "Find the indices of the elements that are non-zero.",
            "as_tuple"_a = false);
    m, "Tensor", "nonzero",
             "If ``as_tuple`` is True, returns an int64 tensor of shape "
             "{num_dims, num_non_zeros}, where the i-th row contains the "
             "indices of the non-zero elements in the i-th dimension of the "
             "original tensor. If ``as_tuple`` is False, returns a vector of "
             "int64 Tensors, each containing the indices of the non-zero "
             "elements in each dimension."}});
    tensor.def("all", &Tensor::All, py::call_guard<py::gil_scoped_release>(),
               py::arg("dim") = py::none(), py::arg("keepdim") = false,
               "Returns true if all elements in the tensor are true. Only "
               "for boolean tensors.");
    tensor.def("any", &Tensor::Any, py::call_guard<py::gil_scoped_release>(),
               py::arg("dim") = py::none(), py::arg("keepdim") = false,
               "Returns true if any elements in the tensor are true. Only "
               "for boolean tensors.");
            R"(Returns true if the two tensors are element-wise equal within a tolerance.

- If the ``device`` is not the same: throws exception.
- If the ``dtype`` is not the same: throws exception.
- If the ``shape`` is not the same: returns false.
- Returns true if: ``abs(self - other) <= (atol + rtol * abs(other))``.

The equation is not symmetrical, i.e. ``a.allclose(b)`` might not be the same
as ``b.allclose(a)``. Also see `Numpy's documentation <https://numpy.org/doc/stable/reference/generated/numpy.allclose.html>`__.
    m, "Tensor", "allclose",
    {{"other", "The other tensor to compare with."},
     {"rtol", "Relative tolerance."},
     {"atol", "Absolute tolerance."}});
            R"(Element-wise version of ``tensor.allclose``.

- If the ``device`` is not the same: throws exception.
- If the ``dtype`` is not the same: throws exception.
- If the ``shape`` is not the same: throws exception.
- For each element in the returned tensor:
  ``abs(self - other) <= (atol + rtol * abs(other))``.

The equation is not symmetrical, i.e. a.isclose(b) might not be the same
as b.isclose(a). Also see `Numpy's documentation <https://numpy.org/doc/stable/reference/generated/numpy.isclose.html>`__.

    A boolean tensor indicating where the tensor is close.)");
    m, "Tensor", "isclose",
    {{"other", "The other tensor to compare with."},
     {"rtol", "Relative tolerance."},
     {"atol", "Absolute tolerance."}});
               "Returns true iff the tensor is the other tensor. This "
               "means that the two tensors have the same underlying "
               "memory, device, dtype, shape, strides, etc.");
    tensor.def("__repr__",
               [](const Tensor& tensor) { return tensor.ToString(); });
    tensor.def("__str__",
               [](const Tensor& tensor) { return tensor.ToString(); });
            [](const Tensor& tensor) -> py::object {
                Dtype dtype = tensor.GetDtype();
                if (dtype == core::Float32)
                    return py::float_(tensor.Item<float>());
                if (dtype == core::Float64)
                    return py::float_(tensor.Item<double>());
                if (dtype == core::Int8)
                    return py::int_(tensor.Item<int8_t>());
                if (dtype == core::Int16)
                    return py::int_(tensor.Item<int16_t>());
                if (dtype == core::Int32)
                    return py::int_(tensor.Item<int32_t>());
                if (dtype == core::Int64)
                    return py::int_(tensor.Item<int64_t>());
                if (dtype == core::UInt8)
                    return py::int_(tensor.Item<uint8_t>());
                if (dtype == core::UInt16)
                    return py::int_(tensor.Item<uint16_t>());
                if (dtype == core::UInt32)
                    return py::int_(tensor.Item<uint32_t>());
                if (dtype == core::UInt64)
                    return py::int_(tensor.Item<uint64_t>());
                if (dtype == core::Bool)
                    return py::bool_(tensor.Item<bool>());
                utility::LogError(
                        "Tensor.item(): unsupported dtype to convert to "
            "Helper function to return the scalar value of a scalar tensor. "
            "The tensor must be 0-dimensional (i.e. have an empty shape).");