NPPImage.cpp
// ----------------------------------------------------------------------------
// -                      CloudViewer: www.cloudViewer.org                    -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2024 www.cloudViewer.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------

#include <Logging.h>
#include <npp.h>

#include <unordered_map>

#include "cloudViewer/core/CUDAUtils.h"
#include "cloudViewer/core/Dtype.h"
#include "cloudViewer/core/Tensor.h"
#include "cloudViewer/t/geometry/Image.h"

namespace cloudViewer {
namespace t {
namespace geometry {
namespace npp {

static NppStreamContext MakeNPPContext() {
    NppStreamContext context;
    context.hStream = core::cuda::GetStream();
    context.nCudaDeviceId = core::cuda::GetDevice();

    cudaDeviceProp device_prop;
    CLOUDVIEWER_CUDA_CHECK(
            cudaGetDeviceProperties(&device_prop, core::cuda::GetDevice()));

    context.nMultiProcessorCount = device_prop.multiProcessorCount;
    context.nMaxThreadsPerMultiProcessor =
            device_prop.maxThreadsPerMultiProcessor;
    context.nSharedMemPerBlock = device_prop.sharedMemPerBlock;

    int cc_major;
    CLOUDVIEWER_CUDA_CHECK(cudaDeviceGetAttribute(
            &cc_major, cudaDevAttrComputeCapabilityMajor,
            core::cuda::GetDevice()));
    context.nCudaDevAttrComputeCapabilityMajor = cc_major;

    int cc_minor;
    CLOUDVIEWER_CUDA_CHECK(cudaDeviceGetAttribute(
            &cc_minor, cudaDevAttrComputeCapabilityMinor,
            core::cuda::GetDevice()));
    context.nCudaDevAttrComputeCapabilityMinor = cc_minor;

// The NPP documentation incorrectly states that nStreamFlags becomes available
// in NPP 10.2 (CUDA 10.2). Instead, NPP 11.1 (CUDA 11.0) is the first release
// to expose this member variable.
#if NPP_VERSION >= 11100
    unsigned int stream_flags;
    CLOUDVIEWER_CUDA_CHECK(
            cudaStreamGetFlags(core::cuda::GetStream(), &stream_flags));
    context.nStreamFlags = stream_flags;
#endif

    return context;
}

void RGBToGray(const core::Tensor &src_im, core::Tensor &dst_im) {
    if (src_im.GetDevice() != dst_im.GetDevice()) {
        utility::LogError(
                "src_im and dst_im are not on the same device, got {} and {}.",
                src_im.GetDevice().ToString(), dst_im.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());

    NppiSize size_ROI = {static_cast<int>(dst_im.GetShape(1)),
                         static_cast<int>(dst_im.GetShape(0))};

    auto dtype = src_im.GetDtype();
    auto context = MakeNPPContext();
#define NPP_ARGS                                                  \
    static_cast<const npp_dtype *>(src_im.GetDataPtr()),          \
            src_im.GetStride(0) * dtype.ByteSize(),               \
            static_cast<npp_dtype *>(dst_im.GetDataPtr()),        \
            dst_im.GetStride(0) * dtype.ByteSize(), size_ROI, context
    if (dtype == core::UInt8) {
        using npp_dtype = Npp8u;
        nppiRGBToGray_8u_C3C1R_Ctx(NPP_ARGS);
    } else if (dtype == core::UInt16) {
        using npp_dtype = Npp16u;
        nppiRGBToGray_16u_C3C1R_Ctx(NPP_ARGS);
    } else if (dtype == core::Float32) {
        using npp_dtype = Npp32f;
        nppiRGBToGray_32f_C3C1R_Ctx(NPP_ARGS);
    } else {
        utility::LogError("npp::RGBToGray(): Unsupported dtype {}",
                          dtype.ToString());
    }
#undef NPP_ARGS
}

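// Illustrative sketch (not part of the library): the NPP_ARGS macro above
// packs the tensor arguments into the layout every nppi*_Ctx call expects.
// For dtype == core::UInt8 the call expands to approximately
//
//     nppiRGBToGray_8u_C3C1R_Ctx(
//             static_cast<const Npp8u *>(src_im.GetDataPtr()),
//             src_im.GetStride(0) * dtype.ByteSize(),
//             static_cast<Npp8u *>(dst_im.GetDataPtr()),
//             dst_im.GetStride(0) * dtype.ByteSize(), size_ROI, context);
//
// i.e. NPP receives raw device pointers, the row pitch in bytes
// (element stride of dim 0 times the element size), the ROI size in pixels,
// and the stream context created by MakeNPPContext(). The same pattern is
// reused, with extra parameters, by the other functions in this file.
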
void Resize(const cloudViewer::core::Tensor &src_im,
            cloudViewer::core::Tensor &dst_im,
            t::geometry::Image::InterpType interp_type) {
    if (src_im.GetDevice() != dst_im.GetDevice()) {
        utility::LogError(
                "src_im and dst_im are not on the same device, got {} and {}.",
                src_im.GetDevice().ToString(), dst_im.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());

    // Supported device and datatype checking happens in calling code and will
    // result in an exception if there are errors.
    NppiSize src_size = {static_cast<int>(src_im.GetShape(1)),
                         static_cast<int>(src_im.GetShape(0))};
    NppiRect src_roi = {0, 0, static_cast<int>(src_im.GetShape(1)),
                        static_cast<int>(src_im.GetShape(0))};

    // Create struct with ROI size.
    NppiSize dst_size = {static_cast<int>(dst_im.GetShape(1)),
                         static_cast<int>(dst_im.GetShape(0))};
    NppiRect dst_roi = {0, 0, static_cast<int>(dst_im.GetShape(1)),
                        static_cast<int>(dst_im.GetShape(0))};

    static const std::unordered_map<t::geometry::Image::InterpType, int>
            type_dict = {
                    {t::geometry::Image::InterpType::Nearest, NPPI_INTER_NN},
                    {t::geometry::Image::InterpType::Linear, NPPI_INTER_LINEAR},
                    {t::geometry::Image::InterpType::Cubic, NPPI_INTER_CUBIC},
                    {t::geometry::Image::InterpType::Lanczos,
                     NPPI_INTER_LANCZOS},
                    {t::geometry::Image::InterpType::Super, NPPI_INTER_SUPER},
            };
    auto it = type_dict.find(interp_type);
    if (it == type_dict.end()) {
        utility::LogError("Invalid interpolation type {}.",
                          static_cast<int>(interp_type));
    }

    auto dtype = src_im.GetDtype();
    auto context = MakeNPPContext();
#define NPP_ARGS                                                            \
    static_cast<const npp_dtype *>(src_im.GetDataPtr()),                    \
            src_im.GetStride(0) * dtype.ByteSize(), src_size, src_roi,      \
            static_cast<npp_dtype *>(dst_im.GetDataPtr()),                  \
            dst_im.GetStride(0) * dtype.ByteSize(), dst_size, dst_roi,      \
            it->second, context

    if (dtype == core::UInt8) {
        using npp_dtype = Npp8u;
        if (src_im.GetShape(2) == 1) {
            nppiResize_8u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiResize_8u_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiResize_8u_C4R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::UInt16) {
        using npp_dtype = Npp16u;
        if (src_im.GetShape(2) == 1) {
            nppiResize_16u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiResize_16u_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiResize_16u_C4R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::Float32) {
        using npp_dtype = Npp32f;
        if (src_im.GetShape(2) == 1) {
            nppiResize_32f_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiResize_32f_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiResize_32f_C4R_Ctx(NPP_ARGS);
        }
    } else {
        utility::LogError("npp::Resize(): Unsupported dtype {}",
                          dtype.ToString());
    }
#undef NPP_ARGS
}

void Dilate(const core::Tensor &src_im, core::Tensor &dst_im, int kernel_size) {
    if (src_im.GetDevice() != dst_im.GetDevice()) {
        utility::LogError(
                "src_im and dst_im are not on the same device, got {} and {}.",
                src_im.GetDevice().ToString(), dst_im.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());
    // Supported device and datatype checking happens in calling code and will
    // result in an exception if there are errors.

    // Create a kernel_size x kernel_size mask of ones, anchored at its center,
    // so the dilation takes the maximum over the full square neighborhood.
    core::Tensor mask =
            core::Tensor::Ones(core::SizeVector{kernel_size, kernel_size, 1},
                               core::UInt8, src_im.GetDevice());
    NppiSize mask_size = {kernel_size, kernel_size};

    NppiSize src_size = {static_cast<int>(src_im.GetShape(1)),
                         static_cast<int>(src_im.GetShape(0))};
    NppiPoint src_offset = {0, 0};

    // Create struct with ROI size.
    NppiSize size_ROI = {static_cast<int>(dst_im.GetShape(1)),
                         static_cast<int>(dst_im.GetShape(0))};
    NppiPoint anchor = {kernel_size / 2, kernel_size / 2};

    auto dtype = src_im.GetDtype();
    auto context = MakeNPPContext();
#define NPP_ARGS                                                            \
    static_cast<const npp_dtype *>(src_im.GetDataPtr()),                    \
            src_im.GetStride(0) * dtype.ByteSize(), src_size, src_offset,   \
            static_cast<npp_dtype *>(dst_im.GetDataPtr()),                  \
            dst_im.GetStride(0) * dtype.ByteSize(), size_ROI,               \
            static_cast<const uint8_t *>(mask.GetDataPtr()), mask_size,     \
            anchor, NPP_BORDER_REPLICATE, context
    if (dtype == core::Bool || dtype == core::UInt8) {
        using npp_dtype = Npp8u;
        if (src_im.GetShape(2) == 1) {
            nppiDilateBorder_8u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiDilateBorder_8u_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiDilateBorder_8u_C4R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::UInt16) {
        using npp_dtype = Npp16u;
        if (src_im.GetShape(2) == 1) {
            nppiDilateBorder_16u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiDilateBorder_16u_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiDilateBorder_16u_C4R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::Float32) {
        using npp_dtype = Npp32f;
        if (src_im.GetShape(2) == 1) {
            nppiDilateBorder_32f_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiDilateBorder_32f_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiDilateBorder_32f_C4R_Ctx(NPP_ARGS);
        }
    } else {
        utility::LogError("npp::Dilate(): Unsupported dtype {}",
                          dtype.ToString());
    }
#undef NPP_ARGS
}

void Filter(const cloudViewer::core::Tensor &src_im,
            cloudViewer::core::Tensor &dst_im,
            const cloudViewer::core::Tensor &kernel) {
    if (src_im.GetDevice() != dst_im.GetDevice()) {
        utility::LogError(
                "src_im and dst_im are not on the same device, got {} and {}.",
                src_im.GetDevice().ToString(), dst_im.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());

    // Supported device and datatype checking happens in calling code and will
    // result in an exception if there are errors.
    NppiSize src_size = {static_cast<int>(src_im.GetShape(1)),
                         static_cast<int>(src_im.GetShape(0))};
    NppiPoint src_offset = {0, 0};

    // Create struct with ROI size.
    NppiSize size_ROI = {static_cast<int>(dst_im.GetShape(1)),
                         static_cast<int>(dst_im.GetShape(0))};

    // Kernel size and anchor (the anchor is the kernel center).
    NppiSize kernel_size = {static_cast<int>(kernel.GetShape()[0]),
                            static_cast<int>(kernel.GetShape()[1])};
    NppiPoint anchor = {static_cast<int>(kernel.GetShape()[0] / 2),
                        static_cast<int>(kernel.GetShape()[1] / 2)};

    // Filter in NPP is a convolution, so we need to reverse all the entries.
    core::Tensor kernel_flipped = kernel.Reverse();
    const float *kernel_ptr =
            static_cast<const float *>(kernel_flipped.GetDataPtr());

    auto dtype = src_im.GetDtype();
    auto context = MakeNPPContext();
#define NPP_ARGS                                                            \
    static_cast<const npp_dtype *>(src_im.GetDataPtr()),                    \
            src_im.GetStride(0) * dtype.ByteSize(), src_size, src_offset,   \
            static_cast<npp_dtype *>(dst_im.GetDataPtr()),                  \
            dst_im.GetStride(0) * dtype.ByteSize(), size_ROI, kernel_ptr,   \
            kernel_size, anchor, NPP_BORDER_REPLICATE, context
    if (dtype == core::UInt8) {
        using npp_dtype = Npp8u;
        if (src_im.GetShape(2) == 1) {
            nppiFilterBorder32f_8u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiFilterBorder32f_8u_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiFilterBorder32f_8u_C4R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::UInt16) {
        using npp_dtype = Npp16u;
        if (src_im.GetShape(2) == 1) {
            nppiFilterBorder32f_16u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiFilterBorder32f_16u_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiFilterBorder32f_16u_C4R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::Float32) {
        using npp_dtype = Npp32f;
        if (src_im.GetShape(2) == 1) {
            nppiFilterBorder_32f_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiFilterBorder_32f_C3R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 4) {
            nppiFilterBorder_32f_C4R_Ctx(NPP_ARGS);
        }
    } else {
        utility::LogError("npp::Filter(): Unsupported dtype {}",
                          dtype.ToString());
    }
#undef NPP_ARGS
}

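// Illustrative note (not part of the library): the nppiFilterBorder* routines
// compute a convolution, i.e. they apply the kernel flipped relative to a
// plain correlation. Flipping the kernel once up front (kernel.Reverse()
// above, a 180-degree rotation for a 2D kernel) turns the convolution back
// into the correlation that callers expect. In 1D, with kernel k = [k0, k1,
// k2] anchored at the center:
//
//     correlation: out[i] = k0*in[i-1] + k1*in[i] + k2*in[i+1]
//     convolution: out[i] = k0*in[i+1] + k1*in[i] + k2*in[i-1]
//
// so passing the reversed kernel [k2, k1, k0] to the convolution reproduces
// the correlation. Symmetric kernels, such as the Gaussian built in
// FilterGaussian below, are unchanged by the flip.
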
void FilterBilateral(const core::Tensor &src_im,
                     core::Tensor &dst_im,
                     int kernel_size,
                     float value_sigma,
                     float distance_sigma) {
    if (src_im.GetDevice() != dst_im.GetDevice()) {
        utility::LogError(
                "src_im and dst_im are not on the same device, got {} and {}.",
                src_im.GetDevice().ToString(), dst_im.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());

    // Supported device and datatype checking happens in calling code and will
    // result in an exception if there are errors.
    NppiSize src_size = {static_cast<int>(src_im.GetShape(1)),
                         static_cast<int>(src_im.GetShape(0))};
    NppiPoint src_offset = {0, 0};

    // Create struct with ROI size.
    NppiSize size_ROI = {static_cast<int>(dst_im.GetShape(1)),
                         static_cast<int>(dst_im.GetShape(0))};

    auto dtype = src_im.GetDtype();
    auto context = MakeNPPContext();
#define NPP_ARGS                                                               \
    static_cast<const npp_dtype *>(src_im.GetDataPtr()),                       \
            src_im.GetStride(0) * dtype.ByteSize(), src_size, src_offset,      \
            static_cast<npp_dtype *>(dst_im.GetDataPtr()),                     \
            dst_im.GetStride(0) * dtype.ByteSize(), size_ROI, kernel_size / 2, \
            1, value_sigma * value_sigma, distance_sigma * distance_sigma,     \
            NPP_BORDER_REPLICATE, context
    if (dtype == core::UInt8) {
        using npp_dtype = Npp8u;
        if (src_im.GetShape(2) == 1) {
            nppiFilterBilateralGaussBorder_8u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiFilterBilateralGaussBorder_8u_C3R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::UInt16) {
        using npp_dtype = Npp16u;
        if (src_im.GetShape(2) == 1) {
            nppiFilterBilateralGaussBorder_16u_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiFilterBilateralGaussBorder_16u_C3R_Ctx(NPP_ARGS);
        }
    } else if (dtype == core::Float32) {
        using npp_dtype = Npp32f;
        if (src_im.GetShape(2) == 1) {
            nppiFilterBilateralGaussBorder_32f_C1R_Ctx(NPP_ARGS);
        } else if (src_im.GetShape(2) == 3) {
            nppiFilterBilateralGaussBorder_32f_C3R_Ctx(NPP_ARGS);
        }
    } else {
        utility::LogError("npp::FilterBilateral(): Unsupported dtype {}",
                          dtype.ToString());
    }
#undef NPP_ARGS
}

void FilterGaussian(const core::Tensor &src_im,
                    core::Tensor &dst_im,
                    int kernel_size,
                    float sigma) {
    if (src_im.GetDevice() != dst_im.GetDevice()) {
        utility::LogError(
                "src_im and dst_im are not on the same device, got {} and {}.",
                src_im.GetDevice().ToString(), dst_im.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());

    // Generate separable kernel weights given the sigma value.
    core::Tensor dist =
            core::Tensor::Arange(static_cast<float>(-kernel_size / 2),
                                 static_cast<float>(kernel_size / 2 + 1), 1.0f,
                                 core::Float32, src_im.GetDevice());
    core::Tensor logval = (dist * dist).Mul(-0.5f / (sigma * sigma));
    core::Tensor mask = logval.Exp();
    mask = mask / mask.Sum({0});
    mask = mask.View({kernel_size, 1});

    // Use the general Filter, as the NPP Gaussian/GaussianAdvanced filters
    // return inconsistent results. The outer product of the 1D weights gives
    // the 2D Gaussian kernel.
    core::Tensor kernel = mask.Matmul(mask.T()).Contiguous();
    return Filter(src_im, dst_im, kernel);
}

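// Illustrative sketch (not part of the library): FilterGaussian builds 1D
// weights w_i proportional to exp(-i^2 / (2 * sigma^2)), normalizes them to
// sum to 1, and takes the outer product so that kernel(r, c) = w_r * w_c.
// For kernel_size = 3 and sigma = 1 the 1D weights are roughly
// [0.274, 0.452, 0.274]. A minimal CPU-side equivalent of the weight
// computation (hypothetical helper, assuming <cmath> and <vector>):
//
//     std::vector<float> GaussianWeights(int kernel_size, float sigma) {
//         std::vector<float> w(kernel_size);
//         float sum = 0.f;
//         for (int i = 0; i < kernel_size; ++i) {
//             // Signed distance from the kernel center.
//             float d = static_cast<float>(i - kernel_size / 2);
//             w[i] = std::exp(-0.5f * d * d / (sigma * sigma));
//             sum += w[i];
//         }
//         for (float &x : w) x /= sum;  // Normalize to unit sum.
//         return w;
//     }
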
void FilterSobel(const core::Tensor &src_im,
                 core::Tensor &dst_im_dx,
                 core::Tensor &dst_im_dy,
                 int kernel_size) {
    if (src_im.GetDevice() != dst_im_dx.GetDevice() ||
        src_im.GetDevice() != dst_im_dy.GetDevice()) {
        utility::LogError(
                "src_im, dst_im_dx, and dst_im_dy are not on the same device, "
                "got {}, {} and {}.",
                src_im.GetDevice().ToString(), dst_im_dx.GetDevice().ToString(),
                dst_im_dy.GetDevice().ToString());
    }
    core::CUDAScopedDevice scoped_device(src_im.GetDevice());

    // Supported device and datatype checking happens in calling code and will
    // result in an exception if there are errors.
    NppiSize src_size = {static_cast<int>(src_im.GetShape(1)),
                         static_cast<int>(src_im.GetShape(0))};
    NppiPoint src_offset = {0, 0};

    // Create struct with ROI size.
    NppiSize size_ROI = {static_cast<int>(dst_im_dx.GetShape(1)),
                         static_cast<int>(dst_im_dx.GetShape(0))};
    auto dtype = src_im.GetDtype();
    static const std::unordered_map<int, NppiMaskSize> kernel_size_dict = {
            {3, NPP_MASK_SIZE_3_X_3},
            {5, NPP_MASK_SIZE_5_X_5},
    };
    auto it = kernel_size_dict.find(kernel_size);
    if (it == kernel_size_dict.end()) {
        utility::LogError("Unsupported size {} for NPP FilterSobel",
                          kernel_size);
    }

    // Counterintuitive naming: dy uses the Horizontal filter, dx the Vertical
    // one. Probable reason: dy detects horizontal edges, dx detects vertical
    // edges.
    auto context = MakeNPPContext();
#define NPP_ARGS_DX                                                         \
    static_cast<const npp_src_dtype *>(src_im.GetDataPtr()),                \
            src_im.GetStride(0) * dtype.ByteSize(), src_size, src_offset,   \
            static_cast<npp_dst_dtype *>(dst_im_dx.GetDataPtr()),           \
            dst_im_dx.GetStride(0) * dst_im_dx.GetDtype().ByteSize(),       \
            size_ROI, it->second, NPP_BORDER_REPLICATE, context
#define NPP_ARGS_DY                                                         \
    static_cast<const npp_src_dtype *>(src_im.GetDataPtr()),                \
            src_im.GetStride(0) * dtype.ByteSize(), src_size, src_offset,   \
            static_cast<npp_dst_dtype *>(dst_im_dy.GetDataPtr()),           \
            dst_im_dy.GetStride(0) * dst_im_dy.GetDtype().ByteSize(),       \
            size_ROI, it->second, NPP_BORDER_REPLICATE, context
    if (dtype == core::UInt8) {
        using npp_src_dtype = Npp8u;
        using npp_dst_dtype = Npp16s;
        nppiFilterSobelVertBorder_8u16s_C1R_Ctx(NPP_ARGS_DX);
        nppiFilterSobelHorizBorder_8u16s_C1R_Ctx(NPP_ARGS_DY);
    } else if (dtype == core::Float32) {
        using npp_src_dtype = Npp32f;
        using npp_dst_dtype = Npp32f;
        nppiFilterSobelVertMaskBorder_32f_C1R_Ctx(NPP_ARGS_DX);
        nppiFilterSobelHorizMaskBorder_32f_C1R_Ctx(NPP_ARGS_DY);
    } else {
        utility::LogError("npp::FilterSobel(): Unsupported dtype {}",
                          dtype.ToString());
    }
#undef NPP_ARGS_DX
#undef NPP_ARGS_DY

    // NPP uses a "right minus left" kernel in 10.2:
    // https://docs.nvidia.com/cuda/npp/group__image__filter__sobel__border.html
    // but it is observed to use "left minus right" in unit tests on 10.1.
    // We need to negate dx in-place for lower versions.
    // TODO: this part is subject to change given tests on more versions.
    int cuda_version;
    CLOUDVIEWER_CUDA_CHECK(cudaRuntimeGetVersion(&cuda_version));
    if (cuda_version < 10020) {
        dst_im_dx.Neg_();
    }
}
}  // namespace npp
}  // namespace geometry
}  // namespace t
}  // namespace cloudViewer