// ----------------------------------------------------------------------------
// -                        CloudViewer: www.cloudViewer.org                  -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2024 www.cloudViewer.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------

#include "core/ShapeUtil.h"

#include <numeric>

#include "core/SizeVector.h"
#include <Logging.h>

namespace cloudViewer {
namespace core {
namespace shape_util {

// Expands `shape` to `ndims` dimensions by prepending dimensions of size 1.
static SizeVector ExpandFrontDims(const SizeVector& shape, int64_t ndims) {
    if (ndims < static_cast<int64_t>(shape.size())) {
        utility::LogError("Cannot expand a shape with ndims {} to ndims {}.",
                          shape.size(), ndims);
    }
    SizeVector expanded_shape(ndims, 1);
    std::copy(shape.begin(), shape.end(),
              expanded_shape.begin() + ndims - shape.size());
    return expanded_shape;
}
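
// Example (illustrative):
//   ExpandFrontDims({2, 3}, 4) -> {1, 1, 2, 3}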

bool IsCompatibleBroadcastShape(const SizeVector& l_shape,
                                const SizeVector& r_shape) {
    int64_t l_ndims = l_shape.size();
    int64_t r_ndims = r_shape.size();

    if (l_ndims == 0 || r_ndims == 0) {
        return true;
    }

    // Only need to check the last `shorter_ndims` dims, from right to left.
    // E.g. LHS: [100, 200, 2, 3, 4]
    //      RHS:           [2, 1, 4] <- only the last 3 dims need checking.
    int64_t shorter_ndims = std::min(l_ndims, r_ndims);
    for (int64_t i = 0; i < shorter_ndims; ++i) {
        int64_t l_dim = l_shape[l_ndims - 1 - i];
        int64_t r_dim = r_shape[r_ndims - 1 - i];
        if (!(l_dim == r_dim || l_dim == 1 || r_dim == 1)) {
            return false;
        }
    }
    return true;
}
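
// Examples (illustrative): dims are compatible when equal or when either is 1.
//   IsCompatibleBroadcastShape({100, 200, 2, 3, 4}, {2, 1, 4}) -> true
//   IsCompatibleBroadcastShape({2, 3}, {4})                    -> false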

SizeVector BroadcastedShape(const SizeVector& l_shape,
                            const SizeVector& r_shape) {
    if (!IsCompatibleBroadcastShape(l_shape, r_shape)) {
        utility::LogError("Shape {} and {} are not broadcast-compatible.",
                          l_shape, r_shape);
    }

    int64_t l_ndims = l_shape.size();
    int64_t r_ndims = r_shape.size();
    int64_t out_ndims = std::max(l_ndims, r_ndims);

    // Fill omitted dimensions with shape 1.
    SizeVector l_shape_filled = ExpandFrontDims(l_shape, out_ndims);
    SizeVector r_shape_filled = ExpandFrontDims(r_shape, out_ndims);

    SizeVector broadcasted_shape(out_ndims);
    for (int64_t i = 0; i < out_ndims; i++) {
        if (l_shape_filled[i] == 1) {
            broadcasted_shape[i] = r_shape_filled[i];
        } else if (r_shape_filled[i] == 1) {
            broadcasted_shape[i] = l_shape_filled[i];
        } else if (l_shape_filled[i] == r_shape_filled[i]) {
            broadcasted_shape[i] = l_shape_filled[i];
        } else {
            utility::LogError(
                    "Internal error: dimension size {} is not compatible with "
                    "{}; this error should have been caught by "
                    "IsCompatibleBroadcastShape already.",
                    l_shape_filled[i], r_shape_filled[i]);
        }
    }
    return broadcasted_shape;
}
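
// Example (illustrative): the shorter shape is right-aligned and size-1 dims
// are expanded, so
//   BroadcastedShape({2, 1, 4}, {3, 1}) -> {2, 3, 4}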

bool CanBeBrocastedToShape(const SizeVector& src_shape,
                           const SizeVector& dst_shape) {
    if (IsCompatibleBroadcastShape(src_shape, dst_shape)) {
        return BroadcastedShape(src_shape, dst_shape) == dst_shape;
    } else {
        return false;
    }
}
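
// Examples (illustrative): broadcasting must reproduce dst_shape exactly.
//   CanBeBrocastedToShape({1, 3}, {2, 3}) -> true
//   CanBeBrocastedToShape({2, 3}, {2, 1}) -> false  (broadcasts to {2, 3})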

SizeVector ReductionShape(const SizeVector& src_shape,
                          const SizeVector& dims,
                          bool keepdim) {
    int64_t src_ndims = src_shape.size();
    SizeVector out_shape = src_shape;

    // WrapDim throws an exception if dim is out-of-range.
    if (keepdim) {
        for (const int64_t& dim : dims) {
            out_shape[WrapDim(dim, src_ndims)] = 1;
        }
    } else {
        // If dim i is reduced, dims_mask[i] == true.
        std::vector<bool> dims_mask(src_ndims, false);
        for (const int64_t& dim : dims) {
            if (dims_mask[WrapDim(dim, src_ndims)]) {
                utility::LogError("Repeated reduction dimension {}.", dim);
            }
            dims_mask[WrapDim(dim, src_ndims)] = true;
        }
        // Compact the non-reduced dimensions to the front.
        int64_t to_fill = 0;
        for (int64_t i = 0; i < src_ndims; ++i) {
            if (!dims_mask[i]) {
                out_shape[to_fill] = out_shape[i];
                to_fill++;
            }
        }
        out_shape.resize(to_fill);
    }
    return out_shape;
}
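
// Examples (illustrative):
//   ReductionShape({2, 3, 4}, {0, 2}, /*keepdim=*/false) -> {3}
//   ReductionShape({2, 3, 4}, {0, 2}, /*keepdim=*/true)  -> {1, 3, 1}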

int64_t WrapDim(int64_t dim, int64_t max_dim, bool inclusive) {
    if (max_dim <= 0) {
        utility::LogError("max_dim {} must be > 0.", max_dim);
    }
    int64_t min = -max_dim;
    int64_t max = inclusive ? max_dim : max_dim - 1;

    if (dim < min || dim > max) {
        utility::LogError(
                "Index out-of-range: dim == {}, but it must satisfy {} <= dim "
                "<= {}.",
                dim, min, max);
    }
    if (dim < 0) {
        dim += max_dim;
    }
    return dim;
}
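
// Examples (illustrative):
//   WrapDim(-1, 3, /*inclusive=*/false) -> 2  (negative dims count from back)
//   WrapDim(3, 3, /*inclusive=*/true)   -> 3  (dim == max_dim is allowed)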

SizeVector InferShape(SizeVector shape, int64_t num_elements) {
    SizeVector inferred_shape = shape;
    int64_t new_size = 1;
    bool has_inferred_dim = false;
    int64_t inferred_dim = 0;
    for (int64_t dim = 0, ndim = shape.size(); dim != ndim; dim++) {
        if (shape[dim] == -1) {
            if (has_inferred_dim) {
                utility::LogError(
                        "Proposed shape {}, but at most one dimension can be "
                        "-1 (inferred).",
                        shape.ToString());
            }
            inferred_dim = dim;
            has_inferred_dim = true;
        } else if (shape[dim] >= 0) {
            new_size *= shape[dim];
        } else {
            utility::LogError("Invalid shape dimension {}.", shape[dim]);
        }
    }

    if (num_elements == new_size ||
        (has_inferred_dim && new_size > 0 && num_elements % new_size == 0)) {
        if (has_inferred_dim) {
            // We have a degree of freedom here to select the dimension size;
            // follow NumPy semantics and just bail. However, a nice error
            // message is needed because users often use `view` as a way to
            // flatten & unflatten dimensions and will otherwise be confused
            // why
            //   empty_tensor.view(0, 0)
            // works yet
            //   empty_tensor.view(-1, 0)
            // doesn't.
            if (new_size == 0) {
                utility::LogError(
                        "Cannot reshape tensor of 0 elements into shape {}, "
                        "because the unspecified dimension size -1 can be any "
                        "value and is ambiguous.",
                        shape.ToString());
            }
            inferred_shape[inferred_dim] = num_elements / new_size;
        }
        return inferred_shape;
    }

    utility::LogError("Shape {} is invalid for {} elements.", shape,
                      num_elements);
}
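
// Example (illustrative): a single -1 dimension is inferred from the total
// element count, so
//   InferShape({-1, 4}, 12) -> {3, 4}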

SizeVector Concat(const SizeVector& l_shape, const SizeVector& r_shape) {
    SizeVector dst_shape = l_shape;
    dst_shape.insert(dst_shape.end(), r_shape.begin(), r_shape.end());
    return dst_shape;
}
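
// Example (illustrative):
//   Concat({2, 3}, {4, 5}) -> {2, 3, 4, 5}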

SizeVector Iota(int64_t n) {
    if (n < 0) {
        utility::LogError("Iota(n) requires n >= 0, but n == {}.", n);
    }
    SizeVector sv(n);
    std::iota(sv.begin(), sv.end(), 0);
    return sv;
}
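
// Example (illustrative):
//   Iota(3) -> {0, 1, 2}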

SizeVector DefaultStrides(const SizeVector& shape) {
    SizeVector strides(shape.size());
    int64_t stride_size = 1;
    for (int64_t i = shape.size(); i > 0; --i) {
        strides[i - 1] = stride_size;
        // Handle 0-sized dimensions by treating them as size 1.
        stride_size *= std::max<int64_t>(shape[i - 1], 1);
    }
    return strides;
}
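
// Examples (illustrative): row-major (C-contiguous) strides, with 0-sized
// dimensions treated as size 1.
//   DefaultStrides({2, 3, 4}) -> {12, 4, 1}
//   DefaultStrides({2, 0, 4}) -> {4, 4, 1}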

// Computes the strides needed to view a tensor of `old_shape` with
// `old_strides` as `new_shape` without copying. Returns {false, {}} if no
// such striding exists.
std::pair<bool, SizeVector> Restride(const SizeVector& old_shape,
                                     const SizeVector& old_strides,
                                     const SizeVector& new_shape) {
    if (old_shape.empty()) {
        return std::make_pair(true, SizeVector(new_shape.size(), 1));
    }

    // NOTE: Stride is arbitrary in the numel() == 0 case. To match NumPy
    // behavior we copy the strides if the size matches, otherwise we use the
    // stride as if it were computed via resize. This could perhaps be combined
    // with the below code, but the complexity didn't seem worth it.
    int64_t numel = old_shape.NumElements();
    if (numel == 0 && old_shape == new_shape) {
        return std::make_pair(true, old_strides);
    }

    SizeVector new_strides(new_shape.size());
    if (numel == 0) {
        for (int64_t view_d = new_shape.size() - 1; view_d >= 0; view_d--) {
            if (view_d == static_cast<int64_t>(new_shape.size() - 1)) {
                new_strides[view_d] = 1;
            } else {
                new_strides[view_d] =
                        std::max<int64_t>(new_shape[view_d + 1], 1) *
                        new_strides[view_d + 1];
            }
        }
        return std::make_pair(true, new_strides);
    }

    int64_t view_d = new_shape.size() - 1;
    // Stride for each subspace in the chunk.
    int64_t chunk_base_stride = old_strides.back();
    // Numel in current chunk.
    int64_t tensor_numel = 1;
    int64_t view_numel = 1;
    for (int64_t tensor_d = old_shape.size() - 1; tensor_d >= 0; tensor_d--) {
        tensor_numel *= old_shape[tensor_d];
        // If at the end of a tensor-size chunk, check the view.
        if ((tensor_d == 0) ||
            (old_shape[tensor_d - 1] != 1 &&
             old_strides[tensor_d - 1] != tensor_numel * chunk_base_stride)) {
            while (view_d >= 0 &&
                   (view_numel < tensor_numel || new_shape[view_d] == 1)) {
                new_strides[view_d] = view_numel * chunk_base_stride;
                view_numel *= new_shape[view_d];
                view_d--;
            }
            if (view_numel != tensor_numel) {
                return std::make_pair(false, SizeVector());
            }
            if (tensor_d > 0) {
                chunk_base_stride = old_strides[tensor_d - 1];
                tensor_numel = 1;
                view_numel = 1;
            }
        }
    }
    if (view_d != -1) {
        return std::make_pair(false, SizeVector());
    }
    return std::make_pair(true, new_strides);
}
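
// Examples (illustrative):
//   Restride({2, 6}, {6, 1}, {3, 4}) -> {true, {4, 1}}  (contiguous reshape)
//   Restride({3, 2}, {1, 3}, {6})    -> {false, {}}     (transposed; needs copy)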

}  // namespace shape_util
}  // namespace core
}  // namespace cloudViewer