ACloudViewer  3.9.4
A Modern Library for 3D Data Processing
MemoryManager.cpp
// ----------------------------------------------------------------------------
// -                    CloudViewer: www.cloudViewer.org                      -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2024 www.cloudViewer.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------

#include "cloudViewer/core/MemoryManager.h"

#include <Helper.h>
#include <Logging.h>

#include <numeric>
#include <unordered_map>

#include "cloudViewer/core/Blob.h"
#include "cloudViewer/core/MemoryManagerStatistic.h"

namespace cloudViewer {
namespace core {

void* MemoryManager::Malloc(size_t byte_size, const Device& device) {
    void* ptr = GetMemoryManagerDevice(device)->Malloc(byte_size, device);
    MemoryManagerStatistic::GetInstance().CountMalloc(ptr, byte_size, device);
    return ptr;
}

void MemoryManager::Free(void* ptr, const Device& device) {
    // Update statistics before freeing the memory. This ensures a consistent
    // order in case a subsequent Malloc requires the currently freed memory.
    MemoryManagerStatistic::GetInstance().CountFree(ptr, device);
    GetMemoryManagerDevice(device)->Free(ptr, device);
}

void MemoryManager::Memcpy(void* dst_ptr,
                           const Device& dst_device,
                           const void* src_ptr,
                           const Device& src_device,
                           size_t num_bytes) {
    // 0-element Tensor's data_ptr_ is nullptr
    if (num_bytes == 0) {
        return;
    } else if (src_ptr == nullptr || dst_ptr == nullptr) {
        utility::LogError("src_ptr and dst_ptr cannot be nullptr.");
    }

    std::shared_ptr<MemoryManagerDevice> device_mm;
    // CPU.
    if (src_device.IsCPU() && dst_device.IsCPU()) {
        device_mm = GetMemoryManagerDevice(src_device);
    }
    // CUDA.
    else if (src_device.IsCPU() && dst_device.IsCUDA()) {
        device_mm = GetMemoryManagerDevice(dst_device);
    } else if (src_device.IsCUDA() && dst_device.IsCPU()) {
        device_mm = GetMemoryManagerDevice(src_device);
    } else if (src_device.IsCUDA() && dst_device.IsCUDA()) {
        device_mm = GetMemoryManagerDevice(src_device);
    }
    // SYCL.
    else if (src_device.IsCPU() && dst_device.IsSYCL()) {
        device_mm = GetMemoryManagerDevice(dst_device);
    } else if (src_device.IsSYCL() && dst_device.IsCPU()) {
        device_mm = GetMemoryManagerDevice(src_device);
    } else if (src_device.IsSYCL() && dst_device.IsSYCL()) {
        device_mm = GetMemoryManagerDevice(src_device);
    }
    // Not supporting other combinations at the moment, e.g. SYCL->CUDA.
    else {
        utility::LogError("Unsupported device type from {} to {}.",
                          src_device.ToString(), dst_device.ToString());
    }

    device_mm->Memcpy(dst_ptr, dst_device, src_ptr, src_device, num_bytes);
}

void MemoryManager::MemcpyFromHost(void* dst_ptr,
                                   const Device& dst_device,
                                   const void* host_ptr,
                                   size_t num_bytes) {
    // Currently default host is CPU:0
    Memcpy(dst_ptr, dst_device, host_ptr, Device("CPU:0"), num_bytes);
}

void MemoryManager::MemcpyToHost(void* host_ptr,
                                 const void* src_ptr,
                                 const Device& src_device,
                                 size_t num_bytes) {
    // Currently default host is CPU:0
    Memcpy(host_ptr, Device("CPU:0"), src_ptr, src_device, num_bytes);
}

std::shared_ptr<MemoryManagerDevice> MemoryManager::GetMemoryManagerDevice(
        const Device& device) {
    static std::unordered_map<Device::DeviceType,
                              std::shared_ptr<MemoryManagerDevice>,
                              utility::hash_enum_class>
            map_device_type_to_memory_manager = {
                    {Device::DeviceType::CPU,
                     std::make_shared<MemoryManagerCPU>()},
#ifdef BUILD_CUDA_MODULE
#ifdef ENABLE_CACHED_CUDA_MANAGER
                    {Device::DeviceType::CUDA,
                     std::make_shared<MemoryManagerCached>(
                             std::make_shared<MemoryManagerCUDA>())},
#else
                    {Device::DeviceType::CUDA,
                     std::make_shared<MemoryManagerCUDA>()},
#endif
#endif
#ifdef BUILD_SYCL_MODULE
                    {Device::DeviceType::SYCL,
                     std::make_shared<MemoryManagerSYCL>()},
#endif
            };

    if (map_device_type_to_memory_manager.find(device.GetType()) ==
        map_device_type_to_memory_manager.end()) {
        utility::LogError(
                "Unsupported device \"{}\". Set BUILD_CUDA_MODULE=ON to "
                "compile for CUDA support and BUILD_SYCL_MODULE=ON to compile "
                "for SYCL support.",
                device.ToString());
    }
    return map_device_type_to_memory_manager.at(device.GetType());
}

}  // namespace core
}  // namespace cloudViewer
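
For orientation, below is a minimal usage sketch of the static MemoryManager API defined in this file. It is not part of MemoryManager.cpp: the include paths, the "CUDA:0" device, and the buffer size are illustrative assumptions, and the CUDA device is only available in a build with BUILD_CUDA_MODULE=ON. All calls route through GetMemoryManagerDevice, so allocation statistics (and the cached CUDA manager, if enabled) apply uniformly.

#include <cstddef>
#include <cstring>
#include <vector>

#include "cloudViewer/core/Device.h"        // Assumed header locations.
#include "cloudViewer/core/MemoryManager.h"

using namespace cloudViewer::core;

int main() {
    const std::size_t num_bytes = 1024;  // Illustrative buffer size.
    const Device host("CPU:0");
    const Device gpu("CUDA:0");          // Requires BUILD_CUDA_MODULE=ON.

    // Allocate on each device through the dispatching MemoryManager.
    void* host_ptr = MemoryManager::Malloc(num_bytes, host);
    void* gpu_ptr = MemoryManager::Malloc(num_bytes, gpu);

    // Fill the host buffer, upload it to the device, then read it back.
    std::vector<unsigned char> data(num_bytes, 0x42);
    std::memcpy(host_ptr, data.data(), num_bytes);
    MemoryManager::MemcpyFromHost(gpu_ptr, gpu, host_ptr, num_bytes);
    MemoryManager::MemcpyToHost(host_ptr, gpu_ptr, gpu, num_bytes);

    // Every Malloc is paired with a Free on the same device.
    MemoryManager::Free(gpu_ptr, gpu);
    MemoryManager::Free(host_ptr, host);
    return 0;
}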