Add pinned memory to host memory stats (#43096)
* Add pinned memory to HostMemoryStats

* Add macro for WrapStatAllocator

* Fix CI errors
From00 committed Jun 1, 2022
1 parent 0e10f24 commit c4b7c48
Showing 4 changed files with 17 additions and 5 deletions.
8 changes: 7 additions & 1 deletion paddle/fluid/memory/allocation/allocator_facade.cc
@@ -931,7 +931,13 @@ class AllocatorFacadePrivate {
 
   void WrapStatAllocator() {
     for (auto& pair : allocators_) {
-      pair.second = std::make_shared<StatAllocator>(pair.second);
+      // Now memory stats is only supported for CPU and GPU
+      const platform::Place& place = pair.first;
+      if (platform::is_cpu_place(place) ||
+          platform::is_cuda_pinned_place(place) ||
+          platform::is_gpu_place(place)) {
+        pair.second = std::make_shared<StatAllocator>(pair.second);
+      }
     }
   }
 
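The hunk above narrows StatAllocator wrapping to the places that have memory stats (CPU, CUDA-pinned, GPU). The following is a minimal, self-contained sketch of that wrap-and-filter decorator pattern; the Allocator/StatTrackingAllocator types and the string-keyed place map are simplified stand-ins, not Paddle's real paddle::memory::allocation classes.

#include <cstddef>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct Allocator {
  virtual ~Allocator() = default;
  virtual void* Allocate(std::size_t size) = 0;
};

// Decorator: records the requested bytes, then forwards to the wrapped allocator.
struct StatTrackingAllocator : Allocator {
  explicit StatTrackingAllocator(std::shared_ptr<Allocator> underlying)
      : underlying_(std::move(underlying)) {}
  void* Allocate(std::size_t size) override {
    allocated_bytes_ += size;
    return underlying_->Allocate(size);
  }
  std::shared_ptr<Allocator> underlying_;
  std::size_t allocated_bytes_ = 0;
};

// Wrap only the places that support stats, mirroring the filtered loop above.
void WrapStatAllocators(
    std::map<std::string, std::shared_ptr<Allocator>>* allocators) {
  for (auto& pair : *allocators) {
    const std::string& place = pair.first;  // e.g. "cpu", "cuda_pinned", "gpu:0"
    if (place == "cpu" || place == "cuda_pinned" ||
        place.rfind("gpu", 0) == 0) {
      pair.second = std::make_shared<StatTrackingAllocator>(pair.second);
    }
  }
}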
4 changes: 3 additions & 1 deletion paddle/fluid/memory/allocation/pinned_allocator.cc
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 #include "paddle/fluid/memory/allocation/pinned_allocator.h"
-
+#include "paddle/fluid/memory/stats.h"
 namespace paddle {
 namespace memory {
 namespace allocation {
@@ -24,6 +24,7 @@ void CPUPinnedAllocator::FreeImpl(phi::Allocation *allocation) {
 #else
   PADDLE_ENFORCE_GPU_SUCCESS(cudaFreeHost(allocation->ptr()));
 #endif
+  HOST_MEMORY_STAT_UPDATE(Reserved, 0, -allocation->size());
   delete allocation;
 }
 phi::Allocation *CPUPinnedAllocator::AllocateImpl(size_t size) {
@@ -33,6 +34,7 @@ phi::Allocation *CPUPinnedAllocator::AllocateImpl(size_t size) {
 #else
   PADDLE_ENFORCE_GPU_SUCCESS(cudaHostAlloc(&ptr, size, cudaHostAllocPortable));
 #endif
+  HOST_MEMORY_STAT_UPDATE(Reserved, 0, size);
   return new Allocation(ptr, size, platform::CUDAPinnedPlace());
 }
 }  // namespace allocation
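Both hunks above update the host-side Reserved stat symmetrically: +size after a successful cudaHostAlloc and -size when the allocation is freed, with device id 0 because pinned pages live in host RAM. A rough sketch of the kind of bookkeeping such a macro can sit on (an illustration with assumed internals, not the actual stats.h implementation) is a thread-safe counter that takes signed deltas:

#include <atomic>
#include <cstdint>

class HostByteCounter {
 public:
  // Apply a signed delta, e.g. +size on alloc and -size on free.
  void Update(std::int64_t delta) {
    std::int64_t current =
        current_.fetch_add(delta, std::memory_order_relaxed) + delta;
    // Best-effort peak tracking; concurrent updates may race benignly.
    std::int64_t peak = peak_.load(std::memory_order_relaxed);
    while (current > peak &&
           !peak_.compare_exchange_weak(peak, current,
                                        std::memory_order_relaxed)) {
    }
  }
  std::int64_t Current() const { return current_.load(std::memory_order_relaxed); }
  std::int64_t Peak() const { return peak_.load(std::memory_order_relaxed); }

 private:
  std::atomic<std::int64_t> current_{0};
  std::atomic<std::int64_t> peak_{0};
};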
8 changes: 5 additions & 3 deletions paddle/fluid/memory/allocation/stat_allocator.h
@@ -45,11 +45,13 @@ class StatAllocator : public Allocator {
     phi::Allocator::AllocationPtr allocation =
         underlying_allocator_->Allocate(size);
 
-    if (platform::is_cpu_place(allocation->place())) {
-      HOST_MEMORY_STAT_UPDATE(Allocated, allocation->place().GetDeviceId(),
+    const platform::Place& place = allocation->place();
+    if (platform::is_cpu_place(place) ||
+        platform::is_cuda_pinned_place(place)) {
+      HOST_MEMORY_STAT_UPDATE(Allocated, place.GetDeviceId(),
                               allocation->size());
     } else {
-      DEVICE_MEMORY_STAT_UPDATE(Allocated, allocation->place().GetDeviceId(),
+      DEVICE_MEMORY_STAT_UPDATE(Allocated, place.GetDeviceId(),
                                 allocation->size());
     }
     return allocation.release();
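With this change, an allocation's place decides which stat domain it lands in, and CUDA-pinned allocations are now counted next to plain CPU memory because they physically occupy page-locked host RAM. A tiny stand-alone sketch of that dispatch rule, using a hypothetical PlaceKind enum in place of platform::Place:

#include <iostream>

enum class PlaceKind { kCPU, kCUDAPinned, kGPU };

// Host memory stats cover CPU and CUDA-pinned places; everything else is
// treated as device-resident memory.
const char* StatDomain(PlaceKind place) {
  return (place == PlaceKind::kCPU || place == PlaceKind::kCUDAPinned)
             ? "HOST_MEMORY_STAT"
             : "DEVICE_MEMORY_STAT";
}

int main() {
  std::cout << StatDomain(PlaceKind::kCUDAPinned) << "\n";  // HOST_MEMORY_STAT
  std::cout << StatDomain(PlaceKind::kGPU) << "\n";         // DEVICE_MEMORY_STAT
  return 0;
}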
2 changes: 2 additions & 0 deletions paddle/fluid/memory/detail/system_allocator.cc
@@ -211,6 +211,7 @@ void* CUDAPinnedAllocator::Alloc(size_t* index, size_t size) {
   if (result == gpuSuccess) {
     *index = 1;  // PINNED memory
     cuda_pinnd_alloc_size_ += size;
+    HOST_MEMORY_STAT_UPDATE(Reserved, 0, size);
     return p;
   } else {
     LOG(WARNING) << "cudaHostAlloc failed.";
@@ -255,6 +256,7 @@ void CUDAPinnedAllocator::Free(void* p, size_t size, size_t index) {
                          err));
   }
 #endif
+  HOST_MEMORY_STAT_UPDATE(Reserved, 0, -size);
 }
 
 bool CUDAPinnedAllocator::UseGpu() const { return false; }
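These two hunks give the legacy CUDAPinnedAllocator in system_allocator.cc the same Reserved accounting as pinned_allocator.cc. The toy example below (hypothetical globals, not Paddle's stat registry) shows why two different counters are touched at two different layers: Reserved tracks what the raw pinned allocator obtains from the driver, while Allocated is bumped one layer up, in the stat-wrapping allocator that hands memory to framework callers.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

static long long host_reserved = 0;   // bytes obtained from the driver
static long long host_allocated = 0;  // bytes handed out to callers

void* RawPinnedAlloc(std::size_t size) {
  host_reserved += static_cast<long long>(size);  // mirrors HOST_MEMORY_STAT_UPDATE(Reserved, 0, size)
  return std::malloc(size);                       // stand-in for cudaHostAlloc
}

void RawPinnedFree(void* p, std::size_t size) {
  host_reserved -= static_cast<long long>(size);  // mirrors HOST_MEMORY_STAT_UPDATE(Reserved, 0, -size)
  std::free(p);
}

void* StatTrackedAlloc(std::size_t size) {
  host_allocated += static_cast<long long>(size);  // mirrors the Allocated update in StatAllocator
  return RawPinnedAlloc(size);
}

int main() {
  void* p = StatTrackedAlloc(1 << 20);
  std::printf("reserved=%lld allocated=%lld\n", host_reserved, host_allocated);
  RawPinnedFree(p, 1 << 20);
  return 0;
}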
