Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

ENH: Replace pool allocator with buddy allocator in Paddle #3030

Closed
wants to merge 2 commits into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion paddle/function/ConvOp.h
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ class ConvFunctionBase : public FunctionBase {

template <DeviceType Device>
void resizeBuffer(size_t newSize) {
if (!memory_ || newSize * sizeof(real) > memory_->getAllocSize()) {
if (!memory_ || newSize * sizeof(real) > memory_->getSize()) {
if (Device == DEVICE_TYPE_CPU) {
memory_ = std::make_shared<CpuMemoryHandle>(newSize * sizeof(real));
} else {
Expand Down
3 changes: 2 additions & 1 deletion paddle/gserver/layers/ConvBaseProjection.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "ConvBaseProjection.h"
#include "paddle/math/MemoryHandle.h"
#include "paddle/utils/Stat.h"

namespace paddle {
Expand Down Expand Up @@ -179,7 +180,7 @@ void *ConvBaseProjection::getSpaceBytes(size_t size) {

int devId = hl_get_device();
MemoryHandle **localMem = &(convMem[devId]);
if (NULL == *localMem || size > (*localMem)->getAllocSize()) {
if (NULL == *localMem || size > (*localMem)->getSize()) {
*localMem = new GpuMemoryHandle(size);
}
return (*localMem)->getBuf();
Expand Down
131 changes: 0 additions & 131 deletions paddle/math/Allocator.h

This file was deleted.

2 changes: 1 addition & 1 deletion paddle/math/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ else()
cuda_add_library(paddle_math ${MATH_SOURCES})
endif()


target_link_libraries(paddle_math place paddle_memory)

add_style_check_target(paddle_math ${MATH_SOURCES})
add_style_check_target(paddle_math ${MATH_HEADERS})
Expand Down
4 changes: 2 additions & 2 deletions paddle/math/Matrix.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -251,7 +251,7 @@ void GpuMatrix::resetOne() {
void GpuMatrix::resize(size_t newHeight, size_t newWidth) {
size_t newSize = newHeight * newWidth;
if (NULL == memoryHandle_.get() ||
newSize * sizeof(real) > memoryHandle_->getAllocSize()) {
newSize * sizeof(real) > memoryHandle_->getSize()) {
memoryHandle_ = std::make_shared<GpuMemoryHandle>(newSize * sizeof(real));
data_ = reinterpret_cast<real*>(memoryHandle_->getBuf());
}
Expand Down Expand Up @@ -1638,7 +1638,7 @@ MatrixPtr CpuMatrix::clone(size_t height, size_t width, bool useGpu) {
void CpuMatrix::resize(size_t newHeight, size_t newWidth) {
size_t newSize = newHeight * newWidth;
if (NULL == memoryHandle_.get() ||
newSize * sizeof(real) > memoryHandle_->getAllocSize()) {
newSize * sizeof(real) > memoryHandle_->getSize()) {
memoryHandle_ = std::make_shared<CpuMemoryHandle>(newSize * sizeof(real));
data_ = reinterpret_cast<real*>(memoryHandle_->getBuf());
}
Expand Down
46 changes: 24 additions & 22 deletions paddle/math/MemoryHandle.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -13,44 +13,46 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "MemoryHandle.h"

#include "glog/logging.h"

#include <cmath>
#include "Storage.h"

#include "paddle/memory/memory.h"
#include "paddle/platform/place.h"

namespace paddle {

/**
 * @brief Record the requested size for a memory block.
 *
 * Allocation itself is performed by the derived classes
 * (CpuMemoryHandle / GpuMemoryHandle) through paddle::memory, which does
 * its own size management, so no rounding-up of the requested size is
 * done here anymore.
 */
MemoryHandle::MemoryHandle(size_t size) : size_(size), buf_(nullptr) {}

#ifndef PADDLE_ONLY_CPU

/// Allocate `size` bytes of device memory on the currently active GPU.
GpuMemoryHandle::GpuMemoryHandle(size_t size) : MemoryHandle(size) {
  CHECK(size != 0) << " allocate 0 bytes";
  // Remember which device the buffer lives on so the destructor can
  // free it through the same GPUPlace.
  deviceId_ = paddle::platform::GetCurrentDeviceId();
  paddle::platform::GPUPlace gpu_place(deviceId_);
  buf_ = paddle::memory::Alloc(gpu_place, size);
}

/// Release the device buffer on the same GPU it was allocated on.
GpuMemoryHandle::~GpuMemoryHandle() {
  paddle::platform::GPUPlace gpu_place(deviceId_);
  paddle::memory::Free(gpu_place, buf_);
}
#endif // PADDLE_ONLY_CPU

/// Allocate `size` bytes of host (CPU) memory.
CpuMemoryHandle::CpuMemoryHandle(size_t size) : MemoryHandle(size) {
  CHECK(size != 0) << " allocate 0 bytes";
  // BUG FIX: this must be CPUPlace, not GPUPlace. The destructor frees
  // through a CPUPlace, so allocating through a GPUPlace would hand a
  // device pointer to the host-side Free (mismatched allocator).
  paddle::platform::CPUPlace cpu_place;
  buf_ = paddle::memory::Alloc(cpu_place, size);
}

CpuMemoryHandle::~CpuMemoryHandle() { allocator_->free(buf_, allocSize_); }
// Release the host buffer through the same CPUPlace allocator that
// produced it in the constructor.
CpuMemoryHandle::~CpuMemoryHandle() {
  paddle::platform::CPUPlace cpu_place;
  paddle::memory::Free(cpu_place, buf_);
}

} // namespace paddle
8 changes: 2 additions & 6 deletions paddle/math/MemoryHandle.h
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ limitations under the License. */
#pragma once

#include <memory>
#include "PoolAllocator.h"

namespace paddle {

Expand All @@ -27,13 +26,10 @@ class MemoryHandle {
public:
void* getBuf() const { return buf_; }
size_t getSize() const { return size_; }
size_t getAllocSize() const { return allocSize_; }

protected:
PoolAllocator* allocator_;
size_t size_; // the requested size
size_t allocSize_; // the allocated size
int deviceId_; // the device id of memory if gpu memory
size_t size_; // the requested size
int deviceId_; // the device id of memory if gpu memory
void* buf_;
};

Expand Down
83 changes: 0 additions & 83 deletions paddle/math/PoolAllocator.cpp

This file was deleted.

Loading